diff --git a/go.mod b/go.mod index d0c1afdef..d6271bcb9 100644 --- a/go.mod +++ b/go.mod @@ -6,24 +6,27 @@ require ( github.com/gophercloud/gophercloud v0.24.0 github.com/gophercloud/utils v0.0.0-20220307143606-8e7800759d16 github.com/onsi/ginkgo v4.7.0-origin.0+incompatible - github.com/onsi/gomega v1.10.1 - github.com/openshift/api v0.0.0-20220525145417-ee5b62754c68 + github.com/onsi/gomega v1.18.1 + github.com/openshift/api v0.0.0-20220531073726-6c4f186339a7 github.com/openshift/client-go v0.0.0-20220525160904-9e1acff93e4a + github.com/openshift/cluster-api-actuator-pkg v0.0.0-20220726100012-d4179aa1372d github.com/openshift/library-go v0.0.0-20220525173854-9b950a41acdc + github.com/openshift/machine-api-provider-openstack v0.0.0-20220707045855-23e8c4523dac github.com/openshift/origin v1.5.0-alpha.3.0.20220815155142-e6728e5e1d97 github.com/spf13/cobra v1.4.0 github.com/spf13/pflag v1.0.5 github.com/stretchr/objx v0.2.0 gopkg.in/ini.v1 v1.62.0 gopkg.in/yaml.v2 v2.4.0 - k8s.io/api v0.24.0 - k8s.io/apimachinery v0.24.0 - k8s.io/cli-runtime v0.24.0 - k8s.io/client-go v0.24.0 - k8s.io/component-base v0.24.0 + k8s.io/api v0.24.2 + k8s.io/apimachinery v0.24.2 + k8s.io/cli-runtime v0.24.1 + k8s.io/client-go v12.0.0+incompatible + k8s.io/component-base v0.24.2 k8s.io/klog/v2 v2.60.1 - k8s.io/kubectl v0.24.0 + k8s.io/kubectl v0.24.1 k8s.io/kubernetes v1.24.0 + sigs.k8s.io/controller-runtime v0.12.3 ) require ( @@ -41,7 +44,7 @@ require ( github.com/Azure/go-autorest/tracing v0.6.0 // indirect github.com/GoogleCloudPlatform/k8s-cloud-provider v1.16.1-0.20210702024009-ea6160c1d0e3 // indirect github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab // indirect - github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd // indirect + github.com/MakeNowJust/heredoc v1.0.0 // indirect github.com/Microsoft/go-winio v0.4.17 // indirect github.com/Microsoft/hcsshim v0.8.22 // indirect github.com/NYTimes/gziphandler v1.1.1 // indirect @@ -77,12 +80,12 @@ require ( github.com/emicklei/go-restful v2.9.5+incompatible // indirect github.com/emirpasic/gods v1.12.0 // indirect github.com/euank/go-kmsg-parser v2.0.0+incompatible // indirect - github.com/evanphx/json-patch v4.12.0+incompatible // indirect + github.com/evanphx/json-patch v5.6.0+incompatible // indirect github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect github.com/fatih/camelcase v1.0.0 // indirect github.com/felixge/httpsnoop v1.0.1 // indirect github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect - github.com/fsnotify/fsnotify v1.4.9 // indirect + github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/fsouza/go-dockerclient v1.7.1 // indirect github.com/ghodss/yaml v1.0.0 // indirect github.com/go-errors/errors v1.0.1 // indirect @@ -104,14 +107,14 @@ require ( github.com/google/go-cmp v0.5.8 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect - github.com/google/uuid v1.1.2 // indirect + github.com/google/uuid v1.2.0 // indirect github.com/googleapis/gax-go/v2 v2.0.5 // indirect github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect github.com/heketi/heketi v10.3.0+incompatible // indirect github.com/hpcloud/tail v1.0.0 // indirect - github.com/imdario/mergo v0.3.7 // indirect + github.com/imdario/mergo v0.3.12 // indirect github.com/inconshreveable/mousetrap v1.0.0 // 
indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect @@ -147,6 +150,8 @@ require ( github.com/opencontainers/runc v1.1.1 // indirect github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 // indirect github.com/opencontainers/selinux v1.10.0 // indirect + github.com/openshift/cluster-autoscaler-operator v0.0.1-0.20220327153331-e850c080b4f5 // indirect + github.com/openshift/machine-api-operator v0.2.1-0.20220608065814-f76a8f3ab734 // indirect github.com/pborman/uuid v1.2.0 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pkg/errors v0.9.1 // indirect @@ -170,7 +175,7 @@ require ( github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect github.com/vishvananda/netlink v1.1.0 // indirect github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df // indirect - github.com/vmware/govmomi v0.20.3 // indirect + github.com/vmware/govmomi v0.27.4 // indirect github.com/xanzy/ssh-agent v0.2.1 // indirect github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca // indirect go.etcd.io/etcd/api/v3 v3.5.1 // indirect @@ -189,9 +194,9 @@ require ( go.opentelemetry.io/otel/trace v0.20.0 // indirect go.opentelemetry.io/proto/otlp v0.7.0 // indirect go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 // indirect - go.uber.org/atomic v1.7.0 // indirect + go.uber.org/atomic v1.8.0 // indirect go.uber.org/multierr v1.6.0 // indirect - go.uber.org/zap v1.19.0 // indirect + go.uber.org/zap v1.19.1 // indirect golang.org/x/crypto v0.0.0-20220214200702-86341886e292 // indirect golang.org/x/net v0.0.0-20220526153639-5463443f8c37 // indirect golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5 // indirect @@ -200,14 +205,14 @@ require ( golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect golang.org/x/text v0.3.7 // indirect golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect - golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect + gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect google.golang.org/api v0.46.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368 // indirect google.golang.org/grpc v1.40.0 // indirect google.golang.org/protobuf v1.27.1 // indirect gopkg.in/fsnotify.v1 v1.4.7 // indirect - gopkg.in/gcfg.v1 v1.2.0 // indirect + gopkg.in/gcfg.v1 v1.2.3 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect gopkg.in/square/go-jose.v2 v2.2.2 // indirect @@ -216,13 +221,14 @@ require ( gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect - k8s.io/apiextensions-apiserver v0.24.0 // indirect - k8s.io/apiserver v0.24.0 // indirect + k8s.io/apiextensions-apiserver v0.24.2 // indirect + k8s.io/apiserver v0.24.1 // indirect k8s.io/cloud-provider v0.0.0 // indirect k8s.io/component-helpers v0.0.0 // indirect k8s.io/cri-api v0.0.0 // indirect k8s.io/csi-translation-lib v0.0.0 // indirect - k8s.io/kube-aggregator v0.24.0 // indirect + k8s.io/klog v1.0.0 // indirect + k8s.io/kube-aggregator v0.24.1 // indirect k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 // indirect k8s.io/kube-proxy v0.0.0 // indirect k8s.io/kube-scheduler v0.0.0 // indirect @@ -239,7 +245,7 @@ require ( sigs.k8s.io/kustomize/api v0.11.4 // indirect sigs.k8s.io/kustomize/kyaml v0.13.6 // indirect 
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect - sigs.k8s.io/yaml v1.2.0 // indirect + sigs.k8s.io/yaml v1.3.0 // indirect ) replace ( diff --git a/go.sum b/go.sum index 14f13a121..49448051c 100644 --- a/go.sum +++ b/go.sum @@ -70,8 +70,9 @@ github.com/GoogleCloudPlatform/k8s-cloud-provider v1.16.1-0.20210702024009-ea616 github.com/GoogleCloudPlatform/k8s-cloud-provider v1.16.1-0.20210702024009-ea6160c1d0e3/go.mod h1:8XasY4ymP2V/tn2OOV9ZadmiTE1FIB/h3W+yNlPttKw= github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab h1:UKkYhof1njT1/xq4SEg5z+VpTgjmNeHwPGRQl7takDI= github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab/go.mod h1:3VYc5hodBMJ5+l/7J4xAyMeuM2PNuepvHlGs8yilUCA= -github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd h1:sjQovDkwrZp8u+gxLtPgKGjk5hCxuy2hrRejBTA9xFU= github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= +github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= +github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= github.com/Microsoft/go-winio v0.4.15/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= @@ -89,6 +90,7 @@ github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbt github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/RangelReale/osincli v0.0.0-20160924135400-fababb0555f2/go.mod h1:XyjUkMA8GN+tOOPXvnbi3XuRxWFvTJntqvTFnjmhzbk= +github.com/a8m/tree v0.0.0-20210115125333-10a5fd5b637d/go.mod h1:FSdwKX97koS5efgm8WevNf7XS3PqtyFkKDDXrz778cg= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs= github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= @@ -223,6 +225,7 @@ github.com/dave/rebecca v0.9.1/go.mod h1:N6XYdMD/OKw3lkF3ywh8Z6wPGuwNFDNtWYEMFWE github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-xdr v0.0.0-20161123171359-e6a2ba005892/go.mod h1:CTDl0pzVzE5DEzZhPfvhY/9sPFMQIxaJ9VAMs9AagrE= github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= @@ -262,9 +265,11 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.m github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/euank/go-kmsg-parser v2.0.0+incompatible h1:cHD53+PLQuuQyLZeriD1V/esuG4MuU0Pjs5y6iknohY= github.com/euank/go-kmsg-parser v2.0.0+incompatible/go.mod h1:MhmAMZ8V4CYH4ybgdRwPr2TU5ThnS43puaKEMpja1uw= 
+github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= +github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8= @@ -280,8 +285,9 @@ github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoD github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= +github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= github.com/fsouza/go-dockerclient v1.7.1 h1:OHRaYvQslCqBStrel+I3OcXZBmpoTnRCGsmH2tAk7Hk= github.com/fsouza/go-dockerclient v1.7.1/go.mod h1:PHUSk8IVIp+nkIVGrQa2GK69jPOeW/43OUKQgQcOH+M= github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= @@ -308,6 +314,7 @@ github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTg github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/zapr v1.2.0 h1:n4JnPI1T3Qq1SFEi/F8rwLrZERp2bso19PJZDB9dayk= github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= @@ -321,6 +328,7 @@ github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/ github.com/go-ozzo/ozzo-validation v3.5.0+incompatible h1:sUy/in/P6askYr16XJgTKq/0SZhiWsdg4WZGaLsGQkM= github.com/go-ozzo/ozzo-validation v3.5.0+incompatible/go.mod h1:gsEKFIVnabGBt6mXmxK0MoFy+cZoTJY6mu5Ll3LVLBU= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/gobuffalo/flect v0.2.0/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= @@ -420,13 +428,15 @@ github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLe github.com/google/pprof 
v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= @@ -482,8 +492,9 @@ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpO github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI= github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/ishidawataru/sctp v0.0.0-20190723014705-7c296d48a2b5/go.mod h1:DM4VvS+hD/kDi1U1QsX2fnZowwBhqD0Dk3bRPKF/Oc8= @@ -617,10 +628,14 @@ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWb github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= +github.com/onsi/ginkgo/v2 v2.0.0 h1:CcuG/HvWNkkaqCUpJifQY8z7qEMBJya6aLPx6ftGyjQ= +github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= 
+github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= +github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= @@ -639,12 +654,17 @@ github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 h1:3 github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/selinux v1.10.0 h1:rAiKF8hTcgLI3w0DHm6i0ylVVcOrlgR1kK99DRLDhyU= github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= -github.com/openshift/api v0.0.0-20220525145417-ee5b62754c68 h1:G4GBjFvaGlHc1dMFfJY8Z0LhMa0leRG75DvQ33PAgdY= github.com/openshift/api v0.0.0-20220525145417-ee5b62754c68/go.mod h1:LEnw1IVscIxyDnltE3Wi7bQb/QzIM8BfPNKoGA1Qlxw= +github.com/openshift/api v0.0.0-20220531073726-6c4f186339a7 h1:bkBOsI/Yd+cBT+/aXkbbNo+imvq4VKRusoCluIGOBBg= +github.com/openshift/api v0.0.0-20220531073726-6c4f186339a7/go.mod h1:LEnw1IVscIxyDnltE3Wi7bQb/QzIM8BfPNKoGA1Qlxw= github.com/openshift/apiserver-library-go v0.0.0-20220718193426-aa2bfcc66a43/go.mod h1:0sLDTC2xy2ZKe7xvFEQ0sSbsyfmsMov+UbVVHh+ph58= github.com/openshift/build-machinery-go v0.0.0-20211213093930-7e33a7eb4ce3/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= github.com/openshift/client-go v0.0.0-20220525160904-9e1acff93e4a h1:ylsEgoC8Dlg4A0C1TLH0A4x/TZao7k1YveLwROhRUdk= github.com/openshift/client-go v0.0.0-20220525160904-9e1acff93e4a/go.mod h1:eDO5QeVi2IiXmDwB0e2z1DpAznWroZKe978pzZwFBzg= +github.com/openshift/cluster-api-actuator-pkg v0.0.0-20220726100012-d4179aa1372d h1:rGC0cPHUwMAoLuID4zIp9W4zGGM6gXvoS2K/0BDR4b0= +github.com/openshift/cluster-api-actuator-pkg v0.0.0-20220726100012-d4179aa1372d/go.mod h1:mQmkbjJW1KDVkqbDdi49xZXuAHFydTIL7Z73E8i9kx0= +github.com/openshift/cluster-autoscaler-operator v0.0.1-0.20220327153331-e850c080b4f5 h1:D6M4GphB5rWviuFvPZm2byCNKlaCWpHE/nx5BS+7hi0= +github.com/openshift/cluster-autoscaler-operator v0.0.1-0.20220327153331-e850c080b4f5/go.mod h1:fIeQ3qwCZxhjShe2ITvy19Z43S/ywOa7L824VX+gu7w= github.com/openshift/kubernetes v1.24.1-0.20220726222740-a6c02f3f6663 h1:msQ0befPEFGb3eF1koQwdwoNUFtsMFUQDew0kvM78Mk= github.com/openshift/kubernetes v1.24.1-0.20220726222740-a6c02f3f6663/go.mod h1:+cYv4Y1v4P4fRvva4lxcnLXPb61zKN56mgmcJw57U0Q= github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20220601165048-899fd9fb835c h1:Ey/3Xdnfwcflt5jIjf7H4Y80KUHpZCKL+vBrR2AW8UI= @@ -696,6 +716,10 @@ github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20220 github.com/openshift/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20220601165048-899fd9fb835c/go.mod h1:3pGFFDexbXnn2yemwXyhEqCR9bq0/QwXIULKIP5M5o0= github.com/openshift/library-go v0.0.0-20220525173854-9b950a41acdc h1:j+upvKc1uLzuL+q/JXie8+IMohOooTCaEC9w+4d1Ztk= github.com/openshift/library-go v0.0.0-20220525173854-9b950a41acdc/go.mod h1:AMZwYwSdbvALDl3QobEzcJ2IeDO7DYLsr42izKzh524= +github.com/openshift/machine-api-operator v0.2.1-0.20220608065814-f76a8f3ab734 h1:EpGiFLZjakJEeXxE34XgKpJPQ/Dt7ID9ZjrhbtoMOAs= +github.com/openshift/machine-api-operator 
v0.2.1-0.20220608065814-f76a8f3ab734/go.mod h1:FBgKM8rkLjF2V6ZfvvscpUSFsdXzEOFAiKccqjkxgBg= +github.com/openshift/machine-api-provider-openstack v0.0.0-20220707045855-23e8c4523dac h1:/Sx8+9/2KWqweOPAgjdWCvlI0Kw+lkGpsSnuykoQtKY= +github.com/openshift/machine-api-provider-openstack v0.0.0-20220707045855-23e8c4523dac/go.mod h1:bv5dBm+HCaQk3RBWcOxPrcB0827II0skENz5eV6YJ7g= github.com/openshift/onsi-ginkgo v4.7.0-origin.0+incompatible h1:6XSBotNi58b4MwVV4F9o/jd4BaQd+uJyz+s5TR0/ot8= github.com/openshift/onsi-ginkgo v4.7.0-origin.0+incompatible/go.mod h1:azqkkH4Vpp9A579CC26hicol/wViXag9rOwElif6v9E= github.com/openshift/origin v1.5.0-alpha.3.0.20220815155142-e6728e5e1d97 h1:XhfU6ebHifrYY2QODInPRBp6ObXQLxdUg0TUPqhiNeE= @@ -844,8 +868,10 @@ github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJ github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df h1:OviZH7qLw/7ZovXvuNyL3XQl8UFofeikI1NW1Gypu7k= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= -github.com/vmware/govmomi v0.20.3 h1:gpw/0Ku+6RgF3jsi7fnCLmlcikBHfKBCUcu1qgc16OU= github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= +github.com/vmware/govmomi v0.27.4 h1:5kY8TAkhB20lsjzrjE073eRb8+HixBI29PVMG5lxq6I= +github.com/vmware/govmomi v0.27.4/go.mod h1:daTuJEcQosNMXYJOeku0qdBJP9SOLLWB3Mqz8THtv6o= +github.com/vmware/vmw-guestinfo v0.0.0-20170707015358-25eff159a728/go.mod h1:x9oS4Wk2s2u4tS29nEaDLdzvuHdB19CvSGJjPgkZJNk= github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70= github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= @@ -915,17 +941,20 @@ go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqe go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 h1:+FNtrFTmVw0YZGpBGX56XDee331t6JAXeK2bcyhLOOc= go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= +go.uber.org/atomic v1.8.0 h1:CUhrE4N1rqSE6FM9ecihEjRkLQu8cDfgDyoOs83mEY4= +go.uber.org/atomic v1.8.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= -go.uber.org/zap v1.19.0 h1:mZQZefskPPCMIBCSEH0v2/iUqqLrYtaeqwD6FUGUnFE= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +go.uber.org/zap v1.19.1 
h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI= +go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= golang.org/x/arch v0.0.0-20180920145803-b19384d3c130/go.mod h1:cYlCBUl1MsqxdiKgmc4uh7TxZfWSFLOGSRR090WDxt8= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -970,7 +999,6 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= @@ -1031,6 +1059,7 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= @@ -1142,6 +1171,7 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1230,19 +1260,20 @@ golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82u golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.10-0.20220218145154-897bd77cd717 h1:hI3jKY4Hpf63ns040onEbB3dAkR/H/P83hw1TG8dD3Y= golang.org/x/tools v0.1.10-0.20220218145154-897bd77cd717/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= +gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.6.2/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= @@ -1377,8 +1408,9 @@ gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gcfg.v1 v1.2.0 h1:0HIbH907iBTAntm+88IJV2qmJALDAh8sPekI9Vc1fm0= gopkg.in/gcfg.v1 v1.2.0/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/gcfg.v1 v1.2.3 h1:m8OOJ4ccYHnx2f4gQwpno8nAX5OGOh7RLaaz0pj3Ogs= +gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= @@ -1433,6 +1465,7 @@ honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= @@ -1453,6 +1486,8 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod 
h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30 h1:dUk62HQ3ZFhD48Qr8MIXCiKA8wInBQCtuE4QGfFW7yA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30/go.mod h1:fEO7lRTdivWO2qYVCVG7dEADOMo/MLDCVr8So2g88Uw= +sigs.k8s.io/controller-runtime v0.12.3 h1:FCM8xeY/FI8hoAfh/V4XbbYMY20gElh9yh+A98usMio= +sigs.k8s.io/controller-runtime v0.12.3/go.mod h1:qKsk4WE6zW2Hfj0G4v10EnNB2jMG1C+NTb8h+DwCoU0= sigs.k8s.io/controller-tools v0.2.8/go.mod h1:9VKHPszmf2DHz/QmHkcfZoewO6BL7pPs9uAiBVsaJSE= sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= sigs.k8s.io/json v0.0.0-20220525155127-227cbc7cc124 h1:2sgAQQcY0dEW2SsQwTXhQV4vO6+rSslYx8K3XmM5hqQ= @@ -1469,6 +1504,7 @@ sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y= sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= vbom.ml/util v0.0.0-20180919145318-efcd4e0f9787/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI= diff --git a/test/extended/openstack/machinesets.go b/test/extended/openstack/machinesets.go new file mode 100644 index 000000000..fca8c62b7 --- /dev/null +++ b/test/extended/openstack/machinesets.go @@ -0,0 +1,135 @@ +package openstack + +import ( + "encoding/json" + "fmt" + "strings" + + g "github.com/onsi/ginkgo" + o "github.com/onsi/gomega" + + configv1 "github.com/openshift/api/config/v1" + machinev1 "github.com/openshift/api/machine/v1beta1" + framework "github.com/openshift/cluster-api-actuator-pkg/pkg/framework" + machineproviderv1 "github.com/openshift/machine-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1" + e2e "k8s.io/kubernetes/test/e2e/framework" + e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" + runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" + "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" + "k8s.io/client-go/kubernetes/scheme" +) + +var _ = g.Describe("[sig-installer][Suite:openshift/openstack] The OpenStack platform", func() { + + defer g.GinkgoRecover() + + var dc dynamic.Interface + var clientSet *kubernetes.Clientset + + g.Context("after deletion of a machineset", func() { + g.BeforeEach(func() { + g.By("preparing openshift dynamic client") + cfg, err := e2e.LoadConfig() + o.Expect(err).NotTo(o.HaveOccurred()) + dc, err = dynamic.NewForConfig(cfg) + o.Expect(err).NotTo(o.HaveOccurred()) + clientSet, err = e2e.LoadClientset() + o.Expect(err).NotTo(o.HaveOccurred()) + }) + + g.It("should not have leftovers ports", func() { + // Check the scenario at https://bugzilla.redhat.com/show_bug.cgi?id=2073398 + + skipUnlessMachineAPIOperator(dc, clientSet.CoreV1().Namespaces()) + g.By("Fetching worker machineSets") + var networkClient *gophercloud.ServiceClient + var rawBytes []byte + var 
newProviderSpec machineproviderv1.OpenstackProviderSpec + var rclient runtimeclient.Client + + networkClient, err := client(serviceNetwork) + o.Expect(err).NotTo(o.HaveOccurred(), "Error creating an openstack network client") + machineSets, err := listWorkerMachineSets(dc) + o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the worker machinesets") + + if len(machineSets) == 0 { + e2eskipper.Skipf("Expects at least one worker machineset. Found none.") + } + + cfg, err := e2e.LoadConfig() + o.Expect(err).NotTo(o.HaveOccurred(), "Error getting cluster config") + + rclient, err = runtimeclient.New(cfg, runtimeclient.Options{}) + o.Expect(err).NotTo(o.HaveOccurred(), "Error creating a runtime client") + + err = machinev1.AddToScheme(scheme.Scheme) + o.Expect(err).NotTo(o.HaveOccurred(), "Failed to add Machine to scheme") + err = configv1.AddToScheme(scheme.Scheme) + o.Expect(err).NotTo(o.HaveOccurred(), "Failed to add Config to scheme") + + newMachinesetParams := framework.BuildMachineSetParams(rclient, 1) + rawBytes, err = json.Marshal(newMachinesetParams.ProviderSpec.Value) + o.Expect(err).NotTo(o.HaveOccurred(), "Error marshaling new MachineSet Provider Spec") + + err = json.Unmarshal(rawBytes, &newProviderSpec) + o.Expect(err).NotTo(o.HaveOccurred(), "Error unmarshaling new MachineSet Provider Spec") + + newProviderSpec.ServerGroupName = "" + newProviderSpec.ServerGroupID = "boo" + newMachinesetParams.Name = fmt.Sprintf("%v-%v", "bogus", RandomSuffix()) + newProviderSpecJson, err := json.Marshal(newProviderSpec) + o.Expect(err).NotTo(o.HaveOccurred(), "Failed to marshal new Machineset provider spec") + newMachinesetParams.ProviderSpec.Value.Raw = newProviderSpecJson + + g.By("Create a new machineSet with bogus server group ID") + ms, err := framework.CreateMachineSet(rclient, newMachinesetParams) + o.Expect(err).NotTo(o.HaveOccurred(), "Failed to create a Machineset") + defer DeleteMachinesetsDefer(rclient, ms) + + clusterID := ms.Spec.Template.ObjectMeta.Labels["machine.openshift.io/cluster-api-cluster"] + err = GetMachinesetRetry(rclient, ms, true) + + o.Expect(err).NotTo(o.HaveOccurred(), "Failed to get the new Machineset") + + g.By("Deleting the new machineset") + framework.DeleteMachineSets(rclient, ms) + err = GetMachinesetRetry(rclient, ms, false) + o.Expect(errors.IsNotFound(err)).To(o.BeTrue(), "Machineset %v was not deleted", ms.Name) + + networkName := clusterID + "-openshift" + + networkListOpts := networks.ListOpts{Name: networkName} + networkAllPages, err := networks.List(networkClient, networkListOpts).AllPages() + o.Expect(err).NotTo(o.HaveOccurred(), "Failed to get networks") + allNetworks, err := networks.ExtractNetworks(networkAllPages) + o.Expect(err).NotTo(o.HaveOccurred(), "Failed to extract networks") + networkID := allNetworks[0].ID + e2e.Logf("Network ID: %v", networkID) + portListOpts := ports.ListOpts{ + NetworkID: networkID, + } + + g.By("Checking if an orphaned port related to the deleted machineset exists") + allPages, err := ports.List(networkClient, portListOpts).AllPages() + o.Expect(err).NotTo(o.HaveOccurred(), "Failed to get ports") + + allPorts, err := ports.ExtractPorts(allPages) + o.Expect(err).NotTo(o.HaveOccurred(), "Failed to extract ports") + + portID := "" + for _, port := range allPorts { + if strings.Contains(port.Name, newMachinesetParams.Name) { + portID = port.ID + } + } + o.Expect(portID).To(o.Equal("")) + }) + }) +}) diff --git a/test/extended/openstack/servers.go b/test/extended/openstack/servers.go index 537bfb194..5af5fef43 100644 ---
a/test/extended/openstack/servers.go +++ b/test/extended/openstack/servers.go @@ -2,6 +2,7 @@ package openstack import ( "context" + "errors" "fmt" "net" "reflect" @@ -16,7 +17,7 @@ import ( g "github.com/onsi/ginkgo" o "github.com/onsi/gomega" "github.com/stretchr/objx" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/wait" @@ -108,15 +109,20 @@ var _ = g.Describe("[sig-installer][Suite:openshift/openstack] The OpenStack pla machines, err := getMachines(dc) o.Expect(err).NotTo(o.HaveOccurred()) + foundMachines := 0 for _, machine := range machines { g.By(fmt.Sprintf("Gather Openstack attributes for machine %q", machine.Get("metadata.name"))) instance, err := servers.Get(computeClient, machine.Get("metadata.annotations.openstack-resourceId").String()).Extract() - o.Expect(err).NotTo(o.HaveOccurred(), "Error gathering Openstack info for machine %v", machine.Get("metadata.name")) - g.By(fmt.Sprintf("Compare addresses with openstack interfaces for machine %q", instance.Name)) - o.Expect(parseInstanceAddresses(instance.Addresses)).To(o.ConsistOf(getAddressesFromMachine(machine)), "Addresses not matching for instance %q", instance.Name) + var gerr gophercloud.ErrDefault404 + if !errors.As(err, &gerr) { + o.Expect(err).NotTo(o.HaveOccurred(), "Error gathering Openstack info for machine %v", machine.Get("metadata.name")) + g.By(fmt.Sprintf("Compare addresses with openstack interfaces for machine %q", instance.Name)) + o.Expect(parseInstanceAddresses(instance.Addresses)).To(o.ConsistOf(getAddressesFromMachine(machine)), "Addresses not matching for instance %q", instance.Name) + foundMachines++ + } } + o.Expect(foundMachines).NotTo(o.Equal(0)) }) - }) }) @@ -273,7 +279,7 @@ func skipUnlessMachineAPIOperator(dc dynamic.Interface, c coreclient.NamespaceIn if err == nil { return true, nil } - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { e2eskipper.Skipf("The cluster does not support machine instances") } e2e.Logf("Unable to check for machine api operator: %v", err) @@ -286,7 +292,7 @@ func skipUnlessMachineAPIOperator(dc dynamic.Interface, c coreclient.NamespaceIn if err == nil { return true, nil } - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { e2eskipper.Skipf("The cluster machines are not managed by machine api operator") } e2e.Logf("Unable to check for machine api operator: %v", err) diff --git a/test/extended/openstack/utils.go b/test/extended/openstack/utils.go index 5184efc19..506656cf1 100644 --- a/test/extended/openstack/utils.go +++ b/test/extended/openstack/utils.go @@ -6,7 +6,10 @@ import ( "fmt" "math/rand" "strconv" + "time" + machinev1 "github.com/openshift/api/machine/v1beta1" + framework "github.com/openshift/cluster-api-actuator-pkg/pkg/framework" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" @@ -14,6 +17,7 @@ import ( e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" psapi "k8s.io/pod-security-admission/api" + runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" ) type KuryrNetwork struct { @@ -108,6 +112,13 @@ func CreatePod(clientSet *kubernetes.Clientset, nsName string, baseName string, return p, err } +func DeleteMachinesetsDefer(client runtimeclient.Client, ms *machinev1.MachineSet) { + err := framework.DeleteMachineSets(client, ms) + if err != nil { + e2e.Logf("Error occurred: %v", err) +
} +} + // difference returns the elements in `a` that aren't in `b`. func difference(a []string, b []string) []string { mb := make(map[string]struct{}, len(b)) @@ -122,3 +133,21 @@ func difference(a []string, b []string) []string { } return diff } + +func GetMachinesetRetry(client runtimeclient.Client, ms *machinev1.MachineSet, shouldExist bool) error { + var err error + const maxRetries = 5 + const delay = 10 + retries := 1 + for retries < maxRetries { + _, err = framework.GetMachineSet(client, ms.Name) + + if err != nil == shouldExist { + retries += 1 + time.Sleep(time.Second * delay) + } else { + break + } + } + return err +} diff --git a/test/extended/util/annotate/generated/zz_generated.annotations.go b/test/extended/util/annotate/generated/zz_generated.annotations.go index 8b00e299d..1f7a91b33 100644 --- a/test/extended/util/annotate/generated/zz_generated.annotations.go +++ b/test/extended/util/annotate/generated/zz_generated.annotations.go @@ -713,6 +713,8 @@ var annotations = map[string]string{ "[Top Level] [sig-cli] Kubectl client kubectl wait should ignore not found error with --for=delete": "should ignore not found error with --for=delete [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-installer][Suite:openshift/openstack] The OpenStack platform after deletion of a machineset should not have leftovers ports": "should not have leftovers ports [Suite:openshift/conformance/parallel]", + "[Top Level] [sig-installer][Suite:openshift/openstack] The OpenStack platform creates Control plane nodes in a server group": "creates Control plane nodes in a server group [Suite:openshift/conformance/parallel]", "[Top Level] [sig-installer][Suite:openshift/openstack] The OpenStack platform creates Control plane nodes on separate hosts when serverGroupPolicy is anti-affinity": "creates Control plane nodes on separate hosts when serverGroupPolicy is anti-affinity [Suite:openshift/conformance/parallel]", diff --git a/vendor/github.com/MakeNowJust/heredoc/LICENSE b/vendor/github.com/MakeNowJust/heredoc/LICENSE index 8a58c2220..6d0eb9d5d 100644 --- a/vendor/github.com/MakeNowJust/heredoc/LICENSE +++ b/vendor/github.com/MakeNowJust/heredoc/LICENSE @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) 2014-2017 TSUYUSATO Kitsune +Copyright (c) 2014-2019 TSUYUSATO Kitsune Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/github.com/MakeNowJust/heredoc/README.md b/vendor/github.com/MakeNowJust/heredoc/README.md index a3a65faba..e9924d297 100644 --- a/vendor/github.com/MakeNowJust/heredoc/README.md +++ b/vendor/github.com/MakeNowJust/heredoc/README.md @@ -1,4 +1,6 @@ -# heredoc [![CircleCI](https://circleci.com/gh/MakeNowJust/heredoc.svg?style=svg)](https://circleci.com/gh/MakeNowJust/heredoc) [![Go Walker](http://gowalker.org/api/v1/badge)](https://gowalker.org/github.com/MakeNowJust/heredoc) +# heredoc + +[![Build Status](https://circleci.com/gh/MakeNowJust/heredoc.svg?style=svg)](https://circleci.com/gh/MakeNowJust/heredoc) [![GoDoc](https://godoc.org/github.com/MakeNowJusti/heredoc?status.svg)](https://godoc.org/github.com/MakeNowJust/heredoc) ## About @@ -15,8 +17,6 @@ $ go get github.com/MakeNowJust/heredoc ```go // usual import "github.com/MakeNowJust/heredoc" -// shortcuts -import . "github.com/MakeNowJust/heredoc/dot" ``` ## Example @@ -26,11 +26,11 @@ package main import ( "fmt" - . 
"github.com/MakeNowJust/heredoc/dot" + "github.com/MakeNowJust/heredoc" ) func main() { - fmt.Println(D(` + fmt.Println(heredoc.Doc(` Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, ... @@ -45,8 +45,7 @@ func main() { ## API Document - - [Go Walker - github.com/MakeNowJust/heredoc](https://gowalker.org/github.com/MakeNowJust/heredoc) - - [Go Walker - github.com/MakeNowJust/heredoc/dot](https://gowalker.org/github.com/MakeNowJust/heredoc/dot) + - [heredoc - GoDoc](https://godoc.org/github.com/MakeNowJust/heredoc) ## License diff --git a/vendor/github.com/MakeNowJust/heredoc/heredoc.go b/vendor/github.com/MakeNowJust/heredoc/heredoc.go index fea12e622..1fc046955 100644 --- a/vendor/github.com/MakeNowJust/heredoc/heredoc.go +++ b/vendor/github.com/MakeNowJust/heredoc/heredoc.go @@ -1,24 +1,31 @@ -// Copyright (c) 2014-2017 TSUYUSATO Kitsune +// Copyright (c) 2014-2019 TSUYUSATO Kitsune // This software is released under the MIT License. // http://opensource.org/licenses/mit-license.php // Package heredoc provides creation of here-documents from raw strings. // // Golang supports raw-string syntax. +// // doc := ` // Foo // Bar // ` +// // But raw-string cannot recognize indentation. Thus such content is an indented string, equivalent to +// // "\n\tFoo\n\tBar\n" +// // I dont't want this! // // However this problem is solved by package heredoc. +// // doc := heredoc.Doc(` // Foo // Bar // `) +// // Is equivalent to +// // "Foo\nBar\n" package heredoc @@ -33,7 +40,7 @@ const maxInt = int(^uint(0) >> 1) // Doc returns un-indented string as here-document. func Doc(raw string) string { skipFirstLine := false - if raw[0] == '\n' { + if len(raw) > 0 && raw[0] == '\n' { raw = raw[1:] } else { skipFirstLine = true diff --git a/vendor/github.com/evanphx/json-patch/patch.go b/vendor/github.com/evanphx/json-patch/patch.go index dc2b7e51e..4bce5936d 100644 --- a/vendor/github.com/evanphx/json-patch/patch.go +++ b/vendor/github.com/evanphx/json-patch/patch.go @@ -568,29 +568,6 @@ func (p Patch) replace(doc *container, op Operation) error { return errors.Wrapf(err, "replace operation failed to decode path") } - if path == "" { - val := op.value() - - if val.which == eRaw { - if !val.tryDoc() { - if !val.tryAry() { - return errors.Wrapf(err, "replace operation value must be object or array") - } - } - } - - switch val.which { - case eAry: - *doc = &val.ary - case eDoc: - *doc = &val.doc - case eRaw: - return errors.Wrapf(err, "replace operation hit impossible case") - } - - return nil - } - con, key := findObject(doc, path) if con == nil { @@ -657,25 +634,6 @@ func (p Patch) test(doc *container, op Operation) error { return errors.Wrapf(err, "test operation failed to decode path") } - if path == "" { - var self lazyNode - - switch sv := (*doc).(type) { - case *partialDoc: - self.doc = *sv - self.which = eDoc - case *partialArray: - self.ary = *sv - self.which = eAry - } - - if self.equal(op.value()) { - return nil - } - - return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) - } - con, key := findObject(doc, path) if con == nil { diff --git a/vendor/github.com/fsnotify/fsnotify/.mailmap b/vendor/github.com/fsnotify/fsnotify/.mailmap new file mode 100644 index 000000000..a04f2907f --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/.mailmap @@ -0,0 +1,2 @@ +Chris Howey +Nathan Youngman <4566+nathany@users.noreply.github.com> diff --git a/vendor/github.com/fsnotify/fsnotify/.travis.yml 
b/vendor/github.com/fsnotify/fsnotify/.travis.yml deleted file mode 100644 index a9c30165c..000000000 --- a/vendor/github.com/fsnotify/fsnotify/.travis.yml +++ /dev/null @@ -1,36 +0,0 @@ -sudo: false -language: go - -go: - - "stable" - - "1.11.x" - - "1.10.x" - - "1.9.x" - -matrix: - include: - - go: "stable" - env: GOLINT=true - allow_failures: - - go: tip - fast_finish: true - - -before_install: - - if [ ! -z "${GOLINT}" ]; then go get -u golang.org/x/lint/golint; fi - -script: - - go test --race ./... - -after_script: - - test -z "$(gofmt -s -l -w . | tee /dev/stderr)" - - if [ ! -z "${GOLINT}" ]; then echo running golint; golint --set_exit_status ./...; else echo skipping golint; fi - - go vet ./... - -os: - - linux - - osx - - windows - -notifications: - email: false diff --git a/vendor/github.com/fsnotify/fsnotify/AUTHORS b/vendor/github.com/fsnotify/fsnotify/AUTHORS index 5ab5d41c5..6cbabe5ef 100644 --- a/vendor/github.com/fsnotify/fsnotify/AUTHORS +++ b/vendor/github.com/fsnotify/fsnotify/AUTHORS @@ -4,35 +4,44 @@ # You can update this list using the following command: # -# $ git shortlog -se | awk '{print $2 " " $3 " " $4}' +# $ (head -n10 AUTHORS && git shortlog -se | sed -E 's/^\s+[0-9]+\t//') | tee AUTHORS # Please keep the list sorted. Aaron L Adrien Bustany +Alexey Kazakov Amit Krishnan Anmol Sethi Bjørn Erik Pedersen +Brian Goff Bruno Bigras Caleb Spare Case Nelson -Chris Howey +Chris Howey Christoffer Buchholz Daniel Wagner-Hall Dave Cheney +Eric Lin Evan Phoenix Francisco Souza +Gautam Dey Hari haran -John C Barstow +Ichinose Shogo +Johannes Ebke +John C Barstow Kelvin Fo Ken-ichirou MATSUZAWA Matt Layher +Matthias Stone Nathan Youngman Nickolai Zeldovich +Oliver Bristow Patrick Paul Hammond Pawel Knap Pieter Droogendijk +Pratik Shinde Pursuit92 Riku Voipio Rob Figueiredo @@ -41,6 +50,7 @@ Slawek Ligus Soge Zhang Tiffany Jernigan Tilak Sharma +Tobias Klauser Tom Payne Travis Cline Tudor Golubenco diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md index be4d7ea2c..a438fe4b4 100644 --- a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md +++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -1,6 +1,28 @@ # Changelog -## v1.4.7 / 2018-01-09 +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
+ +## [Unreleased] + +## [1.5.1] - 2021-08-24 + +* Revert Add AddRaw to not follow symlinks + +## [1.5.0] - 2021-08-20 + +* Go: Increase minimum required version to Go 1.12 [#381](https://github.com/fsnotify/fsnotify/pull/381) +* Feature: Add AddRaw method which does not follow symlinks when adding a watch [#289](https://github.com/fsnotify/fsnotify/pull/298) +* Windows: Follow symlinks by default like on all other systems [#289](https://github.com/fsnotify/fsnotify/pull/289) +* CI: Use GitHub Actions for CI and cover go 1.12-1.17 + [#378](https://github.com/fsnotify/fsnotify/pull/378) + [#381](https://github.com/fsnotify/fsnotify/pull/381) + [#385](https://github.com/fsnotify/fsnotify/pull/385) +* Go 1.14+: Fix unsafe pointer conversion [#325](https://github.com/fsnotify/fsnotify/pull/325) + +## [1.4.7] - 2018-01-09 * BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine) * Tests: Fix missing verb on format string (thanks @rchiossi) @@ -10,62 +32,62 @@ * Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich) * Docs: replace references to OS X with macOS -## v1.4.2 / 2016-10-10 +## [1.4.2] - 2016-10-10 * Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack) -## v1.4.1 / 2016-10-04 +## [1.4.1] - 2016-10-04 * Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack) -## v1.4.0 / 2016-10-01 +## [1.4.0] - 2016-10-01 * add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie) -## v1.3.1 / 2016-06-28 +## [1.3.1] - 2016-06-28 * Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc) -## v1.3.0 / 2016-04-19 +## [1.3.0] - 2016-04-19 * Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135) -## v1.2.10 / 2016-03-02 +## [1.2.10] - 2016-03-02 * Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj) -## v1.2.9 / 2016-01-13 +## [1.2.9] - 2016-01-13 kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep) -## v1.2.8 / 2015-12-17 +## [1.2.8] - 2015-12-17 * kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test) * inotify: fix race in test * enable race detection for continuous integration (Linux, Mac, Windows) -## v1.2.5 / 2015-10-17 +## [1.2.5] - 2015-10-17 * inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki) * inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken) * kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie) * kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion) -## v1.2.1 / 2015-10-14 +## [1.2.1] - 2015-10-14 * kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx) -## v1.2.0 / 2015-02-08 +## [1.2.0] - 2015-02-08 * inotify: use epoll 
to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD) * inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD) * kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59) -## v1.1.1 / 2015-02-05 +## [1.1.1] - 2015-02-05 * inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD) -## v1.1.0 / 2014-12-12 +## [1.1.0] - 2014-12-12 * kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43) * add low-level functions @@ -77,22 +99,22 @@ kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsn * kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48) * kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) -## v1.0.4 / 2014-09-07 +## [1.0.4] - 2014-09-07 * kqueue: add dragonfly to the build tags. * Rename source code files, rearrange code so exported APIs are at the top. * Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang) -## v1.0.3 / 2014-08-19 +## [1.0.3] - 2014-08-19 * [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36) -## v1.0.2 / 2014-08-17 +## [1.0.2] - 2014-08-17 * [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) * [Fix] Make ./path and path equivalent. (thanks @zhsso) -## v1.0.0 / 2014-08-15 +## [1.0.0] - 2014-08-15 * [API] Remove AddWatch on Windows, use Add. * Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30) @@ -146,51 +168,51 @@ kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsn * no tests for the current implementation * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195) -## v0.9.3 / 2014-12-31 +## [0.9.3] - 2014-12-31 * kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) -## v0.9.2 / 2014-08-17 +## [0.9.2] - 2014-08-17 * [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) -## v0.9.1 / 2014-06-12 +## [0.9.1] - 2014-06-12 * Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) -## v0.9.0 / 2014-01-17 +## [0.9.0] - 2014-01-17 * IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany) * [Fix] kqueue: fix deadlock [#77][] (thanks @cespare) * [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library. 
-## v0.8.12 / 2013-11-13 +## [0.8.12] - 2013-11-13 * [API] Remove FD_SET and friends from Linux adapter -## v0.8.11 / 2013-11-02 +## [0.8.11] - 2013-11-02 * [Doc] Add Changelog [#72][] (thanks @nathany) * [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond) -## v0.8.10 / 2013-10-19 +## [0.8.10] - 2013-10-19 * [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott) * [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer) * [Doc] specify OS-specific limits in README (thanks @debrando) -## v0.8.9 / 2013-09-08 +## [0.8.9] - 2013-09-08 * [Doc] Contributing (thanks @nathany) * [Doc] update package path in example code [#63][] (thanks @paulhammond) * [Doc] GoCI badge in README (Linux only) [#60][] * [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany) -## v0.8.8 / 2013-06-17 +## [0.8.8] - 2013-06-17 * [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie) -## v0.8.7 / 2013-06-03 +## [0.8.7] - 2013-06-03 * [API] Make syscall flags internal * [Fix] inotify: ignore event changes @@ -198,74 +220,74 @@ kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsn * [Fix] tests on Windows * lower case error messages -## v0.8.6 / 2013-05-23 +## [0.8.6] - 2013-05-23 * kqueue: Use EVT_ONLY flag on Darwin * [Doc] Update README with full example -## v0.8.5 / 2013-05-09 +## [0.8.5] - 2013-05-09 * [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg) -## v0.8.4 / 2013-04-07 +## [0.8.4] - 2013-04-07 * [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz) -## v0.8.3 / 2013-03-13 +## [0.8.3] - 2013-03-13 * [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin) * [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin) -## v0.8.2 / 2013-02-07 +## [0.8.2] - 2013-02-07 * [Doc] add Authors * [Fix] fix data races for map access [#29][] (thanks @fsouza) -## v0.8.1 / 2013-01-09 +## [0.8.1] - 2013-01-09 * [Fix] Windows path separators * [Doc] BSD License -## v0.8.0 / 2012-11-09 +## [0.8.0] - 2012-11-09 * kqueue: directory watching improvements (thanks @vmirage) * inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto) * [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr) -## v0.7.4 / 2012-10-09 +## [0.7.4] - 2012-10-09 * [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji) * [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig) * [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig) * [Fix] kqueue: modify after recreation of file -## v0.7.3 / 2012-09-27 +## [0.7.3] - 2012-09-27 * [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage) * [Fix] kqueue: no longer get duplicate CREATE events -## v0.7.2 / 2012-09-01 +## [0.7.2] - 2012-09-01 * kqueue: events for created directories -## v0.7.1 / 2012-07-14 +## [0.7.1] - 2012-07-14 * [Fix] for renaming files -## v0.7.0 / 2012-07-02 +## [0.7.0] - 2012-07-02 * [Feature] FSNotify flags * [Fix] inotify: Added file name back to event path -## v0.6.0 / 2012-06-06 +## [0.6.0] - 2012-06-06 * kqueue: watch files after directory created (thanks @tmc) -## v0.5.1 / 2012-05-22 +## [0.5.1] - 2012-05-22 * [Fix] inotify: remove all watches before Close() -## v0.5.0 / 2012-05-03 +## [0.5.0] - 2012-05-03 * [API] kqueue: return errors during watch instead of sending over channel * kqueue: match symlink behavior on Linux @@ -273,22 
+295,22 @@ kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsn * [Fix] kqueue: handle EINTR (reported by @robfig) * [Doc] Godoc example [#1][] (thanks @davecheney) -## v0.4.0 / 2012-03-30 +## [0.4.0] - 2012-03-30 * Go 1 released: build with go tool * [Feature] Windows support using winfsnotify * Windows does not have attribute change notifications * Roll attribute notifications into IsModify -## v0.3.0 / 2012-02-19 +## [0.3.0] - 2012-02-19 * kqueue: add files when watch directory -## v0.2.0 / 2011-12-30 +## [0.2.0] - 2011-12-30 * update to latest Go weekly code -## v0.1.0 / 2011-10-19 +## [0.1.0] - 2011-10-19 * kqueue: add watch on file creation to match inotify * kqueue: create file event diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md index b2629e522..df57b1b28 100644 --- a/vendor/github.com/fsnotify/fsnotify/README.md +++ b/vendor/github.com/fsnotify/fsnotify/README.md @@ -12,9 +12,9 @@ Cross platform: Windows, Linux, BSD and macOS. | Adapter | OS | Status | | --------------------- | -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | -| inotify | Linux 2.6.27 or later, Android\* | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | -| kqueue | BSD, macOS, iOS\* | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | -| ReadDirectoryChangesW | Windows | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | +| inotify | Linux 2.6.27 or later, Android\* | Supported | +| kqueue | BSD, macOS, iOS\* | Supported | +| ReadDirectoryChangesW | Windows | Supported | | FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) | | FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/issues/12) | | fanotify | Linux 2.6.37+ | [Planned](https://github.com/fsnotify/fsnotify/issues/114) | diff --git a/vendor/github.com/fsnotify/fsnotify/fen.go b/vendor/github.com/fsnotify/fsnotify/fen.go index ced39cb88..b3ac3d8f5 100644 --- a/vendor/github.com/fsnotify/fsnotify/fen.go +++ b/vendor/github.com/fsnotify/fsnotify/fen.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build solaris // +build solaris package fsnotify diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go index 89cab046d..0f4ee52e8 100644 --- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go +++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !plan9 // +build !plan9 // Package fsnotify provides a platform-independent interface for file system notifications. diff --git a/vendor/github.com/fsnotify/fsnotify/inotify.go b/vendor/github.com/fsnotify/fsnotify/inotify.go index d9fd1b88a..eb87699b5 100644 --- a/vendor/github.com/fsnotify/fsnotify/inotify.go +++ b/vendor/github.com/fsnotify/fsnotify/inotify.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build linux // +build linux package fsnotify @@ -272,7 +273,7 @@ func (w *Watcher) readEvents() { if nameLen > 0 { // Point "bytes" at the first byte of the filename - bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent])) + bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] // The filename is padded with NULL bytes. TrimRight() gets rid of those. name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") } diff --git a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go index b33f2b4d4..e9ff9439f 100644 --- a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go +++ b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build linux // +build linux package fsnotify diff --git a/vendor/github.com/fsnotify/fsnotify/kqueue.go b/vendor/github.com/fsnotify/fsnotify/kqueue.go index 86e76a3d6..368f5b790 100644 --- a/vendor/github.com/fsnotify/fsnotify/kqueue.go +++ b/vendor/github.com/fsnotify/fsnotify/kqueue.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build freebsd || openbsd || netbsd || dragonfly || darwin // +build freebsd openbsd netbsd dragonfly darwin package fsnotify diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go index 2306c4620..36cc3845b 100644 --- a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go +++ b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build freebsd || openbsd || netbsd || dragonfly // +build freebsd openbsd netbsd dragonfly package fsnotify diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go index 870c4d6d1..98cd8476f 100644 --- a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go +++ b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build darwin // +build darwin package fsnotify diff --git a/vendor/github.com/fsnotify/fsnotify/windows.go b/vendor/github.com/fsnotify/fsnotify/windows.go index 09436f31d..c02b75f7c 100644 --- a/vendor/github.com/fsnotify/fsnotify/windows.go +++ b/vendor/github.com/fsnotify/fsnotify/windows.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build windows // +build windows package fsnotify diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go index b17461631..b404f4bec 100644 --- a/vendor/github.com/google/uuid/hash.go +++ b/vendor/github.com/google/uuid/hash.go @@ -26,8 +26,8 @@ var ( // NewMD5 and NewSHA1. 
func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { h.Reset() - h.Write(space[:]) - h.Write(data) + h.Write(space[:]) //nolint:errcheck + h.Write(data) //nolint:errcheck s := h.Sum(nil) var uuid UUID copy(uuid[:], s) diff --git a/vendor/github.com/google/uuid/sql.go b/vendor/github.com/google/uuid/sql.go index f326b54db..2e02ec06c 100644 --- a/vendor/github.com/google/uuid/sql.go +++ b/vendor/github.com/google/uuid/sql.go @@ -9,7 +9,7 @@ import ( "fmt" ) -// Scan implements sql.Scanner so UUIDs can be read from databases transparently +// Scan implements sql.Scanner so UUIDs can be read from databases transparently. // Currently, database types that map to string and []byte are supported. Please // consult database-specific driver documentation for matching types. func (uuid *UUID) Scan(src interface{}) error { diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go index 524404cc5..60d26bb50 100644 --- a/vendor/github.com/google/uuid/uuid.go +++ b/vendor/github.com/google/uuid/uuid.go @@ -35,6 +35,12 @@ const ( var rander = rand.Reader // random function +type invalidLengthError struct{ len int } + +func (err invalidLengthError) Error() string { + return fmt.Sprintf("invalid UUID length: %d", err.len) +} + // Parse decodes s into a UUID or returns an error. Both the standard UUID // forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the @@ -68,7 +74,7 @@ func Parse(s string) (UUID, error) { } return uuid, nil default: - return uuid, fmt.Errorf("invalid UUID length: %d", len(s)) + return uuid, invalidLengthError{len(s)} } // s is now at least 36 bytes long // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx @@ -112,7 +118,7 @@ func ParseBytes(b []byte) (UUID, error) { } return uuid, nil default: - return uuid, fmt.Errorf("invalid UUID length: %d", len(b)) + return uuid, invalidLengthError{len(b)} } // s is now at least 36 bytes long // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go index c110465db..86160fbd0 100644 --- a/vendor/github.com/google/uuid/version4.go +++ b/vendor/github.com/google/uuid/version4.go @@ -14,6 +14,14 @@ func New() UUID { return Must(NewRandom()) } +// NewString creates a new random UUID and returns it as a string or panics. +// NewString is equivalent to the expression +// +// uuid.New().String() +func NewString() string { + return Must(NewRandom()).String() +} + // NewRandom returns a Random (Version 4) UUID. 
// // The strength of the UUIDs is based on the strength of the crypto/rand diff --git a/vendor/github.com/imdario/mergo/.deepsource.toml b/vendor/github.com/imdario/mergo/.deepsource.toml new file mode 100644 index 000000000..8a0681af8 --- /dev/null +++ b/vendor/github.com/imdario/mergo/.deepsource.toml @@ -0,0 +1,12 @@ +version = 1 + +test_patterns = [ + "*_test.go" +] + +[[analyzers]] +name = "go" +enabled = true + + [analyzers.meta] + import_path = "github.com/imdario/mergo" \ No newline at end of file diff --git a/vendor/github.com/imdario/mergo/.travis.yml b/vendor/github.com/imdario/mergo/.travis.yml index b13a50ed1..d324c43ba 100644 --- a/vendor/github.com/imdario/mergo/.travis.yml +++ b/vendor/github.com/imdario/mergo/.travis.yml @@ -1,7 +1,12 @@ language: go +arch: + - amd64 + - ppc64le install: - go get -t - go get golang.org/x/tools/cmd/cover - go get github.com/mattn/goveralls script: + - go test -race -v ./... +after_script: - $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md index 02fc81e06..aa8cbd7ce 100644 --- a/vendor/github.com/imdario/mergo/README.md +++ b/vendor/github.com/imdario/mergo/README.md @@ -1,44 +1,54 @@ # Mergo -A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. - -Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche. - -## Status - -It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild). [![GoDoc][3]][4] -[![GoCard][5]][6] +[![GitHub release][5]][6] +[![GoCard][7]][8] [![Build Status][1]][2] -[![Coverage Status][7]][8] -[![Sourcegraph][9]][10] -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield) +[![Coverage Status][9]][10] +[![Sourcegraph][11]][12] +[![FOSSA Status][13]][14] + +[![GoCenter Kudos][15]][16] [1]: https://travis-ci.org/imdario/mergo.png [2]: https://travis-ci.org/imdario/mergo [3]: https://godoc.org/github.com/imdario/mergo?status.svg [4]: https://godoc.org/github.com/imdario/mergo -[5]: https://goreportcard.com/badge/imdario/mergo -[6]: https://goreportcard.com/report/github.com/imdario/mergo -[7]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master -[8]: https://coveralls.io/github/imdario/mergo?branch=master -[9]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg -[10]: https://sourcegraph.com/github.com/imdario/mergo?badge +[5]: https://img.shields.io/github/release/imdario/mergo.svg +[6]: https://github.com/imdario/mergo/releases +[7]: https://goreportcard.com/badge/imdario/mergo +[8]: https://goreportcard.com/report/github.com/imdario/mergo +[9]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master +[10]: https://coveralls.io/github/imdario/mergo?branch=master +[11]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg +[12]: https://sourcegraph.com/github.com/imdario/mergo?badge +[13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield +[14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield +[15]: https://search.gocenter.io/api/ui/badge/github.com%2Fimdario%2Fmergo +[16]: 
https://search.gocenter.io/github.com/imdario/mergo -### Latest release +A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. -[Release v0.3.7](https://github.com/imdario/mergo/releases/tag/v0.3.7). +Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). + +Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche. + +## Status + +It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild). ### Important note -Please keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2) Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). An optional/variadic argument has been added, so it won't break existing code. +Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds suppot for go modules. -If you were using Mergo **before** April 6th 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause (I hope it won't!) in existing projects after the change (release 0.2.0). +Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code. + +If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). ### Donations -If Mergo is useful to you, consider buying me a coffee, a beer or making a monthly donation so I can keep building great free software. :heart_eyes: +If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes: Buy Me a Coffee at ko-fi.com [![Beerpay](https://beerpay.io/imdario/mergo/badge.svg)](https://beerpay.io/imdario/mergo) @@ -87,8 +97,9 @@ If Mergo is useful to you, consider buying me a coffee, a beer or making a month - [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server) - [jnuthong/item_search](https://github.com/jnuthong/item_search) - [bukalapak/snowboard](https://github.com/bukalapak/snowboard) +- [containerssh/containerssh](https://github.com/containerssh/containerssh) -## Installation +## Install go get github.com/imdario/mergo @@ -99,7 +110,7 @@ If Mergo is useful to you, consider buying me a coffee, a beer or making a month ## Usage -You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. 
It won't merge empty structs value as [they are not considered zero values](https://golang.org/ref/spec#The_zero_value) either. Also maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). +You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as [they are zero values](https://golang.org/ref/spec#The_zero_value) too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). ```go if err := mergo.Merge(&dst, src); err != nil { @@ -125,9 +136,7 @@ if err := mergo.Map(&dst, srcMap); err != nil { Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`. They will be just assigned as values. -More information and examples in [godoc documentation](http://godoc.org/github.com/imdario/mergo). - -### Nice example +Here is a nice example: ```go package main @@ -175,10 +184,10 @@ import ( "time" ) -type timeTransfomer struct { +type timeTransformer struct { } -func (t timeTransfomer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { +func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { if typ == reflect.TypeOf(time.Time{}) { return func(dst, src reflect.Value) error { if dst.CanSet() { @@ -202,7 +211,7 @@ type Snapshot struct { func main() { src := Snapshot{time.Now()} dest := Snapshot{} - mergo.Merge(&dest, src, mergo.WithTransformers(timeTransfomer{})) + mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{})) fmt.Println(dest) // Will print // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } diff --git a/vendor/github.com/imdario/mergo/doc.go b/vendor/github.com/imdario/mergo/doc.go index 6e9aa7baf..fcd985f99 100644 --- a/vendor/github.com/imdario/mergo/doc.go +++ b/vendor/github.com/imdario/mergo/doc.go @@ -4,41 +4,140 @@ // license that can be found in the LICENSE file. /* -Package mergo merges same-type structs and maps by setting default values in zero-value fields. +A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. -Mergo won't merge unexported (private) fields but will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). +Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). + +Status + +It is ready for production use. It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc. + +Important note + +Please keep in mind that a problematic PR broke 0.3.9. We reverted it in 0.3.10. We consider 0.3.10 as stable but not bug-free. . Also, this version adds suppot for go modules. + +Keep in mind that in 0.3.2, Mergo changed Merge() and Map() signatures to support transformers. We added an optional/variadic argument so that it won't break the existing code. + +If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with go get -u github.com/imdario/mergo. 
I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). + +Install + +Do your usual installation procedure: + + go get github.com/imdario/mergo + + // use in your .go code + import ( + "github.com/imdario/mergo" + ) Usage -From my own work-in-progress project: +You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as they are zero values too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). + + if err := mergo.Merge(&dst, src); err != nil { + // ... + } + +Also, you can merge overwriting values using the transformer WithOverride. + + if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { + // ... + } + +Additionally, you can map a map[string]interface{} to a struct (and otherwise, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field. + + if err := mergo.Map(&dst, srcMap); err != nil { + // ... + } + +Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}. They will be just assigned as values. + +Here is a nice example: + + package main + + import ( + "fmt" + "github.com/imdario/mergo" + ) - type networkConfig struct { - Protocol string - Address string - ServerType string `json: "server_type"` - Port uint16 + type Foo struct { + A string + B int64 } - type FssnConfig struct { - Network networkConfig + func main() { + src := Foo{ + A: "one", + B: 2, + } + dest := Foo{ + A: "two", + } + mergo.Merge(&dest, src) + fmt.Println(dest) + // Will print + // {two 2} } - var fssnDefault = FssnConfig { - networkConfig { - "tcp", - "127.0.0.1", - "http", - 31560, - }, +Transformers + +Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, time.Time is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero time.Time? + + package main + + import ( + "fmt" + "github.com/imdario/mergo" + "reflect" + "time" + ) + + type timeTransformer struct { } - // Inside a function [...] + func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { + if typ == reflect.TypeOf(time.Time{}) { + return func(dst, src reflect.Value) error { + if dst.CanSet() { + isZero := dst.MethodByName("IsZero") + result := isZero.Call([]reflect.Value{}) + if result[0].Bool() { + dst.Set(src) + } + } + return nil + } + } + return nil + } + + type Snapshot struct { + Time time.Time + // ... + } - if err := mergo.Merge(&config, fssnDefault); err != nil { - log.Fatal(err) + func main() { + src := Snapshot{time.Now()} + dest := Snapshot{} + mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{})) + fmt.Println(dest) + // Will print + // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } } - // More code [...] +Contact me + +If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): https://twitter.com/im_dario + +About + +Written by Dario Castañé: https://da.rio.hn + +License + +BSD 3-Clause license, as Go language. 
*/ package mergo diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go index 3f5afa83a..a13a7ee46 100644 --- a/vendor/github.com/imdario/mergo/map.go +++ b/vendor/github.com/imdario/mergo/map.go @@ -141,6 +141,9 @@ func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { } func _map(dst, src interface{}, opts ...func(*Config)) error { + if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { + return ErrNonPointerAgument + } var ( vDst, vSrc reflect.Value err error diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go index f8de6c543..8c2a8fcd9 100644 --- a/vendor/github.com/imdario/mergo/merge.go +++ b/vendor/github.com/imdario/mergo/merge.go @@ -13,23 +13,39 @@ import ( "reflect" ) -func hasExportedField(dst reflect.Value) (exported bool) { +func hasMergeableFields(dst reflect.Value) (exported bool) { for i, n := 0, dst.NumField(); i < n; i++ { field := dst.Type().Field(i) if field.Anonymous && dst.Field(i).Kind() == reflect.Struct { - exported = exported || hasExportedField(dst.Field(i)) - } else { + exported = exported || hasMergeableFields(dst.Field(i)) + } else if isExportedComponent(&field) { exported = exported || len(field.PkgPath) == 0 } } return } +func isExportedComponent(field *reflect.StructField) bool { + pkgPath := field.PkgPath + if len(pkgPath) > 0 { + return false + } + c := field.Name[0] + if 'a' <= c && c <= 'z' || c == '_' { + return false + } + return true +} + type Config struct { - Overwrite bool - AppendSlice bool - Transformers Transformers - overwriteWithEmptyValue bool + Overwrite bool + AppendSlice bool + TypeCheck bool + Transformers Transformers + overwriteWithEmptyValue bool + overwriteSliceWithEmptyValue bool + sliceDeepCopy bool + debug bool } type Transformers interface { @@ -41,8 +57,10 @@ type Transformers interface { // short circuiting on recursive types. 
func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { overwrite := config.Overwrite + typeCheck := config.TypeCheck overwriteWithEmptySrc := config.overwriteWithEmptyValue - config.overwriteWithEmptyValue = false + overwriteSliceWithEmptySrc := config.overwriteSliceWithEmptyValue + sliceDeepCopy := config.sliceDeepCopy if !src.IsValid() { return @@ -70,21 +88,34 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co switch dst.Kind() { case reflect.Struct: - if hasExportedField(dst) { + if hasMergeableFields(dst) { for i, n := 0, dst.NumField(); i < n; i++ { if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil { return } } } else { - if dst.CanSet() && (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) { + if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) { dst.Set(src) } } case reflect.Map: if dst.IsNil() && !src.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) + if dst.CanSet() { + dst.Set(reflect.MakeMap(dst.Type())) + } else { + dst = src + return + } } + + if src.Kind() != reflect.Map { + if overwrite { + dst.Set(src) + } + return + } + for _, key := range src.MapKeys() { srcElement := src.MapIndex(key) if !srcElement.IsValid() { @@ -94,6 +125,9 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co switch srcElement.Kind() { case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice: if srcElement.IsNil() { + if overwrite { + dst.SetMapIndex(key, srcElement) + } continue } fallthrough @@ -128,13 +162,34 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co dstSlice = reflect.ValueOf(dstElement.Interface()) } - if (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice { + if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy { + if typeCheck && srcSlice.Type() != dstSlice.Type() { + return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) + } dstSlice = srcSlice } else if config.AppendSlice { if srcSlice.Type() != dstSlice.Type() { - return fmt.Errorf("cannot append two slice with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) + return fmt.Errorf("cannot append two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) } dstSlice = reflect.AppendSlice(dstSlice, srcSlice) + } else if sliceDeepCopy { + i := 0 + for ; i < srcSlice.Len() && i < dstSlice.Len(); i++ { + srcElement := srcSlice.Index(i) + dstElement := dstSlice.Index(i) + + if srcElement.CanInterface() { + srcElement = reflect.ValueOf(srcElement.Interface()) + } + if dstElement.CanInterface() { + dstElement = reflect.ValueOf(dstElement.Interface()) + } + + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } + } dst.SetMapIndex(key, dstSlice) } @@ -143,7 +198,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co continue } - if srcElement.IsValid() && (overwrite || (!dstElement.IsValid() || isEmptyValue(dstElement))) { + if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement)) { if dst.IsNil() { dst.Set(reflect.MakeMap(dst.Type())) } @@ -154,22 +209,41 @@ func deepMerge(dst, 
src reflect.Value, visited map[uintptr]*visit, depth int, co if !dst.CanSet() { break } - if (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice { + if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy { dst.Set(src) } else if config.AppendSlice { if src.Type() != dst.Type() { return fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type()) } dst.Set(reflect.AppendSlice(dst, src)) + } else if sliceDeepCopy { + for i := 0; i < src.Len() && i < dst.Len(); i++ { + srcElement := src.Index(i) + dstElement := dst.Index(i) + if srcElement.CanInterface() { + srcElement = reflect.ValueOf(srcElement.Interface()) + } + if dstElement.CanInterface() { + dstElement = reflect.ValueOf(dstElement.Interface()) + } + + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } } case reflect.Ptr: fallthrough case reflect.Interface: - if src.IsNil() { + if isReflectNil(src) { + if overwriteWithEmptySrc && dst.CanSet() && src.Type().AssignableTo(dst.Type()) { + dst.Set(src) + } break } + if src.Kind() != reflect.Interface { - if dst.IsNil() || overwrite { + if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) { if dst.CanSet() && (overwrite || isEmptyValue(dst)) { dst.Set(src) } @@ -186,18 +260,31 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co } break } + if dst.IsNil() || overwrite { if dst.CanSet() && (overwrite || isEmptyValue(dst)) { dst.Set(src) } - } else if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { - return + break + } + + if dst.Elem().Kind() == src.Elem().Kind() { + if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { + return + } + break } default: - if dst.CanSet() && (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) { - dst.Set(src) + mustSet := (isEmptyValue(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) + if mustSet { + if dst.CanSet() { + dst.Set(src) + } else { + dst = src + } } } + return } @@ -209,7 +296,7 @@ func Merge(dst, src interface{}, opts ...func(*Config)) error { return merge(dst, src, opts...) } -// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overriden by +// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by // non-empty src attribute values. // Deprecated: use Merge(…) with WithOverride func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { @@ -228,12 +315,37 @@ func WithOverride(config *Config) { config.Overwrite = true } -// WithAppendSlice will make merge append slices instead of overwriting it +// WithOverwriteWithEmptyValue will make merge override non empty dst attributes with empty src attributes values. +func WithOverwriteWithEmptyValue(config *Config) { + config.Overwrite = true + config.overwriteWithEmptyValue = true +} + +// WithOverrideEmptySlice will make merge override empty dst slice with empty src slice. +func WithOverrideEmptySlice(config *Config) { + config.overwriteSliceWithEmptyValue = true +} + +// WithAppendSlice will make merge append slices instead of overwriting it. func WithAppendSlice(config *Config) { config.AppendSlice = true } +// WithTypeCheck will make merge check types while overwriting it (must be used with WithOverride). 
+func WithTypeCheck(config *Config) { + config.TypeCheck = true +} + +// WithSliceDeepCopy will merge slice element one by one with Overwrite flag. +func WithSliceDeepCopy(config *Config) { + config.sliceDeepCopy = true + config.Overwrite = true +} + func merge(dst, src interface{}, opts ...func(*Config)) error { + if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { + return ErrNonPointerAgument + } var ( vDst, vSrc reflect.Value err error @@ -253,3 +365,16 @@ func merge(dst, src interface{}, opts ...func(*Config)) error { } return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) } + +// IsReflectNil is the reflect value provided nil +func isReflectNil(v reflect.Value) bool { + k := v.Kind() + switch k { + case reflect.Interface, reflect.Slice, reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr: + // Both interface and slice are nil if first word is 0. + // Both are always bigger than a word; assume flagIndir. + return v.IsNil() + default: + return false + } +} diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go index a82fea2fd..3cc926c7f 100644 --- a/vendor/github.com/imdario/mergo/mergo.go +++ b/vendor/github.com/imdario/mergo/mergo.go @@ -20,6 +20,7 @@ var ( ErrNotSupported = errors.New("only structs and maps are supported") ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") + ErrNonPointerAgument = errors.New("dst must be a pointer") ) // During deepMerge, must keep track of checks that are @@ -75,23 +76,3 @@ func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) { } return } - -// Traverses recursively both values, assigning src's fields values to dst. -// The map argument tracks comparisons that have already been seen, which allows -// short circuiting on recursive types. -func deeper(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) { - if dst.CanAddr() { - addr := dst.UnsafeAddr() - h := 17 * addr - seen := visited[h] - typ := dst.Type() - for p := seen; p != nil; p = p.next { - if p.ptr == addr && p.typ == typ { - return nil - } - } - // Remember, remember... - visited[h] = &visit{addr, typ, seen} - } - return // TODO refactor -} diff --git a/vendor/github.com/onsi/gomega/.travis.yml b/vendor/github.com/onsi/gomega/.travis.yml deleted file mode 100644 index 072fdd2db..000000000 --- a/vendor/github.com/onsi/gomega/.travis.yml +++ /dev/null @@ -1,17 +0,0 @@ -language: go - -go: - - 1.13.x - - 1.14.x - - gotip - -env: - - GO111MODULE=on - -install: - - go get -v ./... - - go build ./... - - go get github.com/onsi/ginkgo - - go install github.com/onsi/ginkgo/ginkgo - -script: make test diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md index 3aafdbcfc..e3b437985 100644 --- a/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,128 @@ +## 1.18.1 + +## Fixes +- Add pointer support to HaveField matcher (#495) [79e41a3] + +## 1.18.0 + +## Features +- Docs now live on the master branch in the docs folder which will make for easier PRs. The docs also use Ginkgo 2.0's new docs html/css/js. [2570272] +- New HaveValue matcher can handle actuals that are either values (in which case they are passed on unscathed) or pointers (in which case they are indirected). 
[Docs here.](https://onsi.github.io/gomega/#working-with-values) (#485) [bdc087c] +- Gmeasure has been declared GA [360db9d] + +## Fixes +- Gomega now uses ioutil for Go 1.15 and lower (#492) - official support is only for the most recent two major versions of Go but this will unblock users who need to stay on older unsupported versions of Go. [c29c1c0] + +## Maintenace +- Remove Travis workflow (#491) [72e6040] +- Upgrade to Ginkgo 2.0.0 GA [f383637] +- chore: fix description of HaveField matcher (#487) [2b4b2c0] +- use tools.go to ensure Ginkgo cli dependencies are included [f58a52b] +- remove dockerfile and simplify github actions to match ginkgo's actions [3f8160d] + +## 1.17.0 + +### Features +- Add HaveField matcher [3a26311] +- add Error() assertions on the final error value of multi-return values (#480) [2f96943] +- separate out offsets and timeouts (#478) [18a4723] +- fix transformation error reporting (#479) [e001fab] +- allow transform functions to report errors (#472) [bf93408] + +### Fixes +Stop using deprecated ioutil package (#467) [07f405d] + +## 1.16.0 + +### Features +- feat: HaveHTTPStatus multiple expected values (#465) [aa69f1b] +- feat: HaveHTTPHeaderWithValue() matcher (#463) [dd83a96] +- feat: HaveHTTPBody matcher (#462) [504e1f2] +- feat: formatter for HTTP responses (#461) [e5b3157] + +## 1.15.0 + +### Fixes +The previous version (1.14.0) introduced a change to allow `Eventually` and `Consistently` to support functions that make assertions. This was accomplished by overriding the global fail handler when running the callbacks passed to `Eventually/Consistently` in order to capture any resulting errors. Issue #457 uncovered a flaw with this approach: when multiple `Eventually`s are running concurrently they race when overriding the singleton global fail handler. + +1.15.0 resolves this by requiring users who want to make assertions in `Eventually/Consistently` call backs to explicitly pass in a function that takes a `Gomega` as an argument. The passed-in `Gomega` instance can be used to make assertions. Any failures will cause `Eventually` to retry the callback. This cleaner interface avoids the issue of swapping out globals but comes at the cost of changing the contract introduced in v1.14.0. As such 1.15.0 introduces a breaking change with respect to 1.14.0 - however we expect that adoption of this feature in 1.14.0 remains limited. + +In addition, 1.15.0 cleans up some of Gomega's internals. Most users shouldn't notice any differences stemming from the refactoring that was made. + +## 1.14.0 + +### Features +- gmeasure.SamplingConfig now suppers a MinSamplingInterval [e94dbca] +- Eventually and Consistently support functions that make assertions [2f04e6e] + - Eventually and Consistently now allow their passed-in functions to make assertions. + These assertions must pass or the function is considered to have failed and is retried. + - Eventually and Consistently can now take functions with no return values. These implicitly return nil + if they contain no failed assertion. Otherwise they return an error wrapping the first assertion failure. This allows + these functions to be used with the Succeed() matcher. + - Introduce InterceptGomegaFailure - an analogue to InterceptGomegaFailures - that captures the first assertion failure + and halts execution in its passed-in callback. 
+ +### Fixes +- Call Verify GHTTPWithGomega receiver funcs (#454) [496e6fd] +- Build a binary with an expected name (#446) [7356360] + +## 1.13.0 + +### Features +- gmeasure provides BETA support for benchmarking (#447) [8f2dfbf] +- Set consistently and eventually defaults on init (#443) [12eb778] + +## 1.12.0 + +### Features +- Add Satisfy() matcher (#437) [c548f31] +- tweak truncation message [3360b8c] +- Add format.GomegaStringer (#427) [cc80b6f] +- Add Clear() method to gbytes.Buffer [c3c0920] + +### Fixes +- Fix error message in BeNumericallyMatcher (#432) [09c074a] +- Bump github.com/onsi/ginkgo from 1.12.1 to 1.16.2 (#442) [e5f6ea0] +- Bump github.com/golang/protobuf from 1.4.3 to 1.5.2 (#431) [adae3bf] +- Bump golang.org/x/net (#441) [3275b35] + +## 1.11.0 + +### Features +- feature: add index to gstruct element func (#419) [334e00d] +- feat(gexec) Add CompileTest functions. Close #410 (#411) [47c613f] + +### Fixes +- Check more carefully for nils in WithTransform (#423) [3c60a15] +- fix: typo in Makefile [b82522a] +- Allow WithTransform function to accept a nil value (#422) [b75d2f2] +- fix: print value type for interface{} containers (#409) [f08e2dc] +- fix(BeElementOf): consistently flatten expected values [1fa9468] + +## 1.10.5 + +### Fixes +- fix: collections matchers should display type of expectation (#408) [6b4eb5a] +- fix(ContainElements): consistently flatten expected values [073b880] +- fix(ConsistOf): consistently flatten expected values [7266efe] + +## 1.10.4 + +### Fixes +- update golang net library to more recent version without vulnerability (#406) [817a8b9] +- Correct spelling: alloted -> allotted (#403) [0bae715] +- fix a panic in MessageWithDiff with long message (#402) [ea06b9b] + +## 1.10.3 + +### Fixes +- updates golang/x/net to fix vulnerability detected by snyk (#394) [c479356] + +## 1.10.2 + +### Fixes +- Add ExpectWithOffset, EventuallyWithOffset and ConsistentlyWithOffset to WithT (#391) [990941a] + ## 1.10.1 ### Fixes diff --git a/vendor/github.com/onsi/gomega/Makefile b/vendor/github.com/onsi/gomega/Makefile deleted file mode 100644 index c92cd56e3..000000000 --- a/vendor/github.com/onsi/gomega/Makefile +++ /dev/null @@ -1,6 +0,0 @@ -test: - [ -z "`gofmt -s -w -l -e .`" ] - go vet - ginkgo -p -r --randomizeAllSpecs --failOnPending --randomizeSuites --race - -.PHONY: test diff --git a/vendor/github.com/onsi/gomega/README.md b/vendor/github.com/onsi/gomega/README.md index 76aa6b558..d45a8c4e5 100644 --- a/vendor/github.com/onsi/gomega/README.md +++ b/vendor/github.com/onsi/gomega/README.md @@ -1,6 +1,6 @@ ![Gomega: Ginkgo's Preferred Matcher Library](http://onsi.github.io/gomega/images/gomega.png) -[![Build Status](https://travis-ci.org/onsi/gomega.svg?branch=master)](https://travis-ci.org/onsi/gomega) +[![test](https://github.com/onsi/gomega/actions/workflows/test.yml/badge.svg)](https://github.com/onsi/gomega/actions/workflows/test.yml) Jump straight to the [docs](http://onsi.github.io/gomega/) to learn about Gomega, including a list of [all available matchers](http://onsi.github.io/gomega/#provided-matchers). diff --git a/vendor/github.com/onsi/gomega/format/format.go b/vendor/github.com/onsi/gomega/format/format.go index fae25adce..6e78c391d 100644 --- a/vendor/github.com/onsi/gomega/format/format.go +++ b/vendor/github.com/onsi/gomega/format/format.go @@ -7,6 +7,7 @@ Gomega's format package pretty-prints objects. 
It explores input objects recurs package format import ( + "context" "fmt" "reflect" "strconv" @@ -17,6 +18,10 @@ import ( // Use MaxDepth to set the maximum recursion depth when printing deeply nested objects var MaxDepth = uint(10) +// MaxLength of the string representation of an object. +// If MaxLength is set to 0, the Object will not be truncated. +var MaxLength = 4000 + /* By default, all objects (even those that implement fmt.Stringer and fmt.GoStringer) are recursively inspected to generate output. @@ -44,16 +49,7 @@ var TruncateThreshold uint = 50 // after the first diff location in a truncated string assertion error message. var CharactersAroundMismatchToInclude uint = 5 -// Ctx interface defined here to keep backwards compatibility with go < 1.7 -// It matches the context.Context interface -type Ctx interface { - Deadline() (deadline time.Time, ok bool) - Done() <-chan struct{} - Err() error - Value(key interface{}) interface{} -} - -var contextType = reflect.TypeOf((*Ctx)(nil)).Elem() +var contextType = reflect.TypeOf((*context.Context)(nil)).Elem() var timeType = reflect.TypeOf(time.Time{}) //The default indentation string emitted by the format package @@ -61,6 +57,14 @@ var Indent = " " var longFormThreshold = 20 +// GomegaStringer allows for custom formating of objects for gomega. +type GomegaStringer interface { + // GomegaString will be used to custom format an object. + // It does not follow UseStringerRepresentation value and will always be called regardless. + // It also ignores the MaxLength value. + GomegaString() string +} + /* Generates a formatted matcher success/failure message of the form: @@ -105,7 +109,13 @@ func MessageWithDiff(actual, message, expected string) string { tabLength := 4 spaceFromMessageToActual := tabLength + len(": ") - len(message) - padding := strings.Repeat(" ", spaceFromMessageToActual+spacesBeforeFormattedMismatch) + "|" + + paddingCount := spaceFromMessageToActual + spacesBeforeFormattedMismatch + if paddingCount < 0 { + return Message(formattedActual, message, formattedExpected) + } + + padding := strings.Repeat(" ", paddingCount) + "|" return Message(formattedActual, message+padding, formattedExpected) } @@ -161,6 +171,33 @@ func findFirstMismatch(a, b string) int { return 0 } +const truncateHelpText = ` +Gomega truncated this representation as it exceeds 'format.MaxLength'. +Consider having the object provide a custom 'GomegaStringer' representation +or adjust the parameters in Gomega's 'format' package. + +Learn more here: https://onsi.github.io/gomega/#adjusting-output +` + +func truncateLongStrings(s string) string { + if MaxLength > 0 && len(s) > MaxLength { + var sb strings.Builder + for i, r := range s { + if i < MaxLength { + sb.WriteRune(r) + continue + } + break + } + + sb.WriteString("...\n") + sb.WriteString(truncateHelpText) + + return sb.String() + } + return s +} + /* Pretty prints the passed in object at the passed in indentation level. 
@@ -175,7 +212,7 @@ Set PrintContextObjects to true to print the content of objects implementing con func Object(object interface{}, indentation uint) string { indent := strings.Repeat(Indent, int(indentation)) value := reflect.ValueOf(object) - return fmt.Sprintf("%s<%s>: %s", indent, formatType(object), formatValue(value, indentation)) + return fmt.Sprintf("%s<%s>: %s", indent, formatType(value), formatValue(value, indentation)) } /* @@ -195,25 +232,20 @@ func IndentString(s string, indentation uint) string { return result } -func formatType(object interface{}) string { - t := reflect.TypeOf(object) - if t == nil { +func formatType(v reflect.Value) string { + switch v.Kind() { + case reflect.Invalid: return "nil" - } - switch t.Kind() { case reflect.Chan: - v := reflect.ValueOf(object) - return fmt.Sprintf("%T | len:%d, cap:%d", object, v.Len(), v.Cap()) + return fmt.Sprintf("%s | len:%d, cap:%d", v.Type(), v.Len(), v.Cap()) case reflect.Ptr: - return fmt.Sprintf("%T | %p", object, object) + return fmt.Sprintf("%s | 0x%x", v.Type(), v.Pointer()) case reflect.Slice: - v := reflect.ValueOf(object) - return fmt.Sprintf("%T | len:%d, cap:%d", object, v.Len(), v.Cap()) + return fmt.Sprintf("%s | len:%d, cap:%d", v.Type(), v.Len(), v.Cap()) case reflect.Map: - v := reflect.ValueOf(object) - return fmt.Sprintf("%T | len:%d", object, v.Len()) + return fmt.Sprintf("%s | len:%d", v.Type(), v.Len()) default: - return fmt.Sprintf("%T", object) + return fmt.Sprintf("%s", v.Type()) } } @@ -226,14 +258,21 @@ func formatValue(value reflect.Value, indentation uint) string { return "nil" } - if UseStringerRepresentation { - if value.CanInterface() { - obj := value.Interface() + if value.CanInterface() { + obj := value.Interface() + + // GomegaStringer will take precedence to other representations and disregards UseStringerRepresentation + if x, ok := obj.(GomegaStringer); ok { + // do not truncate a user-defined GoMegaString() value + return x.GomegaString() + } + + if UseStringerRepresentation { switch x := obj.(type) { case fmt.GoStringer: - return x.GoString() + return truncateLongStrings(x.GoString()) case fmt.Stringer: - return x.String() + return truncateLongStrings(x.String()) } } } @@ -264,26 +303,26 @@ func formatValue(value reflect.Value, indentation uint) string { case reflect.Ptr: return formatValue(value.Elem(), indentation) case reflect.Slice: - return formatSlice(value, indentation) + return truncateLongStrings(formatSlice(value, indentation)) case reflect.String: - return formatString(value.String(), indentation) + return truncateLongStrings(formatString(value.String(), indentation)) case reflect.Array: - return formatSlice(value, indentation) + return truncateLongStrings(formatSlice(value, indentation)) case reflect.Map: - return formatMap(value, indentation) + return truncateLongStrings(formatMap(value, indentation)) case reflect.Struct: if value.Type() == timeType && value.CanInterface() { t, _ := value.Interface().(time.Time) return t.Format(time.RFC3339Nano) } - return formatStruct(value, indentation) + return truncateLongStrings(formatStruct(value, indentation)) case reflect.Interface: - return formatValue(value.Elem(), indentation) + return formatInterface(value, indentation) default: if value.CanInterface() { - return fmt.Sprintf("%#v", value.Interface()) + return truncateLongStrings(fmt.Sprintf("%#v", value.Interface())) } - return fmt.Sprintf("%#v", value) + return truncateLongStrings(fmt.Sprintf("%#v", value)) } } @@ -373,6 +412,10 @@ func formatStruct(v reflect.Value, 
indentation uint) string { return fmt.Sprintf("{%s}", strings.Join(result, ", ")) } +func formatInterface(v reflect.Value, indentation uint) string { + return fmt.Sprintf("<%s>%s", formatType(v.Elem()), formatValue(v.Elem(), indentation)) +} + func isNilValue(a reflect.Value) bool { switch a.Kind() { case reflect.Invalid: diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go index 8ff9611d5..6936e2411 100644 --- a/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -14,101 +14,148 @@ Gomega is MIT-Licensed package gomega import ( + "errors" "fmt" - "reflect" "time" - "github.com/onsi/gomega/internal/assertion" - "github.com/onsi/gomega/internal/asyncassertion" - "github.com/onsi/gomega/internal/testingtsupport" + "github.com/onsi/gomega/internal" "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.10.1" +const GOMEGA_VERSION = "1.18.1" -const nilFailHandlerPanic = `You are trying to make an assertion, but Gomega's fail handler is nil. +const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler. If you're using Ginkgo then you probably forgot to put your assertion in an It(). Alternatively, you may have forgotten to register a fail handler with RegisterFailHandler() or RegisterTestingT(). Depending on your vendoring solution you may be inadvertently importing gomega and subpackages (e.g. ghhtp, gexec,...) from different locations. ` -var globalFailWrapper *types.GomegaFailWrapper - -var defaultEventuallyTimeout = time.Second -var defaultEventuallyPollingInterval = 10 * time.Millisecond -var defaultConsistentlyDuration = 100 * time.Millisecond -var defaultConsistentlyPollingInterval = 10 * time.Millisecond - -// RegisterFailHandler connects Ginkgo to Gomega. When a matcher fails -// the fail handler passed into RegisterFailHandler is called. -func RegisterFailHandler(handler types.GomegaFailHandler) { - RegisterFailHandlerWithT(testingtsupport.EmptyTWithHelper{}, handler) -} - -// RegisterFailHandlerWithT ensures that the given types.TWithHelper and fail handler -// are used globally. -func RegisterFailHandlerWithT(t types.TWithHelper, handler types.GomegaFailHandler) { - if handler == nil { - globalFailWrapper = nil - return - } +// Gomega describes the essential Gomega DSL. This interface allows libraries +// to abstract between the standard package-level function implementations +// and alternatives like *WithT. +// +// The types in the top-level DSL have gotten a bit messy due to earlier depracations that avoid stuttering +// and due to an accidental use of a concrete type (*WithT) in an earlier release. +// +// As of 1.15 both the WithT and Ginkgo variants of Gomega are implemented by the same underlying object +// however one (the Ginkgo variant) is exported as an interface (types.Gomega) whereas the other (the withT variant) +// is shared as a concrete type (*WithT, which is aliased to *internal.Gomega). 1.15 did not clean this mess up to ensure +// that declarations of *WithT in existing code are not broken by the upgrade to 1.15. +type Gomega = types.Gomega - globalFailWrapper = &types.GomegaFailWrapper{ - Fail: handler, - TWithHelper: t, - } +// DefaultGomega supplies the standard package-level implementation +var Default = Gomega(internal.NewGomega(internal.FetchDefaultDurationBundle())) + +// NewGomega returns an instance of Gomega wired into the passed-in fail handler. 
+// You generally don't need to use this when using Ginkgo - RegisterFailHandler will wire up the global gomega +// However creating a NewGomega with a custom fail handler can be useful in contexts where you want to use Gomega's +// rich ecosystem of matchers without causing a test to fail. For example, to aggregate a series of potential failures +// or for use in a non-test setting. +func NewGomega(fail types.GomegaFailHandler) Gomega { + return internal.NewGomega(Default.(*internal.Gomega).DurationBundle).ConfigureWithFailHandler(fail) } -// RegisterTestingT connects Gomega to Golang's XUnit style -// Testing.T tests. It is now deprecated and you should use NewWithT() instead. +// WithT wraps a *testing.T and provides `Expect`, `Eventually`, and `Consistently` methods. This allows you to leverage +// Gomega's rich ecosystem of matchers in standard `testing` test suites. // -// Legacy Documentation: +// Use `NewWithT` to instantiate a `WithT` // -// You'll need to call this at the top of each XUnit style test: +// As of 1.15 both the WithT and Ginkgo variants of Gomega are implemented by the same underlying object +// however one (the Ginkgo variant) is exported as an interface (types.Gomega) whereas the other (the withT variant) +// is shared as a concrete type (*WithT, which is aliased to *internal.Gomega). 1.15 did not clean this mess up to ensure +// that declarations of *WithT in existing code are not broken by the upgrade to 1.15. +type WithT = internal.Gomega + +// GomegaWithT is deprecated in favor of gomega.WithT, which does not stutter. +type GomegaWithT = WithT + +// NewWithT takes a *testing.T and returngs a `gomega.WithT` allowing you to use `Expect`, `Eventually`, and `Consistently` along with +// Gomega's rich ecosystem of matchers in standard `testing` test suits. // // func TestFarmHasCow(t *testing.T) { -// RegisterTestingT(t) +// g := gomega.NewWithT(t) // // f := farm.New([]string{"Cow", "Horse"}) -// Expect(f.HasCow()).To(BeTrue(), "Farm should have cow") -// } -// -// Note that this *testing.T is registered *globally* by Gomega (this is why you don't have to -// pass `t` down to the matcher itself). This means that you cannot run the XUnit style tests -// in parallel as the global fail handler cannot point to more than one testing.T at a time. -// -// NewWithT() does not have this limitation -// -// (As an aside: Ginkgo gets around this limitation by running parallel tests in different *processes*). +// g.Expect(f.HasCow()).To(BeTrue(), "Farm should have cow") +// } +func NewWithT(t types.GomegaTestingT) *WithT { + return internal.NewGomega(Default.(*internal.Gomega).DurationBundle).ConfigureWithT(t) +} + +// NewGomegaWithT is deprecated in favor of gomega.NewWithT, which does not stutter. +var NewGomegaWithT = NewWithT + +// RegisterFailHandler connects Ginkgo to Gomega. When a matcher fails +// the fail handler passed into RegisterFailHandler is called. +func RegisterFailHandler(fail types.GomegaFailHandler) { + Default.(*internal.Gomega).ConfigureWithFailHandler(fail) +} + +// RegisterFailHandlerWithT is deprecated and will be removed in a future release. +// users should use RegisterFailHandler, or RegisterTestingT +func RegisterFailHandlerWithT(_ types.GomegaTestingT, fail types.GomegaFailHandler) { + fmt.Println("RegisterFailHandlerWithT is deprecated. Please use RegisterFailHandler or RegisterTestingT instead.") + Default.(*internal.Gomega).ConfigureWithFailHandler(fail) +} + +// RegisterTestingT connects Gomega to Golang's XUnit style +// Testing.T tests. 
It is now deprecated and you should use NewWithT() instead to get a fresh instance of Gomega for each test. func RegisterTestingT(t types.GomegaTestingT) { - tWithHelper, hasHelper := t.(types.TWithHelper) - if !hasHelper { - RegisterFailHandler(testingtsupport.BuildTestingTGomegaFailWrapper(t).Fail) - return - } - RegisterFailHandlerWithT(tWithHelper, testingtsupport.BuildTestingTGomegaFailWrapper(t).Fail) + Default.(*internal.Gomega).ConfigureWithT(t) } // InterceptGomegaFailures runs a given callback and returns an array of // failure messages generated by any Gomega assertions within the callback. -// -// This is accomplished by temporarily replacing the *global* fail handler -// with a fail handler that simply annotates failures. The original fail handler -// is reset when InterceptGomegaFailures returns. +// Exeuction continues after the first failure allowing users to collect all failures +// in the callback. // // This is most useful when testing custom matchers, but can also be used to check // on a value using a Gomega assertion without causing a test failure. func InterceptGomegaFailures(f func()) []string { - originalHandler := globalFailWrapper.Fail + originalHandler := Default.(*internal.Gomega).Fail failures := []string{} - RegisterFailHandler(func(message string, callerSkip ...int) { + Default.(*internal.Gomega).Fail = func(message string, callerSkip ...int) { failures = append(failures, message) - }) + } + defer func() { + Default.(*internal.Gomega).Fail = originalHandler + }() f() - RegisterFailHandler(originalHandler) return failures } +// InterceptGomegaFailure runs a given callback and returns the first +// failure message generated by any Gomega assertions within the callback, wrapped in an error. +// +// The callback ceases execution as soon as the first failed assertion occurs, however Gomega +// does not register a failure with the FailHandler registered via RegisterFailHandler - it is up +// to the user to decide what to do with the returned error +func InterceptGomegaFailure(f func()) (err error) { + originalHandler := Default.(*internal.Gomega).Fail + Default.(*internal.Gomega).Fail = func(message string, callerSkip ...int) { + err = errors.New(message) + panic("stop execution") + } + + defer func() { + Default.(*internal.Gomega).Fail = originalHandler + if e := recover(); e != nil { + if err == nil { + panic(e) + } + } + }() + + f() + return err +} + +func ensureDefaultGomegaIsConfigured() { + if !Default.(*internal.Gomega).IsConfigured() { + panic(nilGomegaPanic) + } +} + // Ω wraps an actual value allowing assertions to be made on it: // Ω("foo").Should(Equal("foo")) // @@ -127,7 +174,8 @@ func InterceptGomegaFailures(f func()) []string { // // Ω and Expect are identical func Ω(actual interface{}, extra ...interface{}) Assertion { - return ExpectWithOffset(0, actual, extra...) + ensureDefaultGomegaIsConfigured() + return Default.Ω(actual, extra...) } // Expect wraps an actual value allowing assertions to be made on it: @@ -148,146 +196,183 @@ func Ω(actual interface{}, extra ...interface{}) Assertion { // // Expect and Ω are identical func Expect(actual interface{}, extra ...interface{}) Assertion { - return ExpectWithOffset(0, actual, extra...) + ensureDefaultGomegaIsConfigured() + return Default.Expect(actual, extra...) 
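// A short sketch of InterceptGomegaFailures as documented above: the callback's
// assertion failures are collected and returned rather than failing the test, and
// execution continues past the first failure. The test name and values are
// illustrative.
package example_test

import (
	"testing"

	"github.com/onsi/gomega"
)

func TestCollectFailures(t *testing.T) {
	gomega.RegisterTestingT(t) // the package-level Expect below needs a configured Default
	g := gomega.NewWithT(t)

	failures := gomega.InterceptGomegaFailures(func() {
		gomega.Expect("apple").To(gomega.Equal("orange"))
		gomega.Expect(2 + 2).To(gomega.Equal(5))
	})

	// Both assertions failed and both messages were captured.
	g.Expect(failures).To(gomega.HaveLen(2))
}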
} // ExpectWithOffset wraps an actual value allowing assertions to be made on it: // ExpectWithOffset(1, "foo").To(Equal("foo")) // // Unlike `Expect` and `Ω`, `ExpectWithOffset` takes an additional integer argument -// that is used to modify the call-stack offset when computing line numbers. +// that is used to modify the call-stack offset when computing line numbers. It is +// the same as `Expect(...).WithOffset`. // // This is most useful in helper functions that make assertions. If you want Gomega's // error message to refer to the calling line in the test (as opposed to the line in the helper function) // set the first argument of `ExpectWithOffset` appropriately. func ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) Assertion { - if globalFailWrapper == nil { - panic(nilFailHandlerPanic) - } - return assertion.New(actual, globalFailWrapper, offset, extra...) + ensureDefaultGomegaIsConfigured() + return Default.ExpectWithOffset(offset, actual, extra...) } -// Eventually wraps an actual value allowing assertions to be made on it. -// The assertion is tried periodically until it passes or a timeout occurs. -// -// Both the timeout and polling interval are configurable as optional arguments: -// The first optional argument is the timeout -// The second optional argument is the polling interval -// -// Both intervals can either be specified as time.Duration, parsable duration strings or as floats/integers. In the -// last case they are interpreted as seconds. -// -// If Eventually is passed an actual that is a function taking no arguments and returning at least one value, -// then Eventually will call the function periodically and try the matcher against the function's first return value. -// -// Example: -// -// Eventually(func() int { -// return thingImPolling.Count() -// }).Should(BeNumerically(">=", 17)) -// -// Note that this example could be rewritten: -// -// Eventually(thingImPolling.Count).Should(BeNumerically(">=", 17)) -// -// If the function returns more than one value, then Eventually will pass the first value to the matcher and -// assert that all other values are nil/zero. -// This allows you to pass Eventually a function that returns a value and an error - a common pattern in Go. -// -// For example, consider a method that returns a value and an error: -// func FetchFromDB() (string, error) -// -// Then -// Eventually(FetchFromDB).Should(Equal("hasselhoff")) -// -// Will pass only if the the returned error is nil and the returned string passes the matcher. -// -// Eventually's default timeout is 1 second, and its default polling interval is 10ms +/* +Eventually enables making assertions on asynchronous behavior. + +Eventually checks that an assertion *eventually* passes. Eventually blocks when called and attempts an assertion periodically until it passes or a timeout occurs. Both the timeout and polling interval are configurable as optional arguments. +The first optional argument is the timeout (which defaults to 1s), the second is the polling interval (which defaults to 10ms). Both intervals can be specified as time.Duration, parsable duration strings or floats/integers (in which case they are interpreted as seconds). + +Eventually works with any Gomega compatible matcher and supports making assertions against three categories of actual value: + +**Category 1: Making Eventually assertions on values** + +There are several examples of values that can change over time. 
These can be passed in to Eventually and will be passed to the matcher repeatedly until a match occurs. For example: + + c := make(chan bool) + go DoStuff(c) + Eventually(c, "50ms").Should(BeClosed()) + +will poll the channel repeatedly until it is closed. In this example `Eventually` will block until either the specified timeout of 50ms has elapsed or the channel is closed, whichever comes first. + +Several Gomega libraries allow you to use Eventually in this way. For example, the gomega/gexec package allows you to block until a *gexec.Session exits successfuly via: + + Eventually(session).Should(gexec.Exit(0)) + +And the gomega/gbytes package allows you to monitor a streaming *gbytes.Buffer until a given string is seen: + + Eventually(buffer).Should(gbytes.Say("hello there")) + +In these examples, both `session` and `buffer` are designed to be thread-safe when polled by the `Exit` and `Say` matchers. This is not true in general of most raw values, so while it is tempting to do something like: + + // THIS IS NOT THREAD-SAFE + var s *string + go mutateStringEventually(s) + Eventually(s).Should(Equal("I've changed")) + +this will trigger Go's race detector as the goroutine polling via Eventually will race over the value of s with the goroutine mutating the string. For cases like this you can use channels or introduce your own locking around s by passing Eventually a function. + +**Category 2: Make Eventually assertions on functions** + +Eventually can be passed functions that **take no arguments** and **return at least one value**. When configured this way, Eventually will poll the function repeatedly and pass the first returned value to the matcher. + +For example: + + Eventually(func() int { + return client.FetchCount() + }).Should(BeNumerically(">=", 17)) + + will repeatedly poll client.FetchCount until the BeNumerically matcher is satisfied. (Note that this example could have been written as Eventually(client.FetchCount).Should(BeNumerically(">=", 17))) + +If multple values are returned by the function, Eventually will pass the first value to the matcher and require that all others are zero-valued. This allows you to pass Eventually a function that returns a value and an error - a common patternin Go. + +For example, consider a method that returns a value and an error: + func FetchFromDB() (string, error) + +Then + Eventually(FetchFromDB).Should(Equal("got it")) + +will pass only if and when the returned error is nil *and* the returned string satisfies the matcher. + +It is important to note that the function passed into Eventually is invoked *synchronously* when polled. Eventually does not (in fact, it cannot) kill the function if it takes longer to return than Eventually's configured timeout. You should design your functions with this in mind. + +**Category 3: Making assertions _in_ the function passed into Eventually** + +When testing complex systems it can be valuable to assert that a _set_ of assertions passes Eventually. Eventually supports this by accepting functions that take a single Gomega argument and return zero or more values. + +Here's an example that makes some asssertions and returns a value and error: + + Eventually(func(g Gomega) (Widget, error) { + ids, err := client.FetchIDs() + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(ids).To(ContainElement(1138)) + return client.FetchWidget(1138) + }).Should(Equal(expectedWidget)) + +will pass only if all the assertions in the polled function pass and the return value satisfied the matcher. 
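// A sketch of the race-free alternative suggested above for values mutated by
// another goroutine: guard the value with a mutex and hand Eventually a polling
// function instead of the raw variable. All names are illustrative.
package example_test

import (
	"sync"
	"testing"
	"time"

	"github.com/onsi/gomega"
)

func TestEventuallyWithLocking(t *testing.T) {
	g := gomega.NewWithT(t)

	var mu sync.Mutex
	s := "unchanged"

	go func() {
		time.Sleep(20 * time.Millisecond)
		mu.Lock()
		s = "I've changed"
		mu.Unlock()
	}()

	// The polling function takes the lock on every poll, so the race detector
	// stays quiet and Eventually still observes the update.
	g.Eventually(func() string {
		mu.Lock()
		defer mu.Unlock()
		return s
	}).Should(gomega.Equal("I've changed"))
}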
+ +Eventually also supports a special case polling function that takes a single Gomega argument and returns no values. Eventually assumes such a function is making assertions and is designed to work with the Succeed matcher to validate that all assertions have passed. +For example: + + Eventually(func(g Gomega) { + model, err := client.Find(1138) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(model.Reticulate()).To(Succeed()) + g.Expect(model.IsReticulated()).To(BeTrue()) + g.Expect(model.Save()).To(Succeed()) + }).Should(Succeed()) + +will rerun the function until all assertions pass. + +`Eventually` specifying a timeout interval (and an optional polling interval) are +the same as `Eventually(...).WithTimeout` or `Eventually(...).WithTimeout(...).WithPolling`. +*/ func Eventually(actual interface{}, intervals ...interface{}) AsyncAssertion { - return EventuallyWithOffset(0, actual, intervals...) + ensureDefaultGomegaIsConfigured() + return Default.Eventually(actual, intervals...) } // EventuallyWithOffset operates like Eventually but takes an additional // initial argument to indicate an offset in the call stack. This is useful when building helper // functions that contain matchers. To learn more, read about `ExpectWithOffset`. +// +// `EventuallyWithOffset` is the same as `Eventually(...).WithOffset`. +// +// `EventuallyWithOffset` specifying a timeout interval (and an optional polling interval) are +// the same as `Eventually(...).WithOffset(...).WithTimeout` or +// `Eventually(...).WithOffset(...).WithTimeout(...).WithPolling`. func EventuallyWithOffset(offset int, actual interface{}, intervals ...interface{}) AsyncAssertion { - if globalFailWrapper == nil { - panic(nilFailHandlerPanic) - } - timeoutInterval := defaultEventuallyTimeout - pollingInterval := defaultEventuallyPollingInterval - if len(intervals) > 0 { - timeoutInterval = toDuration(intervals[0]) - } - if len(intervals) > 1 { - pollingInterval = toDuration(intervals[1]) - } - return asyncassertion.New(asyncassertion.AsyncAssertionTypeEventually, actual, globalFailWrapper, timeoutInterval, pollingInterval, offset) + ensureDefaultGomegaIsConfigured() + return Default.EventuallyWithOffset(offset, actual, intervals...) } -// Consistently wraps an actual value allowing assertions to be made on it. -// The assertion is tried periodically and is required to pass for a period of time. -// -// Both the total time and polling interval are configurable as optional arguments: -// The first optional argument is the duration that Consistently will run for -// The second optional argument is the polling interval -// -// Both intervals can either be specified as time.Duration, parsable duration strings or as floats/integers. In the -// last case they are interpreted as seconds. -// -// If Consistently is passed an actual that is a function taking no arguments and returning at least one value, -// then Consistently will call the function periodically and try the matcher against the function's first return value. -// -// If the function returns more than one value, then Consistently will pass the first value to the matcher and -// assert that all other values are nil/zero. -// This allows you to pass Consistently a function that returns a value and an error - a common pattern in Go. -// -// Consistently is useful in cases where you want to assert that something *does not happen* over a period of time. -// For example, you want to assert that a goroutine does *not* send data down a channel. 
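// The Eventually documentation above notes that the positional interval arguments
// and the WithTimeout/WithPolling builder methods are interchangeable. A small
// sketch of both spellings; fetchCount is a hypothetical poller.
package example_test

import (
	"testing"
	"time"

	"github.com/onsi/gomega"
)

func TestEquivalentSpellings(t *testing.T) {
	g := gomega.NewWithT(t)

	count := 0
	fetchCount := func() int { count++; return count }

	// Positional intervals: timeout first, then polling interval.
	g.Eventually(fetchCount, 2*time.Second, 50*time.Millisecond).Should(gomega.BeNumerically(">=", 3))

	// Builder form, same behaviour.
	count = 0
	g.Eventually(fetchCount).WithTimeout(2 * time.Second).WithPolling(50 * time.Millisecond).Should(gomega.BeNumerically(">=", 3))
}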
In this case, you could: -// -// Consistently(channel).ShouldNot(Receive()) -// -// Consistently's default duration is 100ms, and its default polling interval is 10ms +/* +Consistently, like Eventually, enables making assertions on asynchronous behavior. + +Consistently blocks when called for a specified duration. During that duration Consistently repeatedly polls its matcher and ensures that it is satisfied. If the matcher is consistently satisfied, then Consistently will pass. Otherwise Consistently will fail. + +Both the total waiting duration and the polling interval are configurable as optional arguments. The first optional arugment is the duration that Consistently will run for (defaults to 100ms), and the second argument is the polling interval (defaults to 10ms). As with Eventually, these intervals can be passed in as time.Duration, parsable duration strings or an integer or float number of seconds. + +Consistently accepts the same three categories of actual as Eventually, check the Eventually docs to learn more. + +Consistently is useful in cases where you want to assert that something *does not happen* for a period of time. For example, you may want to assert that a goroutine does *not* send data down a channel. In this case you could write: + + Consistently(channel, "200ms").ShouldNot(Receive()) + +This will block for 200 milliseconds and repeatedly check the channel and ensure nothing has been received. +*/ func Consistently(actual interface{}, intervals ...interface{}) AsyncAssertion { - return ConsistentlyWithOffset(0, actual, intervals...) + ensureDefaultGomegaIsConfigured() + return Default.Consistently(actual, intervals...) } // ConsistentlyWithOffset operates like Consistently but takes an additional // initial argument to indicate an offset in the call stack. This is useful when building helper // functions that contain matchers. To learn more, read about `ExpectWithOffset`. +// +// `ConsistentlyWithOffset` is the same as `Consistently(...).WithOffset` and +// optional `WithTimeout` and `WithPolling`. func ConsistentlyWithOffset(offset int, actual interface{}, intervals ...interface{}) AsyncAssertion { - if globalFailWrapper == nil { - panic(nilFailHandlerPanic) - } - timeoutInterval := defaultConsistentlyDuration - pollingInterval := defaultConsistentlyPollingInterval - if len(intervals) > 0 { - timeoutInterval = toDuration(intervals[0]) - } - if len(intervals) > 1 { - pollingInterval = toDuration(intervals[1]) - } - return asyncassertion.New(asyncassertion.AsyncAssertionTypeConsistently, actual, globalFailWrapper, timeoutInterval, pollingInterval, offset) + ensureDefaultGomegaIsConfigured() + return Default.ConsistentlyWithOffset(offset, actual, intervals...) } // SetDefaultEventuallyTimeout sets the default timeout duration for Eventually. Eventually will repeatedly poll your condition until it succeeds, or until this timeout elapses. func SetDefaultEventuallyTimeout(t time.Duration) { - defaultEventuallyTimeout = t + Default.SetDefaultEventuallyTimeout(t) } // SetDefaultEventuallyPollingInterval sets the default polling interval for Eventually. func SetDefaultEventuallyPollingInterval(t time.Duration) { - defaultEventuallyPollingInterval = t + Default.SetDefaultEventuallyPollingInterval(t) } // SetDefaultConsistentlyDuration sets the default duration for Consistently. Consistently will verify that your condition is satisfied for this long. 
func SetDefaultConsistentlyDuration(t time.Duration) { - defaultConsistentlyDuration = t + Default.SetDefaultConsistentlyDuration(t) } // SetDefaultConsistentlyPollingInterval sets the default polling interval for Consistently. func SetDefaultConsistentlyPollingInterval(t time.Duration) { - defaultConsistentlyPollingInterval = t + Default.SetDefaultConsistentlyPollingInterval(t) } // AsyncAssertion is returned by Eventually and Consistently and polls the actual value passed into Eventually against @@ -305,13 +390,10 @@ func SetDefaultConsistentlyPollingInterval(t time.Duration) { // // Eventually(myChannel).Should(Receive(), "Something should have come down the pipe.") // Consistently(myChannel).ShouldNot(Receive(), func() string { return "Nothing should have come down the pipe." }) -type AsyncAssertion interface { - Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool - ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool -} +type AsyncAssertion = types.AsyncAssertion // GomegaAsyncAssertion is deprecated in favor of AsyncAssertion, which does not stutter. -type GomegaAsyncAssertion = AsyncAssertion +type GomegaAsyncAssertion = types.AsyncAssertion // Assertion is returned by Ω and Expect and compares the actual value to the matcher // passed to the Should/ShouldNot and To/ToNot/NotTo methods. @@ -330,134 +412,10 @@ type GomegaAsyncAssertion = AsyncAssertion // Example: // // Ω(farm.HasCow()).Should(BeTrue(), "Farm %v should have a cow", farm) -type Assertion interface { - Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool - ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool - - To(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool - ToNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool - NotTo(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool -} +type Assertion = types.Assertion // GomegaAssertion is deprecated in favor of Assertion, which does not stutter. -type GomegaAssertion = Assertion +type GomegaAssertion = types.Assertion // OmegaMatcher is deprecated in favor of the better-named and better-organized types.GomegaMatcher but sticks around to support existing code that uses it -type OmegaMatcher types.GomegaMatcher - -// WithT wraps a *testing.T and provides `Expect`, `Eventually`, and `Consistently` methods. This allows you to leverage -// Gomega's rich ecosystem of matchers in standard `testing` test suites. -// -// Use `NewWithT` to instantiate a `WithT` -type WithT struct { - t types.GomegaTestingT -} - -// GomegaWithT is deprecated in favor of gomega.WithT, which does not stutter. -type GomegaWithT = WithT - -// NewWithT takes a *testing.T and returngs a `gomega.WithT` allowing you to use `Expect`, `Eventually`, and `Consistently` along with -// Gomega's rich ecosystem of matchers in standard `testing` test suits. -// -// func TestFarmHasCow(t *testing.T) { -// g := gomega.NewWithT(t) -// -// f := farm.New([]string{"Cow", "Horse"}) -// g.Expect(f.HasCow()).To(BeTrue(), "Farm should have cow") -// } -func NewWithT(t types.GomegaTestingT) *WithT { - return &WithT{ - t: t, - } -} - -// NewGomegaWithT is deprecated in favor of gomega.NewWithT, which does not stutter. -func NewGomegaWithT(t types.GomegaTestingT) *GomegaWithT { - return NewWithT(t) -} - -// Expect is used to make assertions. See documentation for Expect. 
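// The four SetDefault* functions above adjust the suite-wide defaults used by any
// Eventually/Consistently that does not override them. A sketch of setting them
// once from TestMain; the values are illustrative.
package example_test

import (
	"os"
	"testing"
	"time"

	"github.com/onsi/gomega"
)

func TestMain(m *testing.M) {
	gomega.SetDefaultEventuallyTimeout(10 * time.Second)
	gomega.SetDefaultEventuallyPollingInterval(250 * time.Millisecond)
	gomega.SetDefaultConsistentlyDuration(2 * time.Second)
	gomega.SetDefaultConsistentlyPollingInterval(100 * time.Millisecond)
	os.Exit(m.Run())
}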
-func (g *WithT) Expect(actual interface{}, extra ...interface{}) Assertion { - return assertion.New(actual, testingtsupport.BuildTestingTGomegaFailWrapper(g.t), 0, extra...) -} - -// Eventually is used to make asynchronous assertions. See documentation for Eventually. -func (g *WithT) Eventually(actual interface{}, intervals ...interface{}) AsyncAssertion { - timeoutInterval := defaultEventuallyTimeout - pollingInterval := defaultEventuallyPollingInterval - if len(intervals) > 0 { - timeoutInterval = toDuration(intervals[0]) - } - if len(intervals) > 1 { - pollingInterval = toDuration(intervals[1]) - } - return asyncassertion.New(asyncassertion.AsyncAssertionTypeEventually, actual, testingtsupport.BuildTestingTGomegaFailWrapper(g.t), timeoutInterval, pollingInterval, 0) -} - -// Consistently is used to make asynchronous assertions. See documentation for Consistently. -func (g *WithT) Consistently(actual interface{}, intervals ...interface{}) AsyncAssertion { - timeoutInterval := defaultConsistentlyDuration - pollingInterval := defaultConsistentlyPollingInterval - if len(intervals) > 0 { - timeoutInterval = toDuration(intervals[0]) - } - if len(intervals) > 1 { - pollingInterval = toDuration(intervals[1]) - } - return asyncassertion.New(asyncassertion.AsyncAssertionTypeConsistently, actual, testingtsupport.BuildTestingTGomegaFailWrapper(g.t), timeoutInterval, pollingInterval, 0) -} - -func toDuration(input interface{}) time.Duration { - duration, ok := input.(time.Duration) - if ok { - return duration - } - - value := reflect.ValueOf(input) - kind := reflect.TypeOf(input).Kind() - - if reflect.Int <= kind && kind <= reflect.Int64 { - return time.Duration(value.Int()) * time.Second - } else if reflect.Uint <= kind && kind <= reflect.Uint64 { - return time.Duration(value.Uint()) * time.Second - } else if reflect.Float32 <= kind && kind <= reflect.Float64 { - return time.Duration(value.Float() * float64(time.Second)) - } else if reflect.String == kind { - duration, err := time.ParseDuration(value.String()) - if err != nil { - panic(fmt.Sprintf("%#v is not a valid parsable duration string.", input)) - } - return duration - } - - panic(fmt.Sprintf("%v is not a valid interval. Must be time.Duration, parsable duration string or a number.", input)) -} - -// Gomega describes the essential Gomega DSL. This interface allows libraries -// to abstract between the standard package-level function implementations -// and alternatives like *WithT. -type Gomega interface { - Expect(actual interface{}, extra ...interface{}) Assertion - Eventually(actual interface{}, intervals ...interface{}) AsyncAssertion - Consistently(actual interface{}, intervals ...interface{}) AsyncAssertion -} - -type globalFailHandlerGomega struct{} - -// DefaultGomega supplies the standard package-level implementation -var Default Gomega = globalFailHandlerGomega{} - -// Expect is used to make assertions. See documentation for Expect. -func (globalFailHandlerGomega) Expect(actual interface{}, extra ...interface{}) Assertion { - return Expect(actual, extra...) -} - -// Eventually is used to make asynchronous assertions. See documentation for Eventually. -func (globalFailHandlerGomega) Eventually(actual interface{}, extra ...interface{}) AsyncAssertion { - return Eventually(actual, extra...) -} - -// Consistently is used to make asynchronous assertions. See documentation for Consistently. -func (globalFailHandlerGomega) Consistently(actual interface{}, extra ...interface{}) AsyncAssertion { - return Consistently(actual, extra...) 
-} +type OmegaMatcher = types.GomegaMatcher diff --git a/vendor/github.com/onsi/gomega/gstruct/elements.go b/vendor/github.com/onsi/gomega/gstruct/elements.go index 30e3369e0..b5e5ef2e4 100644 --- a/vendor/github.com/onsi/gomega/gstruct/elements.go +++ b/vendor/github.com/onsi/gomega/gstruct/elements.go @@ -7,6 +7,7 @@ import ( "fmt" "reflect" "runtime/debug" + "strconv" "github.com/onsi/gomega/format" errorsutil "github.com/onsi/gomega/gstruct/errors" @@ -30,6 +31,23 @@ func MatchAllElements(identifier Identifier, elements Elements) types.GomegaMatc } } +//MatchAllElementsWithIndex succeeds if every element of a slice matches the element matcher it maps to +//through the id with index function, and every element matcher is matched. +// idFn := func(index int, element interface{}) string { +// return strconv.Itoa(index) +// } +// +// Expect([]string{"a", "b"}).To(MatchAllElements(idFn, Elements{ +// "0": Equal("a"), +// "1": Equal("b"), +// })) +func MatchAllElementsWithIndex(identifier IdentifierWithIndex, elements Elements) types.GomegaMatcher { + return &ElementsMatcher{ + Identifier: identifier, + Elements: elements, + } +} + //MatchElements succeeds if each element of a slice matches the element matcher it maps to //through the id function. It can ignore extra elements and/or missing elements. // idFn := func(element interface{}) string { @@ -56,6 +74,32 @@ func MatchElements(identifier Identifier, options Options, elements Elements) ty } } +//MatchElementsWithIndex succeeds if each element of a slice matches the element matcher it maps to +//through the id with index function. It can ignore extra elements and/or missing elements. +// idFn := func(index int, element interface{}) string { +// return strconv.Itoa(index) +// } +// +// Expect([]string{"a", "b", "c"}).To(MatchElements(idFn, IgnoreExtras, Elements{ +// "0": Equal("a"), +// "1": Equal("b"), +// })) +// Expect([]string{"a", "c"}).To(MatchElements(idFn, IgnoreMissing, Elements{ +// "0": Equal("a"), +// "1": Equal("b"), +// "2": Equal("c"), +// "3": Equal("d"), +// })) +func MatchElementsWithIndex(identifier IdentifierWithIndex, options Options, elements Elements) types.GomegaMatcher { + return &ElementsMatcher{ + Identifier: identifier, + Elements: elements, + IgnoreExtras: options&IgnoreExtras != 0, + IgnoreMissing: options&IgnoreMissing != 0, + AllowDuplicates: options&AllowDuplicates != 0, + } +} + // ElementsMatcher is a NestingMatcher that applies custom matchers to each element of a slice mapped // by the Identifier function. // TODO: Extend this to work with arrays & maps (map the key) as well. @@ -63,7 +107,7 @@ type ElementsMatcher struct { // Matchers for each element. Elements Elements // Function mapping an element to the string key identifying its matcher. - Identifier Identifier + Identifier Identify // Whether to ignore extra elements or consider it an error. IgnoreExtras bool @@ -82,6 +126,32 @@ type Elements map[string]types.GomegaMatcher // Function for identifying (mapping) elements. type Identifier func(element interface{}) string +// Calls the underlying fucntion with the provided params. +// Identifier drops the index. +func (i Identifier) WithIndexAndElement(index int, element interface{}) string { + return i(element) +} + +// Uses the index and element to generate an element name +type IdentifierWithIndex func(index int, element interface{}) string + +// Calls the underlying fucntion with the provided params. +// IdentifierWithIndex uses the index. 
+func (i IdentifierWithIndex) WithIndexAndElement(index int, element interface{}) string { + return i(index, element) +} + +// Interface for identifing the element +type Identify interface { + WithIndexAndElement(i int, element interface{}) string +} + +// IndexIdentity is a helper function for using an index as +// the key in the element map +func IndexIdentity(index int, _ interface{}) string { + return strconv.Itoa(index) +} + func (m *ElementsMatcher) Match(actual interface{}) (success bool, err error) { if reflect.TypeOf(actual).Kind() != reflect.Slice { return false, fmt.Errorf("%v is type %T, expected slice", actual, actual) @@ -106,7 +176,7 @@ func (m *ElementsMatcher) matchElements(actual interface{}) (errs []error) { elements := map[string]bool{} for i := 0; i < val.Len(); i++ { element := val.Index(i).Interface() - id := m.Identifier(element) + id := m.Identifier.WithIndexAndElement(i, element) if elements[id] { if !m.AllowDuplicates { errs = append(errs, fmt.Errorf("found duplicate element ID %s", id)) diff --git a/vendor/github.com/onsi/gomega/internal/assertion.go b/vendor/github.com/onsi/gomega/internal/assertion.go new file mode 100644 index 000000000..b3c26889a --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/assertion.go @@ -0,0 +1,150 @@ +package internal + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/types" +) + +type Assertion struct { + actuals []interface{} // actual value plus all extra values + actualIndex int // value to pass to the matcher + vet vetinari // the vet to call before calling Gomega matcher + offset int + g *Gomega +} + +// ...obligatory discworld reference, as "vetineer" doesn't sound ... quite right. +type vetinari func(assertion *Assertion, optionalDescription ...interface{}) bool + +func NewAssertion(actualInput interface{}, g *Gomega, offset int, extra ...interface{}) *Assertion { + return &Assertion{ + actuals: append([]interface{}{actualInput}, extra...), + actualIndex: 0, + vet: (*Assertion).vetActuals, + offset: offset, + g: g, + } +} + +func (assertion *Assertion) WithOffset(offset int) types.Assertion { + assertion.offset = offset + return assertion +} + +func (assertion *Assertion) Error() types.Assertion { + return &Assertion{ + actuals: assertion.actuals, + actualIndex: len(assertion.actuals) - 1, + vet: (*Assertion).vetError, + offset: assertion.offset, + g: assertion.g, + } +} + +func (assertion *Assertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + assertion.g.THelper() + return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, true, optionalDescription...) +} + +func (assertion *Assertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + assertion.g.THelper() + return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, false, optionalDescription...) +} + +func (assertion *Assertion) To(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + assertion.g.THelper() + return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, true, optionalDescription...) +} + +func (assertion *Assertion) ToNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + assertion.g.THelper() + return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, false, optionalDescription...) 
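// The index-based gstruct matchers added above pair naturally with the
// IndexIdentity helper, which keys each element by its position in the slice.
// A short sketch of the call site.
package example_test

import (
	"testing"

	"github.com/onsi/gomega"
	"github.com/onsi/gomega/gstruct"
)

func TestElementsByIndex(t *testing.T) {
	g := gomega.NewWithT(t)
	g.Expect([]string{"a", "b", "c"}).To(gstruct.MatchAllElementsWithIndex(gstruct.IndexIdentity, gstruct.Elements{
		"0": gomega.Equal("a"),
		"1": gomega.Equal("b"),
		"2": gomega.Equal("c"),
	}))
}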
+} + +func (assertion *Assertion) NotTo(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + assertion.g.THelper() + return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, false, optionalDescription...) +} + +func (assertion *Assertion) buildDescription(optionalDescription ...interface{}) string { + switch len(optionalDescription) { + case 0: + return "" + case 1: + if describe, ok := optionalDescription[0].(func() string); ok { + return describe() + "\n" + } + } + return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n" +} + +func (assertion *Assertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool { + actualInput := assertion.actuals[assertion.actualIndex] + matches, err := matcher.Match(actualInput) + assertion.g.THelper() + if err != nil { + description := assertion.buildDescription(optionalDescription...) + assertion.g.Fail(description+err.Error(), 2+assertion.offset) + return false + } + if matches != desiredMatch { + var message string + if desiredMatch { + message = matcher.FailureMessage(actualInput) + } else { + message = matcher.NegatedFailureMessage(actualInput) + } + description := assertion.buildDescription(optionalDescription...) + assertion.g.Fail(description+message, 2+assertion.offset) + return false + } + + return true +} + +// vetActuals vets the actual values, with the (optional) exception of a +// specific value, such as the first value in case non-error assertions, or the +// last value in case of Error()-based assertions. +func (assertion *Assertion) vetActuals(optionalDescription ...interface{}) bool { + success, message := vetActuals(assertion.actuals, assertion.actualIndex) + if success { + return true + } + + description := assertion.buildDescription(optionalDescription...) + assertion.g.THelper() + assertion.g.Fail(description+message, 2+assertion.offset) + return false +} + +// vetError vets the actual values, except for the final error value, in case +// the final error value is non-zero. Otherwise, it doesn't vet the actual +// values, as these are allowed to take on any values unless there is a non-zero +// error value. +func (assertion *Assertion) vetError(optionalDescription ...interface{}) bool { + if err := assertion.actuals[assertion.actualIndex]; err != nil { + // Go error result idiom: all other actual values must be zero values. + return assertion.vetActuals(optionalDescription...) + } + return true +} + +// vetActuals vets a slice of actual values, optionally skipping a particular +// value slice element, such as the first or last value slice element. 
+func vetActuals(actuals []interface{}, skipIndex int) (bool, string) { + for i, actual := range actuals { + if i == skipIndex { + continue + } + if actual != nil { + zeroValue := reflect.Zero(reflect.TypeOf(actual)).Interface() + if !reflect.DeepEqual(zeroValue, actual) { + message := fmt.Sprintf("Unexpected non-nil/non-zero argument at index %d:\n\t<%T>: %#v", i, actual, actual) + return false, message + } + } + } + return true, "" +} diff --git a/vendor/github.com/onsi/gomega/internal/assertion/assertion.go b/vendor/github.com/onsi/gomega/internal/assertion/assertion.go deleted file mode 100644 index a248298f4..000000000 --- a/vendor/github.com/onsi/gomega/internal/assertion/assertion.go +++ /dev/null @@ -1,109 +0,0 @@ -package assertion - -import ( - "fmt" - "reflect" - - "github.com/onsi/gomega/types" -) - -type Assertion struct { - actualInput interface{} - failWrapper *types.GomegaFailWrapper - offset int - extra []interface{} -} - -func New(actualInput interface{}, failWrapper *types.GomegaFailWrapper, offset int, extra ...interface{}) *Assertion { - return &Assertion{ - actualInput: actualInput, - failWrapper: failWrapper, - offset: offset, - extra: extra, - } -} - -func (assertion *Assertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { - assertion.failWrapper.TWithHelper.Helper() - return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, true, optionalDescription...) -} - -func (assertion *Assertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { - assertion.failWrapper.TWithHelper.Helper() - return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...) -} - -func (assertion *Assertion) To(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { - assertion.failWrapper.TWithHelper.Helper() - return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, true, optionalDescription...) -} - -func (assertion *Assertion) ToNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { - assertion.failWrapper.TWithHelper.Helper() - return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...) -} - -func (assertion *Assertion) NotTo(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { - assertion.failWrapper.TWithHelper.Helper() - return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...) -} - -func (assertion *Assertion) buildDescription(optionalDescription ...interface{}) string { - switch len(optionalDescription) { - case 0: - return "" - case 1: - if describe, ok := optionalDescription[0].(func() string); ok { - return describe() + "\n" - } - } - return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n" -} - -func (assertion *Assertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool { - matches, err := matcher.Match(assertion.actualInput) - assertion.failWrapper.TWithHelper.Helper() - if err != nil { - description := assertion.buildDescription(optionalDescription...) 
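// The Error() form on the new Assertion above shifts the assertion target to the
// trailing error of a multi-value call, and vetError only requires the other
// return values to be zero when that error is non-nil. A sketch of how this reads
// at the call site; the open helper is illustrative.
package example_test

import (
	"os"
	"testing"

	"github.com/onsi/gomega"
)

func open(name string) (*os.File, error) {
	return os.Open(name)
}

func TestOpenMissingFile(t *testing.T) {
	g := gomega.NewWithT(t)
	// The nil *os.File is ignored; the assertion is made against the error.
	g.Expect(open("/no/such/file")).Error().To(gomega.HaveOccurred())
}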
- assertion.failWrapper.Fail(description+err.Error(), 2+assertion.offset) - return false - } - if matches != desiredMatch { - var message string - if desiredMatch { - message = matcher.FailureMessage(assertion.actualInput) - } else { - message = matcher.NegatedFailureMessage(assertion.actualInput) - } - description := assertion.buildDescription(optionalDescription...) - assertion.failWrapper.Fail(description+message, 2+assertion.offset) - return false - } - - return true -} - -func (assertion *Assertion) vetExtras(optionalDescription ...interface{}) bool { - success, message := vetExtras(assertion.extra) - if success { - return true - } - - description := assertion.buildDescription(optionalDescription...) - assertion.failWrapper.TWithHelper.Helper() - assertion.failWrapper.Fail(description+message, 2+assertion.offset) - return false -} - -func vetExtras(extras []interface{}) (bool, string) { - for i, extra := range extras { - if extra != nil { - zeroValue := reflect.Zero(reflect.TypeOf(extra)).Interface() - if !reflect.DeepEqual(zeroValue, extra) { - message := fmt.Sprintf("Unexpected non-nil/non-zero extra argument at index %d:\n\t<%T>: %#v", i+1, extra, extra) - return false, message - } - } - } - return true, "" -} diff --git a/vendor/github.com/onsi/gomega/internal/async_assertion.go b/vendor/github.com/onsi/gomega/internal/async_assertion.go new file mode 100644 index 000000000..99f4ebcfe --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/async_assertion.go @@ -0,0 +1,235 @@ +package internal + +import ( + "errors" + "fmt" + "reflect" + "runtime" + "time" + + "github.com/onsi/gomega/types" +) + +type AsyncAssertionType uint + +const ( + AsyncAssertionTypeEventually AsyncAssertionType = iota + AsyncAssertionTypeConsistently +) + +type AsyncAssertion struct { + asyncType AsyncAssertionType + + actualIsFunc bool + actualValue interface{} + actualFunc func() ([]reflect.Value, error) + + timeoutInterval time.Duration + pollingInterval time.Duration + offset int + g *Gomega +} + +func NewAsyncAssertion(asyncType AsyncAssertionType, actualInput interface{}, g *Gomega, timeoutInterval time.Duration, pollingInterval time.Duration, offset int) *AsyncAssertion { + out := &AsyncAssertion{ + asyncType: asyncType, + timeoutInterval: timeoutInterval, + pollingInterval: pollingInterval, + offset: offset, + g: g, + } + + switch actualType := reflect.TypeOf(actualInput); { + case actualType.Kind() != reflect.Func: + out.actualValue = actualInput + case actualType.NumIn() == 0 && actualType.NumOut() > 0: + out.actualIsFunc = true + out.actualFunc = func() ([]reflect.Value, error) { + return reflect.ValueOf(actualInput).Call([]reflect.Value{}), nil + } + case actualType.NumIn() == 1 && actualType.In(0).Implements(reflect.TypeOf((*types.Gomega)(nil)).Elem()): + out.actualIsFunc = true + out.actualFunc = func() (values []reflect.Value, err error) { + var assertionFailure error + assertionCapturingGomega := NewGomega(g.DurationBundle).ConfigureWithFailHandler(func(message string, callerSkip ...int) { + skip := 0 + if len(callerSkip) > 0 { + skip = callerSkip[0] + } + _, file, line, _ := runtime.Caller(skip + 1) + assertionFailure = fmt.Errorf("Assertion in callback at %s:%d failed:\n%s", file, line, message) + panic("stop execution") + }) + + defer func() { + if actualType.NumOut() == 0 { + if assertionFailure == nil { + values = []reflect.Value{reflect.Zero(reflect.TypeOf((*error)(nil)).Elem())} + } else { + values = []reflect.Value{reflect.ValueOf(assertionFailure)} + } + } else { + err = 
assertionFailure + } + if e := recover(); e != nil && assertionFailure == nil { + panic(e) + } + }() + + values = reflect.ValueOf(actualInput).Call([]reflect.Value{reflect.ValueOf(assertionCapturingGomega)}) + return + } + default: + msg := fmt.Sprintf("The function passed to Gomega's async assertions should either take no arguments and return values, or take a single Gomega interface that it can use to make assertions within the body of the function. When taking a Gomega interface the function can optionally return values or return nothing. The function you passed takes %d arguments and returns %d values.", actualType.NumIn(), actualType.NumOut()) + g.Fail(msg, offset+4) + } + + return out +} + +func (assertion *AsyncAssertion) WithOffset(offset int) types.AsyncAssertion { + assertion.offset = offset + return assertion +} + +func (assertion *AsyncAssertion) WithTimeout(interval time.Duration) types.AsyncAssertion { + assertion.timeoutInterval = interval + return assertion +} + +func (assertion *AsyncAssertion) WithPolling(interval time.Duration) types.AsyncAssertion { + assertion.pollingInterval = interval + return assertion +} + +func (assertion *AsyncAssertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + assertion.g.THelper() + return assertion.match(matcher, true, optionalDescription...) +} + +func (assertion *AsyncAssertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { + assertion.g.THelper() + return assertion.match(matcher, false, optionalDescription...) +} + +func (assertion *AsyncAssertion) buildDescription(optionalDescription ...interface{}) string { + switch len(optionalDescription) { + case 0: + return "" + case 1: + if describe, ok := optionalDescription[0].(func() string); ok { + return describe() + "\n" + } + } + return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n" +} + +func (assertion *AsyncAssertion) pollActual() (interface{}, error) { + if !assertion.actualIsFunc { + return assertion.actualValue, nil + } + + values, err := assertion.actualFunc() + if err != nil { + return nil, err + } + extras := []interface{}{nil} + for _, value := range values[1:] { + extras = append(extras, value.Interface()) + } + success, message := vetActuals(extras, 0) + if !success { + return nil, errors.New(message) + } + + return values[0].Interface(), nil +} + +func (assertion *AsyncAssertion) matcherMayChange(matcher types.GomegaMatcher, value interface{}) bool { + if assertion.actualIsFunc { + return true + } + return types.MatchMayChangeInTheFuture(matcher, value) +} + +func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool { + timer := time.Now() + timeout := time.After(assertion.timeoutInterval) + + var matches bool + var err error + mayChange := true + value, err := assertion.pollActual() + if err == nil { + mayChange = assertion.matcherMayChange(matcher, value) + matches, err = matcher.Match(value) + } + + assertion.g.THelper() + + fail := func(preamble string) { + errMsg := "" + message := "" + if err != nil { + errMsg = "Error: " + err.Error() + } else { + if desiredMatch { + message = matcher.FailureMessage(value) + } else { + message = matcher.NegatedFailureMessage(value) + } + } + assertion.g.THelper() + description := assertion.buildDescription(optionalDescription...) 
+ assertion.g.Fail(fmt.Sprintf("%s after %.3fs.\n%s%s%s", preamble, time.Since(timer).Seconds(), description, message, errMsg), 3+assertion.offset) + } + + if assertion.asyncType == AsyncAssertionTypeEventually { + for { + if err == nil && matches == desiredMatch { + return true + } + + if !mayChange { + fail("No future change is possible. Bailing out early") + return false + } + + select { + case <-time.After(assertion.pollingInterval): + value, err = assertion.pollActual() + if err == nil { + mayChange = assertion.matcherMayChange(matcher, value) + matches, err = matcher.Match(value) + } + case <-timeout: + fail("Timed out") + return false + } + } + } else if assertion.asyncType == AsyncAssertionTypeConsistently { + for { + if !(err == nil && matches == desiredMatch) { + fail("Failed") + return false + } + + if !mayChange { + return true + } + + select { + case <-time.After(assertion.pollingInterval): + value, err = assertion.pollActual() + if err == nil { + mayChange = assertion.matcherMayChange(matcher, value) + matches, err = matcher.Match(value) + } + case <-timeout: + return true + } + } + } + + return false +} diff --git a/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go b/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go deleted file mode 100644 index 5204836bf..000000000 --- a/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go +++ /dev/null @@ -1,198 +0,0 @@ -// untested sections: 2 - -package asyncassertion - -import ( - "errors" - "fmt" - "reflect" - "time" - - "github.com/onsi/gomega/internal/oraclematcher" - "github.com/onsi/gomega/types" -) - -type AsyncAssertionType uint - -const ( - AsyncAssertionTypeEventually AsyncAssertionType = iota - AsyncAssertionTypeConsistently -) - -type AsyncAssertion struct { - asyncType AsyncAssertionType - actualInput interface{} - timeoutInterval time.Duration - pollingInterval time.Duration - failWrapper *types.GomegaFailWrapper - offset int -} - -func New(asyncType AsyncAssertionType, actualInput interface{}, failWrapper *types.GomegaFailWrapper, timeoutInterval time.Duration, pollingInterval time.Duration, offset int) *AsyncAssertion { - actualType := reflect.TypeOf(actualInput) - if actualType.Kind() == reflect.Func { - if actualType.NumIn() != 0 || actualType.NumOut() == 0 { - panic("Expected a function with no arguments and one or more return values.") - } - } - - return &AsyncAssertion{ - asyncType: asyncType, - actualInput: actualInput, - failWrapper: failWrapper, - timeoutInterval: timeoutInterval, - pollingInterval: pollingInterval, - offset: offset, - } -} - -func (assertion *AsyncAssertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { - assertion.failWrapper.TWithHelper.Helper() - return assertion.match(matcher, true, optionalDescription...) -} - -func (assertion *AsyncAssertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { - assertion.failWrapper.TWithHelper.Helper() - return assertion.match(matcher, false, optionalDescription...) -} - -func (assertion *AsyncAssertion) buildDescription(optionalDescription ...interface{}) string { - switch len(optionalDescription) { - case 0: - return "" - case 1: - if describe, ok := optionalDescription[0].(func() string); ok { - return describe() + "\n" - } - } - return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) 
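// The polling loops above bail out early when matcherMayChange reports that the
// outcome can no longer change. A custom matcher opts in simply by implementing
// MatchMayChangeInTheFuture; the Job type and its "cancelled" semantics below are
// illustrative.
package example

import (
	"fmt"

	"github.com/onsi/gomega/types"
)

type Job struct {
	Done      bool
	Cancelled bool
}

type beFinishedMatcher struct{}

// BeFinished matches a *Job that has completed.
func BeFinished() types.GomegaMatcher { return beFinishedMatcher{} }

func (beFinishedMatcher) Match(actual interface{}) (bool, error) {
	job, ok := actual.(*Job)
	if !ok {
		return false, fmt.Errorf("BeFinished expects a *Job, got %T", actual)
	}
	return job.Done, nil
}

func (beFinishedMatcher) FailureMessage(actual interface{}) string {
	return fmt.Sprintf("Expected job %#v to be finished", actual)
}

func (beFinishedMatcher) NegatedFailureMessage(actual interface{}) string {
	return fmt.Sprintf("Expected job %#v not to be finished", actual)
}

// A cancelled job can never finish, so Eventually(job).Should(BeFinished())
// may stop polling as soon as cancellation is observed.
func (beFinishedMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
	job, ok := actual.(*Job)
	return !ok || !job.Cancelled
}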
+ "\n" -} - -func (assertion *AsyncAssertion) actualInputIsAFunction() bool { - actualType := reflect.TypeOf(assertion.actualInput) - return actualType.Kind() == reflect.Func && actualType.NumIn() == 0 && actualType.NumOut() > 0 -} - -func (assertion *AsyncAssertion) pollActual() (interface{}, error) { - if assertion.actualInputIsAFunction() { - values := reflect.ValueOf(assertion.actualInput).Call([]reflect.Value{}) - - extras := []interface{}{} - for _, value := range values[1:] { - extras = append(extras, value.Interface()) - } - - success, message := vetExtras(extras) - - if !success { - return nil, errors.New(message) - } - - return values[0].Interface(), nil - } - - return assertion.actualInput, nil -} - -func (assertion *AsyncAssertion) matcherMayChange(matcher types.GomegaMatcher, value interface{}) bool { - if assertion.actualInputIsAFunction() { - return true - } - - return oraclematcher.MatchMayChangeInTheFuture(matcher, value) -} - -func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool { - timer := time.Now() - timeout := time.After(assertion.timeoutInterval) - - var matches bool - var err error - mayChange := true - value, err := assertion.pollActual() - if err == nil { - mayChange = assertion.matcherMayChange(matcher, value) - matches, err = matcher.Match(value) - } - - assertion.failWrapper.TWithHelper.Helper() - - fail := func(preamble string) { - errMsg := "" - message := "" - if err != nil { - errMsg = "Error: " + err.Error() - } else { - if desiredMatch { - message = matcher.FailureMessage(value) - } else { - message = matcher.NegatedFailureMessage(value) - } - } - assertion.failWrapper.TWithHelper.Helper() - description := assertion.buildDescription(optionalDescription...) - assertion.failWrapper.Fail(fmt.Sprintf("%s after %.3fs.\n%s%s%s", preamble, time.Since(timer).Seconds(), description, message, errMsg), 3+assertion.offset) - } - - if assertion.asyncType == AsyncAssertionTypeEventually { - for { - if err == nil && matches == desiredMatch { - return true - } - - if !mayChange { - fail("No future change is possible. 
Bailing out early") - return false - } - - select { - case <-time.After(assertion.pollingInterval): - value, err = assertion.pollActual() - if err == nil { - mayChange = assertion.matcherMayChange(matcher, value) - matches, err = matcher.Match(value) - } - case <-timeout: - fail("Timed out") - return false - } - } - } else if assertion.asyncType == AsyncAssertionTypeConsistently { - for { - if !(err == nil && matches == desiredMatch) { - fail("Failed") - return false - } - - if !mayChange { - return true - } - - select { - case <-time.After(assertion.pollingInterval): - value, err = assertion.pollActual() - if err == nil { - mayChange = assertion.matcherMayChange(matcher, value) - matches, err = matcher.Match(value) - } - case <-timeout: - return true - } - } - } - - return false -} - -func vetExtras(extras []interface{}) (bool, string) { - for i, extra := range extras { - if extra != nil { - zeroValue := reflect.Zero(reflect.TypeOf(extra)).Interface() - if !reflect.DeepEqual(zeroValue, extra) { - message := fmt.Sprintf("Unexpected non-nil/non-zero extra argument at index %d:\n\t<%T>: %#v", i+1, extra, extra) - return false, message - } - } - } - return true, "" -} diff --git a/vendor/github.com/onsi/gomega/internal/duration_bundle.go b/vendor/github.com/onsi/gomega/internal/duration_bundle.go new file mode 100644 index 000000000..af8d989fa --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/duration_bundle.go @@ -0,0 +1,71 @@ +package internal + +import ( + "fmt" + "os" + "reflect" + "time" +) + +type DurationBundle struct { + EventuallyTimeout time.Duration + EventuallyPollingInterval time.Duration + ConsistentlyDuration time.Duration + ConsistentlyPollingInterval time.Duration +} + +const ( + EventuallyTimeoutEnvVarName = "GOMEGA_DEFAULT_EVENTUALLY_TIMEOUT" + EventuallyPollingIntervalEnvVarName = "GOMEGA_DEFAULT_EVENTUALLY_POLLING_INTERVAL" + + ConsistentlyDurationEnvVarName = "GOMEGA_DEFAULT_CONSISTENTLY_DURATION" + ConsistentlyPollingIntervalEnvVarName = "GOMEGA_DEFAULT_CONSISTENTLY_POLLING_INTERVAL" +) + +func FetchDefaultDurationBundle() DurationBundle { + return DurationBundle{ + EventuallyTimeout: durationFromEnv(EventuallyTimeoutEnvVarName, time.Second), + EventuallyPollingInterval: durationFromEnv(EventuallyPollingIntervalEnvVarName, 10*time.Millisecond), + + ConsistentlyDuration: durationFromEnv(ConsistentlyDurationEnvVarName, 100*time.Millisecond), + ConsistentlyPollingInterval: durationFromEnv(ConsistentlyPollingIntervalEnvVarName, 10*time.Millisecond), + } +} + +func durationFromEnv(key string, defaultDuration time.Duration) time.Duration { + value := os.Getenv(key) + if value == "" { + return defaultDuration + } + duration, err := time.ParseDuration(value) + if err != nil { + panic(fmt.Sprintf("Expected a duration when using %s! 
Parse error %v", key, err)) + } + return duration +} + +func toDuration(input interface{}) time.Duration { + duration, ok := input.(time.Duration) + if ok { + return duration + } + + value := reflect.ValueOf(input) + kind := reflect.TypeOf(input).Kind() + + if reflect.Int <= kind && kind <= reflect.Int64 { + return time.Duration(value.Int()) * time.Second + } else if reflect.Uint <= kind && kind <= reflect.Uint64 { + return time.Duration(value.Uint()) * time.Second + } else if reflect.Float32 <= kind && kind <= reflect.Float64 { + return time.Duration(value.Float() * float64(time.Second)) + } else if reflect.String == kind { + duration, err := time.ParseDuration(value.String()) + if err != nil { + panic(fmt.Sprintf("%#v is not a valid parsable duration string.", input)) + } + return duration + } + + panic(fmt.Sprintf("%v is not a valid interval. Must be time.Duration, parsable duration string or a number.", input)) +} diff --git a/vendor/github.com/onsi/gomega/internal/gomega.go b/vendor/github.com/onsi/gomega/internal/gomega.go new file mode 100644 index 000000000..d26a67485 --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/gomega.go @@ -0,0 +1,102 @@ +package internal + +import ( + "time" + + "github.com/onsi/gomega/types" +) + +type Gomega struct { + Fail types.GomegaFailHandler + THelper func() + DurationBundle DurationBundle +} + +func NewGomega(bundle DurationBundle) *Gomega { + return &Gomega{ + Fail: nil, + THelper: nil, + DurationBundle: bundle, + } +} + +func (g *Gomega) IsConfigured() bool { + return g.Fail != nil && g.THelper != nil +} + +func (g *Gomega) ConfigureWithFailHandler(fail types.GomegaFailHandler) *Gomega { + g.Fail = fail + g.THelper = func() {} + return g +} + +func (g *Gomega) ConfigureWithT(t types.GomegaTestingT) *Gomega { + g.Fail = func(message string, _ ...int) { + t.Helper() + t.Fatalf("\n%s", message) + } + g.THelper = t.Helper + return g +} + +func (g *Gomega) Ω(actual interface{}, extra ...interface{}) types.Assertion { + return g.ExpectWithOffset(0, actual, extra...) +} + +func (g *Gomega) Expect(actual interface{}, extra ...interface{}) types.Assertion { + return g.ExpectWithOffset(0, actual, extra...) +} + +func (g *Gomega) ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) types.Assertion { + return NewAssertion(actual, g, offset, extra...) +} + +func (g *Gomega) Eventually(actual interface{}, intervals ...interface{}) types.AsyncAssertion { + return g.EventuallyWithOffset(0, actual, intervals...) +} + +func (g *Gomega) EventuallyWithOffset(offset int, actual interface{}, intervals ...interface{}) types.AsyncAssertion { + timeoutInterval := g.DurationBundle.EventuallyTimeout + pollingInterval := g.DurationBundle.EventuallyPollingInterval + if len(intervals) > 0 { + timeoutInterval = toDuration(intervals[0]) + } + if len(intervals) > 1 { + pollingInterval = toDuration(intervals[1]) + } + + return NewAsyncAssertion(AsyncAssertionTypeEventually, actual, g, timeoutInterval, pollingInterval, offset) +} + +func (g *Gomega) Consistently(actual interface{}, intervals ...interface{}) types.AsyncAssertion { + return g.ConsistentlyWithOffset(0, actual, intervals...) 
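// durationFromEnv and toDuration above mean two things for callers: the package
// defaults can be overridden through the GOMEGA_DEFAULT_* environment variables
// (e.g. GOMEGA_DEFAULT_EVENTUALLY_TIMEOUT=30s, read once at start-up), and the
// per-call intervals may be a time.Duration, a parsable duration string, or a
// bare number of seconds. A sketch of the three spellings; isReady is a
// hypothetical poller.
package example_test

import (
	"testing"
	"time"

	"github.com/onsi/gomega"
)

func TestIntervalSpellings(t *testing.T) {
	g := gomega.NewWithT(t)
	isReady := func() bool { return true }

	g.Eventually(isReady, 2*time.Second, 10*time.Millisecond).Should(gomega.BeTrue()) // time.Duration values
	g.Eventually(isReady, "2s", "10ms").Should(gomega.BeTrue())                       // parsable duration strings
	g.Eventually(isReady, 2, 0.01).Should(gomega.BeTrue())                            // numbers, interpreted as seconds
}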
+} + +func (g *Gomega) ConsistentlyWithOffset(offset int, actual interface{}, intervals ...interface{}) types.AsyncAssertion { + timeoutInterval := g.DurationBundle.ConsistentlyDuration + pollingInterval := g.DurationBundle.ConsistentlyPollingInterval + if len(intervals) > 0 { + timeoutInterval = toDuration(intervals[0]) + } + if len(intervals) > 1 { + pollingInterval = toDuration(intervals[1]) + } + + return NewAsyncAssertion(AsyncAssertionTypeConsistently, actual, g, timeoutInterval, pollingInterval, offset) +} + +func (g *Gomega) SetDefaultEventuallyTimeout(t time.Duration) { + g.DurationBundle.EventuallyTimeout = t +} + +func (g *Gomega) SetDefaultEventuallyPollingInterval(t time.Duration) { + g.DurationBundle.EventuallyPollingInterval = t +} + +func (g *Gomega) SetDefaultConsistentlyDuration(t time.Duration) { + g.DurationBundle.ConsistentlyDuration = t +} + +func (g *Gomega) SetDefaultConsistentlyPollingInterval(t time.Duration) { + g.DurationBundle.ConsistentlyPollingInterval = t +} diff --git a/vendor/github.com/onsi/gomega/internal/gutil/post_ioutil.go b/vendor/github.com/onsi/gomega/internal/gutil/post_ioutil.go new file mode 100644 index 000000000..6864055a5 --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/gutil/post_ioutil.go @@ -0,0 +1,48 @@ +//go:build go1.16 +// +build go1.16 + +// Package gutil is a replacement for ioutil, which should not be used in new +// code as of Go 1.16. With Go 1.16 and higher, this implementation +// uses the ioutil replacement functions in "io" and "os" with some +// Gomega specifics. This means that we should not get deprecation warnings +// for ioutil when they are added. +package gutil + +import ( + "io" + "os" +) + +func NopCloser(r io.Reader) io.ReadCloser { + return io.NopCloser(r) +} + +func ReadAll(r io.Reader) ([]byte, error) { + return io.ReadAll(r) +} + +func ReadDir(dirname string) ([]string, error) { + entries, err := os.ReadDir(dirname) + if err != nil { + return nil, err + } + + var names []string + for _, entry := range entries { + names = append(names, entry.Name()) + } + + return names, nil +} + +func ReadFile(filename string) ([]byte, error) { + return os.ReadFile(filename) +} + +func MkdirTemp(dir, pattern string) (string, error) { + return os.MkdirTemp(dir, pattern) +} + +func WriteFile(filename string, data []byte) error { + return os.WriteFile(filename, data, 0644) +} diff --git a/vendor/github.com/onsi/gomega/internal/gutil/using_ioutil.go b/vendor/github.com/onsi/gomega/internal/gutil/using_ioutil.go new file mode 100644 index 000000000..5c0ce1ee3 --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/gutil/using_ioutil.go @@ -0,0 +1,47 @@ +//go:build !go1.16 +// +build !go1.16 + +// Package gutil is a replacement for ioutil, which should not be used in new +// code as of Go 1.16. With Go 1.15 and lower, this implementation +// uses the ioutil functions, meaning that although Gomega is not officially +// supported on these versions, it is still likely to work. 
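// The two gutil files above are selected through paired build constraints
// (go1.16 vs !go1.16) so callers see a single API regardless of toolchain
// version. A minimal sketch of the same pattern for a hypothetical fileutil
// package of your own, shown as the first of the two files (the !go1.16
// counterpart would mirror it using io/ioutil).
//
// fileutil_go116.go:
//go:build go1.16
// +build go1.16

package fileutil

import "os"

// ReadFile delegates to the post-ioutil standard library call.
func ReadFile(name string) ([]byte, error) {
	return os.ReadFile(name)
}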
+package gutil + +import ( + "io" + "io/ioutil" +) + +func NopCloser(r io.Reader) io.ReadCloser { + return ioutil.NopCloser(r) +} + +func ReadAll(r io.Reader) ([]byte, error) { + return ioutil.ReadAll(r) +} + +func ReadDir(dirname string) ([]string, error) { + files, err := ioutil.ReadDir(dirname) + if err != nil { + return nil, err + } + + var names []string + for _, file := range files { + names = append(names, file.Name()) + } + + return names, nil +} + +func ReadFile(filename string) ([]byte, error) { + return ioutil.ReadFile(filename) +} + +func MkdirTemp(dir, pattern string) (string, error) { + return ioutil.TempDir(dir, pattern) +} + +func WriteFile(filename string, data []byte) error { + return ioutil.WriteFile(filename, data, 0644) +} diff --git a/vendor/github.com/onsi/gomega/internal/oraclematcher/oracle_matcher.go b/vendor/github.com/onsi/gomega/internal/oraclematcher/oracle_matcher.go deleted file mode 100644 index 66cad88a1..000000000 --- a/vendor/github.com/onsi/gomega/internal/oraclematcher/oracle_matcher.go +++ /dev/null @@ -1,25 +0,0 @@ -package oraclematcher - -import "github.com/onsi/gomega/types" - -/* -GomegaMatchers that also match the OracleMatcher interface can convey information about -whether or not their result will change upon future attempts. - -This allows `Eventually` and `Consistently` to short circuit if success becomes impossible. - -For example, a process' exit code can never change. So, gexec's Exit matcher returns `true` -for `MatchMayChangeInTheFuture` until the process exits, at which point it returns `false` forevermore. -*/ -type OracleMatcher interface { - MatchMayChangeInTheFuture(actual interface{}) bool -} - -func MatchMayChangeInTheFuture(matcher types.GomegaMatcher, value interface{}) bool { - oracleMatcher, ok := matcher.(OracleMatcher) - if !ok { - return true - } - - return oracleMatcher.MatchMayChangeInTheFuture(value) -} diff --git a/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go b/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go deleted file mode 100644 index bb27032f6..000000000 --- a/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go +++ /dev/null @@ -1,60 +0,0 @@ -package testingtsupport - -import ( - "regexp" - "runtime/debug" - "strings" - - "github.com/onsi/gomega/types" -) - -var StackTracePruneRE = regexp.MustCompile(`\/gomega\/|\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`) - -type EmptyTWithHelper struct{} - -func (e EmptyTWithHelper) Helper() {} - -type gomegaTestingT interface { - Fatalf(format string, args ...interface{}) -} - -func BuildTestingTGomegaFailWrapper(t gomegaTestingT) *types.GomegaFailWrapper { - tWithHelper, hasHelper := t.(types.TWithHelper) - if !hasHelper { - tWithHelper = EmptyTWithHelper{} - } - - fail := func(message string, callerSkip ...int) { - if hasHelper { - tWithHelper.Helper() - t.Fatalf("\n%s", message) - } else { - skip := 2 - if len(callerSkip) > 0 { - skip += callerSkip[0] - } - stackTrace := pruneStack(string(debug.Stack()), skip) - t.Fatalf("\n%s\n%s\n", stackTrace, message) - } - } - - return &types.GomegaFailWrapper{ - Fail: fail, - TWithHelper: tWithHelper, - } -} - -func pruneStack(fullStackTrace string, skip int) string { - stack := strings.Split(fullStackTrace, "\n")[1:] - if len(stack) > 2*skip { - stack = stack[2*skip:] - } - prunedStack := []string{} - for i := 0; i < len(stack)/2; i++ { - if !StackTracePruneRE.Match([]byte(stack[i*2])) { - prunedStack = append(prunedStack, stack[i*2]) - prunedStack = 
append(prunedStack, stack[i*2+1]) - } - } - return strings.Join(prunedStack, "\n") -} diff --git a/vendor/github.com/onsi/gomega/matchers.go b/vendor/github.com/onsi/gomega/matchers.go index 16218d4c5..b46e461a5 100644 --- a/vendor/github.com/onsi/gomega/matchers.go +++ b/vendor/github.com/onsi/gomega/matchers.go @@ -342,6 +342,54 @@ func HaveKeyWithValue(key interface{}, value interface{}) types.GomegaMatcher { } } +//HaveField succeeds if actual is a struct and the value at the passed in field +//matches the passed in matcher. By default HaveField used Equal() to perform the match, +//however a matcher can be passed in in stead. +// +//The field must be a string that resolves to the name of a field in the struct. Structs can be traversed +//using the '.' delimiter. If the field ends with '()' a method named field is assumed to exist on the struct and is invoked. +//Such methods must take no arguments and return a single value: +// +// type Book struct { +// Title string +// Author Person +// } +// type Person struct { +// FirstName string +// LastName string +// DOB time.Time +// } +// Expect(book).To(HaveField("Title", "Les Miserables")) +// Expect(book).To(HaveField("Title", ContainSubstring("Les")) +// Expect(book).To(HaveField("Author.FirstName", Equal("Victor")) +// Expect(book).To(HaveField("Author.DOB.Year()", BeNumerically("<", 1900)) +func HaveField(field string, expected interface{}) types.GomegaMatcher { + return &matchers.HaveFieldMatcher{ + Field: field, + Expected: expected, + } +} + +// HaveValue applies the given matcher to the value of actual, optionally and +// repeatedly dereferencing pointers or taking the concrete value of interfaces. +// Thus, the matcher will always be applied to non-pointer and non-interface +// values only. HaveValue will fail with an error if a pointer or interface is +// nil. It will also fail for more than 31 pointer or interface dereferences to +// guard against mistakenly applying it to arbitrarily deep linked pointers. +// +// HaveValue differs from gstruct.PointTo in that it does not expect actual to +// be a pointer (as gstruct.PointTo does) but instead also accepts non-pointer +// and even interface values. +// +// actual := 42 +// Expect(actual).To(HaveValue(42)) +// Expect(&actual).To(HaveValue(42)) +func HaveValue(matcher types.GomegaMatcher) types.GomegaMatcher { + return &matchers.HaveValueMatcher{ + Matcher: matcher, + } +} + //BeNumerically performs numerical assertions in a type-agnostic way. //Actual and expected should be numbers, though the specific type of //number is irrelevant (float32, float64, uint8, etc...). @@ -423,10 +471,29 @@ func BeADirectory() types.GomegaMatcher { //Expected must be either an int or a string. // Expect(resp).Should(HaveHTTPStatus(http.StatusOK)) // asserts that resp.StatusCode == 200 // Expect(resp).Should(HaveHTTPStatus("404 Not Found")) // asserts that resp.Status == "404 Not Found" -func HaveHTTPStatus(expected interface{}) types.GomegaMatcher { +// Expect(resp).Should(HaveHTTPStatus(http.StatusOK, http.StatusNoContent)) // asserts that resp.StatusCode == 200 || resp.StatusCode == 204 +func HaveHTTPStatus(expected ...interface{}) types.GomegaMatcher { return &matchers.HaveHTTPStatusMatcher{Expected: expected} } +// HaveHTTPHeaderWithValue succeeds if the header is found and the value matches. +// Actual must be either a *http.Response or *httptest.ResponseRecorder. +// Expected must be a string header name, followed by a header value which +// can be a string, or another matcher. 
+func HaveHTTPHeaderWithValue(header string, value interface{}) types.GomegaMatcher { + return &matchers.HaveHTTPHeaderWithValueMatcher{ + Header: header, + Value: value, + } +} + +// HaveHTTPBody matches if the body matches. +// Actual must be either a *http.Response or *httptest.ResponseRecorder. +// Expected must be either a string, []byte, or other matcher +func HaveHTTPBody(expected interface{}) types.GomegaMatcher { + return &matchers.HaveHTTPBodyMatcher{Expected: expected} +} + //And succeeds only if all of the given matchers succeed. //The matchers are tried in order, and will fail-fast if one doesn't succeed. // Expect("hi").To(And(HaveLen(2), Equal("hi")) @@ -466,11 +533,24 @@ func Not(matcher types.GomegaMatcher) types.GomegaMatcher { } //WithTransform applies the `transform` to the actual value and matches it against `matcher`. -//The given transform must be a function of one parameter that returns one value. +//The given transform must be either a function of one parameter that returns one value or a +// function of one parameter that returns two values, where the second value must be of the +// error type. // var plus1 = func(i int) int { return i + 1 } // Expect(1).To(WithTransform(plus1, Equal(2)) // +// var failingplus1 = func(i int) (int, error) { return 42, "this does not compute" } +// Expect(1).To(WithTransform(failingplus1, Equal(2))) +// //And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions. func WithTransform(transform interface{}, matcher types.GomegaMatcher) types.GomegaMatcher { return matchers.NewWithTransformMatcher(transform, matcher) } + +//Satisfy matches the actual value against the `predicate` function. +//The given predicate must be a function of one paramter that returns bool. +// var isEven = func(i int) bool { return i%2 == 0 } +// Expect(2).To(Satisfy(isEven)) +func Satisfy(predicate interface{}) types.GomegaMatcher { + return matchers.NewSatisfyMatcher(predicate) +} diff --git a/vendor/github.com/onsi/gomega/matchers/and.go b/vendor/github.com/onsi/gomega/matchers/and.go index d83a29164..6bd826adc 100644 --- a/vendor/github.com/onsi/gomega/matchers/and.go +++ b/vendor/github.com/onsi/gomega/matchers/and.go @@ -4,7 +4,6 @@ import ( "fmt" "github.com/onsi/gomega/format" - "github.com/onsi/gomega/internal/oraclematcher" "github.com/onsi/gomega/types" ) @@ -52,12 +51,12 @@ func (m *AndMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { if m.firstFailedMatcher == nil { // so all matchers succeeded.. Any one of them changing would change the result. for _, matcher := range m.Matchers { - if oraclematcher.MatchMayChangeInTheFuture(matcher, actual) { + if types.MatchMayChangeInTheFuture(matcher, actual) { return true } } return false // none of were going to change } // one of the matchers failed.. 
it must be able to change in order to affect the result - return oraclematcher.MatchMayChangeInTheFuture(m.firstFailedMatcher, actual) + return types.MatchMayChangeInTheFuture(m.firstFailedMatcher, actual) } diff --git a/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go index 1f9d7a8e6..9ee75a5d5 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go @@ -18,23 +18,9 @@ func (matcher *BeElementOfMatcher) Match(actual interface{}) (success bool, err return false, fmt.Errorf("BeElement matcher expects actual to be typed") } - length := len(matcher.Elements) - valueAt := func(i int) interface{} { - return matcher.Elements[i] - } - // Special handling of a single element of type Array or Slice - if length == 1 && isArrayOrSlice(valueAt(0)) { - element := valueAt(0) - value := reflect.ValueOf(element) - length = value.Len() - valueAt = func(i int) interface{} { - return value.Index(i).Interface() - } - } - var lastError error - for i := 0; i < length; i++ { - matcher := &EqualMatcher{Expected: valueAt(i)} + for _, m := range flatten(matcher.Elements) { + matcher := &EqualMatcher{Expected: m} success, err := matcher.Match(actual) if err != nil { lastError = err @@ -49,9 +35,9 @@ func (matcher *BeElementOfMatcher) Match(actual interface{}) (success bool, err } func (matcher *BeElementOfMatcher) FailureMessage(actual interface{}) (message string) { - return format.Message(actual, "to be an element of", matcher.Elements) + return format.Message(actual, "to be an element of", presentable(matcher.Elements)) } func (matcher *BeElementOfMatcher) NegatedFailureMessage(actual interface{}) (message string) { - return format.Message(actual, "not to be an element of", matcher.Elements) + return format.Message(actual, "not to be an element of", presentable(matcher.Elements)) } diff --git a/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go index f72591a1a..100735de3 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go @@ -45,7 +45,7 @@ func (matcher *BeNumericallyMatcher) Match(actual interface{}) (success bool, er return false, fmt.Errorf("Expected a number. Got:\n%s", format.Object(matcher.CompareTo[0], 1)) } if len(matcher.CompareTo) == 2 && !isNumber(matcher.CompareTo[1]) { - return false, fmt.Errorf("Expected a number. Got:\n%s", format.Object(matcher.CompareTo[0], 1)) + return false, fmt.Errorf("Expected a number. 
Got:\n%s", format.Object(matcher.CompareTo[1], 1)) } switch matcher.Comparator { diff --git a/vendor/github.com/onsi/gomega/matchers/consist_of.go b/vendor/github.com/onsi/gomega/matchers/consist_of.go index e453b22d1..e8ef0dee1 100644 --- a/vendor/github.com/onsi/gomega/matchers/consist_of.go +++ b/vendor/github.com/onsi/gomega/matchers/consist_of.go @@ -57,17 +57,21 @@ func equalMatchersToElements(matchers []interface{}) (elements []interface{}) { return } -func matchers(expectedElems []interface{}) (matchers []interface{}) { - elems := expectedElems - if len(expectedElems) == 1 && isArrayOrSlice(expectedElems[0]) { - elems = []interface{}{} - value := reflect.ValueOf(expectedElems[0]) - for i := 0; i < value.Len(); i++ { - elems = append(elems, value.Index(i).Interface()) - } +func flatten(elems []interface{}) []interface{} { + if len(elems) != 1 || !isArrayOrSlice(elems[0]) { + return elems } - for _, e := range elems { + value := reflect.ValueOf(elems[0]) + flattened := make([]interface{}, value.Len()) + for i := 0; i < value.Len(); i++ { + flattened[i] = value.Index(i).Interface() + } + return flattened +} + +func matchers(expectedElems []interface{}) (matchers []interface{}) { + for _, e := range flatten(expectedElems) { matcher, isMatcher := e.(omegaMatcher) if !isMatcher { matcher = &EqualMatcher{Expected: e} @@ -77,6 +81,29 @@ func matchers(expectedElems []interface{}) (matchers []interface{}) { return } +func presentable(elems []interface{}) interface{} { + elems = flatten(elems) + + if len(elems) == 0 { + return []interface{}{} + } + + sv := reflect.ValueOf(elems) + tt := sv.Index(0).Elem().Type() + for i := 1; i < sv.Len(); i++ { + if sv.Index(i).Elem().Type() != tt { + return elems + } + } + + ss := reflect.MakeSlice(reflect.SliceOf(tt), sv.Len(), sv.Len()) + for i := 0; i < sv.Len(); i++ { + ss.Index(i).Set(sv.Index(i).Elem()) + } + + return ss.Interface() +} + func valuesOf(actual interface{}) []interface{} { value := reflect.ValueOf(actual) values := []interface{}{} @@ -95,11 +122,11 @@ func valuesOf(actual interface{}) []interface{} { } func (matcher *ConsistOfMatcher) FailureMessage(actual interface{}) (message string) { - message = format.Message(actual, "to consist of", matcher.Elements) + message = format.Message(actual, "to consist of", presentable(matcher.Elements)) message = appendMissingElements(message, matcher.missingElements) if len(matcher.extraElements) > 0 { message = fmt.Sprintf("%s\nthe extra elements were\n%s", message, - format.Object(matcher.extraElements, 1)) + format.Object(presentable(matcher.extraElements), 1)) } return } @@ -109,9 +136,9 @@ func appendMissingElements(message string, missingElements []interface{}) string return message } return fmt.Sprintf("%s\nthe missing elements were\n%s", message, - format.Object(missingElements, 1)) + format.Object(presentable(missingElements), 1)) } func (matcher *ConsistOfMatcher) NegatedFailureMessage(actual interface{}) (message string) { - return format.Message(actual, "not to consist of", matcher.Elements) + return format.Message(actual, "not to consist of", presentable(matcher.Elements)) } diff --git a/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go b/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go index 19a9e78f8..946cd8bea 100644 --- a/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go @@ -35,10 +35,10 @@ func (matcher *ContainElementsMatcher) Match(actual interface{}) (success 
bool, } func (matcher *ContainElementsMatcher) FailureMessage(actual interface{}) (message string) { - message = format.Message(actual, "to contain elements", matcher.Elements) + message = format.Message(actual, "to contain elements", presentable(matcher.Elements)) return appendMissingElements(message, matcher.missingElements) } func (matcher *ContainElementsMatcher) NegatedFailureMessage(actual interface{}) (message string) { - return format.Message(actual, "not to contain elements", matcher.Elements) + return format.Message(actual, "not to contain elements", presentable(matcher.Elements)) } diff --git a/vendor/github.com/onsi/gomega/matchers/have_field.go b/vendor/github.com/onsi/gomega/matchers/have_field.go new file mode 100644 index 000000000..e1fe934d5 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_field.go @@ -0,0 +1,87 @@ +package matchers + +import ( + "fmt" + "reflect" + "strings" + + "github.com/onsi/gomega/format" +) + +func extractField(actual interface{}, field string) (interface{}, error) { + fields := strings.SplitN(field, ".", 2) + actualValue := reflect.ValueOf(actual) + + if actualValue.Kind() == reflect.Ptr { + actualValue = actualValue.Elem() + } + if actualValue == (reflect.Value{}) { + return nil, fmt.Errorf("HaveField encountered nil while dereferencing a pointer of type %T.", actual) + } + + if actualValue.Kind() != reflect.Struct { + return nil, fmt.Errorf("HaveField encountered:\n%s\nWhich is not a struct.", format.Object(actual, 1)) + } + + var extractedValue reflect.Value + + if strings.HasSuffix(fields[0], "()") { + extractedValue = actualValue.MethodByName(strings.TrimSuffix(fields[0], "()")) + if extractedValue == (reflect.Value{}) { + return nil, fmt.Errorf("HaveField could not find method named '%s' in struct of type %T.", fields[0], actual) + } + t := extractedValue.Type() + if t.NumIn() != 0 || t.NumOut() != 1 { + return nil, fmt.Errorf("HaveField found an invalid method named '%s' in struct of type %T.\nMethods must take no arguments and return exactly one value.", fields[0], actual) + } + extractedValue = extractedValue.Call([]reflect.Value{})[0] + } else { + extractedValue = actualValue.FieldByName(fields[0]) + if extractedValue == (reflect.Value{}) { + return nil, fmt.Errorf("HaveField could not find field named '%s' in struct:\n%s", fields[0], format.Object(actual, 1)) + } + } + + if len(fields) == 1 { + return extractedValue.Interface(), nil + } else { + return extractField(extractedValue.Interface(), fields[1]) + } +} + +type HaveFieldMatcher struct { + Field string + Expected interface{} + + extractedField interface{} + expectedMatcher omegaMatcher +} + +func (matcher *HaveFieldMatcher) Match(actual interface{}) (success bool, err error) { + matcher.extractedField, err = extractField(actual, matcher.Field) + if err != nil { + return false, err + } + + var isMatcher bool + matcher.expectedMatcher, isMatcher = matcher.Expected.(omegaMatcher) + if !isMatcher { + matcher.expectedMatcher = &EqualMatcher{Expected: matcher.Expected} + } + + return matcher.expectedMatcher.Match(matcher.extractedField) +} + +func (matcher *HaveFieldMatcher) FailureMessage(actual interface{}) (message string) { + message = fmt.Sprintf("Value for field '%s' failed to satisfy matcher.\n", matcher.Field) + message += matcher.expectedMatcher.FailureMessage(matcher.extractedField) + + return message +} + +func (matcher *HaveFieldMatcher) NegatedFailureMessage(actual interface{}) (message string) { + message = fmt.Sprintf("Value for field '%s' satisfied matcher, 
but should not have.\n", matcher.Field) + message += matcher.expectedMatcher.NegatedFailureMessage(matcher.extractedField) + + return message +} diff --git a/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go new file mode 100644 index 000000000..6a3dcdc35 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go @@ -0,0 +1,101 @@ +package matchers + +import ( + "fmt" + "net/http" + "net/http/httptest" + + "github.com/onsi/gomega/format" + "github.com/onsi/gomega/internal/gutil" + "github.com/onsi/gomega/types" +) + +type HaveHTTPBodyMatcher struct { + Expected interface{} + cachedBody []byte +} + +func (matcher *HaveHTTPBodyMatcher) Match(actual interface{}) (bool, error) { + body, err := matcher.body(actual) + if err != nil { + return false, err + } + + switch e := matcher.Expected.(type) { + case string: + return (&EqualMatcher{Expected: e}).Match(string(body)) + case []byte: + return (&EqualMatcher{Expected: e}).Match(body) + case types.GomegaMatcher: + return e.Match(body) + default: + return false, fmt.Errorf("HaveHTTPBody matcher expects string, []byte, or GomegaMatcher. Got:\n%s", format.Object(matcher.Expected, 1)) + } +} + +func (matcher *HaveHTTPBodyMatcher) FailureMessage(actual interface{}) (message string) { + body, err := matcher.body(actual) + if err != nil { + return fmt.Sprintf("failed to read body: %s", err) + } + + switch e := matcher.Expected.(type) { + case string: + return (&EqualMatcher{Expected: e}).FailureMessage(string(body)) + case []byte: + return (&EqualMatcher{Expected: e}).FailureMessage(body) + case types.GomegaMatcher: + return e.FailureMessage(body) + default: + return fmt.Sprintf("HaveHTTPBody matcher expects string, []byte, or GomegaMatcher. Got:\n%s", format.Object(matcher.Expected, 1)) + } +} + +func (matcher *HaveHTTPBodyMatcher) NegatedFailureMessage(actual interface{}) (message string) { + body, err := matcher.body(actual) + if err != nil { + return fmt.Sprintf("failed to read body: %s", err) + } + + switch e := matcher.Expected.(type) { + case string: + return (&EqualMatcher{Expected: e}).NegatedFailureMessage(string(body)) + case []byte: + return (&EqualMatcher{Expected: e}).NegatedFailureMessage(body) + case types.GomegaMatcher: + return e.NegatedFailureMessage(body) + default: + return fmt.Sprintf("HaveHTTPBody matcher expects string, []byte, or GomegaMatcher. Got:\n%s", format.Object(matcher.Expected, 1)) + } +} + +// body returns the body. It is cached because once we read it in Match() +// the Reader is closed and it is not readable again in FailureMessage() +// or NegatedFailureMessage() +func (matcher *HaveHTTPBodyMatcher) body(actual interface{}) ([]byte, error) { + if matcher.cachedBody != nil { + return matcher.cachedBody, nil + } + + body := func(a *http.Response) ([]byte, error) { + if a.Body != nil { + defer a.Body.Close() + var err error + matcher.cachedBody, err = gutil.ReadAll(a.Body) + if err != nil { + return nil, fmt.Errorf("error reading response body: %w", err) + } + } + return matcher.cachedBody, nil + } + + switch a := actual.(type) { + case *http.Response: + return body(a) + case *httptest.ResponseRecorder: + return body(a.Result()) + default: + return nil, fmt.Errorf("HaveHTTPBody matcher expects *http.Response or *httptest.ResponseRecorder. 
Got:\n%s", format.Object(actual, 1)) + } + +} diff --git a/vendor/github.com/onsi/gomega/matchers/have_http_header_with_value_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_http_header_with_value_matcher.go new file mode 100644 index 000000000..c256f452e --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_http_header_with_value_matcher.go @@ -0,0 +1,81 @@ +package matchers + +import ( + "fmt" + "net/http" + "net/http/httptest" + + "github.com/onsi/gomega/format" + "github.com/onsi/gomega/types" +) + +type HaveHTTPHeaderWithValueMatcher struct { + Header string + Value interface{} +} + +func (matcher *HaveHTTPHeaderWithValueMatcher) Match(actual interface{}) (success bool, err error) { + headerValue, err := matcher.extractHeader(actual) + if err != nil { + return false, err + } + + headerMatcher, err := matcher.getSubMatcher() + if err != nil { + return false, err + } + + return headerMatcher.Match(headerValue) +} + +func (matcher *HaveHTTPHeaderWithValueMatcher) FailureMessage(actual interface{}) string { + headerValue, err := matcher.extractHeader(actual) + if err != nil { + panic(err) // protected by Match() + } + + headerMatcher, err := matcher.getSubMatcher() + if err != nil { + panic(err) // protected by Match() + } + + diff := format.IndentString(headerMatcher.FailureMessage(headerValue), 1) + return fmt.Sprintf("HTTP header %q:\n%s", matcher.Header, diff) +} + +func (matcher *HaveHTTPHeaderWithValueMatcher) NegatedFailureMessage(actual interface{}) (message string) { + headerValue, err := matcher.extractHeader(actual) + if err != nil { + panic(err) // protected by Match() + } + + headerMatcher, err := matcher.getSubMatcher() + if err != nil { + panic(err) // protected by Match() + } + + diff := format.IndentString(headerMatcher.NegatedFailureMessage(headerValue), 1) + return fmt.Sprintf("HTTP header %q:\n%s", matcher.Header, diff) +} + +func (matcher *HaveHTTPHeaderWithValueMatcher) getSubMatcher() (types.GomegaMatcher, error) { + switch m := matcher.Value.(type) { + case string: + return &EqualMatcher{Expected: matcher.Value}, nil + case types.GomegaMatcher: + return m, nil + default: + return nil, fmt.Errorf("HaveHTTPHeaderWithValue matcher must be passed a string or a GomegaMatcher. Got:\n%s", format.Object(matcher.Value, 1)) + } +} + +func (matcher *HaveHTTPHeaderWithValueMatcher) extractHeader(actual interface{}) (string, error) { + switch r := actual.(type) { + case *http.Response: + return r.Header.Get(matcher.Header), nil + case *httptest.ResponseRecorder: + return r.Result().Header.Get(matcher.Header), nil + default: + return "", fmt.Errorf("HaveHTTPHeaderWithValue matcher expects *http.Response or *httptest.ResponseRecorder. 
Got:\n%s", format.Object(actual, 1)) + } +} diff --git a/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go index 3ce4800b7..0f66e46ec 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go @@ -4,12 +4,15 @@ import ( "fmt" "net/http" "net/http/httptest" + "reflect" + "strings" "github.com/onsi/gomega/format" + "github.com/onsi/gomega/internal/gutil" ) type HaveHTTPStatusMatcher struct { - Expected interface{} + Expected []interface{} } func (matcher *HaveHTTPStatusMatcher) Match(actual interface{}) (success bool, err error) { @@ -23,20 +26,71 @@ func (matcher *HaveHTTPStatusMatcher) Match(actual interface{}) (success bool, e return false, fmt.Errorf("HaveHTTPStatus matcher expects *http.Response or *httptest.ResponseRecorder. Got:\n%s", format.Object(actual, 1)) } - switch e := matcher.Expected.(type) { - case int: - return resp.StatusCode == e, nil - case string: - return resp.Status == e, nil + if len(matcher.Expected) == 0 { + return false, fmt.Errorf("HaveHTTPStatus matcher must be passed an int or a string. Got nothing") } - return false, fmt.Errorf("HaveHTTPStatus matcher must be passed an int or a string. Got:\n%s", format.Object(matcher.Expected, 1)) + for _, expected := range matcher.Expected { + switch e := expected.(type) { + case int: + if resp.StatusCode == e { + return true, nil + } + case string: + if resp.Status == e { + return true, nil + } + default: + return false, fmt.Errorf("HaveHTTPStatus matcher must be passed int or string types. Got:\n%s", format.Object(expected, 1)) + } + } + + return false, nil } func (matcher *HaveHTTPStatusMatcher) FailureMessage(actual interface{}) (message string) { - return format.Message(actual, "to have HTTP status", matcher.Expected) + return fmt.Sprintf("Expected\n%s\n%s\n%s", formatHttpResponse(actual), "to have HTTP status", matcher.expectedString()) } func (matcher *HaveHTTPStatusMatcher) NegatedFailureMessage(actual interface{}) (message string) { - return format.Message(actual, "not to have HTTP status", matcher.Expected) + return fmt.Sprintf("Expected\n%s\n%s\n%s", formatHttpResponse(actual), "not to have HTTP status", matcher.expectedString()) +} + +func (matcher *HaveHTTPStatusMatcher) expectedString() string { + var lines []string + for _, expected := range matcher.Expected { + lines = append(lines, format.Object(expected, 1)) + } + return strings.Join(lines, "\n") +} + +func formatHttpResponse(input interface{}) string { + var resp *http.Response + switch r := input.(type) { + case *http.Response: + resp = r + case *httptest.ResponseRecorder: + resp = r.Result() + default: + return "cannot format invalid HTTP response" + } + + body := "" + if resp.Body != nil { + defer resp.Body.Close() + data, err := gutil.ReadAll(resp.Body) + if err != nil { + data = []byte("") + } + body = format.Object(string(data), 0) + } + + var s strings.Builder + s.WriteString(fmt.Sprintf("%s<%s>: {\n", format.Indent, reflect.TypeOf(input))) + s.WriteString(fmt.Sprintf("%s%sStatus: %s\n", format.Indent, format.Indent, format.Object(resp.Status, 0))) + s.WriteString(fmt.Sprintf("%s%sStatusCode: %s\n", format.Indent, format.Indent, format.Object(resp.StatusCode, 0))) + s.WriteString(fmt.Sprintf("%s%sBody: %s\n", format.Indent, format.Indent, body)) + s.WriteString(fmt.Sprintf("%s}", format.Indent)) + + return s.String() } diff --git 
a/vendor/github.com/onsi/gomega/matchers/have_value.go b/vendor/github.com/onsi/gomega/matchers/have_value.go new file mode 100644 index 000000000..f67252835 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_value.go @@ -0,0 +1,54 @@ +package matchers + +import ( + "errors" + "reflect" + + "github.com/onsi/gomega/format" + "github.com/onsi/gomega/types" +) + +const maxIndirections = 31 + +type HaveValueMatcher struct { + Matcher types.GomegaMatcher // the matcher to apply to the "resolved" actual value. + resolvedActual interface{} // the ("resolved") value. +} + +func (m *HaveValueMatcher) Match(actual interface{}) (bool, error) { + val := reflect.ValueOf(actual) + for allowedIndirs := maxIndirections; allowedIndirs > 0; allowedIndirs-- { + // return an error if value isn't valid. Please note that we cannot + // check for nil here, as we might not deal with a pointer or interface + // at this point. + if !val.IsValid() { + return false, errors.New(format.Message( + actual, "not to be ")) + } + switch val.Kind() { + case reflect.Ptr, reflect.Interface: + // resolve pointers and interfaces to their values, then rinse and + // repeat. + if val.IsNil() { + return false, errors.New(format.Message( + actual, "not to be ")) + } + val = val.Elem() + continue + default: + // forward the final value to the specified matcher. + m.resolvedActual = val.Interface() + return m.Matcher.Match(m.resolvedActual) + } + } + // too many indirections: extreme star gazing, indeed...? + return false, errors.New(format.Message(actual, "too many indirections")) +} + +func (m *HaveValueMatcher) FailureMessage(_ interface{}) (message string) { + return m.Matcher.FailureMessage(m.resolvedActual) +} + +func (m *HaveValueMatcher) NegatedFailureMessage(_ interface{}) (message string) { + return m.Matcher.NegatedFailureMessage(m.resolvedActual) +} diff --git a/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go index 4e09239ff..c8993a86d 100644 --- a/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go @@ -1,11 +1,11 @@ package matchers import ( + "errors" "fmt" "reflect" "github.com/onsi/gomega/format" - "golang.org/x/xerrors" ) type MatchErrorMatcher struct { @@ -25,7 +25,7 @@ func (matcher *MatchErrorMatcher) Match(actual interface{}) (success bool, err e expected := matcher.Expected if isError(expected) { - return reflect.DeepEqual(actualErr, expected) || xerrors.Is(actualErr, expected.(error)), nil + return reflect.DeepEqual(actualErr, expected) || errors.Is(actualErr, expected.(error)), nil } if isString(expected) { diff --git a/vendor/github.com/onsi/gomega/matchers/not.go b/vendor/github.com/onsi/gomega/matchers/not.go index 2c91670bd..78b71910d 100644 --- a/vendor/github.com/onsi/gomega/matchers/not.go +++ b/vendor/github.com/onsi/gomega/matchers/not.go @@ -1,7 +1,6 @@ package matchers import ( - "github.com/onsi/gomega/internal/oraclematcher" "github.com/onsi/gomega/types" ) @@ -26,5 +25,5 @@ func (m *NotMatcher) NegatedFailureMessage(actual interface{}) (message string) } func (m *NotMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { - return oraclematcher.MatchMayChangeInTheFuture(m.Matcher, actual) // just return m.Matcher's value + return types.MatchMayChangeInTheFuture(m.Matcher, actual) // just return m.Matcher's value } diff --git a/vendor/github.com/onsi/gomega/matchers/or.go b/vendor/github.com/onsi/gomega/matchers/or.go index 
3bf799800..841ae26ab 100644 --- a/vendor/github.com/onsi/gomega/matchers/or.go +++ b/vendor/github.com/onsi/gomega/matchers/or.go @@ -4,7 +4,6 @@ import ( "fmt" "github.com/onsi/gomega/format" - "github.com/onsi/gomega/internal/oraclematcher" "github.com/onsi/gomega/types" ) @@ -54,11 +53,11 @@ func (m *OrMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { if m.firstSuccessfulMatcher != nil { // one of the matchers succeeded.. it must be able to change in order to affect the result - return oraclematcher.MatchMayChangeInTheFuture(m.firstSuccessfulMatcher, actual) + return types.MatchMayChangeInTheFuture(m.firstSuccessfulMatcher, actual) } else { // so all matchers failed.. Any one of them changing would change the result. for _, matcher := range m.Matchers { - if oraclematcher.MatchMayChangeInTheFuture(matcher, actual) { + if types.MatchMayChangeInTheFuture(matcher, actual) { return true } } diff --git a/vendor/github.com/onsi/gomega/matchers/satisfy_matcher.go b/vendor/github.com/onsi/gomega/matchers/satisfy_matcher.go new file mode 100644 index 000000000..ec68fe8b6 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/satisfy_matcher.go @@ -0,0 +1,66 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type SatisfyMatcher struct { + Predicate interface{} + + // cached type + predicateArgType reflect.Type +} + +func NewSatisfyMatcher(predicate interface{}) *SatisfyMatcher { + if predicate == nil { + panic("predicate cannot be nil") + } + predicateType := reflect.TypeOf(predicate) + if predicateType.Kind() != reflect.Func { + panic("predicate must be a function") + } + if predicateType.NumIn() != 1 { + panic("predicate must have 1 argument") + } + if predicateType.NumOut() != 1 || predicateType.Out(0).Kind() != reflect.Bool { + panic("predicate must return bool") + } + + return &SatisfyMatcher{ + Predicate: predicate, + predicateArgType: predicateType.In(0), + } +} + +func (m *SatisfyMatcher) Match(actual interface{}) (success bool, err error) { + // prepare a parameter to pass to the predicate + var param reflect.Value + if actual != nil && reflect.TypeOf(actual).AssignableTo(m.predicateArgType) { + // The dynamic type of actual is compatible with the predicate argument. + param = reflect.ValueOf(actual) + + } else if actual == nil && m.predicateArgType.Kind() == reflect.Interface { + // The dynamic type of actual is unknown, so there's no way to make its + // reflect.Value. Create a nil of the predicate argument, which is known. 
+ param = reflect.Zero(m.predicateArgType) + + } else { + return false, fmt.Errorf("predicate expects '%s' but we have '%T'", m.predicateArgType, actual) + } + + // call the predicate with `actual` + fn := reflect.ValueOf(m.Predicate) + result := fn.Call([]reflect.Value{param}) + return result[0].Bool(), nil +} + +func (m *SatisfyMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to satisfy predicate", m.Predicate) +} + +func (m *SatisfyMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to not satisfy predicate", m.Predicate) +} diff --git a/vendor/github.com/onsi/gomega/matchers/with_transform.go b/vendor/github.com/onsi/gomega/matchers/with_transform.go index 8e58d8a0f..6f743b1b3 100644 --- a/vendor/github.com/onsi/gomega/matchers/with_transform.go +++ b/vendor/github.com/onsi/gomega/matchers/with_transform.go @@ -4,13 +4,12 @@ import ( "fmt" "reflect" - "github.com/onsi/gomega/internal/oraclematcher" "github.com/onsi/gomega/types" ) type WithTransformMatcher struct { // input - Transform interface{} // must be a function of one parameter that returns one value + Transform interface{} // must be a function of one parameter that returns one value and an optional error Matcher types.GomegaMatcher // cached value @@ -20,6 +19,9 @@ type WithTransformMatcher struct { transformedValue interface{} } +// reflect.Type for error +var errorT = reflect.TypeOf((*error)(nil)).Elem() + func NewWithTransformMatcher(transform interface{}, matcher types.GomegaMatcher) *WithTransformMatcher { if transform == nil { panic("transform function cannot be nil") @@ -28,8 +30,10 @@ func NewWithTransformMatcher(transform interface{}, matcher types.GomegaMatcher) if txType.NumIn() != 1 { panic("transform function must have 1 argument") } - if txType.NumOut() != 1 { - panic("transform function must have 1 return value") + if numout := txType.NumOut(); numout != 1 { + if numout != 2 || !txType.Out(1).AssignableTo(errorT) { + panic("transform function must either have 1 return value, or 1 return value plus 1 error value") + } } return &WithTransformMatcher{ @@ -40,15 +44,29 @@ func NewWithTransformMatcher(transform interface{}, matcher types.GomegaMatcher) } func (m *WithTransformMatcher) Match(actual interface{}) (bool, error) { - // return error if actual's type is incompatible with Transform function's argument type - actualType := reflect.TypeOf(actual) - if !actualType.AssignableTo(m.transformArgType) { - return false, fmt.Errorf("Transform function expects '%s' but we have '%s'", m.transformArgType, actualType) + // prepare a parameter to pass to the Transform function + var param reflect.Value + if actual != nil && reflect.TypeOf(actual).AssignableTo(m.transformArgType) { + // The dynamic type of actual is compatible with the transform argument. + param = reflect.ValueOf(actual) + + } else if actual == nil && m.transformArgType.Kind() == reflect.Interface { + // The dynamic type of actual is unknown, so there's no way to make its + // reflect.Value. Create a nil of the transform argument, which is known. 
+ param = reflect.Zero(m.transformArgType) + + } else { + return false, fmt.Errorf("Transform function expects '%s' but we have '%T'", m.transformArgType, actual) } // call the Transform function with `actual` fn := reflect.ValueOf(m.Transform) - result := fn.Call([]reflect.Value{reflect.ValueOf(actual)}) + result := fn.Call([]reflect.Value{param}) + if len(result) == 2 { + if !result[1].IsNil() { + return false, fmt.Errorf("Transform function failed: %s", result[1].Interface().(error).Error()) + } + } m.transformedValue = result[0].Interface() // expect exactly one value return m.Matcher.Match(m.transformedValue) @@ -68,5 +86,5 @@ func (m *WithTransformMatcher) MatchMayChangeInTheFuture(_ interface{}) bool { // Querying the next matcher is fine if the transformer always will return the same value. // But if the transformer is non-deterministic and returns a different value each time, then there // is no point in querying the next matcher, since it can only comment on the last transformed value. - return oraclematcher.MatchMayChangeInTheFuture(m.Matcher, m.transformedValue) + return types.MatchMayChangeInTheFuture(m.Matcher, m.transformedValue) } diff --git a/vendor/github.com/onsi/gomega/tools b/vendor/github.com/onsi/gomega/tools new file mode 100644 index 000000000..e4195cf36 --- /dev/null +++ b/vendor/github.com/onsi/gomega/tools @@ -0,0 +1,8 @@ +//go:build tools +// +build tools + +package main + +import ( + _ "github.com/onsi/ginkgo/v2/ginkgo" +) diff --git a/vendor/github.com/onsi/gomega/types/types.go b/vendor/github.com/onsi/gomega/types/types.go index ac59a3a5a..c315ef065 100644 --- a/vendor/github.com/onsi/gomega/types/types.go +++ b/vendor/github.com/onsi/gomega/types/types.go @@ -1,21 +1,35 @@ package types -type TWithHelper interface { - Helper() -} +import ( + "time" +) type GomegaFailHandler func(message string, callerSkip ...int) -type GomegaFailWrapper struct { - Fail GomegaFailHandler - TWithHelper TWithHelper -} - //A simple *testing.T interface wrapper type GomegaTestingT interface { + Helper() Fatalf(format string, args ...interface{}) } +// Gomega represents an object that can perform synchronous and assynchronous assertions with Gomega matchers +type Gomega interface { + Ω(actual interface{}, extra ...interface{}) Assertion + Expect(actual interface{}, extra ...interface{}) Assertion + ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) Assertion + + Eventually(actual interface{}, intervals ...interface{}) AsyncAssertion + EventuallyWithOffset(offset int, actual interface{}, intervals ...interface{}) AsyncAssertion + + Consistently(actual interface{}, intervals ...interface{}) AsyncAssertion + ConsistentlyWithOffset(offset int, actual interface{}, intervals ...interface{}) AsyncAssertion + + SetDefaultEventuallyTimeout(time.Duration) + SetDefaultEventuallyPollingInterval(time.Duration) + SetDefaultConsistentlyDuration(time.Duration) + SetDefaultConsistentlyPollingInterval(time.Duration) +} + //All Gomega matchers must implement the GomegaMatcher interface // //For details on writing custom matchers, check out: http://onsi.github.io/gomega/#adding-your-own-matchers @@ -24,3 +38,50 @@ type GomegaMatcher interface { FailureMessage(actual interface{}) (message string) NegatedFailureMessage(actual interface{}) (message string) } + +/* +GomegaMatchers that also match the OracleMatcher interface can convey information about +whether or not their result will change upon future attempts. 
+ +This allows `Eventually` and `Consistently` to short circuit if success becomes impossible. + +For example, a process' exit code can never change. So, gexec's Exit matcher returns `true` +for `MatchMayChangeInTheFuture` until the process exits, at which point it returns `false` forevermore. +*/ +type OracleMatcher interface { + MatchMayChangeInTheFuture(actual interface{}) bool +} + +func MatchMayChangeInTheFuture(matcher GomegaMatcher, value interface{}) bool { + oracleMatcher, ok := matcher.(OracleMatcher) + if !ok { + return true + } + + return oracleMatcher.MatchMayChangeInTheFuture(value) +} + +// AsyncAssertions are returned by Eventually and Consistently and enable matchers to be polled repeatedly to ensure +// they are eventually satisfied +type AsyncAssertion interface { + Should(matcher GomegaMatcher, optionalDescription ...interface{}) bool + ShouldNot(matcher GomegaMatcher, optionalDescription ...interface{}) bool + + WithOffset(offset int) AsyncAssertion + WithTimeout(interval time.Duration) AsyncAssertion + WithPolling(interval time.Duration) AsyncAssertion +} + +// Assertions are returned by Ω and Expect and enable assertions against Gomega matchers +type Assertion interface { + Should(matcher GomegaMatcher, optionalDescription ...interface{}) bool + ShouldNot(matcher GomegaMatcher, optionalDescription ...interface{}) bool + + To(matcher GomegaMatcher, optionalDescription ...interface{}) bool + ToNot(matcher GomegaMatcher, optionalDescription ...interface{}) bool + NotTo(matcher GomegaMatcher, optionalDescription ...interface{}) bool + + WithOffset(offset int) Assertion + + Error() Assertion +} diff --git a/vendor/github.com/openshift/api/machine/v1/common.go b/vendor/github.com/openshift/api/machine/v1/common.go new file mode 100644 index 000000000..941d22b1c --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1/common.go @@ -0,0 +1,13 @@ +package v1 + +// InstanceTenancy indicates if instance should run on shared or single-tenant hardware. +type InstanceTenancy string + +const ( + // DefaultTenancy instance runs on shared hardware + DefaultTenancy InstanceTenancy = "default" + // DedicatedTenancy instance runs on single-tenant hardware + DedicatedTenancy InstanceTenancy = "dedicated" + // HostTenancy instance runs on a Dedicated Host, which is an isolated server with configurations that you can control. 
+ HostTenancy InstanceTenancy = "host" +) diff --git a/vendor/github.com/openshift/api/machine/v1/doc.go b/vendor/github.com/openshift/api/machine/v1/doc.go new file mode 100644 index 000000000..7bd97c950 --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1/doc.go @@ -0,0 +1,7 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +kubebuilder:validation:Optional +// +groupName=machine.openshift.io +package v1 diff --git a/vendor/github.com/openshift/api/machine/v1/register.go b/vendor/github.com/openshift/api/machine/v1/register.go new file mode 100644 index 000000000..4fb053eb6 --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1/register.go @@ -0,0 +1,35 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "machine.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + metav1.AddToGroupVersion(scheme, GroupVersion) + + return nil +} diff --git a/vendor/github.com/openshift/api/machine/v1/types_alibabaprovider.go b/vendor/github.com/openshift/api/machine/v1/types_alibabaprovider.go new file mode 100644 index 000000000..9c73280fb --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1/types_alibabaprovider.go @@ -0,0 +1,364 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// AlibabaDiskPerformanceLevel enum attribute to describe a disk's performance level +type AlibabaDiskPerformanceLevel string + +// AlibabaDiskCatagory enum attribute to deescribe a disk's category +type AlibabaDiskCategory string + +// AlibabaDiskEncryptionMode enum attribute to describe whether to enable or disable disk encryption +type AlibabaDiskEncryptionMode string + +// AlibabaDiskPreservationPolicy enum attribute to describe whether to preserve or delete a disk upon instance removal +type AlibabaDiskPreservationPolicy string + +// AlibabaResourceReferenceType enum attribute to identify the type of resource reference +type AlibabaResourceReferenceType string + +const ( + // DeleteWithInstance enum property to delete disk with instance deletion + DeleteWithInstance AlibabaDiskPreservationPolicy = "DeleteWithInstance" + // PreserveDisk enum property to determine disk preservation with instance deletion + PreserveDisk AlibabaDiskPreservationPolicy = "PreserveDisk" + + // AlibabaDiskEncryptionEnabled enum property to enable disk encryption + AlibabaDiskEncryptionEnabled AlibabaDiskEncryptionMode = "encrypted" + // AlibabaDiskEncryptionDisabled enum property to disable disk encryption + AlibabaDiskEncryptionDisabled AlibabaDiskEncryptionMode = "disabled" + + // AlibabaDiskPerformanceLevel0 enum property to set the level at PL0 + PL0 AlibabaDiskPerformanceLevel = "PL0" + // AlibabaDiskPerformanceLevel1 enum property to set the level at PL1 + PL1 AlibabaDiskPerformanceLevel = "PL1" + // AlibabaDiskPerformanceLevel2 enum property to set the level at PL2 + PL2 AlibabaDiskPerformanceLevel = "PL2" + // AlibabaDiskPerformanceLevel3 enum property to set the level at PL3 + PL3 AlibabaDiskPerformanceLevel = "PL3" + + // AlibabaDiskCategoryUltraDisk enum proprty to set the category of disk to ultra disk + AlibabaDiskCatagoryUltraDisk AlibabaDiskCategory = "cloud_efficiency" + // AlibabaDiskCategorySSD enum proprty to set the category of disk to standard SSD + AlibabaDiskCatagorySSD AlibabaDiskCategory = "cloud_ssd" + // AlibabaDiskCategoryESSD enum proprty to set the category of disk to ESSD + AlibabaDiskCatagoryESSD AlibabaDiskCategory = "cloud_essd" + // AlibabaDiskCategoryBasic enum proprty to set the category of disk to basic + AlibabaDiskCatagoryBasic AlibabaDiskCategory = "cloud" + + // AlibabaResourceReferenceTypeID enum property to identify an ID type resource reference + AlibabaResourceReferenceTypeID AlibabaResourceReferenceType = "ID" + // AlibabaResourceReferenceTypeName enum property to identify an Name type resource reference + AlibabaResourceReferenceTypeName AlibabaResourceReferenceType = "Name" + // AlibabaResourceReferenceTypeTags enum property to identify a tags type resource reference + AlibabaResourceReferenceTypeTags AlibabaResourceReferenceType = "Tags" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AlibabaCloudMachineProviderConfig is the Schema for the alibabacloudmachineproviderconfig API +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
+// +openshift:compatibility-gen:level=1 +// +k8s:openapi-gen=true +type AlibabaCloudMachineProviderConfig struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // More detail about alibabacloud ECS + // https://www.alibabacloud.com/help/doc-detail/25499.htm?spm=a2c63.l28256.b99.727.496d7453jF7Moz + + //The instance type of the instance. + InstanceType string `json:"instanceType"` + + // The ID of the vpc + VpcID string `json:"vpcId"` + + // The ID of the region in which to create the instance. You can call the DescribeRegions operation to query the most recent region list. + RegionID string `json:"regionId"` + + // The ID of the zone in which to create the instance. You can call the DescribeZones operation to query the most recent region list. + ZoneID string `json:"zoneId"` + + // The ID of the image used to create the instance. + ImageID string `json:"imageId"` + + // DataDisks holds information regarding the extra disks attached to the instance + // +optional + DataDisks []DataDiskProperties `json:"dataDisk,omitempty"` + + // SecurityGroups is a list of security group references to assign to the instance. + // A reference holds either the security group ID, the resource name, or the required tags to search. + // When more than one security group is returned for a tag search, all the groups are associated with the instance up to the + // maximum number of security groups to which an instance can belong. + // For more information, see the "Security group limits" section in Limits. + // https://www.alibabacloud.com/help/en/doc-detail/25412.htm + SecurityGroups []AlibabaResourceReference `json:"securityGroups,omitempty"` + + // Bandwidth describes the internet bandwidth strategy for the instance + // +optional + Bandwidth BandwidthProperties `json:"bandwidth,omitempty"` + + // SystemDisk holds the properties regarding the system disk for the instance + // +optional + SystemDisk SystemDiskProperties `json:"systemDisk,omitempty"` + + // VSwitch is a reference to the vswitch to use for this instance. + // A reference holds either the vSwitch ID, the resource name, or the required tags to search. + // When more than one vSwitch is returned for a tag search, only the first vSwitch returned will be used. + // This parameter is required when you create an instance of the VPC type. + // You can call the DescribeVSwitches operation to query the created vSwitches. + VSwitch AlibabaResourceReference `json:"vSwitch"` + + // RAMRoleName is the name of the instance Resource Access Management (RAM) role. This allows the instance to perform API calls as this specified RAM role. + // +optional + RAMRoleName string `json:"ramRoleName,omitempty"` + + // ResourceGroup references the resource group to which to assign the instance. + // A reference holds either the resource group ID, the resource name, or the required tags to search. + // When more than one resource group are returned for a search, an error will be produced and the Machine will not be created. + // Resource Groups do not support searching by tags. + ResourceGroup AlibabaResourceReference `json:"resourceGroup"` + + // Tenancy specifies whether to create the instance on a dedicated host. + // Valid values: + // + // default: creates the instance on a non-dedicated host. + // host: creates the instance on a dedicated host. If you do not specify the DedicatedHostID parameter, Alibaba Cloud automatically selects a dedicated host for the instance. 
+ // Empty value means no opinion and the platform chooses the a default, which is subject to change over time. + // Currently the default is `default`. + // +optional + Tenancy InstanceTenancy `json:"tenancy,omitempty"` + + // UserDataSecret contains a local reference to a secret that contains the + // UserData to apply to the instance + // +optional + UserDataSecret *corev1.LocalObjectReference `json:"userDataSecret,omitempty"` + + // CredentialsSecret is a reference to the secret with alibabacloud credentials. Otherwise, defaults to permissions + // provided by attached RAM role where the actuator is running. + // +optional + CredentialsSecret *corev1.LocalObjectReference `json:"credentialsSecret,omitempty"` + + // Tags are the set of metadata to add to an instance. + // +optional + Tags []Tag `json:"tag,omitempty"` +} + +// ResourceTagReference is a reference to a specific AlibabaCloud resource by ID, or tags. +// Only one of ID or Tags may be specified. Specifying more than one will result in +// a validation error. +type AlibabaResourceReference struct { + // type identifies the resource reference type for this entry. + Type AlibabaResourceReferenceType `json:"type"` + + // ID of resource + // +optional + ID *string `json:"id,omitempty"` + + // Name of the resource + // +optional + Name *string `json:"name,omitempty"` + + // Tags is a set of metadata based upon ECS object tags used to identify a resource. + // For details about usage when multiple resources are found, please see the owning parent field documentation. + // +optional + Tags *[]Tag `json:"tags,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AlibabaCloudMachineProviderConfigList contains a list of AlibabaCloudMachineProviderConfig +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type AlibabaCloudMachineProviderConfigList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []AlibabaCloudMachineProviderConfig `json:"items"` +} + +// AlibabaCloudMachineProviderStatus is the Schema for the alibabacloudmachineproviderconfig API +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +//+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type AlibabaCloudMachineProviderStatus struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // InstanceID is the instance ID of the machine created in alibabacloud + // +optional + InstanceID *string `json:"instanceId,omitempty"` + + // InstanceState is the state of the alibabacloud instance for this machine + // +optional + InstanceState *string `json:"instanceState,omitempty"` + + // Conditions is a set of conditions associated with the Machine to indicate + // errors or other status + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// SystemDiskProperties contains the information regarding the system disk including performance, size, name, and category +type SystemDiskProperties struct { + // Category is the category of the system disk. + // Valid values: + // cloud_essd: ESSD. When the parameter is set to this value, you can use the SystemDisk.PerformanceLevel parameter to specify the performance level of the disk. + // cloud_efficiency: ultra disk. + // cloud_ssd: standard SSD. 
+ // cloud: basic disk.
+ // Empty value means no opinion and the platform chooses a default, which is subject to change over time.
+ // Currently for non-I/O optimized instances of retired instance types, the default is `cloud`.
+ // Currently for other instances, the default is `cloud_efficiency`.
+ // +kubebuilder:validation:Enum="cloud_efficiency"; "cloud_ssd"; "cloud_essd"; "cloud"
+ // +optional
+ Category string `json:"category,omitempty"`
+
+ // PerformanceLevel is the performance level of the ESSD used as the system disk.
+ // Valid values:
+ //
+ // PL0: A single ESSD can deliver up to 10,000 random read/write IOPS.
+ // PL1: A single ESSD can deliver up to 50,000 random read/write IOPS.
+ // PL2: A single ESSD can deliver up to 100,000 random read/write IOPS.
+ // PL3: A single ESSD can deliver up to 1,000,000 random read/write IOPS.
+ // Empty value means no opinion and the platform chooses a default, which is subject to change over time.
+ // Currently the default is `PL1`.
+ // For more information about ESSD performance levels, see ESSDs.
+ // +kubebuilder:validation:Enum="PL0"; "PL1"; "PL2"; "PL3"
+ // +optional
+ PerformanceLevel string `json:"performanceLevel,omitempty"`
+
+ // Name is the name of the system disk. If the name is specified the name must be 2 to 128 characters in length. It must start with a letter and cannot start with http:// or https://. It can contain letters, digits, colons (:), underscores (_), and hyphens (-).
+ // Empty value means the platform chooses a default, which is subject to change over time.
+ // Currently the default is `""`.
+ // +kubebuilder:validation:MaxLength=128
+ // +optional
+ Name string `json:"name,omitempty"`
+
+ // Size is the size of the system disk. Unit: GiB. Valid values: 20 to 500.
+ // The value must be at least 20 and greater than or equal to the size of the image.
+ // Empty value means the platform chooses a default, which is subject to change over time.
+ // Currently the default is `40` or the size of the image, whichever is greater.
+ // +optional
+ Size int64 `json:"size,omitempty"`
+}
+
+// DataDiskProperties contains the information regarding the data disk attached to an instance
+type DataDiskProperties struct {
+ // Name is the name of data disk N. If the name is specified the name must be 2 to 128 characters in length. It must start with a letter and cannot start with http:// or https://. It can contain letters, digits, colons (:), underscores (_), and hyphens (-).
+ //
+ // Empty value means the platform chooses a default, which is subject to change over time.
+ // Currently the default is `""`.
+ // +optional
+ Name string `name:"diskName,omitempty"`
+
+ // SnapshotID is the ID of the snapshot used to create data disk N. Valid values of N: 1 to 16.
+ //
+ // When the DataDisk.N.SnapshotID parameter is specified, the DataDisk.N.Size parameter is ignored. The data disk is created based on the size of the specified snapshot.
+ // Use snapshots created after July 15, 2013. Otherwise, an error is returned and your request is rejected.
+ //
+ // +optional
+ SnapshotID string `name:"snapshotId,omitempty"`
+
+ // Size of the data disk N. Valid values of N: 1 to 16. Unit: GiB. Valid values:
+ //
+ // Valid values when DataDisk.N.Category is set to cloud_efficiency: 20 to 32768
+ // Valid values when DataDisk.N.Category is set to cloud_ssd: 20 to 32768
+ // Valid values when DataDisk.N.Category is set to cloud_essd: 20 to 32768
+ // Valid values when DataDisk.N.Category is set to cloud: 5 to 2000
+ // The value of this parameter must be greater than or equal to the size of the snapshot specified by the SnapshotID parameter.
+ // +optional
+ Size int64 `name:"size,omitempty"`
+
+ // DiskEncryption specifies whether to encrypt data disk N.
+ //
+ // Empty value means the platform chooses a default, which is subject to change over time.
+ // Currently the default is `disabled`.
+ // +kubebuilder:validation:Enum="encrypted";"disabled"
+ // +optional
+ DiskEncryption AlibabaDiskEncryptionMode `name:"diskEncryption,omitempty"`
+
+ // PerformanceLevel is the performance level of the ESSD used as data disk N. The N value must be the same as that in DataDisk.N.Category when DataDisk.N.Category is set to cloud_essd.
+ // Empty value means no opinion and the platform chooses a default, which is subject to change over time.
+ // Currently the default is `PL1`.
+ // Valid values:
+ //
+ // PL0: A single ESSD can deliver up to 10,000 random read/write IOPS.
+ // PL1: A single ESSD can deliver up to 50,000 random read/write IOPS.
+ // PL2: A single ESSD can deliver up to 100,000 random read/write IOPS.
+ // PL3: A single ESSD can deliver up to 1,000,000 random read/write IOPS.
+ // For more information about ESSD performance levels, see ESSDs.
+ // +kubebuilder:validation:Enum="PL0"; "PL1"; "PL2"; "PL3"
+ // +optional
+ PerformanceLevel AlibabaDiskPerformanceLevel `name:"performanceLevel,omitempty"`
+
+ // Category describes the type of data disk N.
+ // Valid values:
+ // cloud_efficiency: ultra disk
+ // cloud_ssd: standard SSD
+ // cloud_essd: ESSD
+ // cloud: basic disk
+ // Empty value means no opinion and the platform chooses a default, which is subject to change over time.
+ // Currently for non-I/O optimized instances of retired instance types, the default is `cloud`.
+ // Currently for other instances, the default is `cloud_efficiency`.
+ // +kubebuilder:validation:Enum="cloud_efficiency"; "cloud_ssd"; "cloud_essd"; "cloud"
+ // +optional
+ Category AlibabaDiskCategory `name:"category,omitempty"`
+
+ // KMSKeyID is the ID of the Key Management Service (KMS) key to be used by data disk N.
+ // Empty value means no opinion and the platform chooses a default, which is subject to change over time.
+ // Currently the default is `""` which is interpreted as do not use KMSKey encryption.
+ // +optional
+ KMSKeyID string `name:"kmsKeyId,omitempty"`
+
+ // DiskPreservation specifies whether to release data disk N along with the instance.
+ // Empty value means no opinion and the platform chooses a default, which is subject to change over time.
+ // Currently the default is `DeleteWithInstance`.
+ // +kubebuilder:validation:Enum="DeleteWithInstance";"PreserveDisk"
+ // +optional
+ DiskPreservation AlibabaDiskPreservationPolicy `name:"diskPreservation,omitempty"`
+}
+
+// Tag The tags of ECS Instance
+type Tag struct {
+ // Key is the name of the key pair
+ Key string `name:"Key"`
+ // Value is the value or data of the key pair
+ Value string `name:"value"`
+}
+
+// BandwidthProperties describes the bandwidth strategy for the network of the instance
+type BandwidthProperties struct {
+ // InternetMaxBandwidthIn is the maximum inbound public bandwidth. Unit: Mbit/s. Valid values:
+ // When the purchased outbound public bandwidth is less than or equal to 10 Mbit/s, the valid values of this parameter are 1 to 10.
+ // Currently the default is `10` when outbound bandwidth is less than or equal to 10 Mbit/s.
+ // When the purchased outbound public bandwidth is greater than 10, the valid values are 1 to the InternetMaxBandwidthOut value.
+ // Currently the default is the value used for `InternetMaxBandwidthOut` when outbound public bandwidth is greater than 10.
+ // +optional
+ InternetMaxBandwidthIn int64 `json:"internetMaxBandwidthIn,omitempty"`
+
+ // InternetMaxBandwidthOut is the maximum outbound public bandwidth. Unit: Mbit/s. Valid values: 0 to 100.
+ // When a value greater than 0 is used then a public IP address is assigned to the instance.
+ // Empty value means no opinion and the platform chooses a default, which is subject to change over time.
+ // Currently the default is `0`.
+ // +optional
+ InternetMaxBandwidthOut int64 `json:"internetMaxBandwidthOut,omitempty"`
+}
diff --git a/vendor/github.com/openshift/api/machine/v1/types_nutanixprovider.go b/vendor/github.com/openshift/api/machine/v1/types_nutanixprovider.go
new file mode 100644
index 000000000..5781e614c
--- /dev/null
+++ b/vendor/github.com/openshift/api/machine/v1/types_nutanixprovider.go
@@ -0,0 +1,117 @@
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// NutanixMachineProviderConfig is the Schema for the nutanixmachineproviderconfigs API
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+// +k8s:openapi-gen=true
+type NutanixMachineProviderConfig struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // cluster is to identify the cluster (the Prism Element under management
+ // of the Prism Central), in which the Machine's VM will be created.
+ // The cluster identifier (uuid or name) can be obtained from the Prism Central console
+ // or using the prism_central API.
+ // +kubebuilder:validation:Required
+ Cluster NutanixResourceIdentifier `json:"cluster"`
+
+ // image is to identify the rhcos image uploaded to the Prism Central (PC)
+ // The image identifier (uuid or name) can be obtained from the Prism Central console
+ // or using the prism_central API.
+ // +kubebuilder:validation:Required
+ Image NutanixResourceIdentifier `json:"image"`
+
+ // subnets holds a list of identifiers (one or more) of the cluster's network subnets
+ // for the Machine's VM to connect to. The subnet identifiers (uuid or name) can be
+ // obtained from the Prism Central console or using the prism_central API.
+ // +kubebuilder:validation:Required + // +kubebuilder:validation:MinItems=1 + Subnets []NutanixResourceIdentifier `json:"subnets"` + + // vcpusPerSocket is the number of vCPUs per socket of the VM + // +kubebuilder:validation:Required + // +kubebuilder:validation:Minimum=1 + VCPUsPerSocket int32 `json:"vcpusPerSocket"` + + // vcpuSockets is the number of vCPU sockets of the VM + // +kubebuilder:validation:Required + // +kubebuilder:validation:Minimum=1 + VCPUSockets int32 `json:"vcpuSockets"` + + // memorySize is the memory size (in Quantity format) of the VM + // The minimum memorySize is 2Gi bytes + // +kubebuilder:validation:Required + MemorySize resource.Quantity `json:"memorySize"` + + // systemDiskSize is size (in Quantity format) of the system disk of the VM + // The minimum systemDiskSize is 20Gi bytes + // +kubebuilder:validation:Required + SystemDiskSize resource.Quantity `json:"systemDiskSize"` + + // userDataSecret is a local reference to a secret that contains the + // UserData to apply to the VM + UserDataSecret *corev1.LocalObjectReference `json:"userDataSecret,omitempty"` + + // credentialsSecret is a local reference to a secret that contains the + // credentials data to access Nutanix PC client + // +kubebuilder:validation:Required + CredentialsSecret *corev1.LocalObjectReference `json:"credentialsSecret"` +} + +// NutanixIdentifierType is an enumeration of different resource identifier types. +type NutanixIdentifierType string + +const ( + // NutanixIdentifierUUID is a resource identifier identifying the object by UUID. + NutanixIdentifierUUID NutanixIdentifierType = "uuid" + + // NutanixIdentifierName is a resource identifier identifying the object by Name. + NutanixIdentifierName NutanixIdentifierType = "name" +) + +// NutanixResourceIdentifier holds the identity of a Nutanix PC resource (cluster, image, subnet, etc.) +// +union +type NutanixResourceIdentifier struct { + // Type is the identifier type to use for this resource. + // +kubebuilder:validation:Required + // +kubebuilder:validation:Enum:=uuid;name + Type NutanixIdentifierType `json:"type"` + + // uuid is the UUID of the resource in the PC. + // +optional + UUID *string `json:"uuid,omitempty"` + + // name is the resource name in the PC + // +optional + Name *string `json:"name,omitempty"` +} + +// NutanixMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. +// It contains nutanix-specific status information. +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type NutanixMachineProviderStatus struct { + metav1.TypeMeta `json:",inline"` + + // conditions is a set of conditions associated with the Machine to indicate + // errors or other status + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // vmUUID is the Machine associated VM's UUID + // The field is missing before the VM is created. + // Once the VM is created, the field is filled with the VM's UUID and it will not change. + // The vmUUID is used to find the VM when updating the Machine status, + // and to delete the VM when the Machine is deleted. 
+ // +optional + VmUUID *string `json:"vmUUID,omitempty"` +} diff --git a/vendor/github.com/openshift/api/machine/v1/types_powervsprovider.go b/vendor/github.com/openshift/api/machine/v1/types_powervsprovider.go new file mode 100644 index 000000000..7c0ffbe9b --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1/types_powervsprovider.go @@ -0,0 +1,192 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// PowerVSResourceType enum attribute to identify the type of resource reference +type PowerVSResourceType string + +// PowerVSProcessorType enum attribute to identify the PowerVS instance processor type +type PowerVSProcessorType string + +const ( + // PowerVSResourceTypeID enum property to identify an ID type resource reference + PowerVSResourceTypeID PowerVSResourceType = "ID" + // PowerVSResourceTypeName enum property to identify a Name type resource reference + PowerVSResourceTypeName PowerVSResourceType = "Name" + // PowerVSResourceTypeRegEx enum property to identify a tags type resource reference + PowerVSResourceTypeRegEx PowerVSResourceType = "RegEx" + // PowerVSProcessorTypeDedicated enum property to identify a Dedicated Power VS processor type + PowerVSProcessorTypeDedicated PowerVSProcessorType = "Dedicated" + // PowerVSProcessorTypeShared enum property to identify a Shared Power VS processor type + PowerVSProcessorTypeShared PowerVSProcessorType = "Shared" + // PowerVSProcessorTypeCapped enum property to identify a Capped Power VS processor type + PowerVSProcessorTypeCapped PowerVSProcessorType = "Capped" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PowerVSMachineProviderConfig is the type that will be embedded in a Machine.Spec.ProviderSpec field +// for a PowerVS virtual machine. It is used by the PowerVS machine actuator to create a single Machine. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +// +k8s:openapi-gen=true +type PowerVSMachineProviderConfig struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // userDataSecret contains a local reference to a secret that contains the + // UserData to apply to the instance. + // +optional + UserDataSecret *PowerVSSecretReference `json:"userDataSecret,omitempty"` + + // credentialsSecret is a reference to the secret with IBM Cloud credentials. + // +optional + CredentialsSecret *PowerVSSecretReference `json:"credentialsSecret,omitempty"` + + // serviceInstance is the reference to the Power VS service on which the server instance(VM) will be created. + // Power VS service is a container for all Power VS instances at a specific geographic region. + // serviceInstance can be created via IBM Cloud catalog or CLI. + // supported serviceInstance identifier in PowerVSResource are Name and ID and that can be obtained from IBM Cloud UI or IBM Cloud cli. + // More detail about Power VS service instance. + // https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-creating-power-virtual-server + // +kubebuilder:validation:=Required + ServiceInstance PowerVSResource `json:"serviceInstance"` + + // image is to identify the rhcos image uploaded to IBM COS bucket which is used to create the instance. + // supported image identifier in PowerVSResource are Name and ID and that can be obtained from IBM Cloud UI or IBM Cloud cli. 
+ // +kubebuilder:validation:=Required + Image PowerVSResource `json:"image"` + + // network is the reference to the Network to use for this instance. + // supported network identifier in PowerVSResource are Name, ID and RegEx and that can be obtained from IBM Cloud UI or IBM Cloud cli. + // +kubebuilder:validation:=Required + Network PowerVSResource `json:"network"` + + // keyPairName is the name of the KeyPair to use for SSH. + // The key pair will be exposed to the instance via the instance metadata service. + // On boot, the OS will copy the public keypair into the authorized keys for the core user. + // +kubebuilder:validation:=Required + KeyPairName string `json:"keyPairName"` + + // systemType is the System type used to host the instance. + // systemType determines the number of cores and memory that is available. + // Few of the supported SystemTypes are s922,e880,e980. + // e880 systemType available only in Dallas Datacenters. + // e980 systemType available in Datacenters except Dallas and Washington. + // When omitted, this means that the user has no opinion and the platform is left to choose a + // reasonable default, which is subject to change over time. The current default is s922 which is generally available. + // + This is not an enum because we expect other values to be added later which should be supported implicitly. + // +optional + SystemType string `json:"systemType,omitempty"` + + // processorType is the VM instance processor type. + // It must be set to one of the following values: Dedicated, Capped or Shared. + // Dedicated: resources are allocated for a specific client, The hypervisor makes a 1:1 binding of a partition’s processor to a physical processor core. + // Shared: Shared among other clients. + // Capped: Shared, but resources do not expand beyond those that are requested, the amount of CPU time is Capped to the value specified for the entitlement. + // if the processorType is selected as Dedicated, then processors value cannot be fractional. + // When omitted, this means that the user has no opinion and the platform is left to choose a + // reasonable default, which is subject to change over time. The current default is Shared. + // +kubebuilder:validation:Enum:="Dedicated";"Shared";"Capped";"" + // +optional + ProcessorType PowerVSProcessorType `json:"processorType,omitempty"` + + // processors is the number of virtual processors in a virtual machine. + // when the processorType is selected as Dedicated the processors value cannot be fractional. + // maximum value for the Processors depends on the selected SystemType. + // when SystemType is set to e880 or e980 maximum Processors value is 143. + // when SystemType is set to s922 maximum Processors value is 15. + // minimum value for Processors depends on the selected ProcessorType. + // when ProcessorType is set as Shared or Capped, The minimum processors is 0.5. + // when ProcessorType is set as Dedicated, The minimum processors is 1. + // When omitted, this means that the user has no opinion and the platform is left to choose a + // reasonable default, which is subject to change over time. The default is set based on the selected ProcessorType. + // when ProcessorType selected as Dedicated, the default is set to 1. + // when ProcessorType selected as Shared or Capped, the default is set to 0.5. + // +optional + Processors intstr.IntOrString `json:"processors,omitempty"` + + // memoryGiB is the size of a virtual machine's memory, in GiB. 
+ // maximum value for the MemoryGiB depends on the selected SystemType. + // when SystemType is set to e880 maximum MemoryGiB value is 7463 GiB. + // when SystemType is set to e980 maximum MemoryGiB value is 15307 GiB. + // when SystemType is set to s922 maximum MemoryGiB value is 942 GiB. + // The minimum memory is 32 GiB. + // When omitted, this means the user has no opinion and the platform is left to choose a reasonable + // default, which is subject to change over time. The current default is 32. + // +optional + MemoryGiB int32 `json:"memoryGiB,omitempty"` +} + +// PowerVSResource is a reference to a specific PowerVS resource by ID, Name or RegEx +// Only one of ID, Name or RegEx may be specified. Specifying more than one will result in +// a validation error. +// +union +type PowerVSResource struct { + // Type identifies the resource type for this entry. + // Valid values are ID, Name and RegEx + // +kubebuilder:validation:Enum:=ID;Name;RegEx + // +optional + Type PowerVSResourceType `json:"type,omitempty"` + // ID of resource + // +optional + ID *string `json:"id,omitempty"` + // Name of resource + // +optional + Name *string `json:"name,omitempty"` + // Regex to find resource + // Regex contains the pattern to match to find a resource + // +optional + RegEx *string `json:"regex,omitempty"` +} + +// PowerVSMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. +// It contains PowerVS-specific status information. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +//+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type PowerVSMachineProviderStatus struct { + metav1.TypeMeta `json:",inline"` + + // conditions is a set of conditions associated with the Machine to indicate + // errors or other status + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // instanceId is the instance ID of the machine created in PowerVS + // instanceId uniquely identifies a Power VS server instance(VM) under a Power VS service. + // This will help in updating or deleting a VM in Power VS Cloud + // +optional + InstanceID *string `json:"instanceId,omitempty"` + + // serviceInstanceID is the reference to the Power VS ServiceInstance on which the machine instance will be created. + // serviceInstanceID uniquely identifies the Power VS service + // By setting serviceInstanceID it will become easy and efficient to fetch a server instance(VM) within Power VS Cloud. + // +optional + ServiceInstanceID *string `json:"serviceInstanceID,omitempty"` + + // instanceState is the state of the PowerVS instance for this machine + // Possible instance states are Active, Build, ShutOff, Reboot + // This is used to display additional information to user regarding instance current state + // +optional + InstanceState *string `json:"instanceState,omitempty"` +} + +// PowerVSSecretReference contains enough information to locate the +// referenced secret inside the same namespace. +// +structType=atomic +type PowerVSSecretReference struct { + // Name of the secret. 
+ // +optional + Name string `json:"name,omitempty"` +} diff --git a/vendor/github.com/openshift/api/machine/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/machine/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..291844955 --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1/zz_generated.deepcopy.go @@ -0,0 +1,488 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlibabaCloudMachineProviderConfig) DeepCopyInto(out *AlibabaCloudMachineProviderConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.DataDisks != nil { + in, out := &in.DataDisks, &out.DataDisks + *out = make([]DataDiskProperties, len(*in)) + copy(*out, *in) + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]AlibabaResourceReference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + out.Bandwidth = in.Bandwidth + out.SystemDisk = in.SystemDisk + in.VSwitch.DeepCopyInto(&out.VSwitch) + in.ResourceGroup.DeepCopyInto(&out.ResourceGroup) + if in.UserDataSecret != nil { + in, out := &in.UserDataSecret, &out.UserDataSecret + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.CredentialsSecret != nil { + in, out := &in.CredentialsSecret, &out.CredentialsSecret + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]Tag, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlibabaCloudMachineProviderConfig. +func (in *AlibabaCloudMachineProviderConfig) DeepCopy() *AlibabaCloudMachineProviderConfig { + if in == nil { + return nil + } + out := new(AlibabaCloudMachineProviderConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AlibabaCloudMachineProviderConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlibabaCloudMachineProviderConfigList) DeepCopyInto(out *AlibabaCloudMachineProviderConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AlibabaCloudMachineProviderConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlibabaCloudMachineProviderConfigList. +func (in *AlibabaCloudMachineProviderConfigList) DeepCopy() *AlibabaCloudMachineProviderConfigList { + if in == nil { + return nil + } + out := new(AlibabaCloudMachineProviderConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *AlibabaCloudMachineProviderConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlibabaCloudMachineProviderStatus) DeepCopyInto(out *AlibabaCloudMachineProviderStatus) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.InstanceID != nil { + in, out := &in.InstanceID, &out.InstanceID + *out = new(string) + **out = **in + } + if in.InstanceState != nil { + in, out := &in.InstanceState, &out.InstanceState + *out = new(string) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlibabaCloudMachineProviderStatus. +func (in *AlibabaCloudMachineProviderStatus) DeepCopy() *AlibabaCloudMachineProviderStatus { + if in == nil { + return nil + } + out := new(AlibabaCloudMachineProviderStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AlibabaCloudMachineProviderStatus) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlibabaResourceReference) DeepCopyInto(out *AlibabaResourceReference) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = new([]Tag) + if **in != nil { + in, out := *in, *out + *out = make([]Tag, len(*in)) + copy(*out, *in) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlibabaResourceReference. +func (in *AlibabaResourceReference) DeepCopy() *AlibabaResourceReference { + if in == nil { + return nil + } + out := new(AlibabaResourceReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BandwidthProperties) DeepCopyInto(out *BandwidthProperties) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BandwidthProperties. +func (in *BandwidthProperties) DeepCopy() *BandwidthProperties { + if in == nil { + return nil + } + out := new(BandwidthProperties) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataDiskProperties) DeepCopyInto(out *DataDiskProperties) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataDiskProperties. +func (in *DataDiskProperties) DeepCopy() *DataDiskProperties { + if in == nil { + return nil + } + out := new(DataDiskProperties) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NutanixMachineProviderConfig) DeepCopyInto(out *NutanixMachineProviderConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Cluster.DeepCopyInto(&out.Cluster) + in.Image.DeepCopyInto(&out.Image) + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = make([]NutanixResourceIdentifier, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + out.MemorySize = in.MemorySize.DeepCopy() + out.SystemDiskSize = in.SystemDiskSize.DeepCopy() + if in.UserDataSecret != nil { + in, out := &in.UserDataSecret, &out.UserDataSecret + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.CredentialsSecret != nil { + in, out := &in.CredentialsSecret, &out.CredentialsSecret + *out = new(corev1.LocalObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixMachineProviderConfig. +func (in *NutanixMachineProviderConfig) DeepCopy() *NutanixMachineProviderConfig { + if in == nil { + return nil + } + out := new(NutanixMachineProviderConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NutanixMachineProviderConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NutanixMachineProviderStatus) DeepCopyInto(out *NutanixMachineProviderStatus) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VmUUID != nil { + in, out := &in.VmUUID, &out.VmUUID + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixMachineProviderStatus. +func (in *NutanixMachineProviderStatus) DeepCopy() *NutanixMachineProviderStatus { + if in == nil { + return nil + } + out := new(NutanixMachineProviderStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NutanixMachineProviderStatus) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NutanixResourceIdentifier) DeepCopyInto(out *NutanixResourceIdentifier) { + *out = *in + if in.UUID != nil { + in, out := &in.UUID, &out.UUID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixResourceIdentifier. +func (in *NutanixResourceIdentifier) DeepCopy() *NutanixResourceIdentifier { + if in == nil { + return nil + } + out := new(NutanixResourceIdentifier) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PowerVSMachineProviderConfig) DeepCopyInto(out *PowerVSMachineProviderConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.UserDataSecret != nil { + in, out := &in.UserDataSecret, &out.UserDataSecret + *out = new(PowerVSSecretReference) + **out = **in + } + if in.CredentialsSecret != nil { + in, out := &in.CredentialsSecret, &out.CredentialsSecret + *out = new(PowerVSSecretReference) + **out = **in + } + in.ServiceInstance.DeepCopyInto(&out.ServiceInstance) + in.Image.DeepCopyInto(&out.Image) + in.Network.DeepCopyInto(&out.Network) + out.Processors = in.Processors + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PowerVSMachineProviderConfig. +func (in *PowerVSMachineProviderConfig) DeepCopy() *PowerVSMachineProviderConfig { + if in == nil { + return nil + } + out := new(PowerVSMachineProviderConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PowerVSMachineProviderConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PowerVSMachineProviderStatus) DeepCopyInto(out *PowerVSMachineProviderStatus) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InstanceID != nil { + in, out := &in.InstanceID, &out.InstanceID + *out = new(string) + **out = **in + } + if in.ServiceInstanceID != nil { + in, out := &in.ServiceInstanceID, &out.ServiceInstanceID + *out = new(string) + **out = **in + } + if in.InstanceState != nil { + in, out := &in.InstanceState, &out.InstanceState + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PowerVSMachineProviderStatus. +func (in *PowerVSMachineProviderStatus) DeepCopy() *PowerVSMachineProviderStatus { + if in == nil { + return nil + } + out := new(PowerVSMachineProviderStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PowerVSMachineProviderStatus) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PowerVSResource) DeepCopyInto(out *PowerVSResource) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RegEx != nil { + in, out := &in.RegEx, &out.RegEx + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PowerVSResource. +func (in *PowerVSResource) DeepCopy() *PowerVSResource { + if in == nil { + return nil + } + out := new(PowerVSResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PowerVSSecretReference) DeepCopyInto(out *PowerVSSecretReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PowerVSSecretReference. +func (in *PowerVSSecretReference) DeepCopy() *PowerVSSecretReference { + if in == nil { + return nil + } + out := new(PowerVSSecretReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SystemDiskProperties) DeepCopyInto(out *SystemDiskProperties) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SystemDiskProperties. +func (in *SystemDiskProperties) DeepCopy() *SystemDiskProperties { + if in == nil { + return nil + } + out := new(SystemDiskProperties) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Tag) DeepCopyInto(out *Tag) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Tag. +func (in *Tag) DeepCopy() *Tag { + if in == nil { + return nil + } + out := new(Tag) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/machine/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/machine/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 000000000..ce084c5eb --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,206 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_AlibabaCloudMachineProviderConfig = map[string]string{ + "": "AlibabaCloudMachineProviderConfig is the Schema for the alibabacloudmachineproviderconfig API Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "instanceType": "The instance type of the instance.", + "vpcId": "The ID of the vpc", + "regionId": "The ID of the region in which to create the instance. You can call the DescribeRegions operation to query the most recent region list.", + "zoneId": "The ID of the zone in which to create the instance. You can call the DescribeZones operation to query the most recent region list.", + "imageId": "The ID of the image used to create the instance.", + "dataDisk": "DataDisks holds information regarding the extra disks attached to the instance", + "securityGroups": "SecurityGroups is a list of security group references to assign to the instance. A reference holds either the security group ID, the resource name, or the required tags to search. When more than one security group is returned for a tag search, all the groups are associated with the instance up to the maximum number of security groups to which an instance can belong. 
For more information, see the \"Security group limits\" section in Limits. https://www.alibabacloud.com/help/en/doc-detail/25412.htm", + "bandwidth": "Bandwidth describes the internet bandwidth strategy for the instance", + "systemDisk": "SystemDisk holds the properties regarding the system disk for the instance", + "vSwitch": "VSwitch is a reference to the vswitch to use for this instance. A reference holds either the vSwitch ID, the resource name, or the required tags to search. When more than one vSwitch is returned for a tag search, only the first vSwitch returned will be used. This parameter is required when you create an instance of the VPC type. You can call the DescribeVSwitches operation to query the created vSwitches.", + "ramRoleName": "RAMRoleName is the name of the instance Resource Access Management (RAM) role. This allows the instance to perform API calls as this specified RAM role.", + "resourceGroup": "ResourceGroup references the resource group to which to assign the instance. A reference holds either the resource group ID, the resource name, or the required tags to search. When more than one resource group are returned for a search, an error will be produced and the Machine will not be created. Resource Groups do not support searching by tags.", + "tenancy": "Tenancy specifies whether to create the instance on a dedicated host. Valid values:\n\ndefault: creates the instance on a non-dedicated host. host: creates the instance on a dedicated host. If you do not specify the DedicatedHostID parameter, Alibaba Cloud automatically selects a dedicated host for the instance. Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default is `default`.", + "userDataSecret": "UserDataSecret contains a local reference to a secret that contains the UserData to apply to the instance", + "credentialsSecret": "CredentialsSecret is a reference to the secret with alibabacloud credentials. 
Otherwise, defaults to permissions provided by attached RAM role where the actuator is running.", + "tag": "Tags are the set of metadata to add to an instance.", +} + +func (AlibabaCloudMachineProviderConfig) SwaggerDoc() map[string]string { + return map_AlibabaCloudMachineProviderConfig +} + +var map_AlibabaCloudMachineProviderConfigList = map[string]string{ + "": "AlibabaCloudMachineProviderConfigList contains a list of AlibabaCloudMachineProviderConfig Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", +} + +func (AlibabaCloudMachineProviderConfigList) SwaggerDoc() map[string]string { + return map_AlibabaCloudMachineProviderConfigList +} + +var map_AlibabaCloudMachineProviderStatus = map[string]string{ + "": "AlibabaCloudMachineProviderStatus is the Schema for the alibabacloudmachineproviderconfig API Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "instanceId": "InstanceID is the instance ID of the machine created in alibabacloud", + "instanceState": "InstanceState is the state of the alibabacloud instance for this machine", + "conditions": "Conditions is a set of conditions associated with the Machine to indicate errors or other status", +} + +func (AlibabaCloudMachineProviderStatus) SwaggerDoc() map[string]string { + return map_AlibabaCloudMachineProviderStatus +} + +var map_AlibabaResourceReference = map[string]string{ + "": "ResourceTagReference is a reference to a specific AlibabaCloud resource by ID, or tags. Only one of ID or Tags may be specified. Specifying more than one will result in a validation error.", + "type": "type identifies the resource reference type for this entry.", + "id": "ID of resource", + "name": "Name of the resource", + "tags": "Tags is a set of metadata based upon ECS object tags used to identify a resource. For details about usage when multiple resources are found, please see the owning parent field documentation.", +} + +func (AlibabaResourceReference) SwaggerDoc() map[string]string { + return map_AlibabaResourceReference +} + +var map_BandwidthProperties = map[string]string{ + "": "Bandwidth describes the bandwidth strategy for the network of the instance", + "internetMaxBandwidthIn": "InternetMaxBandwidthIn is the maximum inbound public bandwidth. Unit: Mbit/s. Valid values: When the purchased outbound public bandwidth is less than or equal to 10 Mbit/s, the valid values of this parameter are 1 to 10. Currently the default is `10` when outbound bandwidth is less than or equal to 10 Mbit/s. When the purchased outbound public bandwidth is greater than 10, the valid values are 1 to the InternetMaxBandwidthOut value. Currently the default is the value used for `InternetMaxBandwidthOut` when outbound public bandwidth is greater than 10.", + "internetMaxBandwidthOut": "InternetMaxBandwidthOut is the maximum outbound public bandwidth. Unit: Mbit/s. Valid values: 0 to 100. When a value greater than 0 is used then a public IP address is assigned to the instance. Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default is `0`", +} + +func (BandwidthProperties) SwaggerDoc() map[string]string { + return map_BandwidthProperties +} + +var map_DataDiskProperties = map[string]string{ + "": "DataDisk contains the information regarding the datadisk attached to an instance", + "Name": "Name is the name of data disk N. 
If the name is specified the name must be 2 to 128 characters in length. It must start with a letter and cannot start with http:// or https://. It can contain letters, digits, colons (:), underscores (_), and hyphens (-).\n\nEmpty value means the platform chooses a default, which is subject to change over time. Currently the default is `\"\"`.", + "SnapshotID": "SnapshotID is the ID of the snapshot used to create data disk N. Valid values of N: 1 to 16.\n\nWhen the DataDisk.N.SnapshotID parameter is specified, the DataDisk.N.Size parameter is ignored. The data disk is created based on the size of the specified snapshot. Use snapshots created after July 15, 2013. Otherwise, an error is returned and your request is rejected.", + "Size": "Size of the data disk N. Valid values of N: 1 to 16. Unit: GiB. Valid values:\n\nValid values when DataDisk.N.Category is set to cloud_efficiency: 20 to 32768 Valid values when DataDisk.N.Category is set to cloud_ssd: 20 to 32768 Valid values when DataDisk.N.Category is set to cloud_essd: 20 to 32768 Valid values when DataDisk.N.Category is set to cloud: 5 to 2000 The value of this parameter must be greater than or equal to the size of the snapshot specified by the SnapshotID parameter.", + "DiskEncryption": "DiskEncryption specifies whether to encrypt data disk N.\n\nEmpty value means the platform chooses a default, which is subject to change over time. Currently the default is `disabled`.", + "PerformanceLevel": "PerformanceLevel is the performance level of the ESSD used as as data disk N. The N value must be the same as that in DataDisk.N.Category when DataDisk.N.Category is set to cloud_essd. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is `PL1`. Valid values:\n\nPL0: A single ESSD can deliver up to 10,000 random read/write IOPS. PL1: A single ESSD can deliver up to 50,000 random read/write IOPS. PL2: A single ESSD can deliver up to 100,000 random read/write IOPS. PL3: A single ESSD can deliver up to 1,000,000 random read/write IOPS. For more information about ESSD performance levels, see ESSDs.", + "Category": "Category describes the type of data disk N. Valid values: cloud_efficiency: ultra disk cloud_ssd: standard SSD cloud_essd: ESSD cloud: basic disk Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently for non-I/O optimized instances of retired instance types, the default is `cloud`. Currently for other instances, the default is `cloud_efficiency`.", + "KMSKeyID": "KMSKeyID is the ID of the Key Management Service (KMS) key to be used by data disk N. Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default is `\"\"` which is interpreted as do not use KMSKey encryption.", + "DiskPreservation": "DiskPreservation specifies whether to release data disk N along with the instance. Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default is `DeleteWithInstance`", +} + +func (DataDiskProperties) SwaggerDoc() map[string]string { + return map_DataDiskProperties +} + +var map_SystemDiskProperties = map[string]string{ + "": "SystemDiskProperties contains the information regarding the system disk including performance, size, name, and category", + "category": "Category is the category of the system disk. Valid values: cloud_essd: ESSD. 
When the parameter is set to this value, you can use the SystemDisk.PerformanceLevel parameter to specify the performance level of the disk. cloud_efficiency: ultra disk. cloud_ssd: standard SSD. cloud: basic disk. Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently for non-I/O optimized instances of retired instance types, the default is `cloud`. Currently for other instances, the default is `cloud_efficiency`.", + "performanceLevel": "PerformanceLevel is the performance level of the ESSD used as the system disk. Valid values:\n\nPL0: A single ESSD can deliver up to 10,000 random read/write IOPS. PL1: A single ESSD can deliver up to 50,000 random read/write IOPS. PL2: A single ESSD can deliver up to 100,000 random read/write IOPS. PL3: A single ESSD can deliver up to 1,000,000 random read/write IOPS. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is `PL1`. For more information about ESSD performance levels, see ESSDs.", + "name": "Name is the name of the system disk. If the name is specified the name must be 2 to 128 characters in length. It must start with a letter and cannot start with http:// or https://. It can contain letters, digits, colons (:), underscores (_), and hyphens (-). Empty value means the platform chooses a default, which is subject to change over time. Currently the default is `\"\"`.", + "size": "Size is the size of the system disk. Unit: GiB. Valid values: 20 to 500. The value must be at least 20 and greater than or equal to the size of the image. Empty value means the platform chooses a default, which is subject to change over time. Currently the default is `40` or the size of the image depending on whichever is greater.", +} + +func (SystemDiskProperties) SwaggerDoc() map[string]string { + return map_SystemDiskProperties +} + +var map_Tag = map[string]string{ + "": "Tag The tags of ECS Instance", + "Key": "Key is the name of the key pair", + "Value": "Value is the value or data of the key pair", +} + +func (Tag) SwaggerDoc() map[string]string { + return map_Tag +} + +var map_NutanixMachineProviderConfig = map[string]string{ + "": "NutanixMachineProviderConfig is the Schema for the nutanixmachineproviderconfigs API Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "cluster": "cluster is to identify the cluster (the Prism Element under management of the Prism Central), in which the Machine's VM will be created. The cluster identifier (uuid or name) can be obtained from the Prism Central console or using the prism_central API.", + "image": "image is to identify the rhcos image uploaded to the Prism Central (PC) The image identifier (uuid or name) can be obtained from the Prism Central console or using the prism_central API.", + "subnets": "subnets holds a list of identifiers (one or more) of the cluster's network subnets for the Machine's VM to connect to. 
The subnet identifiers (uuid or name) can be obtained from the Prism Central console or using the prism_central API.", + "vcpusPerSocket": "vcpusPerSocket is the number of vCPUs per socket of the VM", + "vcpuSockets": "vcpuSockets is the number of vCPU sockets of the VM", + "memorySize": "memorySize is the memory size (in Quantity format) of the VM The minimum memorySize is 2Gi bytes", + "systemDiskSize": "systemDiskSize is size (in Quantity format) of the system disk of the VM The minimum systemDiskSize is 20Gi bytes", + "userDataSecret": "userDataSecret is a local reference to a secret that contains the UserData to apply to the VM", + "credentialsSecret": "credentialsSecret is a local reference to a secret that contains the credentials data to access Nutanix PC client", +} + +func (NutanixMachineProviderConfig) SwaggerDoc() map[string]string { + return map_NutanixMachineProviderConfig +} + +var map_NutanixMachineProviderStatus = map[string]string{ + "": "NutanixMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. It contains nutanix-specific status information. Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "conditions": "conditions is a set of conditions associated with the Machine to indicate errors or other status", + "vmUUID": "vmUUID is the Machine associated VM's UUID The field is missing before the VM is created. Once the VM is created, the field is filled with the VM's UUID and it will not change. The vmUUID is used to find the VM when updating the Machine status, and to delete the VM when the Machine is deleted.", +} + +func (NutanixMachineProviderStatus) SwaggerDoc() map[string]string { + return map_NutanixMachineProviderStatus +} + +var map_NutanixResourceIdentifier = map[string]string{ + "": "NutanixResourceIdentifier holds the identity of a Nutanix PC resource (cluster, image, subnet, etc.)", + "type": "Type is the identifier type to use for this resource.", + "uuid": "uuid is the UUID of the resource in the PC.", + "name": "name is the resource name in the PC", +} + +func (NutanixResourceIdentifier) SwaggerDoc() map[string]string { + return map_NutanixResourceIdentifier +} + +var map_PowerVSMachineProviderConfig = map[string]string{ + "": "PowerVSMachineProviderConfig is the type that will be embedded in a Machine.Spec.ProviderSpec field for a PowerVS virtual machine. It is used by the PowerVS machine actuator to create a single Machine.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "userDataSecret": "userDataSecret contains a local reference to a secret that contains the UserData to apply to the instance.", + "credentialsSecret": "credentialsSecret is a reference to the secret with IBM Cloud credentials.", + "serviceInstance": "serviceInstance is the reference to the Power VS service on which the server instance(VM) will be created. Power VS service is a container for all Power VS instances at a specific geographic region. serviceInstance can be created via IBM Cloud catalog or CLI. supported serviceInstance identifier in PowerVSResource are Name and ID and that can be obtained from IBM Cloud UI or IBM Cloud cli. More detail about Power VS service instance. https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-creating-power-virtual-server", + "image": "image is to identify the rhcos image uploaded to IBM COS bucket which is used to create the instance. 
supported image identifier in PowerVSResource are Name and ID and that can be obtained from IBM Cloud UI or IBM Cloud cli.", + "network": "network is the reference to the Network to use for this instance. supported network identifier in PowerVSResource are Name, ID and RegEx and that can be obtained from IBM Cloud UI or IBM Cloud cli.", + "keyPairName": "keyPairName is the name of the KeyPair to use for SSH. The key pair will be exposed to the instance via the instance metadata service. On boot, the OS will copy the public keypair into the authorized keys for the core user.", + "systemType": "systemType is the System type used to host the instance. systemType determines the number of cores and memory that is available. Few of the supported SystemTypes are s922,e880,e980. e880 systemType available only in Dallas Datacenters. e980 systemType available in Datacenters except Dallas and Washington. When omitted, this means that the user has no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is s922 which is generally available.", + "processorType": "processorType is the VM instance processor type. It must be set to one of the following values: Dedicated, Capped or Shared. Dedicated: resources are allocated for a specific client, The hypervisor makes a 1:1 binding of a partition’s processor to a physical processor core. Shared: Shared among other clients. Capped: Shared, but resources do not expand beyond those that are requested, the amount of CPU time is Capped to the value specified for the entitlement. if the processorType is selected as Dedicated, then processors value cannot be fractional. When omitted, this means that the user has no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is Shared.", + "processors": "processors is the number of virtual processors in a virtual machine. when the processorType is selected as Dedicated the processors value cannot be fractional. maximum value for the Processors depends on the selected SystemType. when SystemType is set to e880 or e980 maximum Processors value is 143. when SystemType is set to s922 maximum Processors value is 15. minimum value for Processors depends on the selected ProcessorType. when ProcessorType is set as Shared or Capped, The minimum processors is 0.5. when ProcessorType is set as Dedicated, The minimum processors is 1. When omitted, this means that the user has no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The default is set based on the selected ProcessorType. when ProcessorType selected as Dedicated, the default is set to 1. when ProcessorType selected as Shared or Capped, the default is set to 0.5.", + "memoryGiB": "memoryGiB is the size of a virtual machine's memory, in GiB. maximum value for the MemoryGiB depends on the selected SystemType. when SystemType is set to e880 maximum MemoryGiB value is 7463 GiB. when SystemType is set to e980 maximum MemoryGiB value is 15307 GiB. when SystemType is set to s922 maximum MemoryGiB value is 942 GiB. The minimum memory is 32 GiB. When omitted, this means the user has no opinion and the platform is left to choose a reasonable default, which is subject to change over time. 
The current default is 32.", +} + +func (PowerVSMachineProviderConfig) SwaggerDoc() map[string]string { + return map_PowerVSMachineProviderConfig +} + +var map_PowerVSMachineProviderStatus = map[string]string{ + "": "PowerVSMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. It contains PowerVS-specific status information.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "conditions": "conditions is a set of conditions associated with the Machine to indicate errors or other status", + "instanceId": "instanceId is the instance ID of the machine created in PowerVS instanceId uniquely identifies a Power VS server instance(VM) under a Power VS service. This will help in updating or deleting a VM in Power VS Cloud", + "serviceInstanceID": "serviceInstanceID is the reference to the Power VS ServiceInstance on which the machine instance will be created. serviceInstanceID uniquely identifies the Power VS service By setting serviceInstanceID it will become easy and efficient to fetch a server instance(VM) within Power VS Cloud.", + "instanceState": "instanceState is the state of the PowerVS instance for this machine Possible instance states are Active, Build, ShutOff, Reboot This is used to display additional information to user regarding instance current state", +} + +func (PowerVSMachineProviderStatus) SwaggerDoc() map[string]string { + return map_PowerVSMachineProviderStatus +} + +var map_PowerVSResource = map[string]string{ + "": "PowerVSResource is a reference to a specific PowerVS resource by ID, Name or RegEx Only one of ID, Name or RegEx may be specified. Specifying more than one will result in a validation error.", + "type": "Type identifies the resource type for this entry. 
Valid values are ID, Name and RegEx", + "id": "ID of resource", + "name": "Name of resource", + "regex": "Regex to find resource Regex contains the pattern to match to find a resource", +} + +func (PowerVSResource) SwaggerDoc() map[string]string { + return map_PowerVSResource +} + +var map_PowerVSSecretReference = map[string]string{ + "": "PowerVSSecretReference contains enough information to locate the referenced secret inside the same namespace.", + "name": "Name of the secret.", +} + +func (PowerVSSecretReference) SwaggerDoc() map[string]string { + return map_PowerVSSecretReference +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/machine/v1beta1/0000_10_machine.crd.yaml b/vendor/github.com/openshift/api/machine/v1beta1/0000_10_machine.crd.yaml new file mode 100644 index 000000000..180831bb4 --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1beta1/0000_10_machine.crd.yaml @@ -0,0 +1,326 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + exclude.release.openshift.io/internal-openshift-hosted: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + api-approved.openshift.io: https://github.com/openshift/api/pull/948 + name: machines.machine.openshift.io +spec: + group: machine.openshift.io + names: + kind: Machine + listKind: MachineList + plural: machines + singular: machine + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Phase of machine + jsonPath: .status.phase + name: Phase + type: string + - description: Type of instance + jsonPath: .metadata.labels['machine\.openshift\.io/instance-type'] + name: Type + type: string + - description: Region associated with machine + jsonPath: .metadata.labels['machine\.openshift\.io/region'] + name: Region + type: string + - description: Zone associated with machine + jsonPath: .metadata.labels['machine\.openshift\.io/zone'] + name: Zone + type: string + - description: Machine age + jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Node associated with machine + jsonPath: .status.nodeRef.name + name: Node + priority: 1 + type: string + - description: Provider ID of machine created in cloud provider + jsonPath: .spec.providerID + name: ProviderID + priority: 1 + type: string + - description: State of instance + jsonPath: .metadata.annotations['machine\.openshift\.io/instance-state'] + name: State + priority: 1 + type: string + name: v1beta1 + schema: + openAPIV3Schema: + description: 'Machine is the Schema for the machines API Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).' + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
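A note on the generated SwaggerDoc maps ending just above: they are plain map[string]string lookups keyed by JSON field name, exposed through the value-receiver SwaggerDoc() methods shown in this patch. A minimal sketch of reading one follows; the import path github.com/openshift/api/machine/v1 is an assumption about where these Nutanix/PowerVS types live in the vendored module.

package main

import (
	"fmt"

	machinev1 "github.com/openshift/api/machine/v1" // assumed package path for these generated types
)

func main() {
	// SwaggerDoc returns the generated field-name -> description map for the type.
	docs := machinev1.PowerVSMachineProviderConfig{}.SwaggerDoc()
	fmt.Println(docs["memoryGiB"])
}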
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: MachineSpec defines the desired state of Machine + type: object + properties: + lifecycleHooks: + description: LifecycleHooks allow users to pause operations on the machine at certain predefined points within the machine lifecycle. + type: object + properties: + preDrain: + description: PreDrain hooks prevent the machine from being drained. This also blocks further lifecycle events, such as termination. + type: array + items: + description: LifecycleHook represents a single instance of a lifecycle hook + type: object + required: + - name + - owner + properties: + name: + description: Name defines a unique name for the lifcycle hook. The name should be unique and descriptive, ideally 1-3 words, in CamelCase or it may be namespaced, eg. foo.example.com/CamelCase. Names must be unique and should only be managed by a single entity. + type: string + maxLength: 256 + minLength: 3 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + owner: + description: Owner defines the owner of the lifecycle hook. This should be descriptive enough so that users can identify who/what is responsible for blocking the lifecycle. This could be the name of a controller (e.g. clusteroperator/etcd) or an administrator managing the hook. + type: string + maxLength: 512 + minLength: 3 + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + preTerminate: + description: PreTerminate hooks prevent the machine from being terminated. PreTerminate hooks be actioned after the Machine has been drained. + type: array + items: + description: LifecycleHook represents a single instance of a lifecycle hook + type: object + required: + - name + - owner + properties: + name: + description: Name defines a unique name for the lifcycle hook. The name should be unique and descriptive, ideally 1-3 words, in CamelCase or it may be namespaced, eg. foo.example.com/CamelCase. Names must be unique and should only be managed by a single entity. + type: string + maxLength: 256 + minLength: 3 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + owner: + description: Owner defines the owner of the lifecycle hook. This should be descriptive enough so that users can identify who/what is responsible for blocking the lifecycle. This could be the name of a controller (e.g. clusteroperator/etcd) or an administrator managing the hook. + type: string + maxLength: 512 + minLength: 3 + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + metadata: + description: ObjectMeta will autopopulate the Node created. Use this to indicate what labels, annotations, name prefix, etc., should be used when creating the Node. + type: object + properties: + annotations: + description: 'Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + additionalProperties: + type: string + generateName: + description: "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. 
If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. \n If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). \n Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency" + type: string + labels: + description: 'Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels' + type: object + additionalProperties: + type: string + name: + description: 'Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + namespace: + description: "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. \n Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces" + type: string + ownerReferences: + description: List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller. + type: array + items: + description: OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field. + type: object + required: + - apiVersion + - kind + - name + - uid + properties: + apiVersion: + description: API version of the referent. + type: string + blockOwnerDeletion: + description: If true, AND if the owner has the "foregroundDeletion" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs "delete" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned. + type: boolean + controller: + description: If true, this reference points to the managing controller. + type: boolean + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. 
More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + uid: + description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids' + type: string + providerID: + description: ProviderID is the identification ID of the machine provided by the provider. This field must match the provider ID as seen on the node object corresponding to this machine. This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler with cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a generic out-of-tree provider for autoscaler, this field is required by autoscaler to be able to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver and then a comparison is done to find out unregistered machines and are marked for delete. This field will be set by the actuators and consumed by higher level entities like autoscaler that will be interfacing with cluster-api as generic provider. + type: string + providerSpec: + description: ProviderSpec details Provider-specific configuration to use during node creation. + type: object + properties: + value: + description: Value is an inlined, serialized representation of the resource configuration. It is recommended that providers maintain their own versioned API types that should be serialized/deserialized from this field, akin to component config. + type: object + x-kubernetes-preserve-unknown-fields: true + taints: + description: The list of the taints to be applied to the corresponding Node in additive manner. This list will not overwrite any other taints added to the Node on an ongoing basis by other entities. These taints should be actively reconciled e.g. if you ask the machine controller to apply a taint and then manually remove the taint the machine controller will put it back) but not have the machine controller remove any taints + type: array + items: + description: The node this Taint is attached to has the "effect" on any pod that does not tolerate the Taint. + type: object + required: + - effect + - key + properties: + effect: + description: Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Required. The taint key to be applied to a node. + type: string + timeAdded: + description: TimeAdded represents the time at which the taint was added. It is only written for NoExecute taints. + type: string + format: date-time + value: + description: The taint value corresponding to the taint key. + type: string + status: + description: MachineStatus defines the observed state of Machine + type: object + properties: + addresses: + description: Addresses is a list of addresses assigned to the machine. Queried from cloud provider, if available. + type: array + items: + description: NodeAddress contains information for the node's address. + type: object + required: + - address + - type + properties: + address: + description: The node address. + type: string + type: + description: Node address type, one of Hostname, ExternalIP or InternalIP. + type: string + conditions: + description: Conditions defines the current state of the Machine + type: array + items: + description: Condition defines an observation of a Machine API resource operational state. 
+ type: object + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + type: string + format: date-time + message: + description: A human readable message indicating details about the transition. This field may be empty. + type: string + reason: + description: The reason for the condition's last transition in CamelCase. The specific API may choose whether or not this field is considered a guaranteed API. This field may not be empty. + type: string + severity: + description: Severity provides an explicit classification of Reason code, so the users or machines can immediately understand the current situation and act accordingly. The Severity field MUST be set only when Status=False. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition in CamelCase or in foo.example.com/CamelCase. Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. + type: string + errorMessage: + description: "ErrorMessage will be set in the event that there is a terminal problem reconciling the Machine and will contain a more verbose string suitable for logging and human consumption. \n This field should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the Machine's spec or the configuration of the controller, and that manual intervention is required. Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the controller, or the responsible controller itself being critically misconfigured. \n Any transient errors that occur during the reconciliation of Machines can be added as events to the Machine object and/or logged in the controller's output." + type: string + errorReason: + description: "ErrorReason will be set in the event that there is a terminal problem reconciling the Machine and will contain a succinct value suitable for machine interpretation. \n This field should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the Machine's spec or the configuration of the controller, and that manual intervention is required. Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the controller, or the responsible controller itself being critically misconfigured. \n Any transient errors that occur during the reconciliation of Machines can be added as events to the Machine object and/or logged in the controller's output." + type: string + lastOperation: + description: LastOperation describes the last-operation performed by the machine-controller. This API should be useful as a history in terms of the latest operation performed on the specific machine. It should also convey the state of the latest-operation for example if it is still on-going, failed or completed successfully. 
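As a minimal, non-normative sketch of how the MachineSpec schema above (lifecycleHooks, taints, providerSpec) is populated from Go, assuming the vendored v1beta1 struct fields mirror the CRD field names; the hook name/owner and the inlined providerSpec payload are hypothetical values chosen only to satisfy the documented constraints.

package main

import (
	"fmt"

	machinev1beta1 "github.com/openshift/api/machine/v1beta1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	spec := machinev1beta1.MachineSpec{
		// A pre-drain hook pauses the machine before it is drained; name and owner
		// follow the length and pattern constraints from the CRD schema.
		LifecycleHooks: machinev1beta1.LifecycleHooks{
			PreDrain: []machinev1beta1.LifecycleHook{
				{Name: "MigrateWorkload", Owner: "clusteroperator/example-migrator"},
			},
		},
		// Taints are applied additively to the Node that backs this Machine.
		Taints: []corev1.Taint{
			{Key: "dedicated", Value: "infra", Effect: corev1.TaintEffectNoSchedule},
		},
		// providerSpec.value carries the provider-specific config as an inlined raw object.
		ProviderSpec: machinev1beta1.ProviderSpec{
			Value: &runtime.RawExtension{
				Raw: []byte(`{"apiVersion":"machine.openshift.io/v1beta1","kind":"AWSMachineProviderConfig"}`),
			},
		},
	}
	fmt.Printf("hooks=%d taints=%d providerSpec bytes=%d\n",
		len(spec.LifecycleHooks.PreDrain), len(spec.Taints), len(spec.ProviderSpec.Value.Raw))
}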
+ type: object + properties: + description: + description: Description is the human-readable description of the last operation. + type: string + lastUpdated: + description: LastUpdated is the timestamp at which LastOperation API was last-updated. + type: string + format: date-time + state: + description: State is the current status of the last performed operation. E.g. Processing, Failed, Successful etc + type: string + type: + description: Type is the type of operation which was last performed. E.g. Create, Delete, Update etc + type: string + lastUpdated: + description: LastUpdated identifies when this status was last observed. + type: string + format: date-time + nodeRef: + description: NodeRef will point to the corresponding Node if it exists. + type: object + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + phase: + description: 'Phase represents the current phase of machine actuation. One of: Failed, Provisioning, Provisioned, Running, Deleting' + type: string + providerStatus: + description: ProviderStatus details a Provider-specific status. It is recommended that providers maintain their own versioned API types that should be serialized/deserialized from this field. 
+ type: object + x-kubernetes-preserve-unknown-fields: true + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/vendor/github.com/openshift/api/machine/v1beta1/0000_10_machinehealthcheck.yaml b/vendor/github.com/openshift/api/machine/v1beta1/0000_10_machinehealthcheck.yaml new file mode 100644 index 000000000..ae73bf3d1 --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1beta1/0000_10_machinehealthcheck.yaml @@ -0,0 +1,192 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + exclude.release.openshift.io/internal-openshift-hosted: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + api-approved.openshift.io: https://github.com/openshift/api/pull/1032 + creationTimestamp: null + name: machinehealthchecks.machine.openshift.io +spec: + group: machine.openshift.io + names: + kind: MachineHealthCheck + listKind: MachineHealthCheckList + plural: machinehealthchecks + shortNames: + - mhc + - mhcs + singular: machinehealthcheck + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Maximum number of unhealthy machines allowed + jsonPath: .spec.maxUnhealthy + name: MaxUnhealthy + type: string + - description: Number of machines currently monitored + jsonPath: .status.expectedMachines + name: ExpectedMachines + type: integer + - description: Current observed healthy machines + jsonPath: .status.currentHealthy + name: CurrentHealthy + type: integer + name: v1beta1 + schema: + openAPIV3Schema: + description: 'MachineHealthCheck is the Schema for the machinehealthchecks API Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).' + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Specification of machine health check policy + type: object + properties: + maxUnhealthy: + description: Any farther remediation is only allowed if at most "MaxUnhealthy" machines selected by "selector" are not healthy. Expects either a postive integer value or a percentage value. Percentage values must be positive whole numbers and are capped at 100%. Both 0 and 0% are valid and will block all remediation. + default: 100% + pattern: ^((100|[0-9]{1,2})%|[0-9]+)$ + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + nodeStartupTimeout: + description: Machines older than this duration without a node will be considered to have failed and will be remediated. To prevent Machines without Nodes from being removed, disable startup checks by setting this value explicitly to "0". 
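The maxUnhealthy and nodeStartupTimeout fields described above accept constrained string forms (an int-or-percentage and a Go-style duration). A small sketch of building values that satisfy those patterns, using only k8s.io/apimachinery helpers; how they are attached to the MachineHealthCheck spec follows the schema shown here.

package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// maxUnhealthy accepts either an absolute machine count or a whole-number percentage.
	byCount := intstr.FromInt(2)
	byPercent := intstr.FromString("40%")

	// nodeStartupTimeout accepts duration strings such as "10m" or "1.5h";
	// setting it explicitly to "0" disables the startup check.
	timeout := metav1.Duration{Duration: 10 * time.Minute}

	fmt.Println(byCount.String(), byPercent.String(), timeout.Duration.String())
}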
Expects an unsigned duration string of decimal numbers each with optional fraction and a unit suffix, eg "300ms", "1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". + type: string + default: 10m + pattern: ^0|([0-9]+(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$ + remediationTemplate: + description: "RemediationTemplate is a reference to a remediation template provided by an infrastructure provider. \n This field is completely optional, when filled, the MachineHealthCheck controller creates a new object from the template referenced and hands off remediation of the machine to a controller that lives outside of Machine API Operator." + type: object + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + selector: + description: 'Label selector to match machines whose health will be exercised. Note: An empty selector will match all machines.' + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + unhealthyConditions: + description: UnhealthyConditions contains a list of the conditions that determine whether a node is considered unhealthy. The conditions are combined in a logical OR, i.e. if any of the conditions is met, the node is unhealthy. + type: array + minItems: 1 + items: + description: UnhealthyCondition represents a Node condition type and value with a timeout specified as a duration. When the named condition has been in the given status for at least the timeout value, a node is considered unhealthy. + type: object + properties: + status: + type: string + minLength: 1 + timeout: + description: Expects an unsigned duration string of decimal numbers each with optional fraction and a unit suffix, eg "300ms", "1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". + type: string + pattern: ^([0-9]+(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$ + type: + type: string + minLength: 1 + status: + description: Most recently observed status of MachineHealthCheck resource + type: object + properties: + conditions: + description: Conditions defines the current state of the MachineHealthCheck + type: array + items: + description: Condition defines an observation of a Machine API resource operational state. + type: object + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + type: string + format: date-time + message: + description: A human readable message indicating details about the transition. This field may be empty. + type: string + reason: + description: The reason for the condition's last transition in CamelCase. The specific API may choose whether or not this field is considered a guaranteed API. This field may not be empty. + type: string + severity: + description: Severity provides an explicit classification of Reason code, so the users or machines can immediately understand the current situation and act accordingly. The Severity field MUST be set only when Status=False. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition in CamelCase or in foo.example.com/CamelCase. Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. 
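To illustrate the selector and unhealthyConditions schema described above: a hedged sketch of a MachineHealthCheck spec in Go, assuming the vendored UnhealthyCondition fields use the core NodeConditionType/ConditionStatus types and a metav1.Duration timeout; the worker role label is an example selector, not a requirement.

package main

import (
	"fmt"
	"time"

	machinev1beta1 "github.com/openshift/api/machine/v1beta1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	spec := machinev1beta1.MachineHealthCheckSpec{
		// Match a subset of machines by label; an empty selector would match all machines.
		Selector: metav1.LabelSelector{
			MatchLabels: map[string]string{"machine.openshift.io/cluster-api-machine-role": "worker"},
		},
		// Conditions are ORed: any one of them, held for its timeout, marks the node unhealthy.
		UnhealthyConditions: []machinev1beta1.UnhealthyCondition{
			{Type: corev1.NodeReady, Status: corev1.ConditionFalse, Timeout: metav1.Duration{Duration: 5 * time.Minute}},
			{Type: corev1.NodeReady, Status: corev1.ConditionUnknown, Timeout: metav1.Duration{Duration: 5 * time.Minute}},
		},
	}
	fmt.Println("unhealthy conditions:", len(spec.UnhealthyConditions))
}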
+ type: string + currentHealthy: + description: total number of machines counted by this machine health check + type: integer + minimum: 0 + expectedMachines: + description: total number of machines counted by this machine health check + type: integer + minimum: 0 + remediationsAllowed: + description: RemediationsAllowed is the number of further remediations allowed by this machine health check before maxUnhealthy short circuiting will be applied + type: integer + format: int32 + minimum: 0 + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/vendor/github.com/openshift/api/machine/v1beta1/0000_10_machineset.crd.yaml b/vendor/github.com/openshift/api/machine/v1beta1/0000_10_machineset.crd.yaml new file mode 100644 index 000000000..8a7b1b628 --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1beta1/0000_10_machineset.crd.yaml @@ -0,0 +1,347 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + exclude.release.openshift.io/internal-openshift-hosted: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + api-approved.openshift.io: https://github.com/openshift/api/pull/1032 + creationTimestamp: null + name: machinesets.machine.openshift.io +spec: + group: machine.openshift.io + names: + kind: MachineSet + listKind: MachineSetList + plural: machinesets + singular: machineset + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Desired Replicas + jsonPath: .spec.replicas + name: Desired + type: integer + - description: Current Replicas + jsonPath: .status.replicas + name: Current + type: integer + - description: Ready Replicas + jsonPath: .status.readyReplicas + name: Ready + type: integer + - description: Observed number of available replicas + jsonPath: .status.availableReplicas + name: Available + type: string + - description: Machineset age + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: 'MachineSet ensures that a specified number of machines replicas are running at any given time. Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).' + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: MachineSetSpec defines the desired state of MachineSet + type: object + properties: + deletePolicy: + description: DeletePolicy defines the policy used to identify nodes to delete when downscaling. Defaults to "Random". 
Valid values are "Random, "Newest", "Oldest" + type: string + enum: + - Random + - Newest + - Oldest + minReadySeconds: + description: MinReadySeconds is the minimum number of seconds for which a newly created machine should be ready. Defaults to 0 (machine will be considered available as soon as it is ready) + type: integer + format: int32 + replicas: + description: Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. + type: integer + format: int32 + default: 1 + selector: + description: 'Selector is a label query over machines that should match the replica count. Label keys and values that must match in order to be controlled by this MachineSet. It must match the machine template''s labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors' + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + template: + description: Template is the object that describes the machine that will be created if insufficient replicas are detected. + type: object + properties: + metadata: + description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + type: object + properties: + annotations: + description: 'Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + additionalProperties: + type: string + generateName: + description: "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. 
\n If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). \n Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency" + type: string + labels: + description: 'Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels' + type: object + additionalProperties: + type: string + name: + description: 'Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + namespace: + description: "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. \n Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces" + type: string + ownerReferences: + description: List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller. + type: array + items: + description: OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field. + type: object + required: + - apiVersion + - kind + - name + - uid + properties: + apiVersion: + description: API version of the referent. + type: string + blockOwnerDeletion: + description: If true, AND if the owner has the "foregroundDeletion" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs "delete" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned. + type: boolean + controller: + description: If true, this reference points to the managing controller. + type: boolean + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + uid: + description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids' + type: string + spec: + description: 'Specification of the desired behavior of the machine. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + type: object + properties: + lifecycleHooks: + description: LifecycleHooks allow users to pause operations on the machine at certain predefined points within the machine lifecycle. + type: object + properties: + preDrain: + description: PreDrain hooks prevent the machine from being drained. This also blocks further lifecycle events, such as termination. + type: array + items: + description: LifecycleHook represents a single instance of a lifecycle hook + type: object + required: + - name + - owner + properties: + name: + description: Name defines a unique name for the lifcycle hook. The name should be unique and descriptive, ideally 1-3 words, in CamelCase or it may be namespaced, eg. foo.example.com/CamelCase. Names must be unique and should only be managed by a single entity. + type: string + maxLength: 256 + minLength: 3 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + owner: + description: Owner defines the owner of the lifecycle hook. This should be descriptive enough so that users can identify who/what is responsible for blocking the lifecycle. This could be the name of a controller (e.g. clusteroperator/etcd) or an administrator managing the hook. + type: string + maxLength: 512 + minLength: 3 + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + preTerminate: + description: PreTerminate hooks prevent the machine from being terminated. PreTerminate hooks be actioned after the Machine has been drained. + type: array + items: + description: LifecycleHook represents a single instance of a lifecycle hook + type: object + required: + - name + - owner + properties: + name: + description: Name defines a unique name for the lifcycle hook. The name should be unique and descriptive, ideally 1-3 words, in CamelCase or it may be namespaced, eg. foo.example.com/CamelCase. Names must be unique and should only be managed by a single entity. + type: string + maxLength: 256 + minLength: 3 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + owner: + description: Owner defines the owner of the lifecycle hook. This should be descriptive enough so that users can identify who/what is responsible for blocking the lifecycle. This could be the name of a controller (e.g. clusteroperator/etcd) or an administrator managing the hook. + type: string + maxLength: 512 + minLength: 3 + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + metadata: + description: ObjectMeta will autopopulate the Node created. Use this to indicate what labels, annotations, name prefix, etc., should be used when creating the Node. + type: object + properties: + annotations: + description: 'Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + additionalProperties: + type: string + generateName: + description: "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. 
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. \n If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). \n Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency" + type: string + labels: + description: 'Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels' + type: object + additionalProperties: + type: string + name: + description: 'Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + namespace: + description: "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. \n Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces" + type: string + ownerReferences: + description: List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller. + type: array + items: + description: OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field. + type: object + required: + - apiVersion + - kind + - name + - uid + properties: + apiVersion: + description: API version of the referent. + type: string + blockOwnerDeletion: + description: If true, AND if the owner has the "foregroundDeletion" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs "delete" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned. + type: boolean + controller: + description: If true, this reference points to the managing controller. + type: boolean + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + uid: + description: 'UID of the referent. 
More info: http://kubernetes.io/docs/user-guide/identifiers#uids' + type: string + providerID: + description: ProviderID is the identification ID of the machine provided by the provider. This field must match the provider ID as seen on the node object corresponding to this machine. This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler with cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a generic out-of-tree provider for autoscaler, this field is required by autoscaler to be able to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver and then a comparison is done to find out unregistered machines and are marked for delete. This field will be set by the actuators and consumed by higher level entities like autoscaler that will be interfacing with cluster-api as generic provider. + type: string + providerSpec: + description: ProviderSpec details Provider-specific configuration to use during node creation. + type: object + properties: + value: + description: Value is an inlined, serialized representation of the resource configuration. It is recommended that providers maintain their own versioned API types that should be serialized/deserialized from this field, akin to component config. + type: object + x-kubernetes-preserve-unknown-fields: true + taints: + description: The list of the taints to be applied to the corresponding Node in additive manner. This list will not overwrite any other taints added to the Node on an ongoing basis by other entities. These taints should be actively reconciled e.g. if you ask the machine controller to apply a taint and then manually remove the taint the machine controller will put it back) but not have the machine controller remove any taints + type: array + items: + description: The node this Taint is attached to has the "effect" on any pod that does not tolerate the Taint. + type: object + required: + - effect + - key + properties: + effect: + description: Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Required. The taint key to be applied to a node. + type: string + timeAdded: + description: TimeAdded represents the time at which the taint was added. It is only written for NoExecute taints. + type: string + format: date-time + value: + description: The taint value corresponding to the taint key. + type: string + status: + description: MachineSetStatus defines the observed state of MachineSet + type: object + properties: + availableReplicas: + description: The number of available replicas (ready for at least minReadySeconds) for this MachineSet. + type: integer + format: int32 + errorMessage: + type: string + errorReason: + description: "In the event that there is a terminal problem reconciling the replicas, both ErrorReason and ErrorMessage will be set. ErrorReason will be populated with a succinct value suitable for machine interpretation, while ErrorMessage will contain a more verbose string suitable for logging and human consumption. 
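A brief sketch of the MachineSet spec described above (replicas, deletePolicy, selector, template), under the assumption that the vendored MachineSetSpec, MachineTemplateSpec, and ObjectMeta Go fields mirror the CRD field names; the machineset label key/value is illustrative only. The key design point is that the template's labels must satisfy the selector, otherwise the MachineSet cannot adopt the Machines it creates.

package main

import (
	"fmt"

	machinev1beta1 "github.com/openshift/api/machine/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	replicas := int32(3) // pointer distinguishes an explicit 0 from "unset" (which defaults to 1)
	labels := map[string]string{"machine.openshift.io/cluster-api-machineset": "worker-a"}

	spec := machinev1beta1.MachineSetSpec{
		Replicas:     &replicas,
		DeletePolicy: "Oldest", // one of Random, Newest, Oldest per the CRD enum
		Selector:     metav1.LabelSelector{MatchLabels: labels},
		Template: machinev1beta1.MachineTemplateSpec{
			// Template labels repeat the selector labels so created Machines are matched.
			ObjectMeta: machinev1beta1.ObjectMeta{Labels: labels},
		},
	}
	fmt.Println("desired replicas:", *spec.Replicas)
}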
\n These fields should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the MachineTemplate's spec or the configuration of the machine controller, and that manual intervention is required. Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the machine controller, or the responsible machine controller itself being critically misconfigured. \n Any transient errors that occur during the reconciliation of Machines can be added as events to the MachineSet object and/or logged in the controller's output." + type: string + fullyLabeledReplicas: + description: The number of replicas that have labels matching the labels of the machine template of the MachineSet. + type: integer + format: int32 + observedGeneration: + description: ObservedGeneration reflects the generation of the most recently observed MachineSet. + type: integer + format: int64 + readyReplicas: + description: The number of ready replicas for this MachineSet. A machine is considered ready when the node has been created and is "Ready". + type: integer + format: int32 + replicas: + description: Replicas is the most recently observed number of replicas. + type: integer + format: int32 + served: true + storage: true + subresources: + scale: + labelSelectorPath: .status.labelSelector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/vendor/github.com/openshift/api/machine/v1beta1/doc.go b/vendor/github.com/openshift/api/machine/v1beta1/doc.go new file mode 100644 index 000000000..e14fc64e3 --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1beta1/doc.go @@ -0,0 +1,7 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +kubebuilder:validation:Optional +// +groupName=machine.openshift.io +package v1beta1 diff --git a/vendor/github.com/openshift/api/machine/v1beta1/register.go b/vendor/github.com/openshift/api/machine/v1beta1/register.go new file mode 100644 index 000000000..a3678c007 --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1beta1/register.go @@ -0,0 +1,44 @@ +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "machine.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. 
+func addKnownTypes(scheme *runtime.Scheme) error { + metav1.AddToGroupVersion(scheme, GroupVersion) + + scheme.AddKnownTypes(GroupVersion, + &Machine{}, + &MachineList{}, + &MachineSet{}, + &MachineSetList{}, + &MachineHealthCheck{}, + &MachineHealthCheckList{}, + ) + + return nil +} diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go b/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go new file mode 100644 index 000000000..e2eec1a6a --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go @@ -0,0 +1,306 @@ +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// AWSMachineProviderConfig is the Schema for the awsmachineproviderconfigs API +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type AWSMachineProviderConfig struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + // AMI is the reference to the AMI from which to create the machine instance. + AMI AWSResourceReference `json:"ami"` + // InstanceType is the type of instance to create. Example: m4.xlarge + InstanceType string `json:"instanceType"` + // Tags is the set of tags to add to apply to an instance, in addition to the ones + // added by default by the actuator. These tags are additive. The actuator will ensure + // these tags are present, but will not remove any other tags that may exist on the + // instance. + // +optional + Tags []TagSpecification `json:"tags,omitempty"` + // IAMInstanceProfile is a reference to an IAM role to assign to the instance + // +optional + IAMInstanceProfile *AWSResourceReference `json:"iamInstanceProfile,omitempty"` + // UserDataSecret contains a local reference to a secret that contains the + // UserData to apply to the instance + // +optional + UserDataSecret *corev1.LocalObjectReference `json:"userDataSecret,omitempty"` + // CredentialsSecret is a reference to the secret with AWS credentials. Otherwise, defaults to permissions + // provided by attached IAM role where the actuator is running. + // +optional + CredentialsSecret *corev1.LocalObjectReference `json:"credentialsSecret,omitempty"` + // KeyName is the name of the KeyPair to use for SSH + // +optional + KeyName *string `json:"keyName,omitempty"` + // DeviceIndex is the index of the device on the instance for the network interface attachment. + // Defaults to 0. + DeviceIndex int64 `json:"deviceIndex"` + // PublicIP specifies whether the instance should get a public IP. If not present, + // it should use the default of its subnet. + // +optional + PublicIP *bool `json:"publicIp,omitempty"` + // NetworkInterfaceType specifies the type of network interface to be used for the primary + // network interface. + // Valid values are "ENA", "EFA", and omitted, which means no opinion and the platform + // chooses a good default which may change over time. + // The current default value is "ENA". + // Please visit https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa.html to learn more + // about the AWS Elastic Fabric Adapter interface option. 
+ // +kubebuilder:validation:Enum:="ENA";"EFA" + // +optional + NetworkInterfaceType AWSNetworkInterfaceType `json:"networkInterfaceType,omitempty"` + // SecurityGroups is an array of references to security groups that should be applied to the + // instance. + // +optional + SecurityGroups []AWSResourceReference `json:"securityGroups,omitempty"` + // Subnet is a reference to the subnet to use for this instance + Subnet AWSResourceReference `json:"subnet"` + // Placement specifies where to create the instance in AWS + Placement Placement `json:"placement"` + // LoadBalancers is the set of load balancers to which the new instance + // should be added once it is created. + // +optional + LoadBalancers []LoadBalancerReference `json:"loadBalancers,omitempty"` + // BlockDevices is the set of block device mapping associated to this instance, + // block device without a name will be used as a root device and only one device without a name is allowed + // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html + // +optional + BlockDevices []BlockDeviceMappingSpec `json:"blockDevices,omitempty"` + // SpotMarketOptions allows users to configure instances to be run using AWS Spot instances. + // +optional + SpotMarketOptions *SpotMarketOptions `json:"spotMarketOptions,omitempty"` + // MetadataServiceOptions allows users to configure instance metadata service interaction options. + // If nothing specified, default AWS IMDS settings will be applied. + // https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_InstanceMetadataOptionsRequest.html + // +optional + MetadataServiceOptions MetadataServiceOptions `json:"metadataServiceOptions,omitempty"` +} + +// BlockDeviceMappingSpec describes a block device mapping +type BlockDeviceMappingSpec struct { + // The device name exposed to the machine (for example, /dev/sdh or xvdh). + // +optional + DeviceName *string `json:"deviceName,omitempty"` + // Parameters used to automatically set up EBS volumes when the machine is + // launched. + // +optional + EBS *EBSBlockDeviceSpec `json:"ebs,omitempty"` + // Suppresses the specified device included in the block device mapping of the + // AMI. + // +optional + NoDevice *string `json:"noDevice,omitempty"` + // The virtual device name (ephemeralN). Machine store volumes are numbered + // starting from 0. An machine type with 2 available machine store volumes + // can specify mappings for ephemeral0 and ephemeral1.The number of available + // machine store volumes depends on the machine type. After you connect to + // the machine, you must mount the volume. + // + // Constraints: For M3 machines, you must specify machine store volumes in + // the block device mapping for the machine. When you launch an M3 machine, + // we ignore any machine store volumes specified in the block device mapping + // for the AMI. + // +optional + VirtualName *string `json:"virtualName,omitempty"` +} + +// EBSBlockDeviceSpec describes a block device for an EBS volume. +// https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EbsBlockDevice +type EBSBlockDeviceSpec struct { + // Indicates whether the EBS volume is deleted on machine termination. + // +optional + DeleteOnTermination *bool `json:"deleteOnTermination,omitempty"` + // Indicates whether the EBS volume is encrypted. Encrypted Amazon EBS volumes + // may only be attached to machines that support Amazon EBS encryption. 
+ // +optional + Encrypted *bool `json:"encrypted,omitempty"` + // Indicates the KMS key that should be used to encrypt the Amazon EBS volume. + // +optional + KMSKey AWSResourceReference `json:"kmsKey,omitempty"` + // The number of I/O operations per second (IOPS) that the volume supports. + // For io1, this represents the number of IOPS that are provisioned for the + // volume. For gp2, this represents the baseline performance of the volume and + // the rate at which the volume accumulates I/O credits for bursting. For more + // information about General Purpose SSD baseline performance, I/O credits, + // and bursting, see Amazon EBS Volume Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) + // in the Amazon Elastic Compute Cloud User Guide. + // + // Minimal and maximal IOPS for io1 and gp2 are constrained. Please, check + // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html + // for precise boundaries for individual volumes. + // + // Condition: This parameter is required for requests to create io1 volumes; + // it is not used in requests to create gp2, st1, sc1, or standard volumes. + // +optional + Iops *int64 `json:"iops,omitempty"` + // The size of the volume, in GiB. + // + // Constraints: 1-16384 for General Purpose SSD (gp2), 4-16384 for Provisioned + // IOPS SSD (io1), 500-16384 for Throughput Optimized HDD (st1), 500-16384 for + // Cold HDD (sc1), and 1-1024 for Magnetic (standard) volumes. If you specify + // a snapshot, the volume size must be equal to or larger than the snapshot + // size. + // + // Default: If you're creating the volume from a snapshot and don't specify + // a volume size, the default is the snapshot size. + // +optional + VolumeSize *int64 `json:"volumeSize,omitempty"` + // The volume type: gp2, io1, st1, sc1, or standard. + // Default: standard + // +optional + VolumeType *string `json:"volumeType,omitempty"` +} + +// SpotMarketOptions defines the options available to a user when configuring +// Machines to run on Spot instances. +// Most users should provide an empty struct. +type SpotMarketOptions struct { + // The maximum price the user is willing to pay for their instances + // Default: On-Demand price + // +optional + MaxPrice *string `json:"maxPrice,omitempty"` +} + +type MetadataServiceAuthentication string + +const ( + // MetadataServiceAuthenticationRequired enforces sending of a signed token header with any instance metadata retrieval (GET) requests. + // Enforces IMDSv2 usage. + MetadataServiceAuthenticationRequired = "Required" + // MetadataServiceAuthenticationOptional allows IMDSv1 usage along with IMDSv2 + MetadataServiceAuthenticationOptional = "Optional" +) + +// MetadataServiceOptions defines the options available to a user when configuring +// Instance Metadata Service (IMDS) Options. +type MetadataServiceOptions struct { + // Authentication determines whether or not the host requires the use of authentication when interacting with the metadata service. + // When using authentication, this enforces v2 interaction method (IMDSv2) with the metadata service. + // When omitted, this means the user has no opinion and the value is left to the platform to choose a good + // default, which is subject to change over time. The current default is optional. 
+ // At this point this field represents `HttpTokens` parameter from `InstanceMetadataOptionsRequest` structure in AWS EC2 API + // https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_InstanceMetadataOptionsRequest.html + // +kubebuilder:validation:Enum=Required;Optional + // +optional + Authentication MetadataServiceAuthentication `json:"authentication,omitempty"` +} + +// AWSResourceReference is a reference to a specific AWS resource by ID, ARN, or filters. +// Only one of ID, ARN or Filters may be specified. Specifying more than one will result in +// a validation error. +type AWSResourceReference struct { + // ID of resource + // +optional + ID *string `json:"id,omitempty"` + // ARN of resource + // +optional + ARN *string `json:"arn,omitempty"` + // Filters is a set of filters used to identify a resource + // +optional + Filters []Filter `json:"filters,omitempty"` +} + +// Placement indicates where to create the instance in AWS +type Placement struct { + // Region is the region to use to create the instance + // +optional + Region string `json:"region,omitempty"` + // AvailabilityZone is the availability zone of the instance + // +optional + AvailabilityZone string `json:"availabilityZone,omitempty"` + // Tenancy indicates if instance should run on shared or single-tenant hardware. There are + // supported 3 options: default, dedicated and host. + // +optional + Tenancy InstanceTenancy `json:"tenancy,omitempty"` +} + +// Filter is a filter used to identify an AWS resource +type Filter struct { + // Name of the filter. Filter names are case-sensitive. + Name string `json:"name"` + // Values includes one or more filter values. Filter values are case-sensitive. + // +optional + Values []string `json:"values,omitempty"` +} + +// TagSpecification is the name/value pair for a tag +type TagSpecification struct { + // Name of the tag + Name string `json:"name"` + // Value of the tag + Value string `json:"value"` +} + +// AWSMachineProviderConfigList contains a list of AWSMachineProviderConfig +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 +type AWSMachineProviderConfigList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + Items []AWSMachineProviderConfig `json:"items"` +} + +// LoadBalancerReference is a reference to a load balancer on AWS. +type LoadBalancerReference struct { + Name string `json:"name"` + Type AWSLoadBalancerType `json:"type"` +} + +// AWSLoadBalancerType is the type of LoadBalancer to use when registering +// an instance with load balancers specified in LoadBalancerNames +type AWSLoadBalancerType string + +// InstanceTenancy indicates if instance should run on shared or single-tenant hardware. +type InstanceTenancy string + +const ( + // DefaultTenancy instance runs on shared hardware + DefaultTenancy InstanceTenancy = "default" + // DedicatedTenancy instance runs on single-tenant hardware + DedicatedTenancy InstanceTenancy = "dedicated" + // HostTenancy instance runs on a Dedicated Host, which is an isolated server with configurations that you can control. + HostTenancy InstanceTenancy = "host" +) + +// Possible values for AWSLoadBalancerType. Add to this list as other types +// of load balancer are supported by the actuator. 
+const ( + ClassicLoadBalancerType AWSLoadBalancerType = "classic" // AWS classic ELB + NetworkLoadBalancerType AWSLoadBalancerType = "network" // AWS Network Load Balancer (NLB) +) + +// AWSNetworkInterfaceType defines the network interface type of the the +// AWS EC2 network interface. +type AWSNetworkInterfaceType string + +const ( + // AWSENANetworkInterfaceType is the default network interface type, + // the EC2 Elastic Network Adapter commonly used with EC2 instances. + // This should be used for standard network operations. + AWSENANetworkInterfaceType AWSNetworkInterfaceType = "ENA" + // AWSEFANetworkInterfaceType is the Elastic Fabric Adapter network interface type. + AWSEFANetworkInterfaceType AWSNetworkInterfaceType = "EFA" +) + +// AWSMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. +// It contains AWS-specific status information. +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 +type AWSMachineProviderStatus struct { + metav1.TypeMeta `json:",inline"` + // InstanceID is the instance ID of the machine created in AWS + // +optional + InstanceID *string `json:"instanceId,omitempty"` + // InstanceState is the state of the AWS instance for this machine + // +optional + InstanceState *string `json:"instanceState,omitempty"` + // Conditions is a set of conditions associated with the Machine to indicate + // errors or other status + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` +} diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_azureprovider.go b/vendor/github.com/openshift/api/machine/v1beta1/types_azureprovider.go new file mode 100644 index 000000000..914e73478 --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_azureprovider.go @@ -0,0 +1,394 @@ +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// AzureMachineProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field +// for an Azure virtual machine. It is used by the Azure machine actuator to create a single Machine. +// Required parameters such as location that are not specified by this configuration, will be defaulted +// by the actuator. +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type AzureMachineProviderSpec struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + // UserDataSecret contains a local reference to a secret that contains the + // UserData to apply to the instance + // +optional + UserDataSecret *corev1.SecretReference `json:"userDataSecret,omitempty"` + // CredentialsSecret is a reference to the secret with Azure credentials. + // +optional + CredentialsSecret *corev1.SecretReference `json:"credentialsSecret,omitempty"` + // Location is the region to use to create the instance + // +optional + Location string `json:"location,omitempty"` + // VMSize is the size of the VM to create. + // +optional + VMSize string `json:"vmSize,omitempty"` + // Image is the OS image to use to create the instance. + Image Image `json:"image"` + // OSDisk represents the parameters for creating the OS disk. 
+ OSDisk OSDisk `json:"osDisk"` + // DataDisk specifies the parameters that are used to add one or more data disks to the machine. + // +optional + DataDisks []DataDisk `json:"dataDisks,omitempty"` + // SSHPublicKey is the public key to use to SSH to the virtual machine. + // +optional + SSHPublicKey string `json:"sshPublicKey,omitempty"` + // PublicIP if true a public IP will be used + PublicIP bool `json:"publicIP"` + // Tags is a list of tags to apply to the machine. + // +optional + Tags map[string]string `json:"tags,omitempty"` + // Network Security Group that needs to be attached to the machine's interface. + // No security group will be attached if empty. + // +optional + SecurityGroup string `json:"securityGroup,omitempty"` + // Application Security Groups that need to be attached to the machine's interface. + // No application security groups will be attached if zero-length. + // +optional + ApplicationSecurityGroups []string `json:"applicationSecurityGroups,omitempty"` + // Subnet to use for this instance + Subnet string `json:"subnet"` + // PublicLoadBalancer to use for this instance + // +optional + PublicLoadBalancer string `json:"publicLoadBalancer,omitempty"` + // InternalLoadBalancerName to use for this instance + // +optional + InternalLoadBalancer string `json:"internalLoadBalancer,omitempty"` + // NatRule to set inbound NAT rule of the load balancer + // +optional + NatRule *int64 `json:"natRule,omitempty"` + // ManagedIdentity to set managed identity name + // +optional + ManagedIdentity string `json:"managedIdentity,omitempty"` + // Vnet to set virtual network name + // +optional + Vnet string `json:"vnet,omitempty"` + // Availability Zone for the virtual machine. + // If nil, the virtual machine should be deployed to no zone + // +optional + Zone *string `json:"zone,omitempty"` + // NetworkResourceGroup is the resource group for the virtual machine's network + // +optional + NetworkResourceGroup string `json:"networkResourceGroup,omitempty"` + // ResourceGroup is the resource group for the virtual machine + // +optional + ResourceGroup string `json:"resourceGroup,omitempty"` + // SpotVMOptions allows the ability to specify the Machine should use a Spot VM + // +optional + SpotVMOptions *SpotVMOptions `json:"spotVMOptions,omitempty"` + // SecurityProfile specifies the Security profile settings for a virtual machine. + // +optional + SecurityProfile *SecurityProfile `json:"securityProfile,omitempty"` + // UltraSSDCapability enables or disables Azure UltraSSD capability for a virtual machine. + // This can be used to allow/disallow binding of Azure UltraSSD to the Machine both as Data Disks or via Persistent Volumes. + // This Azure feature is subject to a specific scope and certain limitations. + // More information on this can be found in the official Azure documentation for Ultra Disks: + // (https://docs.microsoft.com/en-us/azure/virtual-machines/disks-enable-ultra-ssd?tabs=azure-portal#ga-scope-and-limitations). + // + // When omitted, if at least one Data Disk of type UltraSSD is specified, the platform will automatically enable the capability. + // If a Persistent Volume backed by an UltraSSD is bound to a Pod on the Machine, when this field is omitted, the platform will *not* automatically enable the capability (unless already enabled by the presence of an UltraSSD as Data Disk). + // This may manifest in the Pod being stuck in `ContainerCreating` phase. + // This defaulting behaviour may be subject to change in future.
+ // + // When set to "Enabled", if the capability is available for the Machine based on the scope and limitations described above, the capability will be set on the Machine. + // This will thus allow UltraSSD both as Data Disks and Persistent Volumes. + // If set to "Enabled" when the capability can't be available due to scope and limitations, the Machine will go into "Failed" state. + // + // When set to "Disabled", UltraSSDs will not be allowed either as Data Disks nor as Persistent Volumes. + // In this case if any UltraSSDs are specified as Data Disks on a Machine, the Machine will go into a "Failed" state. + // If instead any UltraSSDs are backing the volumes (via Persistent Volumes) of any Pods scheduled on a Node which is backed by the Machine, the Pod may get stuck in `ContainerCreating` phase. + // + // +kubebuilder:validation:Enum:="Enabled";"Disabled" + // +optional + UltraSSDCapability AzureUltraSSDCapabilityState `json:"ultraSSDCapability,omitempty"` + // AcceleratedNetworking enables or disables Azure accelerated networking feature. + // Set to false by default. If true, then this will depend on whether the requested + // VMSize is supported. If set to true with an unsupported VMSize, Azure will return an error. + // +optional + AcceleratedNetworking bool `json:"acceleratedNetworking,omitempty"` + // AvailabilitySet specifies the availability set to use for this instance. + // Availability set should be precreated, before using this field. + // +optional + AvailabilitySet string `json:"availabilitySet,omitempty"` +} + +// SpotVMOptions defines the options relevant to running the Machine on Spot VMs +type SpotVMOptions struct { + // MaxPrice defines the maximum price the user is willing to pay for Spot VM instances + // +optional + MaxPrice *resource.Quantity `json:"maxPrice,omitempty"` +} + +// AzureMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. +// It contains Azure-specific status information. +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 +type AzureMachineProviderStatus struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + // VMID is the ID of the virtual machine created in Azure. + // +optional + VMID *string `json:"vmId,omitempty"` + // VMState is the provisioning state of the Azure virtual machine. + // +optional + VMState *AzureVMState `json:"vmState,omitempty"` + // Conditions is a set of conditions associated with the Machine to indicate + // errors or other status. + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// VMState describes the state of an Azure virtual machine. +type AzureVMState string + +const ( + // ProvisioningState related values + // VMStateCreating ... + VMStateCreating = AzureVMState("Creating") + // VMStateDeleting ... + VMStateDeleting = AzureVMState("Deleting") + // VMStateFailed ... + VMStateFailed = AzureVMState("Failed") + // VMStateMigrating ... + VMStateMigrating = AzureVMState("Migrating") + // VMStateSucceeded ... + VMStateSucceeded = AzureVMState("Succeeded") + // VMStateUpdating ... + VMStateUpdating = AzureVMState("Updating") + + // PowerState related values + // VMStateStarting ... + VMStateStarting = AzureVMState("Starting") + // VMStateRunning ... + VMStateRunning = AzureVMState("Running") + // VMStateStopping ... 
+ VMStateStopping = AzureVMState("Stopping") + // VMStateStopped ... + VMStateStopped = AzureVMState("Stopped") + // VMStateDeallocating ... + VMStateDeallocating = AzureVMState("Deallocating") + // VMStateDeallocated ... + VMStateDeallocated = AzureVMState("Deallocated") + // VMStateUnknown ... + VMStateUnknown = AzureVMState("Unknown") +) + +// Image is a mirror of azure sdk compute.ImageReference +type Image struct { + // Publisher is the name of the organization that created the image + Publisher string `json:"publisher"` + // Offer specifies the name of a group of related images created by the publisher. + // For example, UbuntuServer, WindowsServer + Offer string `json:"offer"` + // SKU specifies an instance of an offer, such as a major release of a distribution. + // For example, 18.04-LTS, 2019-Datacenter + SKU string `json:"sku"` + // Version specifies the version of an image sku. The allowed formats + // are Major.Minor.Build or 'latest'. Major, Minor, and Build are decimal numbers. + // Specify 'latest' to use the latest version of an image available at deploy time. + // Even if you use 'latest', the VM image will not automatically update after deploy + // time even if a new version becomes available. + Version string `json:"version"` + // ResourceID specifies an image to use by ID + ResourceID string `json:"resourceID"` + // Type identifies the source of the image and related information, such as purchase plans. + // Valid values are "ID", "MarketplaceWithPlan", "MarketplaceNoPlan", and omitted, which + // means no opinion and the platform chooses a good default which may change over time. + // Currently that default is "MarketplaceNoPlan" if publisher data is supplied, or "ID" if not. + // For more information about purchase plans, see: + // https://docs.microsoft.com/en-us/azure/virtual-machines/linux/cli-ps-findimage#check-the-purchase-plan-information + // +optional + Type AzureImageType `json:"type,omitempty"` +} + +// AzureImageType provides an enumeration for the valid image types. +type AzureImageType string + +const ( + // AzureImageTypeID specifies that the image should be referenced by its resource ID. + AzureImageTypeID AzureImageType = "ID" + // AzureImageTypeMarketplaceNoPlan are images available from the marketplace that do not require a purchase plan. + AzureImageTypeMarketplaceNoPlan AzureImageType = "MarketplaceNoPlan" + // AzureImageTypeMarketplaceWithPlan require a purchase plan. Upstream these images are referred to as "ThirdParty." + AzureImageTypeMarketplaceWithPlan AzureImageType = "MarketplaceWithPlan" +) + +type OSDisk struct { + // OSType is the operating system type of the OS disk. Possible values include "Linux" and "Windows". + OSType string `json:"osType"` + // ManagedDisk specifies the Managed Disk parameters for the OS disk. + ManagedDisk OSDiskManagedDiskParameters `json:"managedDisk"` + // DiskSizeGB is the size in GB to assign to the data disk. + DiskSizeGB int32 `json:"diskSizeGB"` + // DiskSettings describe ephemeral disk settings for the os disk. + // +optional + DiskSettings DiskSettings `json:"diskSettings,omitempty"` + // CachingType specifies the caching requirements. + // Possible values include: 'None', 'ReadOnly', 'ReadWrite'. + // Empty value means no opinion and the platform chooses a default, which is subject to change over + // time. Currently the default is `None`. 
+ // +optional + // +kubebuilder:validation:Enum=None;ReadOnly;ReadWrite + CachingType string `json:"cachingType,omitempty"` +} + +// DataDisk specifies the parameters that are used to add one or more data disks to the machine. +// A Data Disk is a managed disk that's attached to a virtual machine to store application data. +// It differs from an OS Disk as it doesn't come with a pre-installed OS, and it cannot contain the boot volume. +// It is registered as SCSI drive and labeled with the chosen `lun`. e.g. for `lun: 0` the raw disk device will be available at `/dev/disk/azure/scsi1/lun0`. +// +// As the Data Disk disk device is attached raw to the virtual machine, it will need to be partitioned, formatted with a filesystem and mounted, in order for it to be usable. +// This can be done by creating a custom userdata Secret with custom Ignition configuration to achieve the desired initialization. +// At this stage the previously defined `lun` is to be used as the "device" key for referencing the raw disk device to be initialized. +// Once the custom userdata Secret has been created, it can be referenced in the Machine's `.providerSpec.userDataSecret`. +// For further guidance and examples, please refer to the official OpenShift docs. +type DataDisk struct { + // NameSuffix is the suffix to be appended to the machine name to generate the disk name. + // Each disk name will be in format _. + // NameSuffix name must start and finish with an alphanumeric character and can only contain letters, numbers, underscores, periods or hyphens. + // The overall disk name must not exceed 80 chars in length. + // +kubebuilder:validation:Pattern:=`^[a-zA-Z0-9](?:[\w\.-]*[a-zA-Z0-9])?$` + // +kubebuilder:validation:MaxLength:=78 + // +kubebuilder:validation:Required + NameSuffix string `json:"nameSuffix"` + // DiskSizeGB is the size in GB to assign to the data disk. + // +kubebuilder:validation:Minimum=4 + // +kubebuilder:validation:Required + DiskSizeGB int32 `json:"diskSizeGB"` + // ManagedDisk specifies the Managed Disk parameters for the data disk. + // Empty value means no opinion and the platform chooses a default, which is subject to change over time. + // Currently the default is a ManagedDisk with with storageAccountType: "Premium_LRS" and diskEncryptionSet.id: "Default". + // +optional + ManagedDisk DataDiskManagedDiskParameters `json:"managedDisk,omitempty"` + // Lun Specifies the logical unit number of the data disk. + // This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM. + // This value is also needed for referencing the data disks devices within userdata to perform disk initialization through Ignition (e.g. partition/format/mount). + // The value must be between 0 and 63. + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=63 + // +kubebuilder:validation:Required + Lun int32 `json:"lun,omitempty"` + // CachingType specifies the caching requirements. + // Empty value means no opinion and the platform chooses a default, which is subject to change over time. + // Currently the default is CachingTypeNone. + // +optional + // +kubebuilder:validation:Enum=None;ReadOnly;ReadWrite + CachingType CachingTypeOption `json:"cachingType,omitempty"` + // DeletionPolicy specifies the data disk deletion policy upon Machine deletion. + // Possible values are "Delete","Detach". + // When "Delete" is used the data disk is deleted when the Machine is deleted. 
+ // When "Detach" is used the data disk is detached from the Machine and retained when the Machine is deleted. + // +kubebuilder:validation:Enum=Delete;Detach + // +kubebuilder:validation:Required + DeletionPolicy DiskDeletionPolicyType `json:"deletionPolicy"` +} + +// DiskDeletionPolicyType defines the possible values for DeletionPolicy. +type DiskDeletionPolicyType string + +// These are the valid DiskDeletionPolicyType values. +const ( + // DiskDeletionPolicyTypeDelete means the DiskDeletionPolicyType is "Delete". + DiskDeletionPolicyTypeDelete DiskDeletionPolicyType = "Delete" + // DiskDeletionPolicyTypeDetach means the DiskDeletionPolicyType is "Detach". + DiskDeletionPolicyTypeDetach DiskDeletionPolicyType = "Detach" +) + +// CachingTypeOption defines the different values for a CachingType. +type CachingTypeOption string + +// These are the valid CachingTypeOption values. +const ( + // CachingTypeReadOnly means the CachingType is "ReadOnly". + CachingTypeReadOnly CachingTypeOption = "ReadOnly" + // CachingTypeReadWrite means the CachingType is "ReadWrite". + CachingTypeReadWrite CachingTypeOption = "ReadWrite" + // CachingTypeNone means the CachingType is "None". + CachingTypeNone CachingTypeOption = "None" +) + +// DiskSettings describe ephemeral disk settings for the os disk. +type DiskSettings struct { + // EphemeralStorageLocation enables ephemeral OS when set to 'Local'. + // Possible values include: 'Local'. + // See https://docs.microsoft.com/en-us/azure/virtual-machines/ephemeral-os-disks for full details. + // Empty value means no opinion and the platform chooses a default, which is subject to change over + // time. Currently the default is that disks are saved to remote Azure storage. + // +optional + // +kubebuilder:validation:Enum=Local + EphemeralStorageLocation string `json:"ephemeralStorageLocation,omitempty"` +} + +// OSDiskManagedDiskParameters is the parameters of a OSDisk managed disk. +type OSDiskManagedDiskParameters struct { + // StorageAccountType is the storage account type to use. + // Possible values include "Standard_LRS", "Premium_LRS". + StorageAccountType string `json:"storageAccountType"` + // DiskEncryptionSet is the disk encryption set properties + // +optional + DiskEncryptionSet *DiskEncryptionSetParameters `json:"diskEncryptionSet,omitempty"` +} + +// DataDiskManagedDiskParameters is the parameters of a DataDisk managed disk. +type DataDiskManagedDiskParameters struct { + // StorageAccountType is the storage account type to use. + // Possible values include "Standard_LRS", "Premium_LRS" and "UltraSSD_LRS". + // +kubebuilder:validation:Enum=Standard_LRS;Premium_LRS;UltraSSD_LRS + StorageAccountType StorageAccountType `json:"storageAccountType"` + // DiskEncryptionSet is the disk encryption set properties. + // Empty value means no opinion and the platform chooses a default, which is subject to change over time. + // Currently the default is a DiskEncryptionSet with id: "Default". + // +optional + DiskEncryptionSet *DiskEncryptionSetParameters `json:"diskEncryptionSet,omitempty"` +} + +// StorageAccountType defines the different storage types to use for a ManagedDisk. +type StorageAccountType string + +// These are the valid StorageAccountType types. +const ( + // "StorageAccountStandardLRS" means the Standard_LRS storage type. + StorageAccountStandardLRS StorageAccountType = "Standard_LRS" + // "StorageAccountPremiumLRS" means the Premium_LRS storage type. 
+ StorageAccountPremiumLRS StorageAccountType = "Premium_LRS" + // "StorageAccountUltraSSDLRS" means the UltraSSD_LRS storage type. + StorageAccountUltraSSDLRS StorageAccountType = "UltraSSD_LRS" +) + +// DiskEncryptionSetParameters is the disk encryption set properties +type DiskEncryptionSetParameters struct { + // ID is the disk encryption set ID + // Empty value means no opinion and the platform chooses a default, which is subject to change over time. + // Currently the default is: "Default". + // +optional + ID string `json:"id,omitempty"` +} + +// SecurityProfile specifies the Security profile settings for a +// virtual machine or virtual machine scale set. +type SecurityProfile struct { + // This field indicates whether Host Encryption should be enabled + // or disabled for a virtual machine or virtual machine scale + // set. Default is disabled. + // +optional + EncryptionAtHost *bool `json:"encryptionAtHost,omitempty"` +} + +// AzureUltraSSDCapabilityState defines the different states of an UltraSSDCapability +type AzureUltraSSDCapabilityState string + +// These are the valid AzureUltraSSDCapabilityState states. +const ( + // "AzureUltraSSDCapabilityEnabled" means the Azure UltraSSDCapability is Enabled + AzureUltraSSDCapabilityEnabled AzureUltraSSDCapabilityState = "Enabled" + // "AzureUltraSSDCapabilityDisabled" means the Azure UltraSSDCapability is Disabled + AzureUltraSSDCapabilityDisabled AzureUltraSSDCapabilityState = "Disabled" +) diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_gcpprovider.go b/vendor/github.com/openshift/api/machine/v1beta1/types_gcpprovider.go new file mode 100644 index 000000000..6daac65eb --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_gcpprovider.go @@ -0,0 +1,203 @@ +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// GCPHostMaintenanceType is a type representing acceptable values for OnHostMaintenance field in GCPMachineProviderSpec +type GCPHostMaintenanceType string + +const ( + // MigrateHostMaintenanceType [default] - causes Compute Engine to live migrate an instance when there is a maintenance event. + MigrateHostMaintenanceType GCPHostMaintenanceType = "Migrate" + // TerminateHostMaintenanceType - stops an instance instead of migrating it. + TerminateHostMaintenanceType GCPHostMaintenanceType = "Terminate" +) + +// GCPRestartPolicyType is a type representing acceptable values for RestartPolicy field in GCPMachineProviderSpec +type GCPRestartPolicyType string + +const ( + // Restart an instance if an instance crashes or the underlying infrastructure provider stops the instance as part of a maintenance event. + RestartPolicyAlways GCPRestartPolicyType = "Always" + // Do not restart an instance if an instance crashes or the underlying infrastructure provider stops the instance as part of a maintenance event. + RestartPolicyNever GCPRestartPolicyType = "Never" +) + +// GCPMachineProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field +// for a GCP virtual machine. It is used by the GCP machine actuator to create a single Machine. +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=2 +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type GCPMachineProviderSpec struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // UserDataSecret contains a local reference to a secret that contains the + // UserData to apply to the instance + // +optional + UserDataSecret *corev1.LocalObjectReference `json:"userDataSecret,omitempty"` + // CredentialsSecret is a reference to the secret with GCP credentials. + // +optional + CredentialsSecret *corev1.LocalObjectReference `json:"credentialsSecret,omitempty"` + // CanIPForward Allows this instance to send and receive packets with non-matching destination or source IPs. + // This is required if you plan to use this instance to forward routes. + CanIPForward bool `json:"canIPForward"` + // DeletionProtection whether the resource should be protected against deletion. + DeletionProtection bool `json:"deletionProtection"` + // Disks is a list of disks to be attached to the VM. + // +optional + Disks []*GCPDisk `json:"disks,omitempty"` + // Labels list of labels to apply to the VM. + // +optional + Labels map[string]string `json:"labels,omitempty"` + // Metadata key/value pairs to apply to the VM. + // +optional + Metadata []*GCPMetadata `json:"gcpMetadata,omitempty"` + // NetworkInterfaces is a list of network interfaces to be attached to the VM. + // +optional + NetworkInterfaces []*GCPNetworkInterface `json:"networkInterfaces,omitempty"` + // ServiceAccounts is a list of GCP service accounts to be used by the VM. + ServiceAccounts []GCPServiceAccount `json:"serviceAccounts"` + // Tags list of tags to apply to the VM. + Tags []string `json:"tags,omitempty"` + // TargetPools are used for network TCP/UDP load balancing. A target pool references member instances, + // an associated legacy HttpHealthCheck resource, and, optionally, a backup target pool + // +optional + TargetPools []string `json:"targetPools,omitempty"` + // MachineType is the machine type to use for the VM. + MachineType string `json:"machineType"` + // Region is the region in which the GCP machine provider will create the VM. + Region string `json:"region"` + // Zone is the zone in which the GCP machine provider will create the VM. + Zone string `json:"zone"` + // ProjectID is the project in which the GCP machine provider will create the VM. + // +optional + ProjectID string `json:"projectID,omitempty"` + // GPUs is a list of GPUs to be attached to the VM. + // +optional + GPUs []GCPGPUConfig `json:"gpus,omitempty"` + // Preemptible indicates if created instance is preemptible. + // +optional + Preemptible bool `json:"preemptible,omitempty"` + // OnHostMaintenance determines the behavior when a maintenance event occurs that might cause the instance to reboot. + // This is required to be set to "Terminate" if you want to provision machine with attached GPUs. + // Otherwise, allowed values are "Migrate" and "Terminate". + // If omitted, the platform chooses a default, which is subject to change over time, currently that default is "Migrate". + // +kubebuilder:validation:Enum=Migrate;Terminate; + // +optional + OnHostMaintenance GCPHostMaintenanceType `json:"onHostMaintenance,omitempty"` + // RestartPolicy determines the behavior when an instance crashes or the underlying infrastructure provider stops the instance as part of a maintenance event (default "Always"). + // Cannot be "Always" with preemptible instances. + // Otherwise, allowed values are "Always" and "Never". 
+ // If omitted, the platform chooses a default, which is subject to change over time, currently that default is "Always". + // RestartPolicy represents AutomaticRestart in GCP compute api + // +kubebuilder:validation:Enum=Always;Never; + // +optional + RestartPolicy GCPRestartPolicyType `json:"restartPolicy,omitempty"` +} + +// GCPDisk describes disks for GCP. +type GCPDisk struct { + // AutoDelete indicates if the disk will be auto-deleted when the instance is deleted (default false). + AutoDelete bool `json:"autoDelete"` + // Boot indicates if this is a boot disk (default false). + Boot bool `json:"boot"` + // SizeGB is the size of the disk (in GB). + SizeGB int64 `json:"sizeGb"` + // Type is the type of the disk (eg: pd-standard). + Type string `json:"type"` + // Image is the source image to create this disk. + Image string `json:"image"` + // Labels list of labels to apply to the disk. + Labels map[string]string `json:"labels"` + // EncryptionKey is the customer-supplied encryption key of the disk. + // +optional + EncryptionKey *GCPEncryptionKeyReference `json:"encryptionKey,omitempty"` +} + +// GCPMetadata describes metadata for GCP. +type GCPMetadata struct { + // Key is the metadata key. + Key string `json:"key"` + // Value is the metadata value. + Value *string `json:"value"` +} + +// GCPNetworkInterface describes network interfaces for GCP +type GCPNetworkInterface struct { + // PublicIP indicates if true a public IP will be used + PublicIP bool `json:"publicIP,omitempty"` + // Network is the network name. + Network string `json:"network,omitempty"` + // ProjectID is the project in which the GCP machine provider will create the VM. + ProjectID string `json:"projectID,omitempty"` + // Subnetwork is the subnetwork name. + Subnetwork string `json:"subnetwork,omitempty"` +} + +// GCPServiceAccount describes service accounts for GCP. +type GCPServiceAccount struct { + // Email is the service account email. + Email string `json:"email"` + // Scopes list of scopes to be assigned to the service account. + Scopes []string `json:"scopes"` +} + +// GCPEncryptionKeyReference describes the encryptionKey to use for a disk's encryption. +type GCPEncryptionKeyReference struct { + // KMSKeyName is the reference KMS key, in the format + // +optional + KMSKey *GCPKMSKeyReference `json:"kmsKey,omitempty"` + // KMSKeyServiceAccount is the service account being used for the + // encryption request for the given KMS key. If absent, the Compute + // Engine default service account is used. + // See https://cloud.google.com/compute/docs/access/service-accounts#compute_engine_service_account + // for details on the default service account. + // +optional + KMSKeyServiceAccount string `json:"kmsKeyServiceAccount,omitempty"` +} + +// GCPKMSKeyReference gathers required fields for looking up a GCP KMS Key +type GCPKMSKeyReference struct { + // Name is the name of the customer managed encryption key to be used for the disk encryption. + Name string `json:"name"` + // KeyRing is the name of the KMS Key Ring which the KMS Key belongs to. + KeyRing string `json:"keyRing"` + // ProjectID is the ID of the Project in which the KMS Key Ring exists. + // Defaults to the VM ProjectID if not set. + // +optional + ProjectID string `json:"projectID,omitempty"` + // Location is the GCP location in which the Key Ring exists. + Location string `json:"location"` +} + +// GCPGPUConfig describes type and count of GPUs attached to the instance on GCP. 
+type GCPGPUConfig struct { + // Count is the number of GPUs to be attached to an instance. + Count int32 `json:"count"` + // Type is the type of GPU to be attached to an instance. + // Supported GPU types are: nvidia-tesla-k80, nvidia-tesla-p100, nvidia-tesla-v100, nvidia-tesla-p4, nvidia-tesla-t4 + // +kubebuilder:validation:Pattern=`^nvidia-tesla-(k80|p100|v100|p4|t4)$` + Type string `json:"type"` +} + +// GCPMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. +// It contains GCP-specific status information. +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 +type GCPMachineProviderStatus struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + // InstanceID is the ID of the instance in GCP + // +optional + InstanceID *string `json:"instanceId,omitempty"` + // InstanceState is the provisioning state of the GCP Instance. + // +optional + InstanceState *string `json:"instanceState,omitempty"` + // Conditions is a set of conditions associated with the Machine to indicate + // errors or other status + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` +} diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_machine.go b/vendor/github.com/openshift/api/machine/v1beta1/types_machine.go new file mode 100644 index 000000000..6cbc07f89 --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_machine.go @@ -0,0 +1,345 @@ +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +const ( + // MachineFinalizer is set on PrepareForCreate callback. + MachineFinalizer = "machine.machine.openshift.io" + + // MachineClusterLabelName is the label set on machines linked to a cluster. + MachineClusterLabelName = "cluster.k8s.io/cluster-name" + + // MachineClusterIDLabel is the label that a machine must have to identify the + // cluster to which it belongs. + MachineClusterIDLabel = "machine.openshift.io/cluster-api-cluster" +) + +type MachineStatusError string + +const ( + // Represents that the combination of configuration in the MachineSpec + // is not supported by this cluster. This is not a transient error, but + // indicates a state that must be fixed before progress can be made. + // + // Example: the ProviderSpec specifies an instance type that doesn't exist, + InvalidConfigurationMachineError MachineStatusError = "InvalidConfiguration" + + // This indicates that the MachineSpec has been updated in a way that + // is not supported for reconciliation on this cluster. The spec may be + // completely valid from a configuration standpoint, but the controller + // does not support changing the real world state to match the new + // spec. + // + // Example: the responsible controller is not capable of changing the + // container runtime from docker to rkt. + UnsupportedChangeMachineError MachineStatusError = "UnsupportedChange" + + // This generally refers to exceeding one's quota in a cloud provider, + // or running out of physical machines in an on-premise environment. + InsufficientResourcesMachineError MachineStatusError = "InsufficientResources" + + // There was an error while trying to create a Node to match this + // Machine. 
This may indicate a transient problem that will be fixed + // automatically with time, such as a service outage, or a terminal + // error during creation that doesn't match a more specific + // MachineStatusError value. + // + // Example: timeout trying to connect to GCE. + CreateMachineError MachineStatusError = "CreateError" + + // There was an error while trying to update a Node that this + // Machine represents. This may indicate a transient problem that will be + // fixed automatically with time, such as a service outage, + // + // Example: error updating load balancers + UpdateMachineError MachineStatusError = "UpdateError" + + // An error was encountered while trying to delete the Node that this + // Machine represents. This could be a transient or terminal error, but + // will only be observable if the provider's Machine controller has + // added a finalizer to the object to more gracefully handle deletions. + // + // Example: cannot resolve EC2 IP address. + DeleteMachineError MachineStatusError = "DeleteError" + + // TemplateClonedFromGroupKindAnnotation is the infrastructure machine + // annotation that stores the group-kind of the infrastructure template resource + // that was cloned for the machine. This annotation is set only during cloning a + // template. Older/adopted machines will not have this annotation. + TemplateClonedFromGroupKindAnnotation = "machine.openshift.io/cloned-from-groupkind" + + // TemplateClonedFromNameAnnotation is the infrastructure machine annotation that + // stores the name of the infrastructure template resource + // that was cloned for the machine. This annotation is set only during cloning a + // template. Older/adopted machines will not have this annotation. + TemplateClonedFromNameAnnotation = "machine.openshift.io/cloned-from-name" + + // This error indicates that the machine did not join the cluster + // as a new node within the expected timeframe after instance + // creation at the provider succeeded + // + // Example use case: A controller that deletes Machines which do + // not result in a Node joining the cluster within a given timeout + // and that are managed by a MachineSet + JoinClusterTimeoutMachineError = "JoinClusterTimeoutError" +) + +type ClusterStatusError string + +const ( + // InvalidConfigurationClusterError indicates that the cluster + // configuration is invalid. + InvalidConfigurationClusterError ClusterStatusError = "InvalidConfiguration" + + // UnsupportedChangeClusterError indicates that the cluster + // spec has been updated in an unsupported way. That cannot be + // reconciled. + UnsupportedChangeClusterError ClusterStatusError = "UnsupportedChange" + + // CreateClusterError indicates that an error was encountered + // when trying to create the cluster. + CreateClusterError ClusterStatusError = "CreateError" + + // UpdateClusterError indicates that an error was encountered + // when trying to update the cluster. + UpdateClusterError ClusterStatusError = "UpdateError" + + // DeleteClusterError indicates that an error was encountered + // when trying to delete the cluster. + DeleteClusterError ClusterStatusError = "DeleteError" +) + +type MachineSetStatusError string + +const ( + // Represents that the combination of configuration in the MachineTemplateSpec + // is not supported by this cluster. This is not a transient error, but + // indicates a state that must be fixed before progress can be made. + // + // Example: the ProviderSpec specifies an instance type that doesn't exist. 
+ InvalidConfigurationMachineSetError MachineSetStatusError = "InvalidConfiguration" +) + +type MachineDeploymentStrategyType string + +const ( + // Replace the old MachineSet by new one using rolling update + // i.e. gradually scale down the old MachineSet and scale up the new one. + RollingUpdateMachineDeploymentStrategyType MachineDeploymentStrategyType = "RollingUpdate" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Machine is the Schema for the machines API +// +k8s:openapi-gen=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Phase of machine" +// +kubebuilder:printcolumn:name="Type",type="string",JSONPath=".metadata.labels['machine\\.openshift\\.io/instance-type']",description="Type of instance" +// +kubebuilder:printcolumn:name="Region",type="string",JSONPath=".metadata.labels['machine\\.openshift\\.io/region']",description="Region associated with machine" +// +kubebuilder:printcolumn:name="Zone",type="string",JSONPath=".metadata.labels['machine\\.openshift\\.io/zone']",description="Zone associated with machine" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Machine age" +// +kubebuilder:printcolumn:name="Node",type="string",JSONPath=".status.nodeRef.name",description="Node associated with machine",priority=1 +// +kubebuilder:printcolumn:name="ProviderID",type="string",JSONPath=".spec.providerID",description="Provider ID of machine created in cloud provider",priority=1 +// +kubebuilder:printcolumn:name="State",type="string",JSONPath=".metadata.annotations['machine\\.openshift\\.io/instance-state']",description="State of instance",priority=1 +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 +type Machine struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec MachineSpec `json:"spec,omitempty"` + Status MachineStatus `json:"status,omitempty"` +} + +// MachineSpec defines the desired state of Machine +type MachineSpec struct { + // ObjectMeta will autopopulate the Node created. Use this to + // indicate what labels, annotations, name prefix, etc., should be used + // when creating the Node. + // +optional + ObjectMeta `json:"metadata,omitempty"` + + // LifecycleHooks allow users to pause operations on the machine at + // certain predefined points within the machine lifecycle. + // +optional + LifecycleHooks LifecycleHooks `json:"lifecycleHooks,omitempty"` + + // The list of the taints to be applied to the corresponding Node in additive + // manner. This list will not overwrite any other taints added to the Node on + // an ongoing basis by other entities. These taints should be actively reconciled + // e.g. if you ask the machine controller to apply a taint and then manually remove + // the taint the machine controller will put it back) but not have the machine controller + // remove any taints + // +optional + Taints []corev1.Taint `json:"taints,omitempty"` + + // ProviderSpec details Provider-specific configuration to use during node creation. + // +optional + ProviderSpec ProviderSpec `json:"providerSpec"` + + // ProviderID is the identification ID of the machine provided by the provider. + // This field must match the provider ID as seen on the node object corresponding to this machine. 
+ // This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler + // with cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out + // machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a + // generic out-of-tree provider for autoscaler, this field is required by autoscaler to be + // able to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver + // and then a comparison is done to find out unregistered machines, which are marked for deletion. + // This field will be set by the actuators and consumed by higher level entities like autoscaler that will + // be interfacing with cluster-api as generic provider. + // +optional + ProviderID *string `json:"providerID,omitempty"` +} + +// LifecycleHooks allow users to pause operations on the machine at +// certain predefined points within the machine lifecycle. +type LifecycleHooks struct { + // PreDrain hooks prevent the machine from being drained. + // This also blocks further lifecycle events, such as termination. + // +listType=map + // +listMapKey=name + // +optional + PreDrain []LifecycleHook `json:"preDrain,omitempty"` + + // PreTerminate hooks prevent the machine from being terminated. + // PreTerminate hooks are actioned after the Machine has been drained. + // +listType=map + // +listMapKey=name + // +optional + PreTerminate []LifecycleHook `json:"preTerminate,omitempty"` +} + +// LifecycleHook represents a single instance of a lifecycle hook +type LifecycleHook struct { + // Name defines a unique name for the lifecycle hook. + // The name should be unique and descriptive, ideally 1-3 words, in CamelCase or + // it may be namespaced, eg. foo.example.com/CamelCase. + // Names must be unique and should only be managed by a single entity. + // +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$` + // +kubebuilder:validation:MinLength:=3 + // +kubebuilder:validation:MaxLength:=256 + // +kubebuilder:validation:Required + Name string `json:"name"` + + // Owner defines the owner of the lifecycle hook. + // This should be descriptive enough so that users can identify + // who/what is responsible for blocking the lifecycle. + // This could be the name of a controller (e.g. clusteroperator/etcd) + // or an administrator managing the hook. + // +kubebuilder:validation:MinLength:=3 + // +kubebuilder:validation:MaxLength:=512 + // +kubebuilder:validation:Required + Owner string `json:"owner"` +} + +// MachineStatus defines the observed state of Machine +type MachineStatus struct { + // NodeRef will point to the corresponding Node if it exists. + // +optional + NodeRef *corev1.ObjectReference `json:"nodeRef,omitempty"` + + // LastUpdated identifies when this status was last observed. + // +optional + LastUpdated *metav1.Time `json:"lastUpdated,omitempty"` + + // ErrorReason will be set in the event that there is a terminal problem + // reconciling the Machine and will contain a succinct value suitable + // for machine interpretation. + // + // This field should not be set for transitive errors that a controller + // faces that are expected to be fixed automatically over + // time (like service outages), but instead indicate that something is + // fundamentally wrong with the Machine's spec or the configuration of + // the controller, and that manual intervention is required.
Examples + // of terminal errors would be invalid combinations of settings in the + // spec, values that are unsupported by the controller, or the + // responsible controller itself being critically misconfigured. + // + // Any transient errors that occur during the reconciliation of Machines + // can be added as events to the Machine object and/or logged in the + // controller's output. + // +optional + ErrorReason *MachineStatusError `json:"errorReason,omitempty"` + + // ErrorMessage will be set in the event that there is a terminal problem + // reconciling the Machine and will contain a more verbose string suitable + // for logging and human consumption. + // + // This field should not be set for transitive errors that a controller + // faces that are expected to be fixed automatically over + // time (like service outages), but instead indicate that something is + // fundamentally wrong with the Machine's spec or the configuration of + // the controller, and that manual intervention is required. Examples + // of terminal errors would be invalid combinations of settings in the + // spec, values that are unsupported by the controller, or the + // responsible controller itself being critically misconfigured. + // + // Any transient errors that occur during the reconciliation of Machines + // can be added as events to the Machine object and/or logged in the + // controller's output. + // +optional + ErrorMessage *string `json:"errorMessage,omitempty"` + + // ProviderStatus details a Provider-specific status. + // It is recommended that providers maintain their + // own versioned API types that should be + // serialized/deserialized from this field. + // +optional + // +kubebuilder:validation:XPreserveUnknownFields + ProviderStatus *runtime.RawExtension `json:"providerStatus,omitempty"` + + // Addresses is a list of addresses assigned to the machine. Queried from cloud provider, if available. + // +optional + Addresses []corev1.NodeAddress `json:"addresses,omitempty"` + + // LastOperation describes the last-operation performed by the machine-controller. + // This API should be useful as a history in terms of the latest operation performed on the + // specific machine. It should also convey the state of the latest-operation for example if + // it is still on-going, failed or completed successfully. + // +optional + LastOperation *LastOperation `json:"lastOperation,omitempty"` + + // Phase represents the current phase of machine actuation. + // One of: Failed, Provisioning, Provisioned, Running, Deleting + // +optional + Phase *string `json:"phase,omitempty"` + + // Conditions defines the current state of the Machine + Conditions Conditions `json:"conditions,omitempty"` +} + +// LastOperation represents the detail of the last performed operation on the MachineObject. +type LastOperation struct { + // Description is the human-readable description of the last operation. + Description *string `json:"description,omitempty"` + + // LastUpdated is the timestamp at which LastOperation API was last-updated. + LastUpdated *metav1.Time `json:"lastUpdated,omitempty"` + + // State is the current status of the last performed operation. + // E.g. Processing, Failed, Successful etc + State *string `json:"state,omitempty"` + + // Type is the type of operation which was last performed. + // E.g. 
Create, Delete, Update etc + Type *string `json:"type,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// MachineList contains a list of Machine +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 +type MachineList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Machine `json:"items"` +} diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_machinehealthcheck.go b/vendor/github.com/openshift/api/machine/v1beta1/types_machinehealthcheck.go new file mode 100644 index 000000000..bb79f725c --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_machinehealthcheck.go @@ -0,0 +1,135 @@ +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// RemediationStrategyType contains remediation strategy type +type RemediationStrategyType string + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// MachineHealthCheck is the Schema for the machinehealthchecks API +// +kubebuilder:subresource:status +// +kubebuilder:resource:shortName=mhc;mhcs +// +k8s:openapi-gen=true +// +kubebuilder:printcolumn:name="MaxUnhealthy",type="string",JSONPath=".spec.maxUnhealthy",description="Maximum number of unhealthy machines allowed" +// +kubebuilder:printcolumn:name="ExpectedMachines",type="integer",JSONPath=".status.expectedMachines",description="Number of machines currently monitored" +// +kubebuilder:printcolumn:name="CurrentHealthy",type="integer",JSONPath=".status.currentHealthy",description="Current observed healthy machines" +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 +type MachineHealthCheck struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Specification of machine health check policy + // +optional + Spec MachineHealthCheckSpec `json:"spec,omitempty"` + + // Most recently observed status of MachineHealthCheck resource + // +optional + Status MachineHealthCheckStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// MachineHealthCheckList contains a list of MachineHealthCheck +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 +type MachineHealthCheckList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MachineHealthCheck `json:"items"` +} + +// MachineHealthCheckSpec defines the desired state of MachineHealthCheck +type MachineHealthCheckSpec struct { + // Label selector to match machines whose health will be exercised. + // Note: An empty selector will match all machines. + Selector metav1.LabelSelector `json:"selector"` + + // UnhealthyConditions contains a list of the conditions that determine + // whether a node is considered unhealthy. The conditions are combined in a + // logical OR, i.e. if any of the conditions is met, the node is unhealthy. 
+ // + // +kubebuilder:validation:MinItems=1 + UnhealthyConditions []UnhealthyCondition `json:"unhealthyConditions"` + + // Any further remediation is only allowed if at most "MaxUnhealthy" machines selected by + // "selector" are not healthy. + // Expects either a positive integer value or a percentage value. + // Percentage values must be positive whole numbers and are capped at 100%. + // Both 0 and 0% are valid and will block all remediation. + // +kubebuilder:default:="100%" + // +kubebuilder:validation:XIntOrString + // +kubebuilder:validation:Pattern="^((100|[0-9]{1,2})%|[0-9]+)$" + // +optional + MaxUnhealthy *intstr.IntOrString `json:"maxUnhealthy,omitempty"` + + // Machines older than this duration without a node will be considered to have + // failed and will be remediated. + // To prevent Machines without Nodes from being removed, disable startup checks + // by setting this value explicitly to "0". + // Expects an unsigned duration string of decimal numbers each with optional + // fraction and a unit suffix, eg "300ms", "1.5h" or "2h45m". + // Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". + // +optional + // +kubebuilder:default:="10m" + // +kubebuilder:validation:Pattern="^0|([0-9]+(\\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$" + // +kubebuilder:validation:Type:=string + // +optional + NodeStartupTimeout *metav1.Duration `json:"nodeStartupTimeout,omitempty"` + + // RemediationTemplate is a reference to a remediation template + // provided by an infrastructure provider. + // + // This field is completely optional; when filled, the MachineHealthCheck controller + // creates a new object from the template referenced and hands off remediation of the machine to + // a controller that lives outside of Machine API Operator. + // +optional + RemediationTemplate *corev1.ObjectReference `json:"remediationTemplate,omitempty"` +} + +// UnhealthyCondition represents a Node condition type and value with a timeout +// specified as a duration. When the named condition has been in the given +// status for at least the timeout value, a node is considered unhealthy. +type UnhealthyCondition struct { + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:MinLength=1 + Type corev1.NodeConditionType `json:"type"` + + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:MinLength=1 + Status corev1.ConditionStatus `json:"status"` + + // Expects an unsigned duration string of decimal numbers each with optional + // fraction and a unit suffix, eg "300ms", "1.5h" or "2h45m". + // Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
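// Illustrative example (not part of the vendored file): a sketch of a MachineHealthCheckSpec
// using the fields above. The label selector value and the thresholds are hypothetical; the
// corev1, metav1 and intstr packages are the ones already imported by this file, and the
// standard library "time" package would also be needed.
//
//	spec := MachineHealthCheckSpec{
//		Selector: metav1.LabelSelector{
//			MatchLabels: map[string]string{"machine.openshift.io/cluster-api-machineset": "worker-a"},
//		},
//		UnhealthyConditions: []UnhealthyCondition{{
//			Type:    corev1.NodeReady,
//			Status:  corev1.ConditionUnknown,
//			Timeout: metav1.Duration{Duration: 5 * time.Minute},
//		}},
//		MaxUnhealthy:       &intstr.IntOrString{Type: intstr.String, StrVal: "40%"},
//		NodeStartupTimeout: &metav1.Duration{Duration: 10 * time.Minute},
//	}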
+ // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$" + // +kubebuilder:validation:Type:=string + Timeout metav1.Duration `json:"timeout"` +} + +// MachineHealthCheckStatus defines the observed state of MachineHealthCheck +type MachineHealthCheckStatus struct { + // total number of machines counted by this machine health check + // +kubebuilder:validation:Minimum=0 + ExpectedMachines *int `json:"expectedMachines"` + + // total number of healthy machines counted by this machine health check + // +kubebuilder:validation:Minimum=0 + CurrentHealthy *int `json:"currentHealthy"` + + // RemediationsAllowed is the number of further remediations allowed by this machine health check before + // maxUnhealthy short circuiting will be applied + // +kubebuilder:validation:Minimum=0 + // +optional + RemediationsAllowed int32 `json:"remediationsAllowed"` + + // Conditions defines the current state of the MachineHealthCheck + // +optional + Conditions Conditions `json:"conditions,omitempty"` +} diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_machineset.go b/vendor/github.com/openshift/api/machine/v1beta1/types_machineset.go new file mode 100644 index 000000000..bbc6b736b --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_machineset.go @@ -0,0 +1,138 @@ +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// MachineSet ensures that a specified number of machine replicas are running at any given time. +// +k8s:openapi-gen=true +// +kubebuilder:subresource:status +// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.labelSelector +// +kubebuilder:printcolumn:name="Desired",type="integer",JSONPath=".spec.replicas",description="Desired Replicas" +// +kubebuilder:printcolumn:name="Current",type="integer",JSONPath=".status.replicas",description="Current Replicas" +// +kubebuilder:printcolumn:name="Ready",type="integer",JSONPath=".status.readyReplicas",description="Ready Replicas" +// +kubebuilder:printcolumn:name="Available",type="string",JSONPath=".status.availableReplicas",description="Observed number of available replicas" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Machineset age" +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 +type MachineSet struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec MachineSetSpec `json:"spec,omitempty"` + Status MachineSetStatus `json:"status,omitempty"` +} + +// MachineSetSpec defines the desired state of MachineSet +type MachineSetSpec struct { + // Replicas is the number of desired replicas. + // This is a pointer to distinguish between explicit zero and unspecified. + // Defaults to 1. + // +kubebuilder:default=1 + Replicas *int32 `json:"replicas,omitempty"` + // MinReadySeconds is the minimum number of seconds for which a newly created machine should be ready. + // Defaults to 0 (machine will be considered available as soon as it is ready) + // +optional + MinReadySeconds int32 `json:"minReadySeconds,omitempty"` + // DeletePolicy defines the policy used to identify nodes to delete when downscaling. + // Defaults to "Random".
Valid values are "Random", "Newest", "Oldest". + // +kubebuilder:validation:Enum=Random;Newest;Oldest + DeletePolicy string `json:"deletePolicy,omitempty"` + // Selector is a label query over machines that should match the replica count. + // Label keys and values that must match in order to be controlled by this MachineSet. + // It must match the machine template's labels. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + Selector metav1.LabelSelector `json:"selector"` + // Template is the object that describes the machine that will be created if + // insufficient replicas are detected. + // +optional + Template MachineTemplateSpec `json:"template,omitempty"` +} + +// MachineSetDeletePolicy defines how priority is assigned to nodes to delete when +// downscaling a MachineSet. Defaults to "Random". +type MachineSetDeletePolicy string + +const ( + // RandomMachineSetDeletePolicy prioritizes both Machines that have the annotation + // "cluster.k8s.io/delete-machine=yes" and Machines that are unhealthy + // (Status.ErrorReason or Status.ErrorMessage are set to a non-empty value). + // Finally, it picks Machines at random to delete. + RandomMachineSetDeletePolicy MachineSetDeletePolicy = "Random" + // NewestMachineSetDeletePolicy prioritizes both Machines that have the annotation + // "cluster.k8s.io/delete-machine=yes" and Machines that are unhealthy + // (Status.ErrorReason or Status.ErrorMessage are set to a non-empty value). + // It then prioritizes the newest Machines for deletion based on the Machine's CreationTimestamp. + NewestMachineSetDeletePolicy MachineSetDeletePolicy = "Newest" + // OldestMachineSetDeletePolicy prioritizes both Machines that have the annotation + // "cluster.k8s.io/delete-machine=yes" and Machines that are unhealthy + // (Status.ErrorReason or Status.ErrorMessage are set to a non-empty value). + // It then prioritizes the oldest Machines for deletion based on the Machine's CreationTimestamp. + OldestMachineSetDeletePolicy MachineSetDeletePolicy = "Oldest" +) + +// MachineTemplateSpec describes the data needed to create a Machine from a template +type MachineTemplateSpec struct { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + ObjectMeta `json:"metadata,omitempty"` + // Specification of the desired behavior of the machine. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec MachineSpec `json:"spec,omitempty"` +} + +// MachineSetStatus defines the observed state of MachineSet +type MachineSetStatus struct { + // Replicas is the most recently observed number of replicas. + Replicas int32 `json:"replicas"` + // The number of replicas that have labels matching the labels of the machine template of the MachineSet. + // +optional + FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty"` + // The number of ready replicas for this MachineSet. A machine is considered ready when the node has been created and is "Ready". + // +optional + ReadyReplicas int32 `json:"readyReplicas,omitempty"` + // The number of available replicas (ready for at least minReadySeconds) for this MachineSet. + // +optional + AvailableReplicas int32 `json:"availableReplicas,omitempty"` + // ObservedGeneration reflects the generation of the most recently observed MachineSet.
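// Illustrative example (not part of the vendored file): a sketch of a MachineSetSpec where the
// selector matches the template labels, as required above; the label key/value, replica count
// and delete policy are hypothetical.
//
//	replicas := int32(3)
//	labels := map[string]string{"machine.openshift.io/cluster-api-machineset": "worker-a"}
//	spec := MachineSetSpec{
//		Replicas:     &replicas,
//		DeletePolicy: string(OldestMachineSetDeletePolicy),
//		Selector:     metav1.LabelSelector{MatchLabels: labels},
//		Template: MachineTemplateSpec{
//			ObjectMeta: ObjectMeta{Labels: labels},
//		},
//	}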
+ // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + // In the event that there is a terminal problem reconciling the + // replicas, both ErrorReason and ErrorMessage will be set. ErrorReason + // will be populated with a succinct value suitable for machine + // interpretation, while ErrorMessage will contain a more verbose + // string suitable for logging and human consumption. + // + // These fields should not be set for transitive errors that a + // controller faces that are expected to be fixed automatically over + // time (like service outages), but instead indicate that something is + // fundamentally wrong with the MachineTemplate's spec or the configuration of + // the machine controller, and that manual intervention is required. Examples + // of terminal errors would be invalid combinations of settings in the + // spec, values that are unsupported by the machine controller, or the + // responsible machine controller itself being critically misconfigured. + // + // Any transient errors that occur during the reconciliation of Machines + // can be added as events to the MachineSet object and/or logged in the + // controller's output. + // +optional + ErrorReason *MachineSetStatusError `json:"errorReason,omitempty"` + // +optional + ErrorMessage *string `json:"errorMessage,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// MachineSetList contains a list of MachineSet +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 +type MachineSetList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MachineSet `json:"items"` +} diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_provider.go b/vendor/github.com/openshift/api/machine/v1beta1/types_provider.go new file mode 100644 index 000000000..69a5bd07e --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_provider.go @@ -0,0 +1,222 @@ +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// ProviderSpec defines the configuration to use during node creation. +type ProviderSpec struct { + + // No more than one of the following may be specified. + + // Value is an inlined, serialized representation of the resource + // configuration. It is recommended that providers maintain their own + // versioned API types that should be serialized/deserialized from this + // field, akin to component config. + // +optional + // +kubebuilder:validation:XPreserveUnknownFields + Value *runtime.RawExtension `json:"value,omitempty"` +} + +// ObjectMeta is metadata that all persisted resources must have, which includes all objects +// users must create. This is a copy of customizable fields from metav1.ObjectMeta. +// +// ObjectMeta is embedded in `Machine.Spec`, `MachineDeployment.Template` and `MachineSet.Template`, +// which are not top-level Kubernetes objects. Given that metav1.ObjectMeta has lots of special cases +// and read-only fields which end up in the generated CRD validation, having it as a subset simplifies +// the API and some issues that can impact user experience. 
+// +// During the [upgrade to controller-tools@v2](https://github.com/kubernetes-sigs/cluster-api/pull/1054) +// for v1alpha2, we noticed a failure would occur running Cluster API test suite against the new CRDs, +// specifically `spec.metadata.creationTimestamp in body must be of type string: "null"`. +// The investigation showed that `controller-tools@v2` behaves differently than its previous version +// when handling types from [metav1](k8s.io/apimachinery/pkg/apis/meta/v1) package. +// +// In more details, we found that embedded (non-top level) types that embedded `metav1.ObjectMeta` +// had validation properties, including for `creationTimestamp` (metav1.Time). +// The `metav1.Time` type specifies a custom json marshaller that, when IsZero() is true, returns `null` +// which breaks validation because the field isn't marked as nullable. +// +// In future versions, controller-tools@v2 might allow overriding the type and validation for embedded +// types. When that happens, this hack should be revisited. +type ObjectMeta struct { + // Name must be unique within a namespace. Is required when creating resources, although + // some resources may allow a client to request the generation of an appropriate name + // automatically. Name is primarily intended for creation idempotence and configuration + // definition. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // +optional + Name string `json:"name,omitempty"` + + // GenerateName is an optional prefix, used by the server, to generate a unique + // name ONLY IF the Name field has not been provided. + // If this field is used, the name returned to the client will be different + // than the name passed. This value will also be combined with a unique suffix. + // The provided value has the same validation rules as the Name field, + // and may be truncated by the length of the suffix required to make the value + // unique on the server. + // + // If this field is specified and the generated name exists, the server will + // NOT return a 409 - instead, it will either return 201 Created or 500 with Reason + // ServerTimeout indicating a unique name could not be found in the time allotted, and the client + // should retry (optionally after the time indicated in the Retry-After header). + // + // Applied only if Name is not specified. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency + // +optional + GenerateName string `json:"generateName,omitempty"` + + // Namespace defines the space within each name must be unique. An empty namespace is + // equivalent to the "default" namespace, but "default" is the canonical representation. + // Not all objects are required to be scoped to a namespace - the value of this field for + // those objects will be empty. + // + // Must be a DNS_LABEL. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/namespaces + // +optional + Namespace string `json:"namespace,omitempty"` + + // Map of string keys and values that can be used to organize and categorize + // (scope and select) objects. May match selectors of replication controllers + // and services. + // More info: http://kubernetes.io/docs/user-guide/labels + // +optional + Labels map[string]string `json:"labels,omitempty"` + + // Annotations is an unstructured key value map stored with a resource that may be + // set by external tools to store and retrieve arbitrary metadata. 
They are not + // queryable and should be preserved when modifying objects. + // More info: http://kubernetes.io/docs/user-guide/annotations + // +optional + Annotations map[string]string `json:"annotations,omitempty"` + + // List of objects depended by this object. If ALL objects in the list have + // been deleted, this object will be garbage collected. If this object is managed by a controller, + // then an entry in this list will point to this controller, with the controller field set to true. + // There cannot be more than one managing controller. + // +optional + // +patchMergeKey=uid + // +patchStrategy=merge + OwnerReferences []metav1.OwnerReference `json:"ownerReferences,omitempty" patchStrategy:"merge" patchMergeKey:"uid"` +} + +// ConditionSeverity expresses the severity of a Condition Type failing. +type ConditionSeverity string + +const ( + // ConditionSeverityError specifies that a condition with `Status=False` is an error. + ConditionSeverityError ConditionSeverity = "Error" + + // ConditionSeverityWarning specifies that a condition with `Status=False` is a warning. + ConditionSeverityWarning ConditionSeverity = "Warning" + + // ConditionSeverityInfo specifies that a condition with `Status=False` is informative. + ConditionSeverityInfo ConditionSeverity = "Info" + + // ConditionSeverityNone should apply only to conditions with `Status=True`. + ConditionSeverityNone ConditionSeverity = "" +) + +// ConditionType is a valid value for Condition.Type. +type ConditionType string + +// Valid conditions for a machine. +const ( + // MachineCreated indicates whether the machine has been created or not. If not, + // it should include a reason and message for the failure. + // NOTE: MachineCreation is here for historical reasons, MachineCreated should be used instead + MachineCreation ConditionType = "MachineCreation" + // MachineCreated indicates whether the machine has been created or not. If not, + // it should include a reason and message for the failure. + MachineCreated ConditionType = "MachineCreated" + // InstanceExistsCondition is set on the Machine to show whether a virtual machine has been created by the cloud provider. + InstanceExistsCondition ConditionType = "InstanceExists" + // RemediationAllowedCondition is set on MachineHealthChecks to show the status of whether the MachineHealthCheck is + // allowed to remediate any Machines or whether it is blocked from remediating any further. + RemediationAllowedCondition ConditionType = "RemediationAllowed" + // ExternalRemediationTemplateAvailable is set on machinehealthchecks when MachineHealthCheck controller uses external remediation. + // ExternalRemediationTemplateAvailable is set to false if external remediation template is not found. + ExternalRemediationTemplateAvailable ConditionType = "ExternalRemediationTemplateAvailable" + // ExternalRemediationRequestAvailable is set on machinehealthchecks when MachineHealthCheck controller uses external remediation. + // ExternalRemediationRequestAvailable is set to false if creating external remediation request fails. + ExternalRemediationRequestAvailable ConditionType = "ExternalRemediationRequestAvailable" + // MachineDrained is set on a machine to indicate that the machine has been drained. When an error occurs during + // the drain process, the condition will be added with a false status and details of the error.
+ MachineDrained ConditionType = "Drained" + // MachineDrainable is set on a machine to indicate whether or not the machine can be drained, or, whether some + // deletion hook is blocking the drain operation. + MachineDrainable ConditionType = "Drainable" + // MachineTerminable is set on a machine to indicate whether or not the machine can be terminated, or, whether some + // deletion hook is blocking the termination operation. + MachineTerminable ConditionType = "Terminable" +) + +const ( + // MachineCreationSucceeded indicates machine creation success. + MachineCreationSucceededConditionReason string = "MachineCreationSucceeded" + // MachineCreationFailed indicates machine creation failure. + MachineCreationFailedConditionReason string = "MachineCreationFailed" + // ErrorCheckingProviderReason is the reason used when the exist operation fails. + // This would normally be because we cannot contact the provider. + ErrorCheckingProviderReason = "ErrorCheckingProvider" + // InstanceMissingReason is the reason used when the machine was provisioned, but the instance has gone missing. + InstanceMissingReason = "InstanceMissing" + // InstanceNotCreatedReason is the reason used when the machine has not yet been provisioned. + InstanceNotCreatedReason = "InstanceNotCreated" + // TooManyUnhealthy is the reason used when too many Machines are unhealthy and the MachineHealthCheck is blocked + // from making any further remediations. + TooManyUnhealthyReason = "TooManyUnhealthy" + // ExternalRemediationTemplateNotFound is the reason used when a machine health check fails to find external remediation template. + ExternalRemediationTemplateNotFound = "ExternalRemediationTemplateNotFound" + // ExternalRemediationRequestCreationFailed is the reason used when a machine health check fails to create external remediation request. + ExternalRemediationRequestCreationFailed = "ExternalRemediationRequestCreationFailed" + // MachineHookPresent indicates that a machine lifecycle hook is blocking part of the lifecycle of the machine. + // This should be used with the `Drainable` and `Terminable` machine condition types. + MachineHookPresent = "HookPresent" + // MachineDrainError indicates an error occurred when draining the machine. + // This should be used with the `Drained` condition type. + MachineDrainError = "DrainError" +) + +// Condition defines an observation of a Machine API resource operational state. +type Condition struct { + // Type of condition in CamelCase or in foo.example.com/CamelCase. + // Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + // can be useful (see .node.status.conditions), the ability to deconflict is important. + // +required + Type ConditionType `json:"type"` + + // Status of the condition, one of True, False, Unknown. + // +required + Status corev1.ConditionStatus `json:"status"` + + // Severity provides an explicit classification of Reason code, so the users or machines can immediately + // understand the current situation and act accordingly. + // The Severity field MUST be set only when Status=False. + // +optional + Severity ConditionSeverity `json:"severity,omitempty"` + + // Last time the condition transitioned from one status to another. + // This should be when the underlying condition changed. If that is not known, then using the time when + // the API field changed is acceptable. 
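// Illustrative example (not part of the vendored file): a sketch of how a controller might
// record one of the condition types above on a Machine's status; the message text is
// hypothetical, and Reason/Message/LastTransitionTime are the Condition fields defined just below.
//
//	cond := Condition{
//		Type:               InstanceExistsCondition,
//		Status:             corev1.ConditionFalse,
//		Severity:           ConditionSeverityWarning,
//		Reason:             InstanceMissingReason,
//		Message:            "instance was not found by the cloud provider",
//		LastTransitionTime: metav1.Now(),
//	}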
+ // +required + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` + + // The reason for the condition's last transition in CamelCase. + // The specific API may choose whether or not this field is considered a guaranteed API. + // This field may not be empty. + // +optional + Reason string `json:"reason,omitempty"` + + // A human readable message indicating details about the transition. + // This field may be empty. + // +optional + Message string `json:"message,omitempty"` +} + +// Conditions provide observations of the operational state of a Machine API resource. +type Conditions []Condition diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_vsphereprovider.go b/vendor/github.com/openshift/api/machine/v1beta1/types_vsphereprovider.go new file mode 100644 index 000000000..2cf75675d --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_vsphereprovider.go @@ -0,0 +1,138 @@ +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// VSphereMachineProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field +// for a VSphere virtual machine. It is used by the vSphere machine actuator to create a single Machine. +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type VSphereMachineProviderSpec struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + // UserDataSecret contains a local reference to a secret that contains the + // UserData to apply to the instance + // +optional + UserDataSecret *corev1.LocalObjectReference `json:"userDataSecret,omitempty"` + // CredentialsSecret is a reference to the secret with vSphere credentials. + // +optional + CredentialsSecret *corev1.LocalObjectReference `json:"credentialsSecret,omitempty"` + // Template is the name, inventory path, or instance UUID of the template + // used to clone new machines. + Template string `json:"template"` + // Workspace describes the workspace to use for the machine. + // +optional + Workspace *Workspace `json:"workspace,omitempty"` + // Network is the network configuration for this machine's VM. + Network NetworkSpec `json:"network"` + // NumCPUs is the number of virtual processors in a virtual machine. + // Defaults to the analogue property value in the template from which this + // machine is cloned. + // +optional + NumCPUs int32 `json:"numCPUs,omitempty"` + // NumCoresPerSocket is the number of cores among which to distribute CPUs in this + // virtual machine. + // Defaults to the analogue property value in the template from which this + // machine is cloned. + // +optional + NumCoresPerSocket int32 `json:"numCoresPerSocket,omitempty"` + // MemoryMiB is the size of a virtual machine's memory, in MiB. + // Defaults to the analogue property value in the template from which this + // machine is cloned. + // +optional + MemoryMiB int64 `json:"memoryMiB,omitempty"` + // DiskGiB is the size of a virtual machine's disk, in GiB. + // Defaults to the analogue property value in the template from which this + // machine is cloned. + // +optional + DiskGiB int32 `json:"diskGiB,omitempty"` + // Snapshot is the name of the snapshot from which the VM was cloned + // +optional + Snapshot string `json:"snapshot"` + // CloneMode specifies the type of clone operation.
+ // The LinkedClone mode is only supported for templates that have at least + // one snapshot. If the template has no snapshots, then CloneMode defaults + // to FullClone. + // When LinkedClone mode is enabled the DiskGiB field is ignored as it is + // not possible to expand disks of linked clones. + // Defaults to LinkedClone, but fails gracefully to FullClone if the source + // of the clone operation has no snapshots. + // +optional + CloneMode CloneMode `json:"cloneMode,omitempty"` +} + +// CloneMode is the type of clone operation used to clone a VM from a template. +type CloneMode string + +const ( + // FullClone indicates a VM will have no relationship to the source of the + // clone operation once the operation is complete. This is the safest clone + // mode, but it is not the fastest. + FullClone CloneMode = "fullClone" + // LinkedClone means resulting VMs will be dependent upon the snapshot of + // the source VM/template from which the VM was cloned. This is the fastest + // clone mode, but it also prevents expanding a VM's disk beyond the size of + // the source VM/template. + LinkedClone CloneMode = "linkedClone" +) + +// NetworkSpec defines the virtual machine's network configuration. +type NetworkSpec struct { + // Devices defines the virtual machine's network interfaces. + Devices []NetworkDeviceSpec `json:"devices"` +} + +// NetworkDeviceSpec defines the network configuration for a virtual machine's +// network device. +type NetworkDeviceSpec struct { + // NetworkName is the name of the vSphere network to which the device + // will be connected. + NetworkName string `json:"networkName"` +} + +// Workspace defines a workspace configuration for the vSphere cloud +// provider. +type Workspace struct { + // Server is the IP address or FQDN of the vSphere endpoint. + // +optional + Server string `gcfg:"server,omitempty" json:"server,omitempty"` + // Datacenter is the datacenter in which VMs are created/located. + // +optional + Datacenter string `gcfg:"datacenter,omitempty" json:"datacenter,omitempty"` + // Folder is the folder in which VMs are created/located. + // +optional + Folder string `gcfg:"folder,omitempty" json:"folder,omitempty"` + // Datastore is the datastore in which VMs are created/located. + // +optional + Datastore string `gcfg:"default-datastore,omitempty" json:"datastore,omitempty"` + // ResourcePool is the resource pool in which VMs are created/located. + // +optional + ResourcePool string `gcfg:"resourcepool-path,omitempty" json:"resourcePool,omitempty"` +} + +// VSphereMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. +// It contains VSphere-specific status information. +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 +type VSphereMachineProviderStatus struct { + metav1.TypeMeta `json:",inline"` + + // InstanceID is the ID of the instance in VSphere + // +optional + InstanceID *string `json:"instanceId,omitempty"` + // InstanceState is the provisioning state of the VSphere Instance. + // +optional + InstanceState *string `json:"instanceState,omitempty"` + // Conditions is a set of conditions associated with the Machine to indicate + // errors or other status + Conditions []metav1.Condition `json:"conditions,omitempty"` + // TaskRef is a managed object reference to a Task related to the machine. + // This value is set automatically at runtime and should not be set or + // modified by users.
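// Illustrative example (not part of the vendored file): a sketch of a VSphereMachineProviderSpec
// and how it could be wrapped into the ProviderSpec.Value RawExtension defined earlier in this
// package. All names (template, server, datacenter, datastore, network) are hypothetical, and the
// standard library "encoding/json" package is assumed for marshalling.
//
//	cfg := VSphereMachineProviderSpec{
//		Template: "rhcos-template",
//		Workspace: &Workspace{
//			Server:     "vcenter.example.com",
//			Datacenter: "dc1",
//			Datastore:  "ds1",
//		},
//		Network:   NetworkSpec{Devices: []NetworkDeviceSpec{{NetworkName: "vm-network"}}},
//		NumCPUs:   4,
//		MemoryMiB: 16384,
//		DiskGiB:   120,
//		CloneMode: LinkedClone,
//	}
//	raw, err := json.Marshal(&cfg)
//	if err != nil {
//		// handle the marshalling error
//	}
//	providerSpec := ProviderSpec{Value: &runtime.RawExtension{Raw: raw}}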
+ // +optional + TaskRef string `json:"taskRef,omitempty"` +} diff --git a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 000000000..4ac2a8a08 --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,1657 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSMachineProviderConfig) DeepCopyInto(out *AWSMachineProviderConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.AMI.DeepCopyInto(&out.AMI) + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]TagSpecification, len(*in)) + copy(*out, *in) + } + if in.IAMInstanceProfile != nil { + in, out := &in.IAMInstanceProfile, &out.IAMInstanceProfile + *out = new(AWSResourceReference) + (*in).DeepCopyInto(*out) + } + if in.UserDataSecret != nil { + in, out := &in.UserDataSecret, &out.UserDataSecret + *out = new(v1.LocalObjectReference) + **out = **in + } + if in.CredentialsSecret != nil { + in, out := &in.CredentialsSecret, &out.CredentialsSecret + *out = new(v1.LocalObjectReference) + **out = **in + } + if in.KeyName != nil { + in, out := &in.KeyName, &out.KeyName + *out = new(string) + **out = **in + } + if in.PublicIP != nil { + in, out := &in.PublicIP, &out.PublicIP + *out = new(bool) + **out = **in + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]AWSResourceReference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Subnet.DeepCopyInto(&out.Subnet) + out.Placement = in.Placement + if in.LoadBalancers != nil { + in, out := &in.LoadBalancers, &out.LoadBalancers + *out = make([]LoadBalancerReference, len(*in)) + copy(*out, *in) + } + if in.BlockDevices != nil { + in, out := &in.BlockDevices, &out.BlockDevices + *out = make([]BlockDeviceMappingSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SpotMarketOptions != nil { + in, out := &in.SpotMarketOptions, &out.SpotMarketOptions + *out = new(SpotMarketOptions) + (*in).DeepCopyInto(*out) + } + out.MetadataServiceOptions = in.MetadataServiceOptions + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineProviderConfig. +func (in *AWSMachineProviderConfig) DeepCopy() *AWSMachineProviderConfig { + if in == nil { + return nil + } + out := new(AWSMachineProviderConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AWSMachineProviderConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AWSMachineProviderConfigList) DeepCopyInto(out *AWSMachineProviderConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AWSMachineProviderConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineProviderConfigList. +func (in *AWSMachineProviderConfigList) DeepCopy() *AWSMachineProviderConfigList { + if in == nil { + return nil + } + out := new(AWSMachineProviderConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSMachineProviderStatus) DeepCopyInto(out *AWSMachineProviderStatus) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.InstanceID != nil { + in, out := &in.InstanceID, &out.InstanceID + *out = new(string) + **out = **in + } + if in.InstanceState != nil { + in, out := &in.InstanceState, &out.InstanceState + *out = new(string) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineProviderStatus. +func (in *AWSMachineProviderStatus) DeepCopy() *AWSMachineProviderStatus { + if in == nil { + return nil + } + out := new(AWSMachineProviderStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSResourceReference) DeepCopyInto(out *AWSResourceReference) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.ARN != nil { + in, out := &in.ARN, &out.ARN + *out = new(string) + **out = **in + } + if in.Filters != nil { + in, out := &in.Filters, &out.Filters + *out = make([]Filter, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSResourceReference. +func (in *AWSResourceReference) DeepCopy() *AWSResourceReference { + if in == nil { + return nil + } + out := new(AWSResourceReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AzureMachineProviderSpec) DeepCopyInto(out *AzureMachineProviderSpec) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.UserDataSecret != nil { + in, out := &in.UserDataSecret, &out.UserDataSecret + *out = new(v1.SecretReference) + **out = **in + } + if in.CredentialsSecret != nil { + in, out := &in.CredentialsSecret, &out.CredentialsSecret + *out = new(v1.SecretReference) + **out = **in + } + out.Image = in.Image + in.OSDisk.DeepCopyInto(&out.OSDisk) + if in.DataDisks != nil { + in, out := &in.DataDisks, &out.DataDisks + *out = make([]DataDisk, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ApplicationSecurityGroups != nil { + in, out := &in.ApplicationSecurityGroups, &out.ApplicationSecurityGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NatRule != nil { + in, out := &in.NatRule, &out.NatRule + *out = new(int64) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } + if in.SpotVMOptions != nil { + in, out := &in.SpotVMOptions, &out.SpotVMOptions + *out = new(SpotVMOptions) + (*in).DeepCopyInto(*out) + } + if in.SecurityProfile != nil { + in, out := &in.SecurityProfile, &out.SecurityProfile + *out = new(SecurityProfile) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureMachineProviderSpec. +func (in *AzureMachineProviderSpec) DeepCopy() *AzureMachineProviderSpec { + if in == nil { + return nil + } + out := new(AzureMachineProviderSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AzureMachineProviderSpec) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureMachineProviderStatus) DeepCopyInto(out *AzureMachineProviderStatus) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.VMID != nil { + in, out := &in.VMID, &out.VMID + *out = new(string) + **out = **in + } + if in.VMState != nil { + in, out := &in.VMState, &out.VMState + *out = new(AzureVMState) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureMachineProviderStatus. +func (in *AzureMachineProviderStatus) DeepCopy() *AzureMachineProviderStatus { + if in == nil { + return nil + } + out := new(AzureMachineProviderStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BlockDeviceMappingSpec) DeepCopyInto(out *BlockDeviceMappingSpec) { + *out = *in + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.EBS != nil { + in, out := &in.EBS, &out.EBS + *out = new(EBSBlockDeviceSpec) + (*in).DeepCopyInto(*out) + } + if in.NoDevice != nil { + in, out := &in.NoDevice, &out.NoDevice + *out = new(string) + **out = **in + } + if in.VirtualName != nil { + in, out := &in.VirtualName, &out.VirtualName + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlockDeviceMappingSpec. +func (in *BlockDeviceMappingSpec) DeepCopy() *BlockDeviceMappingSpec { + if in == nil { + return nil + } + out := new(BlockDeviceMappingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Condition) DeepCopyInto(out *Condition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition. +func (in *Condition) DeepCopy() *Condition { + if in == nil { + return nil + } + out := new(Condition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in Conditions) DeepCopyInto(out *Conditions) { + { + in := &in + *out = make(Conditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Conditions. +func (in Conditions) DeepCopy() Conditions { + if in == nil { + return nil + } + out := new(Conditions) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataDisk) DeepCopyInto(out *DataDisk) { + *out = *in + in.ManagedDisk.DeepCopyInto(&out.ManagedDisk) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataDisk. +func (in *DataDisk) DeepCopy() *DataDisk { + if in == nil { + return nil + } + out := new(DataDisk) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataDiskManagedDiskParameters) DeepCopyInto(out *DataDiskManagedDiskParameters) { + *out = *in + if in.DiskEncryptionSet != nil { + in, out := &in.DiskEncryptionSet, &out.DiskEncryptionSet + *out = new(DiskEncryptionSetParameters) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataDiskManagedDiskParameters. +func (in *DataDiskManagedDiskParameters) DeepCopy() *DataDiskManagedDiskParameters { + if in == nil { + return nil + } + out := new(DataDiskManagedDiskParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiskEncryptionSetParameters) DeepCopyInto(out *DiskEncryptionSetParameters) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskEncryptionSetParameters. 
+func (in *DiskEncryptionSetParameters) DeepCopy() *DiskEncryptionSetParameters { + if in == nil { + return nil + } + out := new(DiskEncryptionSetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiskSettings) DeepCopyInto(out *DiskSettings) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskSettings. +func (in *DiskSettings) DeepCopy() *DiskSettings { + if in == nil { + return nil + } + out := new(DiskSettings) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EBSBlockDeviceSpec) DeepCopyInto(out *EBSBlockDeviceSpec) { + *out = *in + if in.DeleteOnTermination != nil { + in, out := &in.DeleteOnTermination, &out.DeleteOnTermination + *out = new(bool) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + in.KMSKey.DeepCopyInto(&out.KMSKey) + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(int64) + **out = **in + } + if in.VolumeSize != nil { + in, out := &in.VolumeSize, &out.VolumeSize + *out = new(int64) + **out = **in + } + if in.VolumeType != nil { + in, out := &in.VolumeType, &out.VolumeType + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBSBlockDeviceSpec. +func (in *EBSBlockDeviceSpec) DeepCopy() *EBSBlockDeviceSpec { + if in == nil { + return nil + } + out := new(EBSBlockDeviceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Filter) DeepCopyInto(out *Filter) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Filter. +func (in *Filter) DeepCopy() *Filter { + if in == nil { + return nil + } + out := new(Filter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPDisk) DeepCopyInto(out *GCPDisk) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.EncryptionKey != nil { + in, out := &in.EncryptionKey, &out.EncryptionKey + *out = new(GCPEncryptionKeyReference) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPDisk. +func (in *GCPDisk) DeepCopy() *GCPDisk { + if in == nil { + return nil + } + out := new(GCPDisk) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPEncryptionKeyReference) DeepCopyInto(out *GCPEncryptionKeyReference) { + *out = *in + if in.KMSKey != nil { + in, out := &in.KMSKey, &out.KMSKey + *out = new(GCPKMSKeyReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPEncryptionKeyReference. 
+func (in *GCPEncryptionKeyReference) DeepCopy() *GCPEncryptionKeyReference { + if in == nil { + return nil + } + out := new(GCPEncryptionKeyReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPGPUConfig) DeepCopyInto(out *GCPGPUConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPGPUConfig. +func (in *GCPGPUConfig) DeepCopy() *GCPGPUConfig { + if in == nil { + return nil + } + out := new(GCPGPUConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPKMSKeyReference) DeepCopyInto(out *GCPKMSKeyReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPKMSKeyReference. +func (in *GCPKMSKeyReference) DeepCopy() *GCPKMSKeyReference { + if in == nil { + return nil + } + out := new(GCPKMSKeyReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPMachineProviderSpec) DeepCopyInto(out *GCPMachineProviderSpec) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.UserDataSecret != nil { + in, out := &in.UserDataSecret, &out.UserDataSecret + *out = new(v1.LocalObjectReference) + **out = **in + } + if in.CredentialsSecret != nil { + in, out := &in.CredentialsSecret, &out.CredentialsSecret + *out = new(v1.LocalObjectReference) + **out = **in + } + if in.Disks != nil { + in, out := &in.Disks, &out.Disks + *out = make([]*GCPDisk, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(GCPDisk) + (*in).DeepCopyInto(*out) + } + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = make([]*GCPMetadata, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(GCPMetadata) + (*in).DeepCopyInto(*out) + } + } + } + if in.NetworkInterfaces != nil { + in, out := &in.NetworkInterfaces, &out.NetworkInterfaces + *out = make([]*GCPNetworkInterface, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(GCPNetworkInterface) + **out = **in + } + } + } + if in.ServiceAccounts != nil { + in, out := &in.ServiceAccounts, &out.ServiceAccounts + *out = make([]GCPServiceAccount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.TargetPools != nil { + in, out := &in.TargetPools, &out.TargetPools + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.GPUs != nil { + in, out := &in.GPUs, &out.GPUs + *out = make([]GCPGPUConfig, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPMachineProviderSpec. 
+func (in *GCPMachineProviderSpec) DeepCopy() *GCPMachineProviderSpec { + if in == nil { + return nil + } + out := new(GCPMachineProviderSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GCPMachineProviderSpec) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPMachineProviderStatus) DeepCopyInto(out *GCPMachineProviderStatus) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.InstanceID != nil { + in, out := &in.InstanceID, &out.InstanceID + *out = new(string) + **out = **in + } + if in.InstanceState != nil { + in, out := &in.InstanceState, &out.InstanceState + *out = new(string) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPMachineProviderStatus. +func (in *GCPMachineProviderStatus) DeepCopy() *GCPMachineProviderStatus { + if in == nil { + return nil + } + out := new(GCPMachineProviderStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPMetadata) DeepCopyInto(out *GCPMetadata) { + *out = *in + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPMetadata. +func (in *GCPMetadata) DeepCopy() *GCPMetadata { + if in == nil { + return nil + } + out := new(GCPMetadata) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPNetworkInterface) DeepCopyInto(out *GCPNetworkInterface) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPNetworkInterface. +func (in *GCPNetworkInterface) DeepCopy() *GCPNetworkInterface { + if in == nil { + return nil + } + out := new(GCPNetworkInterface) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPServiceAccount) DeepCopyInto(out *GCPServiceAccount) { + *out = *in + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPServiceAccount. +func (in *GCPServiceAccount) DeepCopy() *GCPServiceAccount { + if in == nil { + return nil + } + out := new(GCPServiceAccount) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Image) DeepCopyInto(out *Image) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Image. 
+func (in *Image) DeepCopy() *Image { + if in == nil { + return nil + } + out := new(Image) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LastOperation) DeepCopyInto(out *LastOperation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.LastUpdated != nil { + in, out := &in.LastUpdated, &out.LastUpdated + *out = (*in).DeepCopy() + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LastOperation. +func (in *LastOperation) DeepCopy() *LastOperation { + if in == nil { + return nil + } + out := new(LastOperation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LifecycleHook) DeepCopyInto(out *LifecycleHook) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifecycleHook. +func (in *LifecycleHook) DeepCopy() *LifecycleHook { + if in == nil { + return nil + } + out := new(LifecycleHook) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LifecycleHooks) DeepCopyInto(out *LifecycleHooks) { + *out = *in + if in.PreDrain != nil { + in, out := &in.PreDrain, &out.PreDrain + *out = make([]LifecycleHook, len(*in)) + copy(*out, *in) + } + if in.PreTerminate != nil { + in, out := &in.PreTerminate, &out.PreTerminate + *out = make([]LifecycleHook, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifecycleHooks. +func (in *LifecycleHooks) DeepCopy() *LifecycleHooks { + if in == nil { + return nil + } + out := new(LifecycleHooks) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoadBalancerReference) DeepCopyInto(out *LoadBalancerReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerReference. +func (in *LoadBalancerReference) DeepCopy() *LoadBalancerReference { + if in == nil { + return nil + } + out := new(LoadBalancerReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Machine) DeepCopyInto(out *Machine) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Machine. +func (in *Machine) DeepCopy() *Machine { + if in == nil { + return nil + } + out := new(Machine) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *Machine) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineHealthCheck) DeepCopyInto(out *MachineHealthCheck) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineHealthCheck. +func (in *MachineHealthCheck) DeepCopy() *MachineHealthCheck { + if in == nil { + return nil + } + out := new(MachineHealthCheck) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MachineHealthCheck) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineHealthCheckList) DeepCopyInto(out *MachineHealthCheckList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MachineHealthCheck, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineHealthCheckList. +func (in *MachineHealthCheckList) DeepCopy() *MachineHealthCheckList { + if in == nil { + return nil + } + out := new(MachineHealthCheckList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MachineHealthCheckList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineHealthCheckSpec) DeepCopyInto(out *MachineHealthCheckSpec) { + *out = *in + in.Selector.DeepCopyInto(&out.Selector) + if in.UnhealthyConditions != nil { + in, out := &in.UnhealthyConditions, &out.UnhealthyConditions + *out = make([]UnhealthyCondition, len(*in)) + copy(*out, *in) + } + if in.MaxUnhealthy != nil { + in, out := &in.MaxUnhealthy, &out.MaxUnhealthy + *out = new(intstr.IntOrString) + **out = **in + } + if in.NodeStartupTimeout != nil { + in, out := &in.NodeStartupTimeout, &out.NodeStartupTimeout + *out = new(metav1.Duration) + **out = **in + } + if in.RemediationTemplate != nil { + in, out := &in.RemediationTemplate, &out.RemediationTemplate + *out = new(v1.ObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineHealthCheckSpec. +func (in *MachineHealthCheckSpec) DeepCopy() *MachineHealthCheckSpec { + if in == nil { + return nil + } + out := new(MachineHealthCheckSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MachineHealthCheckStatus) DeepCopyInto(out *MachineHealthCheckStatus) { + *out = *in + if in.ExpectedMachines != nil { + in, out := &in.ExpectedMachines, &out.ExpectedMachines + *out = new(int) + **out = **in + } + if in.CurrentHealthy != nil { + in, out := &in.CurrentHealthy, &out.CurrentHealthy + *out = new(int) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(Conditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineHealthCheckStatus. +func (in *MachineHealthCheckStatus) DeepCopy() *MachineHealthCheckStatus { + if in == nil { + return nil + } + out := new(MachineHealthCheckStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineList) DeepCopyInto(out *MachineList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Machine, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineList. +func (in *MachineList) DeepCopy() *MachineList { + if in == nil { + return nil + } + out := new(MachineList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MachineList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineSet) DeepCopyInto(out *MachineSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSet. +func (in *MachineSet) DeepCopy() *MachineSet { + if in == nil { + return nil + } + out := new(MachineSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MachineSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineSetList) DeepCopyInto(out *MachineSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MachineSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSetList. +func (in *MachineSetList) DeepCopy() *MachineSetList { + if in == nil { + return nil + } + out := new(MachineSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *MachineSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineSetSpec) DeepCopyInto(out *MachineSetSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + in.Selector.DeepCopyInto(&out.Selector) + in.Template.DeepCopyInto(&out.Template) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSetSpec. +func (in *MachineSetSpec) DeepCopy() *MachineSetSpec { + if in == nil { + return nil + } + out := new(MachineSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineSetStatus) DeepCopyInto(out *MachineSetStatus) { + *out = *in + if in.ErrorReason != nil { + in, out := &in.ErrorReason, &out.ErrorReason + *out = new(MachineSetStatusError) + **out = **in + } + if in.ErrorMessage != nil { + in, out := &in.ErrorMessage, &out.ErrorMessage + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSetStatus. +func (in *MachineSetStatus) DeepCopy() *MachineSetStatus { + if in == nil { + return nil + } + out := new(MachineSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineSpec) DeepCopyInto(out *MachineSpec) { + *out = *in + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.LifecycleHooks.DeepCopyInto(&out.LifecycleHooks) + if in.Taints != nil { + in, out := &in.Taints, &out.Taints + *out = make([]v1.Taint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.ProviderSpec.DeepCopyInto(&out.ProviderSpec) + if in.ProviderID != nil { + in, out := &in.ProviderID, &out.ProviderID + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSpec. +func (in *MachineSpec) DeepCopy() *MachineSpec { + if in == nil { + return nil + } + out := new(MachineSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MachineStatus) DeepCopyInto(out *MachineStatus) { + *out = *in + if in.NodeRef != nil { + in, out := &in.NodeRef, &out.NodeRef + *out = new(v1.ObjectReference) + **out = **in + } + if in.LastUpdated != nil { + in, out := &in.LastUpdated, &out.LastUpdated + *out = (*in).DeepCopy() + } + if in.ErrorReason != nil { + in, out := &in.ErrorReason, &out.ErrorReason + *out = new(MachineStatusError) + **out = **in + } + if in.ErrorMessage != nil { + in, out := &in.ErrorMessage, &out.ErrorMessage + *out = new(string) + **out = **in + } + if in.ProviderStatus != nil { + in, out := &in.ProviderStatus, &out.ProviderStatus + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]v1.NodeAddress, len(*in)) + copy(*out, *in) + } + if in.LastOperation != nil { + in, out := &in.LastOperation, &out.LastOperation + *out = new(LastOperation) + (*in).DeepCopyInto(*out) + } + if in.Phase != nil { + in, out := &in.Phase, &out.Phase + *out = new(string) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(Conditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineStatus. +func (in *MachineStatus) DeepCopy() *MachineStatus { + if in == nil { + return nil + } + out := new(MachineStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineTemplateSpec) DeepCopyInto(out *MachineTemplateSpec) { + *out = *in + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineTemplateSpec. +func (in *MachineTemplateSpec) DeepCopy() *MachineTemplateSpec { + if in == nil { + return nil + } + out := new(MachineTemplateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetadataServiceOptions) DeepCopyInto(out *MetadataServiceOptions) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetadataServiceOptions. +func (in *MetadataServiceOptions) DeepCopy() *MetadataServiceOptions { + if in == nil { + return nil + } + out := new(MetadataServiceOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkDeviceSpec) DeepCopyInto(out *NetworkDeviceSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkDeviceSpec. +func (in *NetworkDeviceSpec) DeepCopy() *NetworkDeviceSpec { + if in == nil { + return nil + } + out := new(NetworkDeviceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) { + *out = *in + if in.Devices != nil { + in, out := &in.Devices, &out.Devices + *out = make([]NetworkDeviceSpec, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSpec. 
+func (in *NetworkSpec) DeepCopy() *NetworkSpec { + if in == nil { + return nil + } + out := new(NetworkSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OSDisk) DeepCopyInto(out *OSDisk) { + *out = *in + in.ManagedDisk.DeepCopyInto(&out.ManagedDisk) + out.DiskSettings = in.DiskSettings + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSDisk. +func (in *OSDisk) DeepCopy() *OSDisk { + if in == nil { + return nil + } + out := new(OSDisk) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OSDiskManagedDiskParameters) DeepCopyInto(out *OSDiskManagedDiskParameters) { + *out = *in + if in.DiskEncryptionSet != nil { + in, out := &in.DiskEncryptionSet, &out.DiskEncryptionSet + *out = new(DiskEncryptionSetParameters) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSDiskManagedDiskParameters. +func (in *OSDiskManagedDiskParameters) DeepCopy() *OSDiskManagedDiskParameters { + if in == nil { + return nil + } + out := new(OSDiskManagedDiskParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectMeta) DeepCopyInto(out *ObjectMeta) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.OwnerReferences != nil { + in, out := &in.OwnerReferences, &out.OwnerReferences + *out = make([]metav1.OwnerReference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMeta. +func (in *ObjectMeta) DeepCopy() *ObjectMeta { + if in == nil { + return nil + } + out := new(ObjectMeta) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Placement) DeepCopyInto(out *Placement) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Placement. +func (in *Placement) DeepCopy() *Placement { + if in == nil { + return nil + } + out := new(Placement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProviderSpec) DeepCopyInto(out *ProviderSpec) { + *out = *in + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderSpec. +func (in *ProviderSpec) DeepCopy() *ProviderSpec { + if in == nil { + return nil + } + out := new(ProviderSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecurityProfile) DeepCopyInto(out *SecurityProfile) { + *out = *in + if in.EncryptionAtHost != nil { + in, out := &in.EncryptionAtHost, &out.EncryptionAtHost + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityProfile. +func (in *SecurityProfile) DeepCopy() *SecurityProfile { + if in == nil { + return nil + } + out := new(SecurityProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpotMarketOptions) DeepCopyInto(out *SpotMarketOptions) { + *out = *in + if in.MaxPrice != nil { + in, out := &in.MaxPrice, &out.MaxPrice + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotMarketOptions. +func (in *SpotMarketOptions) DeepCopy() *SpotMarketOptions { + if in == nil { + return nil + } + out := new(SpotMarketOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpotVMOptions) DeepCopyInto(out *SpotVMOptions) { + *out = *in + if in.MaxPrice != nil { + in, out := &in.MaxPrice, &out.MaxPrice + x := (*in).DeepCopy() + *out = &x + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotVMOptions. +func (in *SpotVMOptions) DeepCopy() *SpotVMOptions { + if in == nil { + return nil + } + out := new(SpotVMOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagSpecification) DeepCopyInto(out *TagSpecification) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagSpecification. +func (in *TagSpecification) DeepCopy() *TagSpecification { + if in == nil { + return nil + } + out := new(TagSpecification) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UnhealthyCondition) DeepCopyInto(out *UnhealthyCondition) { + *out = *in + out.Timeout = in.Timeout + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UnhealthyCondition. +func (in *UnhealthyCondition) DeepCopy() *UnhealthyCondition { + if in == nil { + return nil + } + out := new(UnhealthyCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VSphereMachineProviderSpec) DeepCopyInto(out *VSphereMachineProviderSpec) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.UserDataSecret != nil { + in, out := &in.UserDataSecret, &out.UserDataSecret + *out = new(v1.LocalObjectReference) + **out = **in + } + if in.CredentialsSecret != nil { + in, out := &in.CredentialsSecret, &out.CredentialsSecret + *out = new(v1.LocalObjectReference) + **out = **in + } + if in.Workspace != nil { + in, out := &in.Workspace, &out.Workspace + *out = new(Workspace) + **out = **in + } + in.Network.DeepCopyInto(&out.Network) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSphereMachineProviderSpec. 
+func (in *VSphereMachineProviderSpec) DeepCopy() *VSphereMachineProviderSpec { + if in == nil { + return nil + } + out := new(VSphereMachineProviderSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VSphereMachineProviderSpec) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VSphereMachineProviderStatus) DeepCopyInto(out *VSphereMachineProviderStatus) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.InstanceID != nil { + in, out := &in.InstanceID, &out.InstanceID + *out = new(string) + **out = **in + } + if in.InstanceState != nil { + in, out := &in.InstanceState, &out.InstanceState + *out = new(string) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSphereMachineProviderStatus. +func (in *VSphereMachineProviderStatus) DeepCopy() *VSphereMachineProviderStatus { + if in == nil { + return nil + } + out := new(VSphereMachineProviderStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Workspace) DeepCopyInto(out *Workspace) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Workspace. +func (in *Workspace) DeepCopy() *Workspace { + if in == nil { + return nil + } + out := new(Workspace) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go new file mode 100644 index 000000000..af2d5b884 --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,692 @@ +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_AWSMachineProviderConfig = map[string]string{ + "": "AWSMachineProviderConfig is the Schema for the awsmachineproviderconfigs API Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "ami": "AMI is the reference to the AMI from which to create the machine instance.", + "instanceType": "InstanceType is the type of instance to create. Example: m4.xlarge", + "tags": "Tags is the set of tags to add to apply to an instance, in addition to the ones added by default by the actuator. These tags are additive. 
The actuator will ensure these tags are present, but will not remove any other tags that may exist on the instance.", + "iamInstanceProfile": "IAMInstanceProfile is a reference to an IAM role to assign to the instance", + "userDataSecret": "UserDataSecret contains a local reference to a secret that contains the UserData to apply to the instance", + "credentialsSecret": "CredentialsSecret is a reference to the secret with AWS credentials. Otherwise, defaults to permissions provided by attached IAM role where the actuator is running.", + "keyName": "KeyName is the name of the KeyPair to use for SSH", + "deviceIndex": "DeviceIndex is the index of the device on the instance for the network interface attachment. Defaults to 0.", + "publicIp": "PublicIP specifies whether the instance should get a public IP. If not present, it should use the default of its subnet.", + "networkInterfaceType": "NetworkInterfaceType specifies the type of network interface to be used for the primary network interface. Valid values are \"ENA\", \"EFA\", and omitted, which means no opinion and the platform chooses a good default which may change over time. The current default value is \"ENA\". Please visit https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa.html to learn more about the AWS Elastic Fabric Adapter interface option.", + "securityGroups": "SecurityGroups is an array of references to security groups that should be applied to the instance.", + "subnet": "Subnet is a reference to the subnet to use for this instance", + "placement": "Placement specifies where to create the instance in AWS", + "loadBalancers": "LoadBalancers is the set of load balancers to which the new instance should be added once it is created.", + "blockDevices": "BlockDevices is the set of block device mapping associated to this instance, block device without a name will be used as a root device and only one device without a name is allowed https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html", + "spotMarketOptions": "SpotMarketOptions allows users to configure instances to be run using AWS Spot instances.", + "metadataServiceOptions": "MetadataServiceOptions allows users to configure instance metadata service interaction options. If nothing specified, default AWS IMDS settings will be applied. https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_InstanceMetadataOptionsRequest.html", +} + +func (AWSMachineProviderConfig) SwaggerDoc() map[string]string { + return map_AWSMachineProviderConfig +} + +var map_AWSMachineProviderConfigList = map[string]string{ + "": "AWSMachineProviderConfigList contains a list of AWSMachineProviderConfig Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", +} + +func (AWSMachineProviderConfigList) SwaggerDoc() map[string]string { + return map_AWSMachineProviderConfigList +} + +var map_AWSMachineProviderStatus = map[string]string{ + "": "AWSMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. It contains AWS-specific status information. 
Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "instanceId": "InstanceID is the instance ID of the machine created in AWS", + "instanceState": "InstanceState is the state of the AWS instance for this machine", + "conditions": "Conditions is a set of conditions associated with the Machine to indicate errors or other status", +} + +func (AWSMachineProviderStatus) SwaggerDoc() map[string]string { + return map_AWSMachineProviderStatus +} + +var map_AWSResourceReference = map[string]string{ + "": "AWSResourceReference is a reference to a specific AWS resource by ID, ARN, or filters. Only one of ID, ARN or Filters may be specified. Specifying more than one will result in a validation error.", + "id": "ID of resource", + "arn": "ARN of resource", + "filters": "Filters is a set of filters used to identify a resource", +} + +func (AWSResourceReference) SwaggerDoc() map[string]string { + return map_AWSResourceReference +} + +var map_BlockDeviceMappingSpec = map[string]string{ + "": "BlockDeviceMappingSpec describes a block device mapping", + "deviceName": "The device name exposed to the machine (for example, /dev/sdh or xvdh).", + "ebs": "Parameters used to automatically set up EBS volumes when the machine is launched.", + "noDevice": "Suppresses the specified device included in the block device mapping of the AMI.", + "virtualName": "The virtual device name (ephemeralN). Machine store volumes are numbered starting from 0. A machine type with 2 available machine store volumes can specify mappings for ephemeral0 and ephemeral1. The number of available machine store volumes depends on the machine type. After you connect to the machine, you must mount the volume.\n\nConstraints: For M3 machines, you must specify machine store volumes in the block device mapping for the machine. When you launch an M3 machine, we ignore any machine store volumes specified in the block device mapping for the AMI.", +} + +func (BlockDeviceMappingSpec) SwaggerDoc() map[string]string { + return map_BlockDeviceMappingSpec +} + +var map_EBSBlockDeviceSpec = map[string]string{ + "": "EBSBlockDeviceSpec describes a block device for an EBS volume. https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EbsBlockDevice", + "deleteOnTermination": "Indicates whether the EBS volume is deleted on machine termination.", + "encrypted": "Indicates whether the EBS volume is encrypted. Encrypted Amazon EBS volumes may only be attached to machines that support Amazon EBS encryption.", + "kmsKey": "Indicates the KMS key that should be used to encrypt the Amazon EBS volume.", + "iops": "The number of I/O operations per second (IOPS) that the volume supports. For io1, this represents the number of IOPS that are provisioned for the volume. For gp2, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting. For more information about General Purpose SSD baseline performance, I/O credits, and bursting, see Amazon EBS Volume Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) in the Amazon Elastic Compute Cloud User Guide.\n\nMinimal and maximal IOPS for io1 and gp2 are constrained.
Please, check https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html for precise boundaries for individual volumes.\n\nCondition: This parameter is required for requests to create io1 volumes; it is not used in requests to create gp2, st1, sc1, or standard volumes.", + "volumeSize": "The size of the volume, in GiB.\n\nConstraints: 1-16384 for General Purpose SSD (gp2), 4-16384 for Provisioned IOPS SSD (io1), 500-16384 for Throughput Optimized HDD (st1), 500-16384 for Cold HDD (sc1), and 1-1024 for Magnetic (standard) volumes. If you specify a snapshot, the volume size must be equal to or larger than the snapshot size.\n\nDefault: If you're creating the volume from a snapshot and don't specify a volume size, the default is the snapshot size.", + "volumeType": "The volume type: gp2, io1, st1, sc1, or standard. Default: standard", +} + +func (EBSBlockDeviceSpec) SwaggerDoc() map[string]string { + return map_EBSBlockDeviceSpec +} + +var map_Filter = map[string]string{ + "": "Filter is a filter used to identify an AWS resource", + "name": "Name of the filter. Filter names are case-sensitive.", + "values": "Values includes one or more filter values. Filter values are case-sensitive.", +} + +func (Filter) SwaggerDoc() map[string]string { + return map_Filter +} + +var map_LoadBalancerReference = map[string]string{ + "": "LoadBalancerReference is a reference to a load balancer on AWS.", +} + +func (LoadBalancerReference) SwaggerDoc() map[string]string { + return map_LoadBalancerReference +} + +var map_MetadataServiceOptions = map[string]string{ + "": "MetadataServiceOptions defines the options available to a user when configuring Instance Metadata Service (IMDS) Options.", + "authentication": "Authentication determines whether or not the host requires the use of authentication when interacting with the metadata service. When using authentication, this enforces v2 interaction method (IMDSv2) with the metadata service. When omitted, this means the user has no opinion and the value is left to the platform to choose a good default, which is subject to change over time. The current default is optional. At this point this field represents `HttpTokens` parameter from `InstanceMetadataOptionsRequest` structure in AWS EC2 API https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_InstanceMetadataOptionsRequest.html", +} + +func (MetadataServiceOptions) SwaggerDoc() map[string]string { + return map_MetadataServiceOptions +} + +var map_Placement = map[string]string{ + "": "Placement indicates where to create the instance in AWS", + "region": "Region is the region to use to create the instance", + "availabilityZone": "AvailabilityZone is the availability zone of the instance", + "tenancy": "Tenancy indicates if instance should run on shared or single-tenant hardware. There are supported 3 options: default, dedicated and host.", +} + +func (Placement) SwaggerDoc() map[string]string { + return map_Placement +} + +var map_SpotMarketOptions = map[string]string{ + "": "SpotMarketOptions defines the options available to a user when configuring Machines to run on Spot instances. 
Most users should provide an empty struct.", + "maxPrice": "The maximum price the user is willing to pay for their instances Default: On-Demand price", +} + +func (SpotMarketOptions) SwaggerDoc() map[string]string { + return map_SpotMarketOptions +} + +var map_TagSpecification = map[string]string{ + "": "TagSpecification is the name/value pair for a tag", + "name": "Name of the tag", + "value": "Value of the tag", +} + +func (TagSpecification) SwaggerDoc() map[string]string { + return map_TagSpecification +} + +var map_AzureMachineProviderSpec = map[string]string{ + "": "AzureMachineProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field for an Azure virtual machine. It is used by the Azure machine actuator to create a single Machine. Required parameters such as location that are not specified by this configuration, will be defaulted by the actuator. Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "userDataSecret": "UserDataSecret contains a local reference to a secret that contains the UserData to apply to the instance", + "credentialsSecret": "CredentialsSecret is a reference to the secret with Azure credentials.", + "location": "Location is the region to use to create the instance", + "vmSize": "VMSize is the size of the VM to create.", + "image": "Image is the OS image to use to create the instance.", + "osDisk": "OSDisk represents the parameters for creating the OS disk.", + "dataDisks": "DataDisk specifies the parameters that are used to add one or more data disks to the machine.", + "sshPublicKey": "SSHPublicKey is the public key to use to SSH to the virtual machine.", + "publicIP": "PublicIP if true a public IP will be used", + "tags": "Tags is a list of tags to apply to the machine.", + "securityGroup": "Network Security Group that needs to be attached to the machine's interface. No security group will be attached if empty.", + "applicationSecurityGroups": "Application Security Groups that need to be attached to the machine's interface. No application security groups will be attached if zero-length.", + "subnet": "Subnet to use for this instance", + "publicLoadBalancer": "PublicLoadBalancer to use for this instance", + "internalLoadBalancer": "InternalLoadBalancerName to use for this instance", + "natRule": "NatRule to set inbound NAT rule of the load balancer", + "managedIdentity": "ManagedIdentity to set managed identity name", + "vnet": "Vnet to set virtual network name", + "zone": "Availability Zone for the virtual machine. If nil, the virtual machine should be deployed to no zone", + "networkResourceGroup": "NetworkResourceGroup is the resource group for the virtual machine's network", + "resourceGroup": "ResourceGroup is the resource group for the virtual machine", + "spotVMOptions": "SpotVMOptions allows the ability to specify the Machine should use a Spot VM", + "securityProfile": "SecurityProfile specifies the Security profile settings for a virtual machine.", + "ultraSSDCapability": "UltraSSDCapability enables or disables Azure UltraSSD capability for a virtual machine. This can be used to allow/disallow binding of Azure UltraSSD to the Machine both as Data Disks or via Persistent Volumes. This Azure feature is subject to a specific scope and certain limitations. 
More information on this can be found in the official Azure documentation for Ultra Disks: (https://docs.microsoft.com/en-us/azure/virtual-machines/disks-enable-ultra-ssd?tabs=azure-portal#ga-scope-and-limitations).\n\nWhen omitted, if at least one Data Disk of type UltraSSD is specified, the platform will automatically enable the capability. If a Persistent Volume backed by an UltraSSD is bound to a Pod on the Machine, when this field is omitted, the platform will *not* automatically enable the capability (unless already enabled by the presence of an UltraSSD as Data Disk). This may manifest in the Pod being stuck in `ContainerCreating` phase. This defaulting behaviour may be subject to change in future.\n\nWhen set to \"Enabled\", if the capability is available for the Machine based on the scope and limitations described above, the capability will be set on the Machine. This will thus allow UltraSSD both as Data Disks and Persistent Volumes. If set to \"Enabled\" when the capability can't be available due to scope and limitations, the Machine will go into \"Failed\" state.\n\nWhen set to \"Disabled\", UltraSSDs will not be allowed either as Data Disks or as Persistent Volumes. In this case if any UltraSSDs are specified as Data Disks on a Machine, the Machine will go into a \"Failed\" state. If instead any UltraSSDs are backing the volumes (via Persistent Volumes) of any Pods scheduled on a Node which is backed by the Machine, the Pod may get stuck in `ContainerCreating` phase.", + "acceleratedNetworking": "AcceleratedNetworking enables or disables Azure accelerated networking feature. Set to false by default. If true, then this will depend on whether the requested VMSize is supported. If set to true with an unsupported VMSize, Azure will return an error.", + "availabilitySet": "AvailabilitySet specifies the availability set to use for this instance. Availability set should be precreated, before using this field.", +} + +func (AzureMachineProviderSpec) SwaggerDoc() map[string]string { + return map_AzureMachineProviderSpec +} + +var map_AzureMachineProviderStatus = map[string]string{ + "": "AzureMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. It contains Azure-specific status information. Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "vmId": "VMID is the ID of the virtual machine created in Azure.", + "vmState": "VMState is the provisioning state of the Azure virtual machine.", + "conditions": "Conditions is a set of conditions associated with the Machine to indicate errors or other status.", +} + +func (AzureMachineProviderStatus) SwaggerDoc() map[string]string { + return map_AzureMachineProviderStatus +} + +var map_DataDisk = map[string]string{ + "": "DataDisk specifies the parameters that are used to add one or more data disks to the machine. A Data Disk is a managed disk that's attached to a virtual machine to store application data. It differs from an OS Disk as it doesn't come with a pre-installed OS, and it cannot contain the boot volume. It is registered as SCSI drive and labeled with the chosen `lun`. e.g. for `lun: 0` the raw disk device will be available at `/dev/disk/azure/scsi1/lun0`.\n\nAs the Data Disk disk device is attached raw to the virtual machine, it will need to be partitioned, formatted with a filesystem and mounted, in order for it to be usable.
This can be done by creating a custom userdata Secret with custom Ignition configuration to achieve the desired initialization. At this stage the previously defined `lun` is to be used as the \"device\" key for referencing the raw disk device to be initialized. Once the custom userdata Secret has been created, it can be referenced in the Machine's `.providerSpec.userDataSecret`. For further guidance and examples, please refer to the official OpenShift docs.", + "nameSuffix": "NameSuffix is the suffix to be appended to the machine name to generate the disk name. Each disk name will be in format _. NameSuffix name must start and finish with an alphanumeric character and can only contain letters, numbers, underscores, periods or hyphens. The overall disk name must not exceed 80 chars in length.", + "diskSizeGB": "DiskSizeGB is the size in GB to assign to the data disk.", + "managedDisk": "ManagedDisk specifies the Managed Disk parameters for the data disk. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is a ManagedDisk with storageAccountType: \"Premium_LRS\" and diskEncryptionSet.id: \"Default\".", + "lun": "Lun Specifies the logical unit number of the data disk. This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM. This value is also needed for referencing the data disks devices within userdata to perform disk initialization through Ignition (e.g. partition/format/mount). The value must be between 0 and 63.", + "cachingType": "CachingType specifies the caching requirements. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is CachingTypeNone.", + "deletionPolicy": "DeletionPolicy specifies the data disk deletion policy upon Machine deletion. Possible values are \"Delete\",\"Detach\". When \"Delete\" is used the data disk is deleted when the Machine is deleted. When \"Detach\" is used the data disk is detached from the Machine and retained when the Machine is deleted.", +} + +func (DataDisk) SwaggerDoc() map[string]string { + return map_DataDisk +} + +var map_DataDiskManagedDiskParameters = map[string]string{ + "": "DataDiskManagedDiskParameters is the parameters of a DataDisk managed disk.", + "storageAccountType": "StorageAccountType is the storage account type to use. Possible values include \"Standard_LRS\", \"Premium_LRS\" and \"UltraSSD_LRS\".", + "diskEncryptionSet": "DiskEncryptionSet is the disk encryption set properties. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is a DiskEncryptionSet with id: \"Default\".", +} + +func (DataDiskManagedDiskParameters) SwaggerDoc() map[string]string { + return map_DataDiskManagedDiskParameters +} + +var map_DiskEncryptionSetParameters = map[string]string{ + "": "DiskEncryptionSetParameters is the disk encryption set properties", + "id": "ID is the disk encryption set ID Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is: \"Default\".", +} + +func (DiskEncryptionSetParameters) SwaggerDoc() map[string]string { + return map_DiskEncryptionSetParameters +} + +var map_DiskSettings = map[string]string{ + "": "DiskSettings describe ephemeral disk settings for the os disk.", + "ephemeralStorageLocation": "EphemeralStorageLocation enables ephemeral OS when set to 'Local'.
Possible values include: 'Local'. See https://docs.microsoft.com/en-us/azure/virtual-machines/ephemeral-os-disks for full details. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is that disks are saved to remote Azure storage.", +} + +func (DiskSettings) SwaggerDoc() map[string]string { + return map_DiskSettings +} + +var map_Image = map[string]string{ + "": "Image is a mirror of azure sdk compute.ImageReference", + "publisher": "Publisher is the name of the organization that created the image", + "offer": "Offer specifies the name of a group of related images created by the publisher. For example, UbuntuServer, WindowsServer", + "sku": "SKU specifies an instance of an offer, such as a major release of a distribution. For example, 18.04-LTS, 2019-Datacenter", + "version": "Version specifies the version of an image sku. The allowed formats are Major.Minor.Build or 'latest'. Major, Minor, and Build are decimal numbers. Specify 'latest' to use the latest version of an image available at deploy time. Even if you use 'latest', the VM image will not automatically update after deploy time even if a new version becomes available.", + "resourceID": "ResourceID specifies an image to use by ID", + "type": "Type identifies the source of the image and related information, such as purchase plans. Valid values are \"ID\", \"MarketplaceWithPlan\", \"MarketplaceNoPlan\", and omitted, which means no opinion and the platform chooses a good default which may change over time. Currently that default is \"MarketplaceNoPlan\" if publisher data is supplied, or \"ID\" if not. For more information about purchase plans, see: https://docs.microsoft.com/en-us/azure/virtual-machines/linux/cli-ps-findimage#check-the-purchase-plan-information", +} + +func (Image) SwaggerDoc() map[string]string { + return map_Image +} + +var map_OSDisk = map[string]string{ + "osType": "OSType is the operating system type of the OS disk. Possible values include \"Linux\" and \"Windows\".", + "managedDisk": "ManagedDisk specifies the Managed Disk parameters for the OS disk.", + "diskSizeGB": "DiskSizeGB is the size in GB to assign to the data disk.", + "diskSettings": "DiskSettings describe ephemeral disk settings for the os disk.", + "cachingType": "CachingType specifies the caching requirements. Possible values include: 'None', 'ReadOnly', 'ReadWrite'. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is `None`.", +} + +func (OSDisk) SwaggerDoc() map[string]string { + return map_OSDisk +} + +var map_OSDiskManagedDiskParameters = map[string]string{ + "": "OSDiskManagedDiskParameters is the parameters of a OSDisk managed disk.", + "storageAccountType": "StorageAccountType is the storage account type to use. Possible values include \"Standard_LRS\", \"Premium_LRS\".", + "diskEncryptionSet": "DiskEncryptionSet is the disk encryption set properties", +} + +func (OSDiskManagedDiskParameters) SwaggerDoc() map[string]string { + return map_OSDiskManagedDiskParameters +} + +var map_SecurityProfile = map[string]string{ + "": "SecurityProfile specifies the Security profile settings for a virtual machine or virtual machine scale set.", + "encryptionAtHost": "This field indicates whether Host Encryption should be enabled or disabled for a virtual machine or virtual machine scale set. 
Default is disabled.", +} + +func (SecurityProfile) SwaggerDoc() map[string]string { + return map_SecurityProfile +} + +var map_SpotVMOptions = map[string]string{ + "": "SpotVMOptions defines the options relevant to running the Machine on Spot VMs", + "maxPrice": "MaxPrice defines the maximum price the user is willing to pay for Spot VM instances", +} + +func (SpotVMOptions) SwaggerDoc() map[string]string { + return map_SpotVMOptions +} + +var map_GCPDisk = map[string]string{ + "": "GCPDisk describes disks for GCP.", + "autoDelete": "AutoDelete indicates if the disk will be auto-deleted when the instance is deleted (default false).", + "boot": "Boot indicates if this is a boot disk (default false).", + "sizeGb": "SizeGB is the size of the disk (in GB).", + "type": "Type is the type of the disk (eg: pd-standard).", + "image": "Image is the source image to create this disk.", + "labels": "Labels list of labels to apply to the disk.", + "encryptionKey": "EncryptionKey is the customer-supplied encryption key of the disk.", +} + +func (GCPDisk) SwaggerDoc() map[string]string { + return map_GCPDisk +} + +var map_GCPEncryptionKeyReference = map[string]string{ + "": "GCPEncryptionKeyReference describes the encryptionKey to use for a disk's encryption.", + "kmsKey": "KMSKeyName is the reference KMS key, in the format", + "kmsKeyServiceAccount": "KMSKeyServiceAccount is the service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used. See https://cloud.google.com/compute/docs/access/service-accounts#compute_engine_service_account for details on the default service account.", +} + +func (GCPEncryptionKeyReference) SwaggerDoc() map[string]string { + return map_GCPEncryptionKeyReference +} + +var map_GCPGPUConfig = map[string]string{ + "": "GCPGPUConfig describes type and count of GPUs attached to the instance on GCP.", + "count": "Count is the number of GPUs to be attached to an instance.", + "type": "Type is the type of GPU to be attached to an instance. Supported GPU types are: nvidia-tesla-k80, nvidia-tesla-p100, nvidia-tesla-v100, nvidia-tesla-p4, nvidia-tesla-t4", +} + +func (GCPGPUConfig) SwaggerDoc() map[string]string { + return map_GCPGPUConfig +} + +var map_GCPKMSKeyReference = map[string]string{ + "": "GCPKMSKeyReference gathers required fields for looking up a GCP KMS Key", + "name": "Name is the name of the customer managed encryption key to be used for the disk encryption.", + "keyRing": "KeyRing is the name of the KMS Key Ring which the KMS Key belongs to.", + "projectID": "ProjectID is the ID of the Project in which the KMS Key Ring exists. Defaults to the VM ProjectID if not set.", + "location": "Location is the GCP location in which the Key Ring exists.", +} + +func (GCPKMSKeyReference) SwaggerDoc() map[string]string { + return map_GCPKMSKeyReference +} + +var map_GCPMachineProviderSpec = map[string]string{ + "": "GCPMachineProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field for an GCP virtual machine. It is used by the GCP machine actuator to create a single Machine. 
Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "userDataSecret": "UserDataSecret contains a local reference to a secret that contains the UserData to apply to the instance", + "credentialsSecret": "CredentialsSecret is a reference to the secret with GCP credentials.", + "canIPForward": "CanIPForward Allows this instance to send and receive packets with non-matching destination or source IPs. This is required if you plan to use this instance to forward routes.", + "deletionProtection": "DeletionProtection whether the resource should be protected against deletion.", + "disks": "Disks is a list of disks to be attached to the VM.", + "labels": "Labels list of labels to apply to the VM.", + "gcpMetadata": "Metadata key/value pairs to apply to the VM.", + "networkInterfaces": "NetworkInterfaces is a list of network interfaces to be attached to the VM.", + "serviceAccounts": "ServiceAccounts is a list of GCP service accounts to be used by the VM.", + "tags": "Tags list of tags to apply to the VM.", + "targetPools": "TargetPools are used for network TCP/UDP load balancing. A target pool references member instances, an associated legacy HttpHealthCheck resource, and, optionally, a backup target pool", + "machineType": "MachineType is the machine type to use for the VM.", + "region": "Region is the region in which the GCP machine provider will create the VM.", + "zone": "Zone is the zone in which the GCP machine provider will create the VM.", + "projectID": "ProjectID is the project in which the GCP machine provider will create the VM.", + "gpus": "GPUs is a list of GPUs to be attached to the VM.", + "preemptible": "Preemptible indicates if created instance is preemptible.", + "onHostMaintenance": "OnHostMaintenance determines the behavior when a maintenance event occurs that might cause the instance to reboot. This is required to be set to \"Terminate\" if you want to provision machine with attached GPUs. Otherwise, allowed values are \"Migrate\" and \"Terminate\". If omitted, the platform chooses a default, which is subject to change over time, currently that default is \"Migrate\".", + "restartPolicy": "RestartPolicy determines the behavior when an instance crashes or the underlying infrastructure provider stops the instance as part of a maintenance event (default \"Always\"). Cannot be \"Always\" with preemptible instances. Otherwise, allowed values are \"Always\" and \"Never\". If omitted, the platform chooses a default, which is subject to change over time, currently that default is \"Always\". RestartPolicy represents AutomaticRestart in GCP compute api", +} + +func (GCPMachineProviderSpec) SwaggerDoc() map[string]string { + return map_GCPMachineProviderSpec +} + +var map_GCPMachineProviderStatus = map[string]string{ + "": "GCPMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. It contains GCP-specific status information. 
Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "instanceId": "InstanceID is the ID of the instance in GCP", + "instanceState": "InstanceState is the provisioning state of the GCP Instance.", + "conditions": "Conditions is a set of conditions associated with the Machine to indicate errors or other status", +} + +func (GCPMachineProviderStatus) SwaggerDoc() map[string]string { + return map_GCPMachineProviderStatus +} + +var map_GCPMetadata = map[string]string{ + "": "GCPMetadata describes metadata for GCP.", + "key": "Key is the metadata key.", + "value": "Value is the metadata value.", +} + +func (GCPMetadata) SwaggerDoc() map[string]string { + return map_GCPMetadata +} + +var map_GCPNetworkInterface = map[string]string{ + "": "GCPNetworkInterface describes network interfaces for GCP", + "publicIP": "PublicIP indicates if true a public IP will be used", + "network": "Network is the network name.", + "projectID": "ProjectID is the project in which the GCP machine provider will create the VM.", + "subnetwork": "Subnetwork is the subnetwork name.", +} + +func (GCPNetworkInterface) SwaggerDoc() map[string]string { + return map_GCPNetworkInterface +} + +var map_GCPServiceAccount = map[string]string{ + "": "GCPServiceAccount describes service accounts for GCP.", + "email": "Email is the service account email.", + "scopes": "Scopes list of scopes to be assigned to the service account.", +} + +func (GCPServiceAccount) SwaggerDoc() map[string]string { + return map_GCPServiceAccount +} + +var map_LastOperation = map[string]string{ + "": "LastOperation represents the detail of the last performed operation on the MachineObject.", + "description": "Description is the human-readable description of the last operation.", + "lastUpdated": "LastUpdated is the timestamp at which LastOperation API was last-updated.", + "state": "State is the current status of the last performed operation. E.g. Processing, Failed, Successful etc", + "type": "Type is the type of operation which was last performed. E.g. Create, Delete, Update etc", +} + +func (LastOperation) SwaggerDoc() map[string]string { + return map_LastOperation +} + +var map_LifecycleHook = map[string]string{ + "": "LifecycleHook represents a single instance of a lifecycle hook", + "name": "Name defines a unique name for the lifecycle hook. The name should be unique and descriptive, ideally 1-3 words, in CamelCase or it may be namespaced, eg. foo.example.com/CamelCase. Names must be unique and should only be managed by a single entity.", + "owner": "Owner defines the owner of the lifecycle hook. This should be descriptive enough so that users can identify who/what is responsible for blocking the lifecycle. This could be the name of a controller (e.g. clusteroperator/etcd) or an administrator managing the hook.", +} + +func (LifecycleHook) SwaggerDoc() map[string]string { + return map_LifecycleHook +} + +var map_LifecycleHooks = map[string]string{ + "": "LifecycleHooks allow users to pause operations on the machine at certain predefined points within the machine lifecycle.", + "preDrain": "PreDrain hooks prevent the machine from being drained. This also blocks further lifecycle events, such as termination.", + "preTerminate": "PreTerminate hooks prevent the machine from being terminated.
PreTerminate hooks will be actioned after the Machine has been drained.", +} + +func (LifecycleHooks) SwaggerDoc() map[string]string { + return map_LifecycleHooks +} + +var map_Machine = map[string]string{ + "": "Machine is the Schema for the machines API Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", +} + +func (Machine) SwaggerDoc() map[string]string { + return map_Machine +} + +var map_MachineList = map[string]string{ + "": "MachineList contains a list of Machine Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", +} + +func (MachineList) SwaggerDoc() map[string]string { + return map_MachineList +} + +var map_MachineSpec = map[string]string{ + "": "MachineSpec defines the desired state of Machine", + "metadata": "ObjectMeta will autopopulate the Node created. Use this to indicate what labels, annotations, name prefix, etc., should be used when creating the Node.", + "lifecycleHooks": "LifecycleHooks allow users to pause operations on the machine at certain predefined points within the machine lifecycle.", + "taints": "The list of the taints to be applied to the corresponding Node in additive manner. This list will not overwrite any other taints added to the Node on an ongoing basis by other entities. These taints should be actively reconciled (e.g. if you ask the machine controller to apply a taint and then manually remove the taint the machine controller will put it back) but not have the machine controller remove any taints", + "providerSpec": "ProviderSpec details Provider-specific configuration to use during node creation.", + "providerID": "ProviderID is the identification ID of the machine provided by the provider. This field must match the provider ID as seen on the node object corresponding to this machine. This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler with cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a generic out-of-tree provider for autoscaler, this field is required by autoscaler to be able to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver and then a comparison is done to find out unregistered machines and are marked for delete. This field will be set by the actuators and consumed by higher level entities like autoscaler that will be interfacing with cluster-api as generic provider.", +} + +func (MachineSpec) SwaggerDoc() map[string]string { + return map_MachineSpec +} + +var map_MachineStatus = map[string]string{ + "": "MachineStatus defines the observed state of Machine", + "nodeRef": "NodeRef will point to the corresponding Node if it exists.", + "lastUpdated": "LastUpdated identifies when this status was last observed.", + "errorReason": "ErrorReason will be set in the event that there is a terminal problem reconciling the Machine and will contain a succinct value suitable for machine interpretation.\n\nThis field should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the Machine's spec or the configuration of the controller, and that manual intervention is required.
Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the controller, or the responsible controller itself being critically misconfigured.\n\nAny transient errors that occur during the reconciliation of Machines can be added as events to the Machine object and/or logged in the controller's output.", + "errorMessage": "ErrorMessage will be set in the event that there is a terminal problem reconciling the Machine and will contain a more verbose string suitable for logging and human consumption.\n\nThis field should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the Machine's spec or the configuration of the controller, and that manual intervention is required. Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the controller, or the responsible controller itself being critically misconfigured.\n\nAny transient errors that occur during the reconciliation of Machines can be added as events to the Machine object and/or logged in the controller's output.", + "providerStatus": "ProviderStatus details a Provider-specific status. It is recommended that providers maintain their own versioned API types that should be serialized/deserialized from this field.", + "addresses": "Addresses is a list of addresses assigned to the machine. Queried from cloud provider, if available.", + "lastOperation": "LastOperation describes the last-operation performed by the machine-controller. This API should be useful as a history in terms of the latest operation performed on the specific machine. It should also convey the state of the latest-operation for example if it is still on-going, failed or completed successfully.", + "phase": "Phase represents the current phase of machine actuation. One of: Failed, Provisioning, Provisioned, Running, Deleting", + "conditions": "Conditions defines the current state of the Machine", +} + +func (MachineStatus) SwaggerDoc() map[string]string { + return map_MachineStatus +} + +var map_MachineHealthCheck = map[string]string{ + "": "MachineHealthCheck is the Schema for the machinehealthchecks API Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "spec": "Specification of machine health check policy", + "status": "Most recently observed status of MachineHealthCheck resource", +} + +func (MachineHealthCheck) SwaggerDoc() map[string]string { + return map_MachineHealthCheck +} + +var map_MachineHealthCheckList = map[string]string{ + "": "MachineHealthCheckList contains a list of MachineHealthCheck Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", +} + +func (MachineHealthCheckList) SwaggerDoc() map[string]string { + return map_MachineHealthCheckList +} + +var map_MachineHealthCheckSpec = map[string]string{ + "": "MachineHealthCheckSpec defines the desired state of MachineHealthCheck", + "selector": "Label selector to match machines whose health will be exercised. Note: An empty selector will match all machines.", + "unhealthyConditions": "UnhealthyConditions contains a list of the conditions that determine whether a node is considered unhealthy. The conditions are combined in a logical OR, i.e. 
if any of the conditions is met, the node is unhealthy.", + "maxUnhealthy": "Any further remediation is only allowed if at most \"MaxUnhealthy\" machines selected by \"selector\" are not healthy. Expects either a positive integer value or a percentage value. Percentage values must be positive whole numbers and are capped at 100%. Both 0 and 0% are valid and will block all remediation.", + "nodeStartupTimeout": "Machines older than this duration without a node will be considered to have failed and will be remediated. To prevent Machines without Nodes from being removed, disable startup checks by setting this value explicitly to \"0\". Expects an unsigned duration string of decimal numbers each with optional fraction and a unit suffix, eg \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\".", + "remediationTemplate": "RemediationTemplate is a reference to a remediation template provided by an infrastructure provider.\n\nThis field is completely optional, when filled, the MachineHealthCheck controller creates a new object from the template referenced and hands off remediation of the machine to a controller that lives outside of Machine API Operator.", +} + +func (MachineHealthCheckSpec) SwaggerDoc() map[string]string { + return map_MachineHealthCheckSpec +} + +var map_MachineHealthCheckStatus = map[string]string{ + "": "MachineHealthCheckStatus defines the observed state of MachineHealthCheck", + "expectedMachines": "total number of machines counted by this machine health check", + "currentHealthy": "total number of healthy machines counted by this machine health check", + "remediationsAllowed": "RemediationsAllowed is the number of further remediations allowed by this machine health check before maxUnhealthy short circuiting will be applied", + "conditions": "Conditions defines the current state of the MachineHealthCheck", +} + +func (MachineHealthCheckStatus) SwaggerDoc() map[string]string { + return map_MachineHealthCheckStatus +} + +var map_UnhealthyCondition = map[string]string{ + "": "UnhealthyCondition represents a Node condition type and value with a timeout specified as a duration. When the named condition has been in the given status for at least the timeout value, a node is considered unhealthy.", + "timeout": "Expects an unsigned duration string of decimal numbers each with optional fraction and a unit suffix, eg \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\".", +} + +func (UnhealthyCondition) SwaggerDoc() map[string]string { + return map_UnhealthyCondition +} + +var map_MachineSet = map[string]string{ + "": "MachineSet ensures that a specified number of machine replicas are running at any given time. Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", +} + +func (MachineSet) SwaggerDoc() map[string]string { + return map_MachineSet +} + +var map_MachineSetList = map[string]string{ + "": "MachineSetList contains a list of MachineSet Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", +} + +func (MachineSetList) SwaggerDoc() map[string]string { + return map_MachineSetList +} + +var map_MachineSetSpec = map[string]string{ + "": "MachineSetSpec defines the desired state of MachineSet", + "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified.
Defaults to 1.", + "minReadySeconds": "MinReadySeconds is the minimum number of seconds for which a newly created machine should be ready. Defaults to 0 (machine will be considered available as soon as it is ready)", + "deletePolicy": "DeletePolicy defines the policy used to identify nodes to delete when downscaling. Defaults to \"Random\". Valid values are \"Random\", \"Newest\", \"Oldest\"", + "selector": "Selector is a label query over machines that should match the replica count. Label keys and values that must match in order to be controlled by this MachineSet. It must match the machine template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "template": "Template is the object that describes the machine that will be created if insufficient replicas are detected.", +} + +func (MachineSetSpec) SwaggerDoc() map[string]string { + return map_MachineSetSpec +} + +var map_MachineSetStatus = map[string]string{ + "": "MachineSetStatus defines the observed state of MachineSet", + "replicas": "Replicas is the most recently observed number of replicas.", + "fullyLabeledReplicas": "The number of replicas that have labels matching the labels of the machine template of the MachineSet.", + "readyReplicas": "The number of ready replicas for this MachineSet. A machine is considered ready when the node has been created and is \"Ready\".", + "availableReplicas": "The number of available replicas (ready for at least minReadySeconds) for this MachineSet.", + "observedGeneration": "ObservedGeneration reflects the generation of the most recently observed MachineSet.", + "errorReason": "In the event that there is a terminal problem reconciling the replicas, both ErrorReason and ErrorMessage will be set. ErrorReason will be populated with a succinct value suitable for machine interpretation, while ErrorMessage will contain a more verbose string suitable for logging and human consumption.\n\nThese fields should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the MachineTemplate's spec or the configuration of the machine controller, and that manual intervention is required. Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the machine controller, or the responsible machine controller itself being critically misconfigured.\n\nAny transient errors that occur during the reconciliation of Machines can be added as events to the MachineSet object and/or logged in the controller's output.", +} + +func (MachineSetStatus) SwaggerDoc() map[string]string { + return map_MachineSetStatus +} + +var map_MachineTemplateSpec = map[string]string{ + "": "MachineTemplateSpec describes the data needed to create a Machine from a template", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of the machine.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (MachineTemplateSpec) SwaggerDoc() map[string]string { + return map_MachineTemplateSpec +} + +var map_Condition = map[string]string{ + "": "Condition defines an observation of a Machine API resource operational state.", + "type": "Type of condition in CamelCase or in foo.example.com/CamelCase. Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important.", + "status": "Status of the condition, one of True, False, Unknown.", + "severity": "Severity provides an explicit classification of Reason code, so the users or machines can immediately understand the current situation and act accordingly. The Severity field MUST be set only when Status=False.", + "lastTransitionTime": "Last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.", + "reason": "The reason for the condition's last transition in CamelCase. The specific API may choose whether or not this field is considered a guaranteed API. This field may not be empty.", + "message": "A human readable message indicating details about the transition. This field may be empty.", +} + +func (Condition) SwaggerDoc() map[string]string { + return map_Condition +} + +var map_ObjectMeta = map[string]string{ + "": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. This is a copy of customizable fields from metav1.ObjectMeta.\n\nObjectMeta is embedded in `Machine.Spec`, `MachineDeployment.Template` and `MachineSet.Template`, which are not top-level Kubernetes objects. Given that metav1.ObjectMeta has lots of special cases and read-only fields which end up in the generated CRD validation, having it as a subset simplifies the API and some issues that can impact user experience.\n\nDuring the [upgrade to controller-tools@v2](https://github.com/kubernetes-sigs/cluster-api/pull/1054) for v1alpha2, we noticed a failure would occur running Cluster API test suite against the new CRDs, specifically `spec.metadata.creationTimestamp in body must be of type string: \"null\"`. The investigation showed that `controller-tools@v2` behaves differently than its previous version when handling types from [metav1](k8s.io/apimachinery/pkg/apis/meta/v1) package.\n\nIn more details, we found that embedded (non-top level) types that embedded `metav1.ObjectMeta` had validation properties, including for `creationTimestamp` (metav1.Time). The `metav1.Time` type specifies a custom json marshaller that, when IsZero() is true, returns `null` which breaks validation because the field isn't marked as nullable.\n\nIn future versions, controller-tools@v2 might allow overriding the type and validation for embedded types. When that happens, this hack should be revisited.", + "name": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/identifiers#names", + "generateName": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency", + "namespace": "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces", + "labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels", + "annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations", + "ownerReferences": "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.", +} + +func (ObjectMeta) SwaggerDoc() map[string]string { + return map_ObjectMeta +} + +var map_ProviderSpec = map[string]string{ + "": "ProviderSpec defines the configuration to use during node creation.", + "value": "Value is an inlined, serialized representation of the resource configuration. 
It is recommended that providers maintain their own versioned API types that should be serialized/deserialized from this field, akin to component config.", +} + +func (ProviderSpec) SwaggerDoc() map[string]string { + return map_ProviderSpec +} + +var map_NetworkDeviceSpec = map[string]string{ + "": "NetworkDeviceSpec defines the network configuration for a virtual machine's network device.", + "networkName": "NetworkName is the name of the vSphere network to which the device will be connected.", +} + +func (NetworkDeviceSpec) SwaggerDoc() map[string]string { + return map_NetworkDeviceSpec +} + +var map_NetworkSpec = map[string]string{ + "": "NetworkSpec defines the virtual machine's network configuration.", + "devices": "Devices defines the virtual machine's network interfaces.", +} + +func (NetworkSpec) SwaggerDoc() map[string]string { + return map_NetworkSpec +} + +var map_VSphereMachineProviderSpec = map[string]string{ + "": "VSphereMachineProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field for a vSphere virtual machine. It is used by the vSphere machine actuator to create a single Machine. Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "userDataSecret": "UserDataSecret contains a local reference to a secret that contains the UserData to apply to the instance", + "credentialsSecret": "CredentialsSecret is a reference to the secret with vSphere credentials.", + "template": "Template is the name, inventory path, or instance UUID of the template used to clone new machines.", + "workspace": "Workspace describes the workspace to use for the machine.", + "network": "Network is the network configuration for this machine's VM.", + "numCPUs": "NumCPUs is the number of virtual processors in a virtual machine. Defaults to the analogue property value in the template from which this machine is cloned.", + "numCoresPerSocket": "NumCoresPerSocket is the number of cores among which to distribute CPUs in this virtual machine. Defaults to the analogue property value in the template from which this machine is cloned.", + "memoryMiB": "MemoryMiB is the size of a virtual machine's memory, in MiB. Defaults to the analogue property value in the template from which this machine is cloned.", + "diskGiB": "DiskGiB is the size of a virtual machine's disk, in GiB. Defaults to the analogue property value in the template from which this machine is cloned.", + "snapshot": "Snapshot is the name of the snapshot from which the VM was cloned", + "cloneMode": "CloneMode specifies the type of clone operation. The LinkedClone mode is only supported for templates that have at least one snapshot. If the template has no snapshots, then CloneMode defaults to FullClone. When LinkedClone mode is enabled the DiskGiB field is ignored as it is not possible to expand disks of linked clones. Defaults to LinkedClone, but fails gracefully to FullClone if the source of the clone operation has no snapshots.", +} + +func (VSphereMachineProviderSpec) SwaggerDoc() map[string]string { + return map_VSphereMachineProviderSpec +} + +var map_VSphereMachineProviderStatus = map[string]string{ + "": "VSphereMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. It contains VSphere-specific status information.
Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "instanceId": "InstanceID is the ID of the instance in VSphere", + "instanceState": "InstanceState is the provisioning state of the VSphere Instance.", + "conditions": "Conditions is a set of conditions associated with the Machine to indicate errors or other status", + "taskRef": "TaskRef is a managed object reference to a Task related to the machine. This value is set automatically at runtime and should not be set or modified by users.", +} + +func (VSphereMachineProviderStatus) SwaggerDoc() map[string]string { + return map_VSphereMachineProviderStatus +} + +var map_Workspace = map[string]string{ + "": "WorkspaceConfig defines a workspace configuration for the vSphere cloud provider.", + "server": "Server is the IP address or FQDN of the vSphere endpoint.", + "datacenter": "Datacenter is the datacenter in which VMs are created/located.", + "folder": "Folder is the folder in which VMs are created/located.", + "datastore": "Datastore is the datastore in which VMs are created/located.", + "resourcePool": "ResourcePool is the resource pool in which VMs are created/located.", +} + +func (Workspace) SwaggerDoc() map[string]string { + return map_Workspace +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/cluster-api-actuator-pkg/LICENSE b/vendor/github.com/openshift/cluster-api-actuator-pkg/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/openshift/cluster-api-actuator-pkg/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/framework/clusterautoscalers.go b/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/framework/clusterautoscalers.go new file mode 100644 index 000000000..baad1f215 --- /dev/null +++ b/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/framework/clusterautoscalers.go @@ -0,0 +1,21 @@ +package framework + +import ( + "context" + "fmt" + + caov1 "github.com/openshift/cluster-autoscaler-operator/pkg/apis/autoscaling/v1" + runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" +) + +// GetClusterAutoscaler gets a ClusterAutoscaler by its name from the default machine API namespace. 
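+// Illustrative usage (editorial sketch, not part of the vendored source; assumes a
+// configured controller-runtime client and that a ClusterAutoscaler named "default" exists):
+//   ca, err := GetClusterAutoscaler(client, "default")
+//   // ca.Spec can then be inspected or asserted on in a test.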
+func GetClusterAutoscaler(client runtimeclient.Client, name string) (*caov1.ClusterAutoscaler, error) { + clusterAutoscaler := &caov1.ClusterAutoscaler{} + key := runtimeclient.ObjectKey{Namespace: MachineAPINamespace, Name: name} + + if err := client.Get(context.Background(), key, clusterAutoscaler); err != nil { + return nil, fmt.Errorf("error querying api for ClusterAutoscaler object: %w", err) + } + + return clusterAutoscaler, nil +} diff --git a/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/framework/daemonset.go b/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/framework/daemonset.go new file mode 100644 index 000000000..099112710 --- /dev/null +++ b/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/framework/daemonset.go @@ -0,0 +1,87 @@ +package framework + +import ( + "context" + "fmt" + + kappsapi "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// GetDaemonset gets a DaemonSet object by name and namespace. +func GetDaemonset(c client.Client, name, namespace string) (*kappsapi.DaemonSet, error) { + key := types.NamespacedName{ + Namespace: namespace, + Name: name, + } + d := &kappsapi.DaemonSet{} + + if err := wait.PollImmediate(RetryMedium, WaitShort, func() (bool, error) { + if err := c.Get(context.TODO(), key, d); err != nil { + klog.Errorf("Error querying api for DaemonSet object %q: %v, retrying...", name, err) + return false, nil + } + return true, nil + }); err != nil { + return nil, fmt.Errorf("error getting DaemonSet %q: %v", name, err) + } + return d, nil +} + +// DeleteDaemonset deletes the specified DaemonSet +func DeleteDaemonset(c client.Client, daemonset *kappsapi.DaemonSet) error { + return wait.PollImmediate(RetryMedium, WaitShort, func() (bool, error) { + if err := c.Delete(context.TODO(), daemonset); err != nil { + klog.Errorf("error querying api for DaemonSet object %q: %v, retrying...", daemonset.Name, err) + return false, nil + } + return true, nil + }) +} + +// UpdateDaemonset updates the specified DaemonSet +func UpdateDaemonset(c client.Client, name, namespace string, updated *kappsapi.DaemonSet) error { + return wait.PollImmediate(RetryMedium, WaitMedium, func() (bool, error) { + d, err := GetDaemonset(c, name, namespace) + if err != nil { + klog.Errorf("Error getting DaemonSet: %v", err) + return false, nil + } + if err := c.Patch(context.TODO(), d, client.MergeFrom(updated)); err != nil { + klog.Errorf("error patching DaemonSet object %q: %v, retrying...", name, err) + return false, nil + } + return true, nil + }) +} + +// IsDaemonsetAvailable returns true if the DaemonSet has one or more available replicas +func IsDaemonsetAvailable(c client.Client, name, namespace string) bool { + if err := wait.PollImmediate(RetryMedium, WaitLong, func() (bool, error) { + d, err := GetDaemonset(c, name, namespace) + if err != nil { + klog.Errorf("Error getting DaemonSet: %v", err) + return false, nil + } + if d.Status.NumberAvailable == 0 { + klog.Errorf("DaemonSet %q is not available. Status: %s", + d.Name, daemonsetInfo(d)) + return false, nil + } + klog.Infof("DaemonSet %q is available.
Status: %s", + d.Name, daemonsetInfo(d)) + return true, nil + }); err != nil { + klog.Errorf("Error checking IsDaemonsetAvailable: %v", err) + return false + } + return true +} + +func daemonsetInfo(d *kappsapi.DaemonSet) string { + return fmt.Sprintf("(ready: %d, available: %d, unavailable: %d)", + d.Status.NumberReady, d.Status.NumberAvailable, d.Status.NumberUnavailable) +} diff --git a/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/framework/deployment.go b/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/framework/deployment.go new file mode 100644 index 000000000..69736fdaf --- /dev/null +++ b/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/framework/deployment.go @@ -0,0 +1,115 @@ +package framework + +import ( + "context" + "fmt" + "reflect" + + kappsapi "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// GetDeployment gets deployment object by name and namespace. +func GetDeployment(c client.Client, name, namespace string) (*kappsapi.Deployment, error) { + key := types.NamespacedName{ + Namespace: namespace, + Name: name, + } + d := &kappsapi.Deployment{} + + if err := wait.PollImmediate(RetryShort, WaitShort, func() (bool, error) { + if err := c.Get(context.TODO(), key, d); err != nil { + klog.Errorf("Error querying api for Deployment object %q: %v, retrying...", name, err) + return false, nil + } + return true, nil + }); err != nil { + return nil, fmt.Errorf("error getting deployment %q: %v", name, err) + } + return d, nil +} + +// DeleteDeployment deletes the specified deployment +func DeleteDeployment(c client.Client, deployment *kappsapi.Deployment) error { + return wait.PollImmediate(RetryShort, WaitShort, func() (bool, error) { + if err := c.Delete(context.TODO(), deployment); err != nil { + klog.Errorf("error querying api for deployment object %q: %v, retrying...", deployment.Name, err) + return false, nil + } + return true, nil + }) +} + +// UpdateDeployment updates the specified deployment +func UpdateDeployment(c client.Client, name, namespace string, updated *kappsapi.Deployment) error { + return wait.PollImmediate(RetryShort, WaitMedium, func() (bool, error) { + d, err := GetDeployment(c, name, namespace) + if err != nil { + klog.Errorf("Error getting deployment: %v", err) + return false, nil + } + if err := c.Patch(context.TODO(), d, client.MergeFrom(updated)); err != nil { + klog.Errorf("error patching deployment object %q: %v, retrying...", name, err) + return false, nil + } + return true, nil + }) +} + +// IsDeploymentAvailable returns true if the deployment has one or more availabe replicas +func IsDeploymentAvailable(c client.Client, name, namespace string) bool { + if err := wait.PollImmediate(RetryShort, WaitLong, func() (bool, error) { + d, err := GetDeployment(c, name, namespace) + if err != nil { + klog.Errorf("Error getting deployment: %v", err) + return false, nil + } + if d.Status.AvailableReplicas < 1 { + klog.Errorf("Deployment %q is not available. Status: %s", + d.Name, deploymentInfo(d)) + return false, nil + } + klog.Infof("Deployment %q is available. 
Status: %s", + d.Name, deploymentInfo(d)) + return true, nil + }); err != nil { + klog.Errorf("Error checking isDeploymentAvailable: %v", err) + return false + } + return true +} + +// IsDeploymentSynced returns true if provided deployment spec matched one found on cluster +func IsDeploymentSynced(c client.Client, dep *kappsapi.Deployment, name, namespace string) bool { + d, err := GetDeployment(c, name, namespace) + if err != nil { + klog.Errorf("Error getting deployment: %v", err) + return false + } + if !reflect.DeepEqual(d.Spec, dep.Spec) { + klog.Errorf("Deployment %q is not updated. Spec is not equal to: %v", + d.Name, dep.Spec) + return false + } + klog.Infof("Deployment %q is updated. Spec is matched", d.Name) + return true +} + +// DeploymentHasContainer returns true if the deployment has container with the specified name +func DeploymentHasContainer(deployment *kappsapi.Deployment, containerName string) bool { + for _, container := range deployment.Spec.Template.Spec.Containers { + if container.Name == containerName { + return true + } + } + return false +} + +func deploymentInfo(d *kappsapi.Deployment) string { + return fmt.Sprintf("(replicas: %d, updated: %d, ready: %d, available: %d, unavailable: %d)", + d.Status.Replicas, d.Status.UpdatedReplicas, d.Status.ReadyReplicas, + d.Status.AvailableReplicas, d.Status.UnavailableReplicas) +} diff --git a/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/framework/framework.go b/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/framework/framework.go new file mode 100644 index 000000000..b277ad695 --- /dev/null +++ b/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/framework/framework.go @@ -0,0 +1,184 @@ +package framework + +import ( + "context" + "errors" + "time" + + configv1 "github.com/openshift/api/config/v1" + cov1helpers "github.com/openshift/library-go/pkg/config/clusteroperator/v1helpers" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + "k8s.io/klog" + runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/config" +) + +// Various constants used by E2E tests. +const ( + PollNodesReadyTimeout = 10 * time.Minute + ClusterKey = "machine.openshift.io/cluster-api-cluster" + MachineSetKey = "machine.openshift.io/cluster-api-machineset" + MachineAPINamespace = "openshift-machine-api" + GlobalInfrastuctureName = "cluster" + WorkerNodeRoleLabel = "node-role.kubernetes.io/worker" + RetryShort = 1 * time.Second + RetryMedium = 5 * time.Second + // DefaultMachineSetReplicas is the default number of replicas of a machineset + // if MachineSet.Spec.Replicas field is set to nil + DefaultMachineSetReplicas = 0 + MachinePhaseRunning = "Running" + MachinePhaseFailed = "Failed" + MachineRoleLabel = "machine.openshift.io/cluster-api-machine-role" + MachineTypeLabel = "machine.openshift.io/cluster-api-machine-type" + MachineAnnotationKey = "machine.openshift.io/machine" +) + +var ( + WaitShort = 1 * time.Minute + WaitMedium = 3 * time.Minute + WaitLong = 15 * time.Minute + WaitOverLong = 30 * time.Minute +) + +// GetInfrastructure fetches the global cluster infrastructure object. 
+func GetInfrastructure(c runtimeclient.Client) (*configv1.Infrastructure, error) { + infra := &configv1.Infrastructure{} + infraName := runtimeclient.ObjectKey{ + Name: GlobalInfrastuctureName, + } + + if err := c.Get(context.Background(), infraName, infra); err != nil { + return nil, err + } + + return infra, nil +} + +var platform configv1.PlatformType + +// GetPlatform fetches the PlatformType from the infrastructure object. +// Caches value after first successful retrieval. +func GetPlatform(c runtimeclient.Client) (configv1.PlatformType, error) { + // platform won't change during test run and might be cached + if platform != "" { + return platform, nil + } + infra, err := GetInfrastructure(c) + if err != nil { + return "", err + } + if infra.Status.PlatformStatus == nil { + return "", errors.New("platform status is not populated in infrastructure object") + } + platform = infra.Status.PlatformStatus.Type + return platform, nil +} + +// LoadClient returns a new controller-runtime client. +func LoadClient() (runtimeclient.Client, error) { + cfg, err := config.GetConfig() + if err != nil { + return nil, err + } + + return runtimeclient.New(cfg, runtimeclient.Options{}) +} + +// LoadClientset returns a new Kubernetes Clientset. +func LoadClientset() (*kubernetes.Clientset, error) { + cfg, err := config.GetConfig() + if err != nil { + return nil, err + } + + return kubernetes.NewForConfig(cfg) +} + +func WaitForStatusAvailableShort(client runtimeclient.Client, name string) bool { + return expectStatusAvailableIn(client, name, WaitShort) +} + +func WaitForStatusAvailableMedium(client runtimeclient.Client, name string) bool { + return expectStatusAvailableIn(client, name, WaitMedium) +} + +func WaitForStatusAvailableOverLong(client runtimeclient.Client, name string) bool { + return expectStatusAvailableIn(client, name, WaitOverLong) +} + +func expectStatusAvailableIn(client runtimeclient.Client, name string, timeout time.Duration) bool { + key := types.NamespacedName{ + Name: name, + } + clusterOperator := &configv1.ClusterOperator{} + + if err := wait.PollImmediate(RetryMedium, timeout, func() (bool, error) { + if err := client.Get(context.TODO(), key, clusterOperator); err != nil { + klog.Errorf("error querying api for OperatorStatus object: %v, retrying...", err) + return false, nil + } + if cov1helpers.IsStatusConditionFalse(clusterOperator.Status.Conditions, configv1.OperatorAvailable) { + klog.Errorf("Condition: %q is false", configv1.OperatorAvailable) + return false, nil + } + if cov1helpers.IsStatusConditionTrue(clusterOperator.Status.Conditions, configv1.OperatorProgressing) { + klog.Errorf("Condition: %q is true", configv1.OperatorProgressing) + return false, nil + } + if cov1helpers.IsStatusConditionTrue(clusterOperator.Status.Conditions, configv1.OperatorDegraded) { + klog.Errorf("Condition: %q is true", configv1.OperatorDegraded) + return false, nil + } + return true, nil + }); err != nil { + klog.Errorf("Error checking isStatusAvailable: %v", err) + return false + } + return true +} + +func WaitForValidatingWebhook(client runtimeclient.Client, name string) bool { + key := types.NamespacedName{Name: name} + webhook := &admissionregistrationv1.ValidatingWebhookConfiguration{} + + if err := wait.PollImmediate(RetryShort, WaitShort, func() (bool, error) { + if err := client.Get(context.TODO(), key, webhook); err != nil { + klog.Errorf("error querying api for ValidatingWebhookConfiguration: %v, retrying...", err) + return false, nil + } + + return true, nil + }); err != nil { + 
klog.Errorf("Error waiting for ValidatingWebhookConfiguration: %v", err) + return false + } + + return true +} + +// WaitForEvent expects to find the given event +func WaitForEvent(c runtimeclient.Client, kind, name, reason string) error { + return wait.PollImmediate(RetryMedium, WaitMedium, func() (bool, error) { + eventList := corev1.EventList{} + if err := c.List(context.Background(), &eventList); err != nil { + klog.Errorf("error querying api for eventList object: %v, retrying...", err) + return false, nil + } + + for _, event := range eventList.Items { + if event.Reason != reason || + event.InvolvedObject.Kind != kind || + event.InvolvedObject.Name != name { + continue + } + + return true, nil + } + + return false, nil + }) +} diff --git a/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/framework/jobs.go b/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/framework/jobs.go new file mode 100644 index 000000000..6f500a429 --- /dev/null +++ b/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/framework/jobs.go @@ -0,0 +1,63 @@ +package framework + +import ( + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/pointer" +) + +func NewWorkLoad(njobs int32, memoryRequest resource.Quantity, workloadJobName string, + testLabel string, nodeSelector string, podLabel string) *batchv1.Job { + job := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: workloadJobName, + Namespace: "default", + Labels: map[string]string{testLabel: ""}, + }, + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: workloadJobName, + Image: "busybox", + Command: []string{ + "sleep", + "86400", // 1 day + }, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + "memory": memoryRequest, + "cpu": resource.MustParse("500m"), + }, + }, + }, + }, + RestartPolicy: corev1.RestartPolicy("Never"), + Tolerations: []corev1.Toleration{ + { + Key: "kubemark", + Operator: corev1.TolerationOpExists, + }, + }, + }, + }, + BackoffLimit: pointer.Int32Ptr(4), + Completions: pointer.Int32Ptr(njobs), + Parallelism: pointer.Int32Ptr(njobs), + }, + } + if nodeSelector != "" { + job.Spec.Template.Spec.NodeSelector = map[string]string{ + nodeSelector: "", + } + } + if podLabel != "" { + job.Spec.Template.ObjectMeta.Labels = map[string]string{ + podLabel: "", + } + } + return job +} diff --git a/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/framework/machinehealthcheck.go b/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/framework/machinehealthcheck.go new file mode 100644 index 000000000..8bae1ebfc --- /dev/null +++ b/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/framework/machinehealthcheck.go @@ -0,0 +1,50 @@ +package framework + +import ( + "context" + + machinev1 "github.com/openshift/api/machine/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// MachineHealthCheckParams represents the parameters for creating a +// new MachineHealthCheck resource for use in tests. +type MachineHealthCheckParams struct { + Name string + Labels map[string]string + Conditions []machinev1.UnhealthyCondition + MaxUnhealthy *int +} + +// CreateMHC creates a new MachineHealthCheck resource. 
+func CreateMHC(c client.Client, params MachineHealthCheckParams) (*machinev1.MachineHealthCheck, error) { + mhc := &machinev1.MachineHealthCheck{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "machine.openshift.io/v1beta1", + Kind: "MachineHealthCheck", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: params.Name, + Namespace: MachineAPINamespace, + }, + Spec: machinev1.MachineHealthCheckSpec{ + Selector: metav1.LabelSelector{ + MatchLabels: params.Labels, + }, + UnhealthyConditions: params.Conditions, + }, + } + + if params.MaxUnhealthy != nil { + maxUnhealthy := intstr.FromInt(*params.MaxUnhealthy) + mhc.Spec.MaxUnhealthy = &maxUnhealthy + } + + if err := c.Create(context.Background(), mhc); err != nil { + return nil, err + } + + return mhc, nil +} diff --git a/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/framework/machines.go b/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/framework/machines.go new file mode 100644 index 000000000..dd63bd18c --- /dev/null +++ b/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/framework/machines.go @@ -0,0 +1,159 @@ +package framework + +import ( + "context" + "fmt" + "time" + + . "github.com/onsi/gomega" + + machinev1 "github.com/openshift/api/machine/v1beta1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/cache" + "k8s.io/klog" + "sigs.k8s.io/controller-runtime/pkg/client" + runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" +) + +// FilterMachines returns a slice of only those Machines in the input that are +// in the requested phase. +func FilterMachines(machines []*machinev1.Machine, phase string) []*machinev1.Machine { + var result []*machinev1.Machine + + for i, m := range machines { + if m.Status.Phase != nil && *m.Status.Phase == phase { + result = append(result, machines[i]) + } + } + + return result +} + +// FilterRunningMachines returns a slice of only those Machines in the input +// that are in the "Running" phase. +func FilterRunningMachines(machines []*machinev1.Machine) []*machinev1.Machine { + return FilterMachines(machines, MachinePhaseRunning) +} + +// GetMachine gets a machine by its name from the default machine API namespace. +func GetMachine(c client.Client, name string) (*machinev1.Machine, error) { + machine := &machinev1.Machine{} + key := client.ObjectKey{Namespace: MachineAPINamespace, Name: name} + + if err := c.Get(context.Background(), key, machine); err != nil { + return nil, fmt.Errorf("error querying api for machine object: %w", err) + } + + return machine, nil +} + +// MachinesPresent searches for each machine provided in the `machines` argument in the given `existingMachines` list +// and returns true when all of them are found +func MachinesPresent(existingMachines []*machinev1.Machine, machines ...*machinev1.Machine) bool { + if len(existingMachines) < len(machines) { + return false + } + existingMachinesMap := map[types.UID]struct{}{} + for _, existing := range existingMachines { + existingMachinesMap[existing.UID] = struct{}{} + } + for _, machine := range machines { + if _, found := existingMachinesMap[machine.UID]; !found { + return false + } + } + return true +} + +// GetMachines gets a list of machines from the default machine API namespace. +// Optionally, labels may be used to constrain the listed machines.
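+// Illustrative usage (editorial sketch, not part of the vendored source; the selector
+// below is hypothetical):
+//   selector := &metav1.LabelSelector{MatchLabels: map[string]string{MachineRoleLabel: "worker"}}
+//   workers, err := GetMachines(client, selector)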
+func GetMachines(client runtimeclient.Client, selectors ...*metav1.LabelSelector) ([]*machinev1.Machine, error) { + machineList := &machinev1.MachineList{} + + listOpts := append([]runtimeclient.ListOption{}, + runtimeclient.InNamespace(MachineAPINamespace), + ) + + for _, selector := range selectors { + s, err := metav1.LabelSelectorAsSelector(selector) + if err != nil { + return nil, err + } + + listOpts = append(listOpts, + runtimeclient.MatchingLabelsSelector{Selector: s}, + ) + } + + if err := client.List(context.Background(), machineList, listOpts...); err != nil { + return nil, fmt.Errorf("error querying api for machineList object: %w", err) + } + + var machines []*machinev1.Machine + + for i := range machineList.Items { + machines = append(machines, &machineList.Items[i]) + } + + return machines, nil +} + +// GetMachineFromNode returns the Machine associated with the given node. +func GetMachineFromNode(client runtimeclient.Client, node *corev1.Node) (*machinev1.Machine, error) { + machineNamespaceKey, ok := node.Annotations[MachineAnnotationKey] + if !ok { + return nil, fmt.Errorf("node %q does not have a MachineAnnotationKey %q", + node.Name, MachineAnnotationKey) + } + namespace, machineName, err := cache.SplitMetaNamespaceKey(machineNamespaceKey) + if err != nil { + return nil, fmt.Errorf("machine annotation format is incorrect %v: %v", + machineNamespaceKey, err) + } + + if namespace != MachineAPINamespace { + return nil, fmt.Errorf("Machine %q is forbidden to live outside of default %v namespace", + machineNamespaceKey, MachineAPINamespace) + } + + machine, err := GetMachine(client, machineName) + if err != nil { + return nil, fmt.Errorf("error querying api for machine object: %w", err) + } + + return machine, nil +} + +// DeleteMachines deletes the specified machines and returns an error on failure. +func DeleteMachines(client runtimeclient.Client, machines ...*machinev1.Machine) error { + return wait.PollImmediate(RetryShort, time.Minute, func() (bool, error) { + for _, machine := range machines { + if err := client.Delete(context.TODO(), machine); err != nil { + klog.Errorf("Error querying api for machine object %q: %v, retrying...", machine.Name, err) + return false, err + } + } + return true, nil + }) +} + +// WaitForMachinesDeleted polls until the given Machines are not found. +func WaitForMachinesDeleted(c client.Client, machines ...*machinev1.Machine) { + Eventually(func() bool { + for _, m := range machines { + err := c.Get(context.Background(), client.ObjectKey{ + Name: m.GetName(), + Namespace: m.GetNamespace(), + }, &machinev1.Machine{}) + if !apierrors.IsNotFound(err) { + return false // Not deleted, or other error. + } + } + + return true // Everything was deleted. + }, WaitLong, RetryMedium).Should(BeTrue()) +} diff --git a/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/framework/machinesets.go b/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/framework/machinesets.go new file mode 100644 index 000000000..24f992e50 --- /dev/null +++ b/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/framework/machinesets.go @@ -0,0 +1,398 @@ +package framework + +import ( + "context" + "fmt" + + "github.com/google/uuid" + . 
"github.com/onsi/gomega" + + machinev1 "github.com/openshift/api/machine/v1beta1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/scale" + "k8s.io/klog" + "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/client" + runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + "sigs.k8s.io/controller-runtime/pkg/client/config" +) + +// MachineSetParams represents the parameters for creating a new MachineSet +// resource for use in tests. +type MachineSetParams struct { + Name string + Replicas int32 + Labels map[string]string + ProviderSpec *machinev1.ProviderSpec +} + +const ( + machineAPIGroup = "machine.openshift.io" +) + +func BuildMachineSetParams(client runtimeclient.Client, replicas int) MachineSetParams { + // Get the current workers MachineSets so we can copy a ProviderSpec + // from one to use with our new dedicated MachineSet. + workers, err := GetWorkerMachineSets(client) + Expect(err).ToNot(HaveOccurred()) + + providerSpec := workers[0].Spec.Template.Spec.ProviderSpec.DeepCopy() + clusterName := workers[0].Spec.Template.Labels[ClusterKey] + + clusterInfra, err := GetInfrastructure(client) + Expect(err).NotTo(HaveOccurred()) + Expect(clusterInfra.Status.InfrastructureName).ShouldNot(BeEmpty()) + + uid, err := uuid.NewUUID() + Expect(err).NotTo(HaveOccurred()) + + return MachineSetParams{ + Name: clusterInfra.Status.InfrastructureName, + Replicas: int32(replicas), + ProviderSpec: providerSpec, + Labels: map[string]string{ + "e2e.openshift.io": uid.String(), + ClusterKey: clusterName, + }, + } +} + +// CreateMachineSet creates a new MachineSet resource. +func CreateMachineSet(c client.Client, params MachineSetParams) (*machinev1.MachineSet, error) { + ms := &machinev1.MachineSet{ + TypeMeta: metav1.TypeMeta{ + Kind: "MachineSet", + APIVersion: "machine.openshift.io/v1beta1", + }, + ObjectMeta: metav1.ObjectMeta{ + GenerateName: params.Name, + Namespace: MachineAPINamespace, + Labels: params.Labels, + }, + Spec: machinev1.MachineSetSpec{ + Selector: metav1.LabelSelector{ + MatchLabels: params.Labels, + }, + Template: machinev1.MachineTemplateSpec{ + ObjectMeta: machinev1.ObjectMeta{ + Labels: params.Labels, + }, + Spec: machinev1.MachineSpec{ + ObjectMeta: machinev1.ObjectMeta{ + Labels: params.Labels, + }, + ProviderSpec: *params.ProviderSpec, + }, + }, + Replicas: pointer.Int32Ptr(params.Replicas), + }, + } + + if err := c.Create(context.Background(), ms); err != nil { + return nil, err + } + + return ms, nil +} + +// GetMachineSets gets a list of machinesets from the default machine API namespace. +// Optionaly, labels may be used to constrain listed machinesets. 
+func GetMachineSets(client runtimeclient.Client, selectors ...*metav1.LabelSelector) ([]*machinev1.MachineSet, error) { + machineSetList := &machinev1.MachineSetList{} + + listOpts := append([]runtimeclient.ListOption{}, + runtimeclient.InNamespace(MachineAPINamespace), + ) + + for _, selector := range selectors { + s, err := metav1.LabelSelectorAsSelector(selector) + if err != nil { + return nil, err + } + + listOpts = append(listOpts, + runtimeclient.MatchingLabelsSelector{Selector: s}, + ) + } + + if err := client.List(context.Background(), machineSetList, listOpts...); err != nil { + return nil, fmt.Errorf("error querying api for machineSetList object: %w", err) + } + + machineSets := []*machinev1.MachineSet{} + for _, ms := range machineSetList.Items { + machineSet := ms + machineSets = append(machineSets, &machineSet) + } + + return machineSets, nil +} + +// GetMachineSet gets a machineset by its name from the default machine API namespace. +func GetMachineSet(client runtimeclient.Client, name string) (*machinev1.MachineSet, error) { + machineSet := &machinev1.MachineSet{} + key := runtimeclient.ObjectKey{Namespace: MachineAPINamespace, Name: name} + + if err := client.Get(context.Background(), key, machineSet); err != nil { + return nil, fmt.Errorf("error querying api for machineSet object: %w", err) + } + + return machineSet, nil +} + +// GetWorkerMachineSets returns the MachineSets that label their Machines with +// the "worker" role. +func GetWorkerMachineSets(client runtimeclient.Client) ([]*machinev1.MachineSet, error) { + machineSets := &machinev1.MachineSetList{} + + if err := client.List(context.Background(), machineSets); err != nil { + return nil, err + } + + var result []*machinev1.MachineSet + + // The OpenShift installer does not label MachinSets with a type or role, + // but the Machines themselves are labled as such via the template., so we + // can reach into the template and check the lables there. + for i, ms := range machineSets.Items { + labels := ms.Spec.Template.ObjectMeta.Labels + + if labels == nil { + continue + } + + if labels[MachineRoleLabel] == "worker" { + result = append(result, &machineSets.Items[i]) + } + } + + if len(result) < 1 { + return nil, fmt.Errorf("no worker MachineSets found") + } + + return result, nil +} + +// GetMachinesFromMachineSet returns an array of machines owned by a given machineSet +func GetMachinesFromMachineSet(client runtimeclient.Client, machineSet *machinev1.MachineSet) ([]*machinev1.Machine, error) { + machines, err := GetMachines(client) + if err != nil { + return nil, fmt.Errorf("error getting machines: %w", err) + } + var machinesForSet []*machinev1.Machine + for key := range machines { + if metav1.IsControlledBy(machines[key], machineSet) { + machinesForSet = append(machinesForSet, machines[key]) + } + } + return machinesForSet, nil +} + +// NewMachineSet returns a new MachineSet object. 
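+// Unlike CreateMachineSet, it only builds the object in memory; nothing is created on the cluster.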
+func NewMachineSet( + clusterName, namespace, name string, + selectorLabels map[string]string, + templateLabels map[string]string, + providerSpec *machinev1.ProviderSpec, + replicas int32, +) *machinev1.MachineSet { + ms := machinev1.MachineSet{ + TypeMeta: metav1.TypeMeta{ + Kind: "MachineSet", + APIVersion: "machine.openshift.io/v1beta1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: map[string]string{ + ClusterKey: clusterName, + }, + }, + Spec: machinev1.MachineSetSpec{ + Selector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + ClusterKey: clusterName, + MachineSetKey: name, + }, + }, + Template: machinev1.MachineTemplateSpec{ + ObjectMeta: machinev1.ObjectMeta{ + Labels: map[string]string{ + ClusterKey: clusterName, + MachineSetKey: name, + }, + }, + Spec: machinev1.MachineSpec{ + ProviderSpec: *providerSpec.DeepCopy(), + }, + }, + Replicas: pointer.Int32Ptr(replicas), + }, + } + + // Copy additional labels but do not overwrite those that + // already exist. + for k, v := range selectorLabels { + if _, exists := ms.Spec.Selector.MatchLabels[k]; !exists { + ms.Spec.Selector.MatchLabels[k] = v + } + } + for k, v := range templateLabels { + if _, exists := ms.Spec.Template.ObjectMeta.Labels[k]; !exists { + ms.Spec.Template.ObjectMeta.Labels[k] = v + } + } + + return &ms +} + +// ScaleMachineSet scales a machineSet with a given name to the given number of replicas +func ScaleMachineSet(name string, replicas int) error { + scaleClient, err := getScaleClient() + if err != nil { + return fmt.Errorf("error calling getScaleClient %w", err) + } + + scale, err := scaleClient.Scales(MachineAPINamespace).Get(context.Background(), schema.GroupResource{Group: machineAPIGroup, Resource: "MachineSet"}, name, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("error calling scaleClient.Scales get: %w", err) + } + + scaleUpdate := scale.DeepCopy() + scaleUpdate.Spec.Replicas = int32(replicas) + _, err = scaleClient.Scales(MachineAPINamespace).Update(context.Background(), schema.GroupResource{Group: machineAPIGroup, Resource: "MachineSet"}, scaleUpdate, metav1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("error calling scaleClient.Scales update: %w", err) + } + return nil +} + +// getScaleClient returns a ScalesGetter object to manipulate scale subresources +func getScaleClient() (scale.ScalesGetter, error) { + cfg, err := config.GetConfig() + if err != nil { + return nil, fmt.Errorf("error getting config %w", err) + } + mapper, err := apiutil.NewDiscoveryRESTMapper(cfg) + if err != nil { + return nil, fmt.Errorf("error calling NewDiscoveryRESTMapper %w", err) + } + + discovery := discovery.NewDiscoveryClientForConfigOrDie(cfg) + scaleKindResolver := scale.NewDiscoveryScaleKindResolver(discovery) + scaleClient, err := scale.NewForConfig(cfg, mapper, dynamic.LegacyAPIPathResolverFunc, scaleKindResolver) + if err != nil { + return nil, fmt.Errorf("error calling building scale client %w", err) + } + return scaleClient, nil +} + +// WaitForMachineSet waits for the all Machines belonging to the named +// MachineSet to enter the "Running" phase, and for all nodes belonging to those +// Machines to be ready. If a Machine is detected in "Failed" phase, the test +// will exit early. 
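+// The wait runs through Gomega's Eventually with the WaitOverLong timeout and RetryMedium
+// interval, so a Gomega fail handler must be registered by the caller.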
+func WaitForMachineSet(c client.Client, name string) { + machineSet, err := GetMachineSet(c, name) + Expect(err).ToNot(HaveOccurred()) + + Eventually(func() error { + machines, err := GetMachinesFromMachineSet(c, machineSet) + if err != nil { + return err + } + + replicas := pointer.Int32PtrDerefOr(machineSet.Spec.Replicas, 0) + + if len(machines) != int(replicas) { + return fmt.Errorf("%q: found %d Machines, but MachineSet has %d replicas", + name, len(machines), int(replicas)) + } + + failed := FilterMachines(machines, MachinePhaseFailed) + if len(failed) > 0 { + // if there are failed machines, print them out before we exit + klog.Errorf("found %d Machines in failed phase: ", len(failed)) + for _, m := range failed { + reason := "failureReason not present in Machine.status" + if m.Status.ErrorReason != nil { + reason = string(*m.Status.ErrorReason) + } + message := "failureMessage not present in Machine.status" + if m.Status.ErrorMessage != nil { + message = string(*m.Status.ErrorMessage) + } + klog.Errorf("Failed machine: %s, Reason: %s, Message: %s", m.Name, reason, message) + } + } + Expect(len(failed)).To(Equal(0)) + + running := FilterRunningMachines(machines) + + // This could probably be smarter, but seems fine for now. + if len(running) != len(machines) { + return fmt.Errorf("%q: not all Machines are running: %d of %d", + name, len(running), len(machines)) + } + + for _, m := range running { + node, err := GetNodeForMachine(c, m) + if err != nil { + return err + } + + if !IsNodeReady(node) { + return fmt.Errorf("%s: node is not ready", node.Name) + } + } + + return nil + }, WaitOverLong, RetryMedium).ShouldNot(HaveOccurred()) +} + +// WaitForMachineSetDelete polls until the given MachineSet is not found, and +// there are zero Machines found matching the MachineSet's label selector. +func WaitForMachineSetDelete(c runtimeclient.Client, machineSet *machinev1.MachineSet) { + WaitForMachineSetsDeleted(c, machineSet) +} + +// WaitForMachineSetsDeleted polls until the given MachineSets are not found, and +// there are zero Machines found matching the MachineSet's label selector. +func WaitForMachineSetsDeleted(c runtimeclient.Client, machineSets ...*machinev1.MachineSet) { + for _, ms := range machineSets { + Eventually(func() bool { + selector := ms.Spec.Selector + + machines, err := GetMachines(c, &selector) + if err != nil || len(machines) != 0 { + return false // Still have Machines, or other error. + } + + err = c.Get(context.Background(), runtimeclient.ObjectKey{ + Name: ms.GetName(), + Namespace: ms.GetNamespace(), + }, &machinev1.MachineSet{}) + + if !apierrors.IsNotFound(err) { + return false // MachineSet not deleted, or other error. + } + + return true // MachineSet and Machines were deleted. + }, WaitLong, RetryMedium).Should(BeTrue()) + } +} + +// DeleteMachineSets deletes the specified machinesets and returns an error on failure. 
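+// Deletion is attempted once per MachineSet; callers typically follow up with
+// WaitForMachineSetsDeleted to confirm removal.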
+func DeleteMachineSets(client runtimeclient.Client, machineSets ...*machinev1.MachineSet) error { + for _, ms := range machineSets { + if err := client.Delete(context.TODO(), ms); err != nil { + klog.Errorf("Error querying api for machine object %q: %v, retrying...", ms.Name, err) + return err + } + } + return nil +} diff --git a/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/framework/nodes.go b/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/framework/nodes.go new file mode 100644 index 000000000..a6ffd60ec --- /dev/null +++ b/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/framework/nodes.go @@ -0,0 +1,316 @@ +package framework + +import ( + "context" + "encoding/json" + "fmt" + "strings" + "time" + + machinev1 "github.com/openshift/api/machine/v1beta1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog" + "sigs.k8s.io/controller-runtime/pkg/client" + runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" +) + +// AddNodeCondition adds a condition in the given Node's status. +func AddNodeCondition(c client.Client, node *corev1.Node, cond corev1.NodeCondition) error { + nodeCopy := node.DeepCopy() + nodeCopy.Status.Conditions = append(nodeCopy.Status.Conditions, cond) + + return c.Status().Patch(context.Background(), nodeCopy, client.MergeFrom(node)) +} + +// FilterReadyNodes fileter the list of nodes and returns the list with ready nodes +func FilterReadyNodes(nodes []corev1.Node) []corev1.Node { + var readyNodes []corev1.Node + for _, n := range nodes { + if IsNodeReady(&n) { + readyNodes = append(readyNodes, n) + } + } + return readyNodes +} + +// GetNodes gets a list of nodes from a running cluster +// Optionaly, labels may be used to constrain listed nodes. +func GetNodes(c client.Client, selectors ...*metav1.LabelSelector) ([]corev1.Node, error) { + var listOpts []client.ListOption + + nodeList := corev1.NodeList{} + + for _, selector := range selectors { + s, err := metav1.LabelSelectorAsSelector(selector) + if err != nil { + return nil, err + } + + listOpts = append(listOpts, + client.MatchingLabelsSelector{Selector: s}, + ) + } + + if err := c.List(context.TODO(), &nodeList, listOpts...); err != nil { + return nil, fmt.Errorf("error querying api for nodeList object: %w", err) + } + + return nodeList.Items, nil +} + +// GetNodesFromMachineSet returns an array of nodes backed by machines owned by a given machineSet +func GetNodesFromMachineSet(client runtimeclient.Client, machineSet *machinev1.MachineSet) ([]*corev1.Node, error) { + machines, err := GetMachinesFromMachineSet(client, machineSet) + if err != nil { + return nil, fmt.Errorf("error calling getMachinesFromMachineSet %w", err) + } + + var nodes []*corev1.Node + for key := range machines { + node, err := GetNodeForMachine(client, machines[key]) + if apierrors.IsNotFound(err) { + // We don't care about not found errors. + // Callers should account for the number of nodes being correct or not. + klog.Infof("No Node object found for machine %s", machines[key].Name) + continue + } else if err != nil { + return nil, fmt.Errorf("error getting node from machine %q: %w", machines[key].Name, err) + } + nodes = append(nodes, node) + } + klog.Infof("MachineSet %q have %d nodes", machineSet.Name, len(nodes)) + return nodes, nil +} + +// GetNodeForMachine retrieves the node backing the given Machine. 
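+// It returns an error if the Machine has no NodeRef or the referenced Node cannot be fetched.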
+func GetNodeForMachine(c client.Client, m *machinev1.Machine) (*corev1.Node, error) { + if m.Status.NodeRef == nil { + return nil, fmt.Errorf("%s: machine has no NodeRef", m.Name) + } + + node := &corev1.Node{} + nodeName := client.ObjectKey{Name: m.Status.NodeRef.Name} + + if err := c.Get(context.Background(), nodeName, node); err != nil { + return nil, err + } + + return node, nil +} + +// GetWorkerNodes returns all nodes with the nodeWorkerRoleLabel label +func GetWorkerNodes(c client.Client) ([]corev1.Node, error) { + workerNodes := &corev1.NodeList{} + err := c.List(context.TODO(), workerNodes, + client.InNamespace(MachineAPINamespace), + client.MatchingLabels(map[string]string{WorkerNodeRoleLabel: ""}), + ) + + if err != nil { + return nil, err + } + + return workerNodes.Items, nil +} + +// IsNodeReady returns true if the given node is ready. +func IsNodeReady(node *corev1.Node) bool { + for _, c := range node.Status.Conditions { + if c.Type == corev1.NodeReady { + return c.Status == corev1.ConditionTrue + } + } + return false +} + +// NodesAreReady returns true if an array of nodes are all ready +func NodesAreReady(nodes []*corev1.Node) bool { + // All nodes needs to be ready + for key := range nodes { + if !IsNodeReady(nodes[key]) { + klog.Errorf("Node %q is not ready. Conditions are: %v", nodes[key].Name, nodes[key].Status.Conditions) + return false + } + klog.Infof("Node %q is ready. Conditions are: %v", nodes[key].Name, nodes[key].Status.Conditions) + } + return true +} + +func VerifyNodeDraining(client runtimeclient.Client, targetMachine *machinev1.Machine, rc *corev1.ReplicationController) (string, error) { + endTime := time.Now().Add(time.Duration(WaitLong)) + var drainedNodeName string + err := wait.PollImmediate(RetryMedium, WaitLong, func() (bool, error) { + machine := machinev1.Machine{} + + key := types.NamespacedName{ + Namespace: targetMachine.Namespace, + Name: targetMachine.Name, + } + if err := client.Get(context.TODO(), key, &machine); err != nil { + klog.Errorf("Error querying api machine %q object: %v, retrying...", targetMachine.Name, err) + return false, nil + } + if machine.Status.NodeRef == nil || machine.Status.NodeRef.Kind != "Node" { + klog.Errorf("Machine %q not linked to a node", machine.Name) + return false, nil + } + + drainedNodeName = machine.Status.NodeRef.Name + node := corev1.Node{} + + if err := client.Get(context.TODO(), types.NamespacedName{Name: drainedNodeName}, &node); err != nil { + klog.Errorf("Error querying api node %q object: %v, retrying...", drainedNodeName, err) + return false, nil + } + + if !node.Spec.Unschedulable { + klog.Errorf("Node %q is expected to be marked as unschedulable, it is not", node.Name) + return false, nil + } + + klog.Infof("[remaining %s] Node %q is mark unschedulable as expected", remainingTime(endTime), node.Name) + + pods := corev1.PodList{} + if err := client.List(context.TODO(), &pods, runtimeclient.MatchingLabels(rc.Spec.Selector)); err != nil { + klog.Errorf("Error querying api for Pods object: %v, retrying...", err) + return false, nil + } + + podCounter := 0 + for _, pod := range pods.Items { + if pod.Spec.NodeName != machine.Status.NodeRef.Name { + continue + } + if !pod.DeletionTimestamp.IsZero() { + continue + } + podCounter++ + } + + klog.Infof("[remaining %s] Have %v pods scheduled to node %q", remainingTime(endTime), podCounter, machine.Status.NodeRef.Name) + + // Verify we have enough pods running as well + rcObj := corev1.ReplicationController{} + key = types.NamespacedName{ + Namespace: 
rc.Namespace, + Name: rc.Name, + } + if err := client.Get(context.TODO(), key, &rcObj); err != nil { + klog.Errorf("Error querying api RC %q object: %v, retrying...", rc.Name, err) + return false, nil + } + + // The point of the test is to make sure majority of the pods are rescheduled + // to other nodes. Pod disruption budget makes sure at most one pod + // owned by the RC is not Ready. So no need to test it. Though, useful to have it printed. + klog.Infof("[remaining %s] RC ReadyReplicas: %v, Replicas: %v", remainingTime(endTime), rcObj.Status.ReadyReplicas, rcObj.Status.Replicas) + + // This makes sure at most one replica is not ready + if rcObj.Status.Replicas-rcObj.Status.ReadyReplicas > 1 { + return false, fmt.Errorf("pod disruption budget not respected, node was not properly drained") + } + + // Depends on timing though a machine can be deleted even before there is only + // one pod left on the node (that is being evicted). + if podCounter > 2 { + klog.Infof("[remaining %s] Expecting at most 2 pods to be scheduled to drained node %q, got %v", remainingTime(endTime), machine.Status.NodeRef.Name, podCounter) + return false, nil + } + + klog.Infof("[remaining %s] Expected result: all pods from the RC up to last one or two got scheduled to a different node while respecting PDB", remainingTime(endTime)) + return true, nil + }) + + return drainedNodeName, err +} + +func WaitUntilAllRCPodsAreReady(client runtimeclient.Client, rc *corev1.ReplicationController) error { + endTime := time.Now().Add(time.Duration(WaitLong)) + err := wait.PollImmediate(RetryMedium, WaitLong, func() (bool, error) { + rcObj := corev1.ReplicationController{} + key := types.NamespacedName{ + Namespace: rc.Namespace, + Name: rc.Name, + } + if err := client.Get(context.TODO(), key, &rcObj); err != nil { + klog.Errorf("Error querying api RC %q object: %v, retrying...", rc.Name, err) + return false, nil + } + if rcObj.Status.ReadyReplicas == 0 { + klog.Infof("[%s remaining] Waiting for at least one RC ready replica, ReadyReplicas: %v, Replicas: %v", remainingTime(endTime), rcObj.Status.ReadyReplicas, rcObj.Status.Replicas) + return false, nil + } + klog.Infof("[%s remaining] Waiting for RC ready replicas, ReadyReplicas: %v, Replicas: %v", remainingTime(endTime), rcObj.Status.ReadyReplicas, rcObj.Status.Replicas) + return rcObj.Status.Replicas == rcObj.Status.ReadyReplicas, nil + }) + + // Sometimes this will timeout because Status.Replicas != + // Status.ReadyReplicas. Print the state of all the pods for + // debugging purposes so we can distinguish between the cases + // when it works and those rare cases when it doesn't. 
+ pods := corev1.PodList{} + if err := client.List(context.TODO(), &pods, runtimeclient.MatchingLabels(rc.Spec.Selector)); err != nil { + klog.Errorf("Error listing pods: %v", err) + } else { + prettyPrint := func(i interface{}) string { + s, _ := json.MarshalIndent(i, "", " ") + return string(s) + } + for i := range pods.Items { + klog.Infof("POD #%v/%v: %s", i, len(pods.Items), prettyPrint(pods.Items[i])) + } + } + + return err +} + +func WaitUntilNodeDoesNotExists(client runtimeclient.Client, nodeName string) error { + endTime := time.Now().Add(time.Duration(WaitLong)) + return wait.PollImmediate(RetryMedium, WaitLong, func() (bool, error) { + node := corev1.Node{} + + key := types.NamespacedName{ + Name: nodeName, + } + err := client.Get(context.TODO(), key, &node) + if err == nil { + klog.Errorf("Node %q not yet deleted", nodeName) + return false, nil + } + + if !strings.Contains(err.Error(), "not found") { + klog.Errorf("Error querying api node %q object: %v, retrying...", nodeName, err) + return false, nil + } + + klog.Infof("[%s remaining] Node %q successfully deleted", remainingTime(endTime), nodeName) + return true, nil + }) +} + +// WaitUntilAllNodesAreReady lists all nodes and waits until they are ready. +func WaitUntilAllNodesAreReady(client runtimeclient.Client) error { + return wait.PollImmediate(RetryShort, PollNodesReadyTimeout, func() (bool, error) { + nodeList := corev1.NodeList{} + if err := client.List(context.TODO(), &nodeList); err != nil { + klog.Errorf("error querying api for nodeList object: %v, retrying...", err) + return false, nil + } + // All nodes needs to be ready + for _, node := range nodeList.Items { + if !IsNodeReady(&node) { + klog.Errorf("Node %q is not ready", node.Name) + return false, nil + } + } + return true, nil + }) +} + +func remainingTime(t time.Time) time.Duration { + return t.Sub(time.Now()).Round(time.Second) +} diff --git a/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/framework/pods.go b/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/framework/pods.go new file mode 100644 index 000000000..ff42a7406 --- /dev/null +++ b/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/framework/pods.go @@ -0,0 +1,70 @@ +package framework + +import ( + "bytes" + "context" + "io" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" +) + +// GetPods returns a list of pods matching the provided selector +func GetPods(client runtimeclient.Client, selector map[string]string) (*corev1.PodList, error) { + pods := &corev1.PodList{} + err := client.List(context.TODO(), pods, runtimeclient.MatchingLabels(selector)) + return pods, err +} + +type PodCleanupFunc func() error + +type PodLastLogFunc func(container string, lines int, previous bool) (string, error) + +// RunPodOnNode runs a pod according passed spec on particular node. 
+// returns created pod object, function for retrieve last logs, cleanup function and error if occurred +func RunPodOnNode(clientset *kubernetes.Clientset, node *corev1.Node, namespace string, podSpec corev1.PodSpec) (*corev1.Pod, PodLastLogFunc, PodCleanupFunc, error) { + var err error + podSpec.NodeName = node.Name + pod := &corev1.Pod{ + Spec: podSpec, + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "machine-api-e2e-", + }, + } + + pod, err = clientset.CoreV1().Pods(namespace).Create(context.TODO(), pod, metav1.CreateOptions{}) + if err != nil { + return nil, nil, nil, err + } + + cleanup := func() error { + return clientset.CoreV1().Pods(namespace).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{}) + } + + lastLog := func(container string, lines int, previous bool) (string, error) { + tailLines := int64(lines) + req := clientset.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &corev1.PodLogOptions{ + Container: container, + Follow: true, + Previous: previous, + TailLines: &tailLines, + }) + podLogs, err := req.Stream(context.TODO()) + if err != nil { + return "", err + } + defer podLogs.Close() + buf := new(bytes.Buffer) + _, err = io.Copy(buf, podLogs) + if err != nil { + return "", err + } + logs := buf.String() + + return logs, nil + } + + return pod, lastLog, cleanup, err +} diff --git a/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/framework/proxies.go b/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/framework/proxies.go new file mode 100644 index 000000000..848d71eff --- /dev/null +++ b/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/framework/proxies.go @@ -0,0 +1,310 @@ +package framework + +import ( + "context" + "fmt" + + configv1 "github.com/openshift/api/config/v1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/wait" + runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" +) + +const proxySetup = ` +cd /root +mkdir /root/.mitmproxy +cat /root/certs/tls.key /root/certs/tls.crt > /root/.mitmproxy/mitmproxy-ca.pem +curl -O https://snapshots.mitmproxy.org/5.3.0/mitmproxy-5.3.0-linux.tar.gz +tar xvf mitmproxy-5.3.0-linux.tar.gz +./mitmdump +` + +const mitmSignerCert = ` +-----BEGIN CERTIFICATE----- +MIICQDCCAamgAwIBAgIUXyjCAhFKon7pffzuFrHRBmLQRqwwDQYJKoZIhvcNAQEL +BQAwMjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAk5DMRYwFAYDVQQDDA1taXRtLXBy +b3h5LWNhMB4XDTIwMTEyMDIxMTk1NVoXDTQ4MDUwMTIxMTk1NVowMjELMAkGA1UE +BhMCVVMxCzAJBgNVBAgMAk5DMRYwFAYDVQQDDA1taXRtLXByb3h5LWNhMIGfMA0G +CSqGSIb3DQEBAQUAA4GNADCBiQKBgQDPqHuUZz1Qt236e03XWjsVCy1OnuuWQ3a1 +OQX21waqypGPem6iKiSmAmf+YrKdkX2O5L2sqPjpZ+civ9z5h9d0xCCNy+06UQTu ++pIrgIxm2p5wQmSGgI5KNUL6dgU3dC5aRDMU7hV0RGXKrJktTmU2SnZsOUe4SZ5L +Vyanpk8N1QIDAQABo1MwUTAdBgNVHQ4EFgQUFD2D/LygThvwLRKukmH1Y++hxLIw +HwYDVR0jBBgwFoAUFD2D/LygThvwLRKukmH1Y++hxLIwDwYDVR0TAQH/BAUwAwEB +/zANBgkqhkiG9w0BAQsFAAOBgQCLZeGgfFRCOWqSXcDzUzeJE03HD/y2rLY+hDMn +kj69xY3+K0z4DRbb1vpMiYsTYF/Z1d6blND4KW+7oi9R6PPYg6XATDMf9tVfTQIe +qBkDNfqABPlwJABpixwD20XXQUBqADEyO3tdQLtiMi5Qr6QHOX3+FepiHAgdxAFt +Mqy4Gw== +-----END CERTIFICATE----- +` +const mitmSignerKey = ` +-----BEGIN RSA PRIVATE KEY----- +MIICXgIBAAKBgQDPqHuUZz1Qt236e03XWjsVCy1OnuuWQ3a1OQX21waqypGPem6i +KiSmAmf+YrKdkX2O5L2sqPjpZ+civ9z5h9d0xCCNy+06UQTu+pIrgIxm2p5wQmSG +gI5KNUL6dgU3dC5aRDMU7hV0RGXKrJktTmU2SnZsOUe4SZ5LVyanpk8N1QIDAQAB +AoGBAJksuGuRc8MUawV26sZNgoNVSUhTJYgjn26x71pS5lIZNiHVt8HawEnMQJV+ +jC56YVmEFP1FbsYMpIwXZpKRxzS/uXInKUbRlYQuJ8kVG7a0foe+vja1gxmG4b+Z 
+69V6S+4Y8AhHEcw+Ek04LUWOzEV0NpBa8VJYVHtW3bTarE0JAkEA9DBvOmkg9rit +x2M9zLR+BCPvgP16EARY2Ik+ZlOP7hC2iPL9mEjxSujJ78SSrrY09hPomemAqBz6 +eiWbsL9KgwJBANmzt+pOGu0fON3MFbjRKfZHAdsDJQipTEGKr5jig8nZg84R+zBA +rfm9Vm2zdHaZ6rGTpMv62roHrXnq9y1htscCQQDQo8GlqsWbiNgSkNzw1xcE+p9d +Gzb8EHrJKRrD24oS4vzTrqq3PzvLwXMpBlA+Lzi5OPF48GYZPglV7GRGdGt5AkAY +7v53dW6cDeFjdcZfHoWh0UwjG18YeNtk/k9SQU86xRDVfzW3txC187t8YPtLwiEh +KXnMavS2Lb7uobyhk/ltAkEA48NjOS7IL1JMR2X/r5uBrEQhd/XjnaYePdav32Ii +2ygHNMMkN1/ueVBCypFaU2UZDVNabyZ+MJYnR8dK4xxpQg== +-----END RSA PRIVATE KEY----- +` + +// DeployClusterProxy Deploys an HTTP proxy to the proxy node +func DeployClusterProxy(c runtimeclient.Client) error { + mitmDeploymentLabels := map[string]string{ + "app": "mitm-proxy", + } + + objectMeta := metav1.ObjectMeta{ + Name: "mitm-proxy", + Namespace: "default", + Labels: mitmDeploymentLabels, + } + + mitmSigner := corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mitm-signer", + Namespace: "default", + Labels: mitmDeploymentLabels, + }, + Data: map[string][]byte{ + "tls.crt": []byte(mitmSignerCert), + "tls.key": []byte(mitmSignerKey), + }, + } + + err := c.Create(context.Background(), &mitmSigner) + + var mitmBootstrapPerms int32 = 511 + mitmBootstrapConfigMap := corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mitm-bootstrap", + Namespace: "default", + }, + Data: map[string]string{ + "startup.sh": proxySetup, + }, + } + + err = c.Create(context.Background(), &mitmBootstrapConfigMap) + if err != nil { + return err + } + + mitmCustomPkiConfigMap := corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mitm-custom-pki", + Namespace: "openshift-config", + }, + Data: map[string]string{ + "ca-bundle.crt": mitmSignerCert, + }, + } + + err = c.Create(context.Background(), &mitmCustomPkiConfigMap) + if err != nil { + return err + } + + daemonset := &appsv1.DaemonSet{ + ObjectMeta: objectMeta, + Spec: appsv1.DaemonSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: mitmDeploymentLabels, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: objectMeta, + Spec: corev1.PodSpec{ + Volumes: []corev1.Volume{ + { + Name: "mitm-bootstrap", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "mitm-bootstrap", + }, + DefaultMode: &mitmBootstrapPerms, + }, + }, + }, + { + Name: "mitm-signer", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "mitm-signer", + }, + }, + }, + }, + Containers: []corev1.Container{ + { + Name: "proxy", + Image: "registry.redhat.io/ubi8/ubi", + Command: []string{"/bin/sh", "-c", "/root/startup.sh"}, + Ports: []corev1.ContainerPort{ + { + ContainerPort: 80, + }, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "mitm-bootstrap", + ReadOnly: false, + MountPath: "/root/startup.sh", + SubPath: "startup.sh", + }, + { + Name: "mitm-signer", + ReadOnly: false, + MountPath: "/root/certs", + }, + }, + }, + }, + }, + }, + }, + } + err = c.Create(context.Background(), daemonset) + if err != nil { + return err + } + + IsDaemonsetAvailable(c, "mitm-proxy", "default") + + service := &corev1.Service{ + ObjectMeta: objectMeta, + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + { + Protocol: "TCP", + Port: 8080, + TargetPort: intstr.IntOrString{ + IntVal: 8080, + }, + }, + }, + Selector: mitmDeploymentLabels, + }, + } + err = c.Create(context.Background(), service) + if err != nil { + return err + } + IsServiceAvailable(c, "mitm-proxy", "default") + + return err +} + +// 
DestroyClusterProxy destroys the HTTP proxy and associated resources +func DestroyClusterProxy(c runtimeclient.Client) error { + mitmDeploymentLabels := map[string]string{ + "app": "mitm-proxy", + } + + mitmObjectMeta := metav1.ObjectMeta{ + Name: "mitm-proxy", + Namespace: "default", + Labels: mitmDeploymentLabels, + } + + configMap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mitm-bootstrap", + Namespace: "default", + }, + } + err := c.Delete(context.Background(), configMap) + if err != nil { + return err + } + + configMap = &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mitm-custom-pki", + Namespace: "openshift-config", + }, + } + err = c.Delete(context.Background(), configMap) + if err != nil { + return err + } + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mitm-signer", + Namespace: "default", + }, + } + err = c.Delete(context.Background(), secret) + if err != nil { + return err + } + + daemonset := &appsv1.DaemonSet{ + ObjectMeta: mitmObjectMeta, + } + err = c.Delete(context.Background(), daemonset) + if err != nil { + return err + } + + service := &corev1.Service{ + ObjectMeta: mitmObjectMeta, + } + + return c.Delete(context.Background(), service) +} + +// WaitForProxyInjectionSync waits for the deployment to sync with the state of the cluster-proxy +func WaitForProxyInjectionSync(c runtimeclient.Client, name, namespace string, shouldBePresent bool) (bool, error) { + if err := wait.PollImmediate(RetryMedium, WaitLong, func() (bool, error) { + deployment, err := GetDeployment(c, name, namespace) + if err != nil { + return false, nil + } + hasHTTPProxy := false + hasHTTPSProxy := false + hasNoProxy := false + for _, container := range deployment.Spec.Template.Spec.Containers { + for _, envVar := range container.Env { + switch envVar.Name { + case "NO_PROXY": + hasNoProxy = true + case "HTTPS_PROXY": + hasHTTPSProxy = true + case "HTTP_PROXY": + hasHTTPProxy = true + } + } + } + return (hasHTTPProxy && + hasHTTPSProxy && + hasNoProxy) == shouldBePresent, nil + }); err != nil { + return false, fmt.Errorf("error checking isDeploymentAvailable: %v", err) + } + return true, nil +} + +// GetClusterProxy fetches the global cluster proxy object. 
+func GetClusterProxy(c runtimeclient.Client) (*configv1.Proxy, error) { + proxy := &configv1.Proxy{} + proxyName := runtimeclient.ObjectKey{ + Name: GlobalInfrastuctureName, + } + + if err := c.Get(context.Background(), proxyName, proxy); err != nil { + return nil, err + } + + return proxy, nil +} diff --git a/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/framework/services.go b/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/framework/services.go new file mode 100644 index 000000000..ff7d08d5d --- /dev/null +++ b/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/framework/services.go @@ -0,0 +1,69 @@ +package framework + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog" + runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" +) + +// GetServices returns a list of services matching the provided selector +func GetServices(client runtimeclient.Client, selector map[string]string) (*corev1.ServiceList, error) { + services := &corev1.ServiceList{} + err := client.List(context.TODO(), services, runtimeclient.MatchingLabels(selector)) + if err != nil { + return nil, fmt.Errorf("error getting Services %v", err) + } + return services, err +} + +// GetService gets service object by name and namespace. +func GetService(c runtimeclient.Client, name, namespace string) (*corev1.Service, error) { + key := types.NamespacedName{ + Namespace: namespace, + Name: name, + } + s := &corev1.Service{} + + if err := wait.PollImmediate(RetryMedium, WaitShort, func() (bool, error) { + if err := c.Get(context.TODO(), key, s); err != nil { + klog.Errorf("Error querying api for Service object %q: %v, retrying...", name, err) + return false, nil + } + return true, nil + }); err != nil { + return nil, fmt.Errorf("error getting Service %q: %v", name, err) + } + return s, nil +} + +// IsServiceAvailable returns true if the service exists +func IsServiceAvailable(c runtimeclient.Client, name, namespace string) bool { + if err := wait.PollImmediate(RetryMedium, WaitLong, func() (bool, error) { + s, err := GetService(c, name, namespace) + if err != nil { + klog.Errorf("Error getting Service: %v", err) + return false, nil + } + if s.Spec.ClusterIP == "" { + klog.Errorf("Service doesn't have a clusterIP: %v", err) + return false, nil + } + klog.Infof("Service %q is available. 
Status: %s", + s.Name, serviceInfo(s)) + return true, nil + }); err != nil { + klog.Errorf("Error checking IsServiceAvailable: %v", err) + return false + } + return true +} + +func serviceInfo(s *corev1.Service) string { + return fmt.Sprintf("(ClusterIP: %s)", + s.Spec.ClusterIP) +} diff --git a/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/framework/webhooks.go b/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/framework/webhooks.go new file mode 100644 index 000000000..2846d2149 --- /dev/null +++ b/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/framework/webhooks.go @@ -0,0 +1,160 @@ +package framework + +import ( + "context" + "fmt" + + "github.com/openshift/machine-api-operator/pkg/webhooks" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// DefaultValidatingWebhookConfiguration is a default validating webhook configuration resource provided by MAO +var DefaultValidatingWebhookConfiguration = webhooks.NewValidatingWebhookConfiguration() + +// DefaultMutatingWebhookConfiguration is a default mutating webhook configuration resource provided by MAO +var DefaultMutatingWebhookConfiguration = webhooks.NewMutatingWebhookConfiguration() + +// GetMutatingWebhookConfiguration gets MutatingWebhookConfiguration object by name +func GetMutatingWebhookConfiguration(c client.Client, name string) (*admissionregistrationv1.MutatingWebhookConfiguration, error) { + key := client.ObjectKey{Name: name} + existing := &admissionregistrationv1.MutatingWebhookConfiguration{} + + if err := wait.PollImmediate(RetryShort, WaitShort, func() (bool, error) { + if err := c.Get(context.TODO(), key, existing); err != nil { + klog.Errorf("Error querying api for MutatingWebhookConfiguration object %q: %v, retrying...", name, err) + return false, nil + } + return true, nil + }); err != nil { + return nil, fmt.Errorf("error getting MutatingWebhookConfiguration %q: %v", name, err) + } + return existing, nil +} + +// GetValidatingWebhookConfiguration gets ValidatingWebhookConfiguration object by name +func GetValidatingWebhookConfiguration(c client.Client, name string) (*admissionregistrationv1.ValidatingWebhookConfiguration, error) { + key := client.ObjectKey{Name: name} + existing := &admissionregistrationv1.ValidatingWebhookConfiguration{} + + if err := wait.PollImmediate(RetryShort, WaitShort, func() (bool, error) { + if err := c.Get(context.TODO(), key, existing); err != nil { + klog.Errorf("Error querying api for ValidatingWebhookConfiguration object %q: %v, retrying...", name, err) + return false, nil + } + return true, nil + }); err != nil { + return nil, fmt.Errorf("error getting ValidatingWebhookConfiguration %q: %v", name, err) + } + return existing, nil +} + +// DeleteValidatingWebhookConfiguration deletes the specified ValidatingWebhookConfiguration object +func DeleteValidatingWebhookConfiguration(c client.Client, webhookConfiguraiton *admissionregistrationv1.ValidatingWebhookConfiguration) error { + return wait.PollImmediate(RetryShort, WaitShort, func() (bool, error) { + if err := c.Delete(context.TODO(), webhookConfiguraiton); apierrors.IsNotFound(err) { + return true, nil + } else if err != nil { + klog.Errorf("error querying api for ValidatingWebhookConfiguration object %q: %v, retrying...", webhookConfiguraiton.Name, err) + return false, nil + } + return true, nil + 
}) +} + +// DeleteMutatingWebhookConfiguration deletes the specified MutatingWebhookConfiguration object +func DeleteMutatingWebhookConfiguration(c client.Client, webhookConfiguraiton *admissionregistrationv1.MutatingWebhookConfiguration) error { + return wait.PollImmediate(RetryShort, WaitShort, func() (bool, error) { + if err := c.Delete(context.TODO(), webhookConfiguraiton); apierrors.IsNotFound(err) { + return true, nil + } else if err != nil { + klog.Errorf("error querying api for MutatingWebhookConfiguration object %q: %v, retrying...", webhookConfiguraiton.Name, err) + return false, nil + } + return true, nil + }) +} + +// UpdateMutatingWebhookConfiguration updates the specified mutating webhook configuration +func UpdateMutatingWebhookConfiguration(c client.Client, updated *admissionregistrationv1.MutatingWebhookConfiguration) error { + return wait.PollImmediate(RetryShort, WaitShort, func() (bool, error) { + existing, err := GetMutatingWebhookConfiguration(c, updated.Name) + if err != nil { + klog.Errorf("Error getting MutatingWebhookConfiguration: %v", err) + return false, nil + } + if err := c.Patch(context.TODO(), existing, client.MergeFrom(updated)); err != nil { + klog.Errorf("error patching MutatingWebhookConfiguration object %q: %v, retrying...", updated.Name, err) + return false, nil + } + return true, nil + }) +} + +// UpdateValidatingWebhookConfiguration updates the specified mutating webhook configuration +func UpdateValidatingWebhookConfiguration(c client.Client, updated *admissionregistrationv1.ValidatingWebhookConfiguration) error { + return wait.PollImmediate(RetryShort, WaitShort, func() (bool, error) { + existing, err := GetValidatingWebhookConfiguration(c, updated.Name) + if err != nil { + klog.Errorf("Error getting ValidatingWebhookConfiguration: %v", err) + return false, nil + } + if err := c.Patch(context.TODO(), existing, client.MergeFrom(updated)); err != nil { + klog.Errorf("error patching ValidatingWebhookConfiguration object %q: %v, retrying...", updated.Name, err) + return false, nil + } + return true, nil + }) +} + +// IsMutatingWebhookConfigurationSynced expects a matching MutatingWebhookConfiguration to be present in the cluster +func IsMutatingWebhookConfigurationSynced(c client.Client) bool { + if err := wait.PollImmediate(RetryShort, WaitMedium, func() (bool, error) { + existing, err := GetMutatingWebhookConfiguration(c, DefaultMutatingWebhookConfiguration.Name) + if err != nil { + klog.Errorf("Error getting MutatingWebhookConfiguration: %v", err) + return false, nil + } + + // Due to caBundle injection by service-ca-operator, we have to use DeepDerivative, + // which will ignore change in spec.webhooks[x].serviceReference.caBundle in comparison + // to empty value, as the default webhook configuration does not have this field set + equal := equality.Semantic.DeepDerivative(DefaultMutatingWebhookConfiguration.Webhooks, existing.Webhooks) + if !equal { + klog.Infof("MutatingWebhookConfiguration is not yet equal, retrying...") + } + return equal, nil + }); err != nil { + klog.Errorf("Error waiting for match with expected MutatingWebhookConfigurationMatched: %v", err) + return false + } + return true +} + +// IsValidatingWebhookConfigurationSynced expects a matching MutatingWebhookConfiguration to be present in the cluster +func IsValidatingWebhookConfigurationSynced(c client.Client) bool { + if err := wait.PollImmediate(RetryShort, WaitMedium, func() (bool, error) { + existing, err := GetValidatingWebhookConfiguration(c, 
DefaultValidatingWebhookConfiguration.Name) + if err != nil { + klog.Errorf("Error getting MutatingWebhookConfiguration: %v", err) + return false, nil + } + + // Due to caBundle injection by service-ca-operator, we have to use DeepDerivative, + // which will ignore change in spec.webhooks[x].serviceReference.caBundle in comparison + // to empty value, as the default webhook configuration does not have this field set + equal := equality.Semantic.DeepDerivative(DefaultValidatingWebhookConfiguration.Webhooks, existing.Webhooks) + if !equal { + klog.Infof("ValidatingWebhookConfiguration is not yet equal, retrying...") + } + return equal, nil + }); err != nil { + klog.Errorf("Error waiting for match with expected ValidatingWebhookConfigurationMatched: %v", err) + return false + } + return true +} diff --git a/vendor/github.com/openshift/cluster-autoscaler-operator/LICENSE b/vendor/github.com/openshift/cluster-autoscaler-operator/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/openshift/cluster-autoscaler-operator/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
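The framework helpers vendored above are typically composed into Go e2e tests. The following is a rough, illustrative sketch only: the test name, replica count, and scheme wiring are assumptions rather than part of this change, and it simply shows how BuildMachineSetParams, CreateMachineSet, WaitForMachineSet, and the deletion helpers fit together.

package e2e_sketch // hypothetical test package, not part of the vendored code

import (
	"testing"

	"github.com/onsi/gomega"
	machinev1 "github.com/openshift/api/machine/v1beta1"
	framework "github.com/openshift/cluster-api-actuator-pkg/pkg/framework"
	"k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/config"
)

func TestScaleDedicatedMachineSet(t *testing.T) {
	// The framework helpers assert with Gomega internally, so a fail
	// handler must be registered before calling them.
	gomega.RegisterTestingT(t)

	scheme := runtime.NewScheme()
	_ = clientgoscheme.AddToScheme(scheme)
	// Assumption: the machine.openshift.io/v1beta1 types register via the
	// usual AddToScheme helper; adjust if the package exposes Install instead.
	_ = machinev1.AddToScheme(scheme)

	cfg, err := config.GetConfig()
	if err != nil {
		t.Fatal(err)
	}
	c, err := runtimeclient.New(cfg, runtimeclient.Options{Scheme: scheme})
	if err != nil {
		t.Fatal(err)
	}

	// Copy a ProviderSpec from an existing worker MachineSet and create a
	// dedicated MachineSet with two replicas for the test.
	params := framework.BuildMachineSetParams(c, 2)
	ms, err := framework.CreateMachineSet(c, params)
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		// Best-effort cleanup: delete the MachineSet and wait for its
		// Machines to disappear.
		_ = framework.DeleteMachineSets(c, ms)
		framework.WaitForMachineSetsDeleted(c, ms)
	}()

	// Block until all Machines are Running and their Nodes are Ready.
	framework.WaitForMachineSet(c, ms.Name)
}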
diff --git a/vendor/github.com/openshift/cluster-autoscaler-operator/pkg/apis/autoscaling/v1/clusterautoscaler_types.go b/vendor/github.com/openshift/cluster-autoscaler-operator/pkg/apis/autoscaling/v1/clusterautoscaler_types.go new file mode 100644 index 000000000..ed7dcf1a5 --- /dev/null +++ b/vendor/github.com/openshift/cluster-autoscaler-operator/pkg/apis/autoscaling/v1/clusterautoscaler_types.go @@ -0,0 +1,135 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func init() { + SchemeBuilder.Register(&ClusterAutoscaler{}, &ClusterAutoscalerList{}) +} + +// ClusterAutoscalerSpec defines the desired state of ClusterAutoscaler +type ClusterAutoscalerSpec struct { + // Constraints of autoscaling resources + ResourceLimits *ResourceLimits `json:"resourceLimits,omitempty"` + + // Configuration of scale down operation + ScaleDown *ScaleDownConfig `json:"scaleDown,omitempty"` + + // Gives pods graceful termination time before scaling down + MaxPodGracePeriod *int32 `json:"maxPodGracePeriod,omitempty"` + + // Maximum time CA waits for node to be provisioned + // +kubebuilder:validation:Pattern=^([0-9]+(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$ + MaxNodeProvisionTime string `json:"maxNodeProvisionTime,omitempty"` + + // To allow users to schedule "best-effort" pods, which shouldn't trigger + // Cluster Autoscaler actions, but only run when there are spare resources available, + // More info: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#how-does-cluster-autoscaler-work-with-pod-priority-and-preemption + PodPriorityThreshold *int32 `json:"podPriorityThreshold,omitempty"` + + // BalanceSimilarNodeGroups enables/disables the + // `--balance-similar-node-groups` cluster-autocaler feature. + // This feature will automatically identify node groups with + // the same instance type and the same set of labels and try + // to keep the respective sizes of those node groups balanced. + BalanceSimilarNodeGroups *bool `json:"balanceSimilarNodeGroups,omitempty"` + + // Enables/Disables `--ignore-daemonsets-utilization` CA feature flag. Should CA ignore DaemonSet pods when calculating resource utilization for scaling down. false by default + IgnoreDaemonsetsUtilization *bool `json:"ignoreDaemonsetsUtilization,omitempty"` + + // Enables/Disables `--skip-nodes-with-local-storage` CA feature flag. If true cluster autoscaler will never delete nodes with pods with local storage, e.g. EmptyDir or HostPath. true by default at autoscaler + SkipNodesWithLocalStorage *bool `json:"skipNodesWithLocalStorage,omitempty"` +} + +// ClusterAutoscalerStatus defines the observed state of ClusterAutoscaler +type ClusterAutoscalerStatus struct { + // TODO: Add status fields. 
+} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterAutoscaler is the Schema for the clusterautoscalers API +// +k8s:openapi-gen=true +// +kubebuilder:resource:path=clusterautoscalers,shortName=ca,scope=Cluster +// +kubebuilder:subresource:status +// +genclient:nonNamespaced +type ClusterAutoscaler struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Desired state of ClusterAutoscaler resource + Spec ClusterAutoscalerSpec `json:"spec,omitempty"` + + // Most recently observed status of ClusterAutoscaler resource + Status ClusterAutoscalerStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterAutoscalerList contains a list of ClusterAutoscaler +type ClusterAutoscalerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ClusterAutoscaler `json:"items"` +} + +type ResourceLimits struct { + // Maximum number of nodes in all node groups. + // Cluster autoscaler will not grow the cluster beyond this number. + // +kubebuilder:validation:Minimum=0 + MaxNodesTotal *int32 `json:"maxNodesTotal,omitempty"` + + // Minimum and maximum number of cores in cluster, in the format <min>:<max>. + // Cluster autoscaler will not scale the cluster beyond these numbers. + Cores *ResourceRange `json:"cores,omitempty"` + + // Minimum and maximum number of gigabytes of memory in cluster, in the format <min>:<max>. + // Cluster autoscaler will not scale the cluster beyond these numbers. + Memory *ResourceRange `json:"memory,omitempty"` + + // Minimum and maximum number of different GPUs in cluster, in the format <gpu_type>:<min>:<max>. + // Cluster autoscaler will not scale the cluster beyond these numbers. Can be passed multiple times.
+ GPUS []GPULimit `json:"gpus,omitempty"` +} + +type GPULimit struct { + // +kubebuilder:validation:MinLength=1 + Type string `json:"type"` + + // +kubebuilder:validation:Minimum=0 + Min int32 `json:"min"` + // +kubebuilder:validation:Minimum=1 + Max int32 `json:"max"` +} + +type ResourceRange struct { + // +kubebuilder:validation:Minimum=0 + Min int32 `json:"min"` + Max int32 `json:"max"` +} + +type ScaleDownConfig struct { + // Should CA scale down the cluster + Enabled bool `json:"enabled"` + + // How long after scale up that scale down evaluation resumes + // +kubebuilder:validation:Pattern=([0-9]*(\.[0-9]*)?[a-z]+)+ + DelayAfterAdd *string `json:"delayAfterAdd,omitempty"` + + // How long after node deletion that scale down evaluation resumes, defaults to scan-interval + // +kubebuilder:validation:Pattern=([0-9]*(\.[0-9]*)?[a-z]+)+ + DelayAfterDelete *string `json:"delayAfterDelete,omitempty"` + + // How long after scale down failure that scale down evaluation resumes + // +kubebuilder:validation:Pattern=([0-9]*(\.[0-9]*)?[a-z]+)+ + DelayAfterFailure *string `json:"delayAfterFailure,omitempty"` + + // How long a node should be unneeded before it is eligible for scale down + // +kubebuilder:validation:Pattern=([0-9]*(\.[0-9]*)?[a-z]+)+ + UnneededTime *string `json:"unneededTime,omitempty"` + + // Node utilization level, defined as sum of requested resources divided by capacity, below which a node can be considered for scale down + // +kubebuilder:validation:Pattern=(0.[0-9]+) + UtilizationThreshold *string `json:"utilizationThreshold,omitempty"` +} diff --git a/vendor/github.com/openshift/cluster-autoscaler-operator/pkg/apis/autoscaling/v1/doc.go b/vendor/github.com/openshift/cluster-autoscaler-operator/pkg/apis/autoscaling/v1/doc.go new file mode 100644 index 000000000..d89992f41 --- /dev/null +++ b/vendor/github.com/openshift/cluster-autoscaler-operator/pkg/apis/autoscaling/v1/doc.go @@ -0,0 +1,4 @@ +// Package v1 contains API Schema definitions for the autoscaling v1 API group +// +k8s:deepcopy-gen=package,register +// +groupName=autoscaling.openshift.io +package v1 diff --git a/vendor/github.com/openshift/cluster-autoscaler-operator/pkg/apis/autoscaling/v1/register.go b/vendor/github.com/openshift/cluster-autoscaler-operator/pkg/apis/autoscaling/v1/register.go new file mode 100644 index 000000000..8d8537d51 --- /dev/null +++ b/vendor/github.com/openshift/cluster-autoscaler-operator/pkg/apis/autoscaling/v1/register.go @@ -0,0 +1,19 @@ +// NOTE: Boilerplate only. Ignore this file. 
+ +// Package v1 contains API Schema definitions for the autoscaling v1 API group +// +k8s:deepcopy-gen=package,register +// +groupName=autoscaling.openshift.io +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // SchemeGroupVersion is group version used to register these objects + SchemeGroupVersion = schema.GroupVersion{Group: "autoscaling.openshift.io", Version: "v1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} +) diff --git a/vendor/github.com/openshift/cluster-autoscaler-operator/pkg/apis/autoscaling/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/cluster-autoscaler-operator/pkg/apis/autoscaling/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..0eef26245 --- /dev/null +++ b/vendor/github.com/openshift/cluster-autoscaler-operator/pkg/apis/autoscaling/v1/zz_generated.deepcopy.go @@ -0,0 +1,256 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright 2020 Red Hat, Inc. + * + */ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterAutoscaler) DeepCopyInto(out *ClusterAutoscaler) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterAutoscaler. +func (in *ClusterAutoscaler) DeepCopy() *ClusterAutoscaler { + if in == nil { + return nil + } + out := new(ClusterAutoscaler) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterAutoscaler) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterAutoscalerList) DeepCopyInto(out *ClusterAutoscalerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterAutoscaler, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterAutoscalerList. +func (in *ClusterAutoscalerList) DeepCopy() *ClusterAutoscalerList { + if in == nil { + return nil + } + out := new(ClusterAutoscalerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ClusterAutoscalerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterAutoscalerSpec) DeepCopyInto(out *ClusterAutoscalerSpec) { + *out = *in + if in.ResourceLimits != nil { + in, out := &in.ResourceLimits, &out.ResourceLimits + *out = new(ResourceLimits) + (*in).DeepCopyInto(*out) + } + if in.ScaleDown != nil { + in, out := &in.ScaleDown, &out.ScaleDown + *out = new(ScaleDownConfig) + (*in).DeepCopyInto(*out) + } + if in.MaxPodGracePeriod != nil { + in, out := &in.MaxPodGracePeriod, &out.MaxPodGracePeriod + *out = new(int32) + **out = **in + } + if in.PodPriorityThreshold != nil { + in, out := &in.PodPriorityThreshold, &out.PodPriorityThreshold + *out = new(int32) + **out = **in + } + if in.BalanceSimilarNodeGroups != nil { + in, out := &in.BalanceSimilarNodeGroups, &out.BalanceSimilarNodeGroups + *out = new(bool) + **out = **in + } + if in.IgnoreDaemonsetsUtilization != nil { + in, out := &in.IgnoreDaemonsetsUtilization, &out.IgnoreDaemonsetsUtilization + *out = new(bool) + **out = **in + } + if in.SkipNodesWithLocalStorage != nil { + in, out := &in.SkipNodesWithLocalStorage, &out.SkipNodesWithLocalStorage + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterAutoscalerSpec. +func (in *ClusterAutoscalerSpec) DeepCopy() *ClusterAutoscalerSpec { + if in == nil { + return nil + } + out := new(ClusterAutoscalerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterAutoscalerStatus) DeepCopyInto(out *ClusterAutoscalerStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterAutoscalerStatus. +func (in *ClusterAutoscalerStatus) DeepCopy() *ClusterAutoscalerStatus { + if in == nil { + return nil + } + out := new(ClusterAutoscalerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GPULimit) DeepCopyInto(out *GPULimit) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GPULimit. +func (in *GPULimit) DeepCopy() *GPULimit { + if in == nil { + return nil + } + out := new(GPULimit) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceLimits) DeepCopyInto(out *ResourceLimits) { + *out = *in + if in.MaxNodesTotal != nil { + in, out := &in.MaxNodesTotal, &out.MaxNodesTotal + *out = new(int32) + **out = **in + } + if in.Cores != nil { + in, out := &in.Cores, &out.Cores + *out = new(ResourceRange) + **out = **in + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(ResourceRange) + **out = **in + } + if in.GPUS != nil { + in, out := &in.GPUS, &out.GPUS + *out = make([]GPULimit, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceLimits. 
+func (in *ResourceLimits) DeepCopy() *ResourceLimits { + if in == nil { + return nil + } + out := new(ResourceLimits) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceRange) DeepCopyInto(out *ResourceRange) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRange. +func (in *ResourceRange) DeepCopy() *ResourceRange { + if in == nil { + return nil + } + out := new(ResourceRange) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleDownConfig) DeepCopyInto(out *ScaleDownConfig) { + *out = *in + if in.DelayAfterAdd != nil { + in, out := &in.DelayAfterAdd, &out.DelayAfterAdd + *out = new(string) + **out = **in + } + if in.DelayAfterDelete != nil { + in, out := &in.DelayAfterDelete, &out.DelayAfterDelete + *out = new(string) + **out = **in + } + if in.DelayAfterFailure != nil { + in, out := &in.DelayAfterFailure, &out.DelayAfterFailure + *out = new(string) + **out = **in + } + if in.UnneededTime != nil { + in, out := &in.UnneededTime, &out.UnneededTime + *out = new(string) + **out = **in + } + if in.UtilizationThreshold != nil { + in, out := &in.UtilizationThreshold, &out.UtilizationThreshold + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleDownConfig. +func (in *ScaleDownConfig) DeepCopy() *ScaleDownConfig { + if in == nil { + return nil + } + out := new(ScaleDownConfig) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/machine-api-operator/LICENSE b/vendor/github.com/openshift/machine-api-operator/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/openshift/machine-api-operator/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/util/lifecyclehooks/sort.go b/vendor/github.com/openshift/machine-api-operator/pkg/util/lifecyclehooks/sort.go new file mode 100644 index 000000000..3a65147e3 --- /dev/null +++ b/vendor/github.com/openshift/machine-api-operator/pkg/util/lifecyclehooks/sort.go @@ -0,0 +1,22 @@ +package lifecyclehooks + +import ( + "reflect" + + machinev1 "github.com/openshift/api/machine/v1beta1" +) + +func GetChangedLifecycleHooks(old, new []machinev1.LifecycleHook) []machinev1.LifecycleHook { + oldSet := make(map[string]machinev1.LifecycleHook) + for _, hook := range old { + oldSet[hook.Name] = hook + } + + changedHooks := []machinev1.LifecycleHook{} + for _, hook := range new { + if oldHook, ok := oldSet[hook.Name]; !ok || !reflect.DeepEqual(oldHook, hook) { + changedHooks = append(changedHooks, hook) + } + } + return changedHooks +} diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/webhooks/machine_webhook.go b/vendor/github.com/openshift/machine-api-operator/pkg/webhooks/machine_webhook.go new file mode 100644 index 000000000..8f034ac5a --- /dev/null +++ b/vendor/github.com/openshift/machine-api-operator/pkg/webhooks/machine_webhook.go @@ -0,0 +1,1689 @@ +package webhooks + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "regexp" + "runtime" + "strconv" + "strings" + + osconfigv1 "github.com/openshift/api/config/v1" + machinev1 "github.com/openshift/api/machine/v1" + machinev1beta1 "github.com/openshift/api/machine/v1beta1" + osclientset "github.com/openshift/client-go/config/clientset/versioned" + "github.com/openshift/machine-api-operator/pkg/util/lifecyclehooks" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kruntime "k8s.io/apimachinery/pkg/runtime" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/klog/v2" + "k8s.io/utils/pointer" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + yaml "sigs.k8s.io/yaml" +) + +type systemSpecifications struct { + minMemoryGiB int32 + maxMemoryGiB int32 + minProcessorSharedCapped float64 + minProcessorDedicated float64 + maxProcessor float64 +} + +var ( + // Azure Defaults + defaultAzureVnet = func(clusterID string) string { + return fmt.Sprintf("%s-vnet", clusterID) + } + defaultAzureSubnet = func(clusterID string) string { + return fmt.Sprintf("%s-worker-subnet", clusterID) + } + defaultAzureNetworkResourceGroup = func(clusterID string) string { + return fmt.Sprintf("%s-rg", clusterID) + } + defaultAzureImageResourceID = func(clusterID string) string { + return fmt.Sprintf("/resourceGroups/%s/providers/Microsoft.Compute/images/%s", clusterID+"-rg", clusterID) + } + defaultAzureManagedIdentiy = func(clusterID string) string { + return fmt.Sprintf("%s-identity", clusterID) + } + defaultAzureResourceGroup = func(clusterID string) string { + return fmt.Sprintf("%s-rg", clusterID) + } + + // GCP Defaults + defaultGCPNetwork = func(clusterID string) string { + return fmt.Sprintf("%s-network", clusterID) + } + defaultGCPSubnetwork = func(clusterID string) string { + return fmt.Sprintf("%s-worker-subnet", clusterID) + } + defaultGCPTags = func(clusterID string) []string { + return 
[]string{fmt.Sprintf("%s-worker", clusterID)} + } + + // Power VS variables + + //powerVSMachineConfigurations contains the known Power VS system types and their allowed configuration limits + powerVSMachineConfigurations = map[string]systemSpecifications{ + "s922": { + minMemoryGiB: 32, + maxMemoryGiB: 942, + minProcessorSharedCapped: 0.5, + minProcessorDedicated: 1, + maxProcessor: 15, + }, + "e880": { + minMemoryGiB: 32, + maxMemoryGiB: 7463, + minProcessorSharedCapped: 0.5, + minProcessorDedicated: 1, + maxProcessor: 143, + }, + "e980": { + minMemoryGiB: 32, + maxMemoryGiB: 15307, + minProcessorSharedCapped: 0.5, + minProcessorDedicated: 1, + maxProcessor: 143, + }, + } +) + +const ( + DefaultMachineMutatingHookPath = "/mutate-machine-openshift-io-v1beta1-machine" + DefaultMachineValidatingHookPath = "/validate-machine-openshift-io-v1beta1-machine" + DefaultMachineSetMutatingHookPath = "/mutate-machine-openshift-io-v1beta1-machineset" + DefaultMachineSetValidatingHookPath = "/validate-machine-openshift-io-v1beta1-machineset" + + defaultWebhookConfigurationName = "machine-api" + defaultWebhookServiceName = "machine-api-operator-webhook" + defaultWebhookServiceNamespace = "openshift-machine-api" + defaultWebhookServicePort = 443 + + defaultUserDataSecret = "worker-user-data" + defaultSecretNamespace = "openshift-machine-api" + + // AWS Defaults + defaultAWSCredentialsSecret = "aws-cloud-credentials" + defaultAWSX86InstanceType = "m5.large" + defaultAWSARMInstanceType = "m6g.large" + + // Azure Defaults + defaultAzureVMSize = "Standard_D4s_V3" + defaultAzureCredentialsSecret = "azure-cloud-credentials" + defaultAzureOSDiskOSType = "Linux" + defaultAzureOSDiskStorageType = "Premium_LRS" + + // Azure OSDisk constants + azureMaxDiskSizeGB = 32768 + azureEphemeralStorageLocationLocal = "Local" + azureCachingTypeNone = "None" + azureCachingTypeReadOnly = "ReadOnly" + azureCachingTypeReadWrite = "ReadWrite" + + // GCP Defaults + defaultGCPMachineType = "n1-standard-4" + defaultGCPCredentialsSecret = "gcp-cloud-credentials" + defaultGCPDiskSizeGb = 128 + defaultGCPDiskType = "pd-standard" + // https://releases-art-rhcos.svc.ci.openshift.org/art/storage/releases/rhcos-4.8/48.83.202103122318-0/x86_64/meta.json + // https://github.com/openshift/installer/blob/796a99049d3b7489b6c08ec5bd7c7983731afbcf/data/data/rhcos.json#L90-L94 + defaultGCPDiskImage = "projects/rhcos-cloud/global/images/rhcos-48-83-202103221318-0-gcp-x86-64" + defaultGCPGPUCount = 1 + + // vSphere Defaults + defaultVSphereCredentialsSecret = "vsphere-cloud-credentials" + // Minimum vSphere values taken from vSphere reconciler + minVSphereCPU = 2 + minVSphereMemoryMiB = 2048 + // https://docs.openshift.com/container-platform/4.1/installing/installing_vsphere/installing-vsphere.html#minimum-resource-requirements_installing-vsphere + minVSphereDiskGiB = 120 + + // PowerVS Defaults + defaultPowerVSCredentialsSecret = "powervs-credentials" + defaultPowerVSSysType = "s922" + defaultPowerVSProcType = "Shared" + defaultPowerVSProcessor = "0.5" + defaultPowerVSMemory = 32 + powerVSServiceInstance = "serviceInstance" + powerVSNetwork = "network" + powerVSImage = "image" + powerVSSystemTypeE880 = "e880" + powerVSSystemTypeE980 = "e980" +) + +var ( + // webhookFailurePolicy is ignore so we don't want to block machine lifecycle on the webhook operational aspects. + // This would be particularly problematic for chicken egg issues when bootstrapping a cluster. 
+ webhookFailurePolicy = admissionregistrationv1.Ignore + webhookSideEffects = admissionregistrationv1.SideEffectClassNone +) + +func secretExists(c client.Client, name, namespace string) (bool, error) { + key := client.ObjectKey{ + Name: name, + Namespace: namespace, + } + obj := &corev1.Secret{} + + if err := c.Get(context.Background(), key, obj); err != nil { + if apierrors.IsNotFound(err) { + return false, nil + } + return false, err + } + return true, nil +} + +func credentialsSecretExists(c client.Client, name, namespace string) []string { + secretExists, err := secretExists(c, name, namespace) + if err != nil { + return []string{ + field.Invalid( + field.NewPath("providerSpec", "credentialsSecret"), + name, + fmt.Sprintf("failed to get credentialsSecret: %v", err), + ).Error(), + } + } + + if !secretExists { + return []string{ + field.Invalid( + field.NewPath("providerSpec", "credentialsSecret"), + name, + "not found. Expected CredentialsSecret to exist", + ).Error(), + } + } + + return []string{} +} + +func getInfra() (*osconfigv1.Infrastructure, error) { + cfg, err := ctrl.GetConfig() + if err != nil { + return nil, err + } + client, err := osclientset.NewForConfig(cfg) + if err != nil { + return nil, err + } + infra, err := client.ConfigV1().Infrastructures().Get(context.Background(), "cluster", metav1.GetOptions{}) + if err != nil { + return nil, err + } + return infra, nil +} + +func getDNS() (*osconfigv1.DNS, error) { + cfg, err := ctrl.GetConfig() + if err != nil { + return nil, err + } + client, err := osclientset.NewForConfig(cfg) + if err != nil { + return nil, err + } + dns, err := client.ConfigV1().DNSes().Get(context.Background(), "cluster", metav1.GetOptions{}) + if err != nil { + return nil, err + } + + return dns, nil +} + +type machineAdmissionFn func(m *machinev1beta1.Machine, config *admissionConfig) (bool, []string, utilerrors.Aggregate) + +type admissionConfig struct { + clusterID string + platformStatus *osconfigv1.PlatformStatus + dnsDisconnected bool + client client.Client +} + +type admissionHandler struct { + *admissionConfig + webhookOperations machineAdmissionFn + decoder *admission.Decoder +} + +// InjectDecoder injects the decoder. +func (a *admissionHandler) InjectDecoder(d *admission.Decoder) error { + a.decoder = d + return nil +} + +// machineValidatorHandler validates Machine API resources. +// implements type Handler interface. +// https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/webhook/admission#Handler +type machineValidatorHandler struct { + *admissionHandler +} + +// machineDefaulterHandler defaults Machine API resources. +// implements type Handler interface. +// https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/webhook/admission#Handler +type machineDefaulterHandler struct { + *admissionHandler +} + +// NewValidator returns a new machineValidatorHandler. 
+func NewMachineValidator(client client.Client) (*machineValidatorHandler, error) { + infra, err := getInfra() + if err != nil { + return nil, err + } + + dns, err := getDNS() + if err != nil { + return nil, err + } + + return createMachineValidator(infra, client, dns), nil +} + +func createMachineValidator(infra *osconfigv1.Infrastructure, client client.Client, dns *osconfigv1.DNS) *machineValidatorHandler { + admissionConfig := &admissionConfig{ + dnsDisconnected: dns.Spec.PublicZone == nil, + clusterID: infra.Status.InfrastructureName, + platformStatus: infra.Status.PlatformStatus, + client: client, + } + return &machineValidatorHandler{ + admissionHandler: &admissionHandler{ + admissionConfig: admissionConfig, + webhookOperations: getMachineValidatorOperation(infra.Status.PlatformStatus.Type), + }, + } +} + +func getMachineValidatorOperation(platform osconfigv1.PlatformType) machineAdmissionFn { + switch platform { + case osconfigv1.AWSPlatformType: + return validateAWS + case osconfigv1.AzurePlatformType: + return validateAzure + case osconfigv1.GCPPlatformType: + return validateGCP + case osconfigv1.VSpherePlatformType: + return validateVSphere + case osconfigv1.PowerVSPlatformType: + return validatePowerVS + default: + // just no-op + return func(m *machinev1beta1.Machine, config *admissionConfig) (bool, []string, utilerrors.Aggregate) { + return true, []string{}, nil + } + } +} + +// NewDefaulter returns a new machineDefaulterHandler. +func NewMachineDefaulter() (*machineDefaulterHandler, error) { + infra, err := getInfra() + if err != nil { + return nil, err + } + + return createMachineDefaulter(infra.Status.PlatformStatus, infra.Status.InfrastructureName), nil +} + +func createMachineDefaulter(platformStatus *osconfigv1.PlatformStatus, clusterID string) *machineDefaulterHandler { + return &machineDefaulterHandler{ + admissionHandler: &admissionHandler{ + admissionConfig: &admissionConfig{clusterID: clusterID}, + webhookOperations: getMachineDefaulterOperation(platformStatus), + }, + } +} + +func getMachineDefaulterOperation(platformStatus *osconfigv1.PlatformStatus) machineAdmissionFn { + switch platformStatus.Type { + case osconfigv1.AWSPlatformType: + region := "" + if platformStatus.AWS != nil { + region = platformStatus.AWS.Region + } + arch := runtime.GOARCH + return awsDefaulter{region: region, arch: arch}.defaultAWS + case osconfigv1.AzurePlatformType: + return defaultAzure + case osconfigv1.GCPPlatformType: + return defaultGCP + case osconfigv1.VSpherePlatformType: + return defaultVSphere + case osconfigv1.PowerVSPlatformType: + return defaultPowerVS + default: + // just no-op + return func(m *machinev1beta1.Machine, config *admissionConfig) (bool, []string, utilerrors.Aggregate) { + return true, []string{}, nil + } + } +} + +// NewValidatingWebhookConfiguration creates a validation webhook configuration with configured Machine and MachineSet webhooks +func NewValidatingWebhookConfiguration() *admissionregistrationv1.ValidatingWebhookConfiguration { + validatingWebhookConfiguration := &admissionregistrationv1.ValidatingWebhookConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: defaultWebhookConfigurationName, + Annotations: map[string]string{ + "service.beta.openshift.io/inject-cabundle": "true", + }, + }, + Webhooks: []admissionregistrationv1.ValidatingWebhook{ + MachineValidatingWebhook(), + MachineSetValidatingWebhook(), + }, + } + + // Setting group version is required for testEnv to create unstructured objects, as the new structure sets it on empty strings + // 
Usual way to populate those values, is to create the resource in the cluster first, which we can't yet do. + validatingWebhookConfiguration.SetGroupVersionKind(admissionregistrationv1.SchemeGroupVersion.WithKind("ValidatingWebhookConfiguration")) + return validatingWebhookConfiguration +} + +// MachineValidatingWebhook returns validating webhooks for machine to populate the configuration +func MachineValidatingWebhook() admissionregistrationv1.ValidatingWebhook { + serviceReference := admissionregistrationv1.ServiceReference{ + Namespace: defaultWebhookServiceNamespace, + Name: defaultWebhookServiceName, + Path: pointer.StringPtr(DefaultMachineValidatingHookPath), + Port: pointer.Int32Ptr(defaultWebhookServicePort), + } + return admissionregistrationv1.ValidatingWebhook{ + AdmissionReviewVersions: []string{"v1"}, + Name: "validation.machine.machine.openshift.io", + FailurePolicy: &webhookFailurePolicy, + SideEffects: &webhookSideEffects, + ClientConfig: admissionregistrationv1.WebhookClientConfig{ + Service: &serviceReference, + }, + Rules: []admissionregistrationv1.RuleWithOperations{ + { + Rule: admissionregistrationv1.Rule{ + APIGroups: []string{machinev1beta1.GroupName}, + APIVersions: []string{machinev1beta1.SchemeGroupVersion.Version}, + Resources: []string{"machines"}, + }, + Operations: []admissionregistrationv1.OperationType{ + admissionregistrationv1.Create, + admissionregistrationv1.Update, + }, + }, + }, + } +} + +// MachineSetValidatingWebhook returns validating webhooks for machineSet to populate the configuration +func MachineSetValidatingWebhook() admissionregistrationv1.ValidatingWebhook { + machinesetServiceReference := admissionregistrationv1.ServiceReference{ + Namespace: defaultWebhookServiceNamespace, + Name: defaultWebhookServiceName, + Path: pointer.StringPtr(DefaultMachineSetValidatingHookPath), + Port: pointer.Int32Ptr(defaultWebhookServicePort), + } + return admissionregistrationv1.ValidatingWebhook{ + AdmissionReviewVersions: []string{"v1"}, + Name: "validation.machineset.machine.openshift.io", + FailurePolicy: &webhookFailurePolicy, + SideEffects: &webhookSideEffects, + ClientConfig: admissionregistrationv1.WebhookClientConfig{ + Service: &machinesetServiceReference, + }, + Rules: []admissionregistrationv1.RuleWithOperations{ + { + Rule: admissionregistrationv1.Rule{ + APIGroups: []string{machinev1beta1.GroupName}, + APIVersions: []string{machinev1beta1.SchemeGroupVersion.Version}, + Resources: []string{"machinesets"}, + }, + Operations: []admissionregistrationv1.OperationType{ + admissionregistrationv1.Create, + admissionregistrationv1.Update, + }, + }, + }, + } +} + +// NewMutatingWebhookConfiguration creates a mutating webhook configuration with configured Machine and MachineSet webhooks +func NewMutatingWebhookConfiguration() *admissionregistrationv1.MutatingWebhookConfiguration { + mutatingWebhookConfiguration := &admissionregistrationv1.MutatingWebhookConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: defaultWebhookConfigurationName, + Annotations: map[string]string{ + "service.beta.openshift.io/inject-cabundle": "true", + }, + }, + Webhooks: []admissionregistrationv1.MutatingWebhook{ + MachineMutatingWebhook(), + MachineSetMutatingWebhook(), + }, + } + + // Setting group version is required for testEnv to create unstructured objects, as the new structure sets it on empty strings + // Usual way to populate those values, is to create the resource in the cluster first, which we can't yet do. 
+ mutatingWebhookConfiguration.SetGroupVersionKind(admissionregistrationv1.SchemeGroupVersion.WithKind("MutatingWebhookConfiguration")) + return mutatingWebhookConfiguration +} + +// MachineMutatingWebhook returns mutating webhooks for machine to apply in configuration +func MachineMutatingWebhook() admissionregistrationv1.MutatingWebhook { + machineServiceReference := admissionregistrationv1.ServiceReference{ + Namespace: defaultWebhookServiceNamespace, + Name: defaultWebhookServiceName, + Path: pointer.StringPtr(DefaultMachineMutatingHookPath), + Port: pointer.Int32Ptr(defaultWebhookServicePort), + } + return admissionregistrationv1.MutatingWebhook{ + AdmissionReviewVersions: []string{"v1"}, + Name: "default.machine.machine.openshift.io", + FailurePolicy: &webhookFailurePolicy, + SideEffects: &webhookSideEffects, + ClientConfig: admissionregistrationv1.WebhookClientConfig{ + Service: &machineServiceReference, + }, + Rules: []admissionregistrationv1.RuleWithOperations{ + { + Rule: admissionregistrationv1.Rule{ + APIGroups: []string{machinev1beta1.GroupName}, + APIVersions: []string{machinev1beta1.SchemeGroupVersion.Version}, + Resources: []string{"machines"}, + }, + Operations: []admissionregistrationv1.OperationType{ + admissionregistrationv1.Create, + }, + }, + }, + } +} + +// MachineSetMutatingWebhook returns mutating webhook for machineSet to apply in configuration +func MachineSetMutatingWebhook() admissionregistrationv1.MutatingWebhook { + machineSetServiceReference := admissionregistrationv1.ServiceReference{ + Namespace: defaultWebhookServiceNamespace, + Name: defaultWebhookServiceName, + Path: pointer.StringPtr(DefaultMachineSetMutatingHookPath), + Port: pointer.Int32Ptr(defaultWebhookServicePort), + } + return admissionregistrationv1.MutatingWebhook{ + AdmissionReviewVersions: []string{"v1"}, + Name: "default.machineset.machine.openshift.io", + FailurePolicy: &webhookFailurePolicy, + SideEffects: &webhookSideEffects, + ClientConfig: admissionregistrationv1.WebhookClientConfig{ + Service: &machineSetServiceReference, + }, + Rules: []admissionregistrationv1.RuleWithOperations{ + { + Rule: admissionregistrationv1.Rule{ + APIGroups: []string{machinev1beta1.GroupName}, + APIVersions: []string{machinev1beta1.SchemeGroupVersion.Version}, + Resources: []string{"machinesets"}, + }, + Operations: []admissionregistrationv1.OperationType{ + admissionregistrationv1.Create, + }, + }, + }, + } +} + +func (h *machineValidatorHandler) validateMachine(m, oldM *machinev1beta1.Machine) (bool, []string, utilerrors.Aggregate) { + // Skip validation if we just remove the finalizer. + // For more information: https://issues.redhat.com/browse/OCPCLOUD-1426 + if !m.DeletionTimestamp.IsZero() { + isFinalizerOnly, err := isFinalizerOnlyRemoval(m, oldM) + if err != nil { + return false, nil, utilerrors.NewAggregate([]error{err}) + } + if isFinalizerOnly { + return true, nil, nil + } + } + + errs := validateMachineLifecycleHooks(m, oldM) + + ok, warnings, err := h.webhookOperations(m, h.admissionConfig) + if !ok { + errs = append(errs, err.Errors()...) + } + + if len(errs) > 0 { + return false, warnings, utilerrors.NewAggregate(errs) + } + return true, warnings, nil +} + +// Handle handles HTTP requests for admission webhook servers. 
+func (h *machineValidatorHandler) Handle(ctx context.Context, req admission.Request) admission.Response { + m := &machinev1beta1.Machine{} + + if err := h.decoder.Decode(req, m); err != nil { + return admission.Errored(http.StatusBadRequest, err) + } + + var oldM *machinev1beta1.Machine + if len(req.OldObject.Raw) > 0 { + // oldM must only be initialised if there is an old object (ie on UPDATE or DELETE). + // It should be nil otherwise to allow skipping certain validations that rely on + // the presence of the old object. + oldM = &machinev1beta1.Machine{} + if err := h.decoder.DecodeRaw(req.OldObject, oldM); err != nil { + return admission.Errored(http.StatusBadRequest, err) + } + } + + klog.V(3).Infof("Validate webhook called for Machine: %s", m.GetName()) + + ok, warnings, errs := h.validateMachine(m, oldM) + if !ok { + return admission.Denied(errs.Error()).WithWarnings(warnings...) + } + + return admission.Allowed("Machine valid").WithWarnings(warnings...) +} + +// Handle handles HTTP requests for admission webhook servers. +func (h *machineDefaulterHandler) Handle(ctx context.Context, req admission.Request) admission.Response { + m := &machinev1beta1.Machine{} + + if err := h.decoder.Decode(req, m); err != nil { + return admission.Errored(http.StatusBadRequest, err) + } + + klog.V(3).Infof("Mutate webhook called for Machine: %s", m.GetName()) + + // Only enforce the clusterID if it's not set. + // Otherwise a discrepancy on the value would leave the machine orphan + // and would trigger a new machine creation by the machineSet. + // https://bugzilla.redhat.com/show_bug.cgi?id=1857175 + if m.Labels == nil { + m.Labels = make(map[string]string) + } + if _, ok := m.Labels[machinev1beta1.MachineClusterIDLabel]; !ok { + m.Labels[machinev1beta1.MachineClusterIDLabel] = h.clusterID + } + + ok, warnings, errs := h.webhookOperations(m, h.admissionConfig) + if !ok { + return admission.Denied(errs.Error()).WithWarnings(warnings...) + } + + marshaledMachine, err := json.Marshal(m) + if err != nil { + return admission.Errored(http.StatusInternalServerError, err).WithWarnings(warnings...) + } + return admission.PatchResponseFromRaw(req.Object.Raw, marshaledMachine).WithWarnings(warnings...) 
+} + +type awsDefaulter struct { + region string + arch string +} + +func (a awsDefaulter) defaultAWS(m *machinev1beta1.Machine, config *admissionConfig) (bool, []string, utilerrors.Aggregate) { + klog.V(3).Infof("Defaulting AWS providerSpec") + + var errs []error + var warnings []string + providerSpec := new(machinev1beta1.AWSMachineProviderConfig) + if err := unmarshalInto(m, providerSpec); err != nil { + errs = append(errs, err) + return false, warnings, utilerrors.NewAggregate(errs) + } + + if providerSpec.InstanceType == "" { + if a.arch == "arm64" { + providerSpec.InstanceType = defaultAWSARMInstanceType + } else { + providerSpec.InstanceType = defaultAWSX86InstanceType + } + } + + if providerSpec.Placement.Region == "" { + providerSpec.Placement.Region = a.region + } + + if providerSpec.UserDataSecret == nil { + providerSpec.UserDataSecret = &corev1.LocalObjectReference{Name: defaultUserDataSecret} + } + + if providerSpec.CredentialsSecret == nil { + providerSpec.CredentialsSecret = &corev1.LocalObjectReference{Name: defaultAWSCredentialsSecret} + } + + rawBytes, err := json.Marshal(providerSpec) + if err != nil { + errs = append(errs, err) + } + + if len(errs) > 0 { + return false, warnings, utilerrors.NewAggregate(errs) + } + + m.Spec.ProviderSpec.Value = &kruntime.RawExtension{Raw: rawBytes} + return true, warnings, nil +} + +func unmarshalInto(m *machinev1beta1.Machine, providerSpec interface{}) error { + if m.Spec.ProviderSpec.Value == nil { + return field.Required(field.NewPath("providerSpec", "value"), "a value must be provided") + } + + if err := yaml.Unmarshal(m.Spec.ProviderSpec.Value.Raw, &providerSpec); err != nil { + return field.Invalid(field.NewPath("providerSpec", "value"), providerSpec, err.Error()) + } + return nil +} + +func validateAWS(m *machinev1beta1.Machine, config *admissionConfig) (bool, []string, utilerrors.Aggregate) { + klog.V(3).Infof("Validating AWS providerSpec") + + var errs []error + var warnings []string + providerSpec := new(machinev1beta1.AWSMachineProviderConfig) + if err := unmarshalInto(m, providerSpec); err != nil { + errs = append(errs, err) + return false, warnings, utilerrors.NewAggregate(errs) + } + + if providerSpec.AMI.ID == nil { + errs = append( + errs, + field.Required( + field.NewPath("providerSpec", "ami"), + "expected providerSpec.ami.id to be populated", + ), + ) + } + + if providerSpec.AMI.ARN != nil { + warnings = append( + warnings, + "can't use providerSpec.ami.arn, only providerSpec.ami.id can be used to reference AMI", + ) + } + + if providerSpec.AMI.Filters != nil { + warnings = append( + warnings, + "can't use providerSpec.ami.filters, only providerSpec.ami.id can be used to reference AMI", + ) + } + + if providerSpec.Placement.Region == "" { + errs = append( + errs, + field.Required( + field.NewPath("providerSpec", "placement", "region"), + "expected providerSpec.placement.region to be populated", + ), + ) + } + + if providerSpec.InstanceType == "" { + errs = append( + errs, + field.Required( + field.NewPath("providerSpec", "instanceType"), + "expected providerSpec.instanceType to be populated", + ), + ) + } + + if providerSpec.UserDataSecret == nil { + errs = append( + errs, + field.Required( + field.NewPath("providerSpec", "userDataSecret"), + "expected providerSpec.userDataSecret to be populated", + ), + ) + } + + if providerSpec.CredentialsSecret == nil { + errs = append( + errs, + field.Required( + field.NewPath("providerSpec", "credentialsSecret"), + "expected providerSpec.credentialsSecret to be populated", + ), + 
) + } else { + warnings = append(warnings, credentialsSecretExists(config.client, providerSpec.CredentialsSecret.Name, m.GetNamespace())...) + } + + if providerSpec.Subnet.ARN == nil && providerSpec.Subnet.ID == nil && providerSpec.Subnet.Filters == nil { + warnings = append( + warnings, + "providerSpec.subnet: No subnet has been provided. Instances may be created in an unexpected subnet and may not join the cluster.", + ) + } + + if providerSpec.IAMInstanceProfile == nil { + warnings = append(warnings, "providerSpec.iamInstanceProfile: no IAM instance profile provided: nodes may be unable to join the cluster") + } + + // TODO(alberto): Validate providerSpec.BlockDevices. + // https://github.com/openshift/cluster-api-provider-aws/pull/299#discussion_r433920532 + + switch providerSpec.Placement.Tenancy { + case "", machinev1beta1.DefaultTenancy, machinev1beta1.DedicatedTenancy, machinev1beta1.HostTenancy: + // Do nothing, valid values + default: + errs = append( + errs, + field.Invalid( + field.NewPath("providerSpec", "tenancy"), + providerSpec.Placement.Tenancy, + fmt.Sprintf("Invalid providerSpec.tenancy, the only allowed options are: %s, %s, %s", machinev1beta1.DefaultTenancy, machinev1beta1.DedicatedTenancy, machinev1beta1.HostTenancy), + ), + ) + } + + duplicatedTags := getDuplicatedTags(providerSpec.Tags) + if len(duplicatedTags) > 0 { + warnings = append(warnings, fmt.Sprintf("providerSpec.tags: duplicated tag names (%s): only the first value will be used.", strings.Join(duplicatedTags, ","))) + } + + switch providerSpec.NetworkInterfaceType { + case "", machinev1beta1.AWSENANetworkInterfaceType, machinev1beta1.AWSEFANetworkInterfaceType: + // Do nothing, valid values + default: + errs = append( + errs, + field.Invalid( + field.NewPath("providerSpec", "networkInterfaceType"), + providerSpec.NetworkInterfaceType, + fmt.Sprintf("Valid values are: %s, %s and omitted", machinev1beta1.AWSENANetworkInterfaceType, machinev1beta1.AWSEFANetworkInterfaceType), + ), + ) + } + + switch providerSpec.MetadataServiceOptions.Authentication { + case "", machinev1beta1.MetadataServiceAuthenticationOptional, machinev1beta1.MetadataServiceAuthenticationRequired: + // Valid values + default: + errs = append( + errs, + field.Invalid( + field.NewPath("providerSpec", "metadataServiceOptions", "authentication"), + providerSpec.MetadataServiceOptions.Authentication, + fmt.Sprintf("Allowed values are either '%s' or '%s'", machinev1beta1.MetadataServiceAuthenticationOptional, machinev1beta1.MetadataServiceAuthenticationRequired), + ), + ) + } + + if len(errs) > 0 { + return false, warnings, utilerrors.NewAggregate(errs) + } + + return true, warnings, nil +} + +// getDuplicatedTags iterates through the AWS TagSpecifications +// to determine if any tag Name is duplicated within the list. +// A list of duplicated names will be returned. +func getDuplicatedTags(tagSpecs []machinev1beta1.TagSpecification) []string { + tagNames := map[string]int{} + duplicatedTags := []string{} + for _, spec := range tagSpecs { + tagNames[spec.Name] += 1 + // Only append the duplicated tag on the second occurrence to prevent it + // being listed multiple times when there are more than 2 occurrences. 
+ if tagNames[spec.Name] == 2 { + duplicatedTags = append(duplicatedTags, spec.Name) + } + } + return duplicatedTags +} + +func defaultAzure(m *machinev1beta1.Machine, config *admissionConfig) (bool, []string, utilerrors.Aggregate) { + klog.V(3).Infof("Defaulting Azure providerSpec") + + var errs []error + var warnings []string + providerSpec := new(machinev1beta1.AzureMachineProviderSpec) + if err := unmarshalInto(m, providerSpec); err != nil { + errs = append(errs, err) + return false, warnings, utilerrors.NewAggregate(errs) + } + + if providerSpec.VMSize == "" { + providerSpec.VMSize = defaultAzureVMSize + } + + // Vnet and Subnet need to be provided together by the user + if providerSpec.Vnet == "" && providerSpec.Subnet == "" { + providerSpec.Vnet = defaultAzureVnet(config.clusterID) + providerSpec.Subnet = defaultAzureSubnet(config.clusterID) + } + + if providerSpec.Image == (machinev1beta1.Image{}) { + providerSpec.Image.ResourceID = defaultAzureImageResourceID(config.clusterID) + } + + if providerSpec.UserDataSecret == nil { + providerSpec.UserDataSecret = &corev1.SecretReference{Name: defaultUserDataSecret} + } else if providerSpec.UserDataSecret.Name == "" { + providerSpec.UserDataSecret.Name = defaultUserDataSecret + } + + if providerSpec.CredentialsSecret == nil { + providerSpec.CredentialsSecret = &corev1.SecretReference{Name: defaultAzureCredentialsSecret, Namespace: defaultSecretNamespace} + } else { + if providerSpec.CredentialsSecret.Namespace == "" { + providerSpec.CredentialsSecret.Namespace = defaultSecretNamespace + } + if providerSpec.CredentialsSecret.Name == "" { + providerSpec.CredentialsSecret.Name = defaultAzureCredentialsSecret + } + } + + rawBytes, err := json.Marshal(providerSpec) + if err != nil { + errs = append(errs, err) + } + + if len(errs) > 0 { + return false, warnings, utilerrors.NewAggregate(errs) + } + + m.Spec.ProviderSpec.Value = &kruntime.RawExtension{Raw: rawBytes} + return true, warnings, nil +} + +func validateAzure(m *machinev1beta1.Machine, config *admissionConfig) (bool, []string, utilerrors.Aggregate) { + klog.V(3).Infof("Validating Azure providerSpec") + + var errs []error + var warnings []string + providerSpec := new(machinev1beta1.AzureMachineProviderSpec) + if err := unmarshalInto(m, providerSpec); err != nil { + errs = append(errs, err) + return false, warnings, utilerrors.NewAggregate(errs) + } + + if providerSpec.VMSize == "" { + errs = append(errs, field.Required(field.NewPath("providerSpec", "vmSize"), "vmSize should be set to one of the supported Azure VM sizes")) + } + + if providerSpec.PublicIP && config.dnsDisconnected { + errs = append(errs, field.Forbidden(field.NewPath("providerSpec", "publicIP"), "publicIP is not allowed in Azure disconnected installation")) + } + // Vnet requires Subnet + if providerSpec.Vnet != "" && providerSpec.Subnet == "" { + errs = append(errs, field.Required(field.NewPath("providerSpec", "subnet"), "must provide a subnet when a virtual network is specified")) + } + + // Subnet requires Vnet + if providerSpec.Subnet != "" && providerSpec.Vnet == "" { + errs = append(errs, field.Required(field.NewPath("providerSpec", "vnet"), "must provide a virtual network when supplying subnets")) + } + + errs = append(errs, validateAzureImage(providerSpec.Image)...) 
+ + if providerSpec.UserDataSecret == nil { + errs = append(errs, field.Required(field.NewPath("providerSpec", "userDataSecret"), "userDataSecret must be provided")) + } else if providerSpec.UserDataSecret.Name == "" { + errs = append(errs, field.Required(field.NewPath("providerSpec", "userDataSecret", "name"), "name must be provided")) + } + + if providerSpec.CredentialsSecret == nil { + errs = append(errs, field.Required(field.NewPath("providerSpec", "credentialsSecret"), "credentialsSecret must be provided")) + } else { + if providerSpec.CredentialsSecret.Namespace == "" { + errs = append(errs, field.Required(field.NewPath("providerSpec", "credentialsSecret", "namespace"), "namespace must be provided")) + } + if providerSpec.CredentialsSecret.Name == "" { + errs = append(errs, field.Required(field.NewPath("providerSpec", "credentialsSecret", "name"), "name must be provided")) + } + if providerSpec.CredentialsSecret.Name != "" && providerSpec.CredentialsSecret.Namespace != "" { + warnings = append(warnings, credentialsSecretExists(config.client, providerSpec.CredentialsSecret.Name, providerSpec.CredentialsSecret.Namespace)...) + } + } + + if providerSpec.OSDisk.DiskSizeGB <= 0 || providerSpec.OSDisk.DiskSizeGB >= azureMaxDiskSizeGB { + errs = append(errs, field.Invalid(field.NewPath("providerSpec", "osDisk", "diskSizeGB"), providerSpec.OSDisk.DiskSizeGB, "diskSizeGB must be greater than zero and less than 32768")) + } + + if providerSpec.OSDisk.DiskSettings.EphemeralStorageLocation != azureEphemeralStorageLocationLocal && providerSpec.OSDisk.DiskSettings.EphemeralStorageLocation != "" { + errs = append(errs, field.Invalid(field.NewPath("providerSpec", "osDisk", "diskSettings", "ephemeralStorageLocation"), providerSpec.OSDisk.DiskSettings.EphemeralStorageLocation, + fmt.Sprintf("osDisk.diskSettings.ephemeralStorageLocation can either be omitted or set to %s", azureEphemeralStorageLocationLocal))) + } + + switch providerSpec.OSDisk.CachingType { + case azureCachingTypeNone, azureCachingTypeReadOnly, azureCachingTypeReadWrite, "": + // Valid scenarios, do nothing + default: + errs = append(errs, field.Invalid(field.NewPath("providerSpec", "osDisk", "cachingType"), providerSpec.OSDisk.CachingType, + fmt.Sprintf("osDisk.cachingType can be only %s, %s, %s or omitted", azureCachingTypeNone, azureCachingTypeReadOnly, azureCachingTypeReadWrite))) + } + + if providerSpec.OSDisk.DiskSettings.EphemeralStorageLocation == azureEphemeralStorageLocationLocal && providerSpec.OSDisk.CachingType != azureCachingTypeReadOnly { + errs = append(errs, field.Invalid(field.NewPath("providerSpec", "osDisk", "cachingType"), providerSpec.OSDisk.CachingType, "Instances using an ephemeral OS disk support only Readonly caching")) + } + + switch providerSpec.UltraSSDCapability { + case machinev1beta1.AzureUltraSSDCapabilityEnabled, machinev1beta1.AzureUltraSSDCapabilityDisabled, "": + // Valid scenarios, do nothing + default: + errs = append(errs, field.Invalid(field.NewPath("providerSpec", "ultraSSDCapability"), providerSpec.UltraSSDCapability, + fmt.Sprintf("ultraSSDCapability can be only %s, %s or omitted", machinev1beta1.AzureUltraSSDCapabilityEnabled, machinev1beta1.AzureUltraSSDCapabilityDisabled))) + } + + errs = append(errs, validateAzureDataDisks(m.Name, providerSpec, field.NewPath("providerSpec", "dataDisks"))...) 
+ + if isAzureGovCloud(config.platformStatus) && providerSpec.SpotVMOptions != nil { + warnings = append(warnings, "spot VMs may not be supported when using GovCloud region") + } + + if len(errs) > 0 { + return false, warnings, utilerrors.NewAggregate(errs) + } + return true, warnings, nil +} + +func validateAzureImage(image machinev1beta1.Image) []error { + errors := []error{} + if image == (machinev1beta1.Image{}) { + return append(errors, field.Required(field.NewPath("providerSpec", "image"), "an image reference must be provided")) + } + + if image.ResourceID != "" { + if image != (machinev1beta1.Image{ResourceID: image.ResourceID}) { + return append(errors, field.Required(field.NewPath("providerSpec", "image", "resourceID"), "resourceID is already specified, other fields such as [Offer, Publisher, SKU, Version] should not be set")) + } + return errors + } + + // Resource ID not provided, so Offer, Publisher, SKU and Version are required + if image.Offer == "" { + errors = append(errors, field.Required(field.NewPath("providerSpec", "image", "Offer"), "Offer must be provided")) + } + if image.Publisher == "" { + errors = append(errors, field.Required(field.NewPath("providerSpec", "image", "Publisher"), "Publisher must be provided")) + } + if image.SKU == "" { + errors = append(errors, field.Required(field.NewPath("providerSpec", "image", "SKU"), "SKU must be provided")) + } + if image.Version == "" { + errors = append(errors, field.Required(field.NewPath("providerSpec", "image", "Version"), "Version must be provided")) + } + + return errors +} + +func defaultGCP(m *machinev1beta1.Machine, config *admissionConfig) (bool, []string, utilerrors.Aggregate) { + klog.V(3).Infof("Defaulting GCP providerSpec") + + var errs []error + var warnings []string + providerSpec := new(machinev1beta1.GCPMachineProviderSpec) + if err := unmarshalInto(m, providerSpec); err != nil { + errs = append(errs, err) + return false, warnings, utilerrors.NewAggregate(errs) + } + + if providerSpec.MachineType == "" { + providerSpec.MachineType = defaultGCPMachineType + } + + if len(providerSpec.NetworkInterfaces) == 0 { + providerSpec.NetworkInterfaces = append(providerSpec.NetworkInterfaces, &machinev1beta1.GCPNetworkInterface{ + Network: defaultGCPNetwork(config.clusterID), + Subnetwork: defaultGCPSubnetwork(config.clusterID), + }) + } + + providerSpec.Disks = defaultGCPDisks(providerSpec.Disks, config.clusterID) + + if len(providerSpec.GPUs) != 0 { + // In case Count was not set it should default to 1, since there is no valid reason for it to be purposely set to 0. 
+ if providerSpec.GPUs[0].Count == 0 { + providerSpec.GPUs[0].Count = defaultGCPGPUCount + } + } + + if len(providerSpec.Tags) == 0 { + providerSpec.Tags = defaultGCPTags(config.clusterID) + } + + if providerSpec.UserDataSecret == nil { + providerSpec.UserDataSecret = &corev1.LocalObjectReference{Name: defaultUserDataSecret} + } + + if providerSpec.CredentialsSecret == nil { + providerSpec.CredentialsSecret = &corev1.LocalObjectReference{Name: defaultGCPCredentialsSecret} + } + + rawBytes, err := json.Marshal(providerSpec) + if err != nil { + errs = append(errs, err) + } + + if len(errs) > 0 { + return false, warnings, utilerrors.NewAggregate(errs) + } + + m.Spec.ProviderSpec.Value = &kruntime.RawExtension{Raw: rawBytes} + return true, warnings, nil +} + +func defaultGCPDisks(disks []*machinev1beta1.GCPDisk, clusterID string) []*machinev1beta1.GCPDisk { + if len(disks) == 0 { + return []*machinev1beta1.GCPDisk{ + { + AutoDelete: true, + Boot: true, + SizeGB: defaultGCPDiskSizeGb, + Type: defaultGCPDiskType, + Image: defaultGCPDiskImage, + }, + } + } + + for _, disk := range disks { + if disk.Type == "" { + disk.Type = defaultGCPDiskType + } + + if disk.Image == "" { + disk.Image = defaultGCPDiskImage + } + } + + return disks +} + +func validateGCP(m *machinev1beta1.Machine, config *admissionConfig) (bool, []string, utilerrors.Aggregate) { + klog.V(3).Infof("Validating GCP providerSpec") + + var errs []error + var warnings []string + providerSpec := new(machinev1beta1.GCPMachineProviderSpec) + if err := unmarshalInto(m, providerSpec); err != nil { + errs = append(errs, err) + return false, warnings, utilerrors.NewAggregate(errs) + } + + if providerSpec.Region == "" { + errs = append(errs, field.Required(field.NewPath("providerSpec", "region"), "region is required")) + } + + if !strings.HasPrefix(providerSpec.Zone, providerSpec.Region) { + errs = append(errs, field.Invalid(field.NewPath("providerSpec", "zone"), providerSpec.Zone, fmt.Sprintf("zone not in configured region (%s)", providerSpec.Region))) + } + + if providerSpec.MachineType == "" { + errs = append(errs, field.Required(field.NewPath("providerSpec", "machineType"), "machineType should be set to one of the supported GCP machine types")) + } + + if providerSpec.OnHostMaintenance != "" && providerSpec.OnHostMaintenance != machinev1beta1.MigrateHostMaintenanceType && providerSpec.OnHostMaintenance != machinev1beta1.TerminateHostMaintenanceType { + errs = append(errs, field.Invalid(field.NewPath("providerSpec", "onHostMaintenance"), providerSpec.OnHostMaintenance, fmt.Sprintf("onHostMaintenance must be either %s or %s.", machinev1beta1.MigrateHostMaintenanceType, machinev1beta1.TerminateHostMaintenanceType))) + } + + if providerSpec.RestartPolicy != "" && providerSpec.RestartPolicy != machinev1beta1.RestartPolicyAlways && providerSpec.RestartPolicy != machinev1beta1.RestartPolicyNever { + errs = append(errs, field.Invalid(field.NewPath("providerSpec", "restartPolicy"), providerSpec.RestartPolicy, fmt.Sprintf("restartPolicy must be either %s or %s.", machinev1beta1.RestartPolicyNever, machinev1beta1.RestartPolicyAlways))) + } + + if len(providerSpec.GPUs) != 0 || strings.HasPrefix(providerSpec.MachineType, "a2-") { + if providerSpec.OnHostMaintenance == machinev1beta1.MigrateHostMaintenanceType { + errs = append(errs, field.Forbidden(field.NewPath("providerSpec", "onHostMaintenance"), fmt.Sprintf("When GPUs are specified or using machineType with pre-attached GPUs(A2 machine family), onHostMaintenance must be set to %s.", 
machinev1beta1.TerminateHostMaintenanceType))) + } + } + + errs = append(errs, validateGCPNetworkInterfaces(providerSpec.NetworkInterfaces, field.NewPath("providerSpec", "networkInterfaces"))...) + errs = append(errs, validateGCPDisks(providerSpec.Disks, field.NewPath("providerSpec", "disks"))...) + errs = append(errs, validateGCPGPUs(providerSpec.GPUs, field.NewPath("providerSpec", "gpus"), providerSpec.MachineType)...) + + if len(providerSpec.ServiceAccounts) == 0 { + warnings = append(warnings, "providerSpec.serviceAccounts: no service account provided: nodes may be unable to join the cluster") + } else { + errs = append(errs, validateGCPServiceAccounts(providerSpec.ServiceAccounts, field.NewPath("providerSpec", "serviceAccounts"))...) + } + + if providerSpec.UserDataSecret == nil { + errs = append(errs, field.Required(field.NewPath("providerSpec", "userDataSecret"), "userDataSecret must be provided")) + } else { + if providerSpec.UserDataSecret.Name == "" { + errs = append(errs, field.Required(field.NewPath("providerSpec", "userDataSecret", "name"), "name must be provided")) + } + } + + if providerSpec.CredentialsSecret == nil { + errs = append(errs, field.Required(field.NewPath("providerSpec", "credentialsSecret"), "credentialsSecret must be provided")) + } else { + if providerSpec.CredentialsSecret.Name == "" { + errs = append(errs, field.Required(field.NewPath("providerSpec", "credentialsSecret", "name"), "name must be provided")) + } else { + warnings = append(warnings, credentialsSecretExists(config.client, providerSpec.CredentialsSecret.Name, m.GetNamespace())...) + } + } + + if len(errs) > 0 { + return false, warnings, utilerrors.NewAggregate(errs) + } + return true, warnings, nil +} + +func validateGCPNetworkInterfaces(networkInterfaces []*machinev1beta1.GCPNetworkInterface, parentPath *field.Path) []error { + if len(networkInterfaces) == 0 { + return []error{field.Required(parentPath, "at least 1 network interface is required")} + } + + var errs []error + for i, ni := range networkInterfaces { + fldPath := parentPath.Index(i) + + if ni.Network == "" { + errs = append(errs, field.Required(fldPath.Child("network"), "network is required")) + } + + if ni.Subnetwork == "" { + errs = append(errs, field.Required(fldPath.Child("subnetwork"), "subnetwork is required")) + } + } + + return errs +} + +func validateGCPDisks(disks []*machinev1beta1.GCPDisk, parentPath *field.Path) []error { + if len(disks) == 0 { + return []error{field.Required(parentPath, "at least 1 disk is required")} + } + + var errs []error + for i, disk := range disks { + fldPath := parentPath.Index(i) + + if disk.SizeGB != 0 { + if disk.SizeGB < 16 { + errs = append(errs, field.Invalid(fldPath.Child("sizeGb"), disk.SizeGB, "must be at least 16GB in size")) + } else if disk.SizeGB > 65536 { + errs = append(errs, field.Invalid(fldPath.Child("sizeGb"), disk.SizeGB, "exceeding maximum GCP disk size limit, must be below 65536")) + } + } + + if disk.Type != "" { + diskTypes := sets.NewString("pd-standard", "pd-ssd", "pd-balanced") + if !diskTypes.Has(disk.Type) { + errs = append(errs, field.NotSupported(fldPath.Child("type"), disk.Type, diskTypes.List())) + } + } + } + + return errs +} + +func validateGCPGPUs(guestAccelerators []machinev1beta1.GCPGPUConfig, parentPath *field.Path, machineType string) []error { + var errs []error + if len(guestAccelerators) > 1 { + errs = append(errs, field.TooMany(parentPath, len(guestAccelerators), 1)) + } else if len(guestAccelerators) == 1 { + accelerator := guestAccelerators[0] + if 
accelerator.Type == "" { + errs = append(errs, field.Required(parentPath.Child("Type"), "Type is required")) + } + + if accelerator.Type == "nvidia-tesla-a100" { + errs = append(errs, field.Invalid(parentPath.Child("Type"), accelerator.Type, " nvidia-tesla-a100 gpus, are only attached to the A2 machine types")) + } + + if strings.HasPrefix(machineType, "a2-") { + errs = append(errs, field.Invalid(parentPath, accelerator.Type, "A2 machine types have already attached gpus, additional gpus cannot be specified")) + } + } + return errs +} + +func validateGCPServiceAccounts(serviceAccounts []machinev1beta1.GCPServiceAccount, parentPath *field.Path) []error { + if len(serviceAccounts) != 1 { + return []error{field.Invalid(parentPath, fmt.Sprintf("%d service accounts supplied", len(serviceAccounts)), "exactly 1 service account must be supplied")} + } + + var errs []error + for i, serviceAccount := range serviceAccounts { + fldPath := parentPath.Index(i) + + if serviceAccount.Email == "" { + errs = append(errs, field.Required(fldPath.Child("email"), "email is required")) + } + + if len(serviceAccount.Scopes) == 0 { + errs = append(errs, field.Required(fldPath.Child("scopes"), "at least 1 scope is required")) + } + } + return errs +} + +func defaultVSphere(m *machinev1beta1.Machine, config *admissionConfig) (bool, []string, utilerrors.Aggregate) { + klog.V(3).Infof("Defaulting vSphere providerSpec") + + var errs []error + var warnings []string + providerSpec := new(machinev1beta1.VSphereMachineProviderSpec) + if err := unmarshalInto(m, providerSpec); err != nil { + errs = append(errs, err) + return false, warnings, utilerrors.NewAggregate(errs) + } + + if providerSpec.UserDataSecret == nil { + providerSpec.UserDataSecret = &corev1.LocalObjectReference{Name: defaultUserDataSecret} + } + + if providerSpec.CredentialsSecret == nil { + providerSpec.CredentialsSecret = &corev1.LocalObjectReference{Name: defaultVSphereCredentialsSecret} + } + + rawBytes, err := json.Marshal(providerSpec) + if err != nil { + errs = append(errs, err) + } + + if len(errs) > 0 { + return false, warnings, utilerrors.NewAggregate(errs) + } + + m.Spec.ProviderSpec.Value = &kruntime.RawExtension{Raw: rawBytes} + return true, warnings, nil +} + +func validateVSphere(m *machinev1beta1.Machine, config *admissionConfig) (bool, []string, utilerrors.Aggregate) { + klog.V(3).Infof("Validating vSphere providerSpec") + + var errs []error + var warnings []string + providerSpec := new(machinev1beta1.VSphereMachineProviderSpec) + if err := unmarshalInto(m, providerSpec); err != nil { + errs = append(errs, err) + return false, warnings, utilerrors.NewAggregate(errs) + } + + if providerSpec.Template == "" { + errs = append(errs, field.Required(field.NewPath("providerSpec", "template"), "template must be provided")) + } + + workspaceWarnings, workspaceErrors := validateVSphereWorkspace(providerSpec.Workspace, field.NewPath("providerSpec", "workspace")) + warnings = append(warnings, workspaceWarnings...) + errs = append(errs, workspaceErrors...) + + errs = append(errs, validateVSphereNetwork(providerSpec.Network, field.NewPath("providerSpec", "network"))...) 
+ + if providerSpec.NumCPUs < minVSphereCPU { + warnings = append(warnings, fmt.Sprintf("providerSpec.numCPUs: %d is missing or less than the minimum value (%d): nodes may not boot correctly", providerSpec.NumCPUs, minVSphereCPU)) + } + if providerSpec.MemoryMiB < minVSphereMemoryMiB { + warnings = append(warnings, fmt.Sprintf("providerSpec.memoryMiB: %d is missing or less than the recommended minimum value (%d): nodes may not boot correctly", providerSpec.MemoryMiB, minVSphereMemoryMiB)) + } + if providerSpec.DiskGiB < minVSphereDiskGiB { + warnings = append(warnings, fmt.Sprintf("providerSpec.diskGiB: %d is missing or less than the recommended minimum (%d): nodes may fail to start if disk size is too low", providerSpec.DiskGiB, minVSphereDiskGiB)) + } + if providerSpec.CloneMode == machinev1beta1.LinkedClone && providerSpec.DiskGiB > 0 { + warnings = append(warnings, fmt.Sprintf("%s clone mode is set. DiskGiB parameter will be ignored, disk size from template will be used.", machinev1beta1.LinkedClone)) + } + + if providerSpec.UserDataSecret == nil { + errs = append(errs, field.Required(field.NewPath("providerSpec", "userDataSecret"), "userDataSecret must be provided")) + } else { + if providerSpec.UserDataSecret.Name == "" { + errs = append(errs, field.Required(field.NewPath("providerSpec", "userDataSecret", "name"), "name must be provided")) + } + } + + if providerSpec.CredentialsSecret == nil { + errs = append(errs, field.Required(field.NewPath("providerSpec", "credentialsSecret"), "credentialsSecret must be provided")) + } else { + if providerSpec.CredentialsSecret.Name == "" { + errs = append(errs, field.Required(field.NewPath("providerSpec", "credentialsSecret", "name"), "name must be provided")) + } else { + warnings = append(warnings, credentialsSecretExists(config.client, providerSpec.CredentialsSecret.Name, m.GetNamespace())...) 
+ } + } + + if len(errs) > 0 { + return false, warnings, utilerrors.NewAggregate(errs) + } + return true, warnings, nil +} + +func validateVSphereWorkspace(workspace *machinev1beta1.Workspace, parentPath *field.Path) ([]string, []error) { + if workspace == nil { + return []string{}, []error{field.Required(parentPath, "workspace must be provided")} + } + + var errs []error + var warnings []string + if workspace.Server == "" { + errs = append(errs, field.Required(parentPath.Child("server"), "server must be provided")) + } + if workspace.Datacenter == "" { + warnings = append(warnings, fmt.Sprintf("%s: datacenter is unset: if more than one datacenter is present, VMs cannot be created", parentPath.Child("datacenter"))) + } + if workspace.Folder != "" { + expectedPrefix := fmt.Sprintf("/%s/vm/", workspace.Datacenter) + if !strings.HasPrefix(workspace.Folder, expectedPrefix) { + errMsg := fmt.Sprintf("folder must be absolute path: expected prefix %q", expectedPrefix) + errs = append(errs, field.Invalid(parentPath.Child("folder"), workspace.Folder, errMsg)) + } + } + + return warnings, errs +} + +func validateVSphereNetwork(network machinev1beta1.NetworkSpec, parentPath *field.Path) []error { + if len(network.Devices) == 0 { + return []error{field.Required(parentPath.Child("devices"), "at least 1 network device must be provided")} + } + + var errs []error + for i, spec := range network.Devices { + fldPath := parentPath.Child("devices").Index(i) + if spec.NetworkName == "" { + errs = append(errs, field.Required(fldPath.Child("networkName"), "networkName must be provided")) + } + } + + return errs +} + +func isAzureGovCloud(platformStatus *osconfigv1.PlatformStatus) bool { + return platformStatus != nil && platformStatus.Azure != nil && + platformStatus.Azure.CloudName != osconfigv1.AzurePublicCloud +} + +func validateMachineLifecycleHooks(m, oldM *machinev1beta1.Machine) []error { + var errs []error + + if isDeleting(m) && oldM != nil { + changedPreDrain := lifecyclehooks.GetChangedLifecycleHooks(oldM.Spec.LifecycleHooks.PreDrain, m.Spec.LifecycleHooks.PreDrain) + if len(changedPreDrain) > 0 { + errs = append(errs, field.Forbidden(field.NewPath("spec", "lifecycleHooks", "preDrain"), fmt.Sprintf("pre-drain hooks are immutable when machine is marked for deletion: the following hooks are new or changed: %+v", changedPreDrain))) + } + + changedPreTerminate := lifecyclehooks.GetChangedLifecycleHooks(oldM.Spec.LifecycleHooks.PreTerminate, m.Spec.LifecycleHooks.PreTerminate) + if len(changedPreTerminate) > 0 { + errs = append(errs, field.Forbidden(field.NewPath("spec", "lifecycleHooks", "preTerminate"), fmt.Sprintf("pre-terminate hooks are immutable when machine is marked for deletion: the following hooks are new or changed: %+v", changedPreTerminate))) + } + } + + return errs +} + +func validateAzureDataDisks(machineName string, spec *machinev1beta1.AzureMachineProviderSpec, parentPath *field.Path) []error { + + var errs []error + dataDiskLuns := make(map[int32]struct{}) + dataDiskNames := make(map[string]struct{}) + // defines rules for matching. strings must start and finish with an alphanumeric character + // and can only contain letters, numbers, underscores, periods or hyphens. 
+ reg := regexp.MustCompile(`^[a-zA-Z0-9](?:[\w\.-]*[a-zA-Z0-9])?$`) + + for i, disk := range spec.DataDisks { + fldPath := parentPath.Index(i) + + dataDiskName := machineName + "_" + disk.NameSuffix + + if len(dataDiskName) > 80 { + errs = append(errs, field.Invalid(fldPath.Child("nameSuffix"), disk.NameSuffix, "too long, the overall disk name must not exceed 80 chars")) + } + + if matched := reg.MatchString(disk.NameSuffix); !matched { + errs = append(errs, field.Invalid(fldPath.Child("nameSuffix"), disk.NameSuffix, "nameSuffix must be provided, must start and finish with an alphanumeric character and can only contain letters, numbers, underscores, periods or hyphens")) + } + + if _, exists := dataDiskNames[disk.NameSuffix]; exists { + errs = append(errs, field.Invalid(fldPath.Child("nameSuffix"), disk.NameSuffix, "each Data Disk must have a unique nameSuffix")) + } + + if disk.DiskSizeGB < 4 { + errs = append(errs, field.Invalid(fldPath.Child("diskSizeGB"), disk.DiskSizeGB, "diskSizeGB must be provided and at least 4GB in size")) + } + + if disk.Lun < 0 || disk.Lun > 63 { + errs = append(errs, field.Invalid(fldPath.Child("lun"), disk.Lun, "must be greater than or equal to 0 and less than 64")) + } + + if _, exists := dataDiskLuns[disk.Lun]; exists { + errs = append(errs, field.Invalid(fldPath.Child("lun"), disk.Lun, "each Data Disk must have a unique lun")) + } + + switch disk.DeletionPolicy { + case machinev1beta1.DiskDeletionPolicyTypeDelete, machinev1beta1.DiskDeletionPolicyTypeDetach: + // Valid scenarios, do nothing + case "": + errs = append(errs, field.Required(fldPath.Child("deletionPolicy"), + fmt.Sprintf("deletionPolicy must be provided and must be either %s or %s", + machinev1beta1.DiskDeletionPolicyTypeDelete, machinev1beta1.DiskDeletionPolicyTypeDetach))) + default: + errs = append(errs, field.Invalid(fldPath.Child("deletionPolicy"), disk.DeletionPolicy, + fmt.Sprintf("must be either %s or %s", machinev1beta1.DiskDeletionPolicyTypeDelete, machinev1beta1.DiskDeletionPolicyTypeDetach))) + } + + if (disk.ManagedDisk.StorageAccountType == machinev1beta1.StorageAccountUltraSSDLRS) && + (disk.CachingType != machinev1beta1.CachingTypeNone && disk.CachingType != "") { + errs = append(errs, + field.Invalid(fldPath.Child("cachingType"), + disk.CachingType, + fmt.Sprintf("must be \"None\" or omitted when storageAccountType is \"%s\"", machinev1beta1.StorageAccountUltraSSDLRS)), + ) + } + + dataDiskLuns[disk.Lun] = struct{}{} + dataDiskNames[disk.NameSuffix] = struct{}{} + } + + return errs +} + +func defaultPowerVS(m *machinev1beta1.Machine, config *admissionConfig) (bool, []string, utilerrors.Aggregate) { + klog.V(3).Infof("Defaulting PowerVS providerSpec") + + var errs []error + var warnings []string + providerSpec := new(machinev1.PowerVSMachineProviderConfig) + if err := unmarshalInto(m, providerSpec); err != nil { + errs = append(errs, err) + return false, warnings, utilerrors.NewAggregate(errs) + } + if providerSpec.UserDataSecret == nil { + providerSpec.UserDataSecret = &machinev1.PowerVSSecretReference{Name: defaultUserDataSecret} + } + if providerSpec.CredentialsSecret == nil { + providerSpec.CredentialsSecret = &machinev1.PowerVSSecretReference{Name: defaultPowerVSCredentialsSecret} + } + if providerSpec.SystemType == "" { + providerSpec.SystemType = defaultPowerVSSysType + } + if providerSpec.ProcessorType == "" { + providerSpec.ProcessorType = defaultPowerVSProcType + } + if providerSpec.Processors.IntVal == 0 || providerSpec.Processors.StrVal == "" { + switch 
providerSpec.ProcessorType { + case machinev1.PowerVSProcessorTypeDedicated: + providerSpec.Processors = intstr.IntOrString{Type: intstr.Int, IntVal: 1} + default: + providerSpec.Processors = intstr.IntOrString{Type: intstr.String, StrVal: "0.5"} + } + } + if providerSpec.MemoryGiB == 0 { + providerSpec.MemoryGiB = defaultPowerVSMemory + } + + rawBytes, err := json.Marshal(providerSpec) + if err != nil { + errs = append(errs, err) + } + + if len(errs) > 0 { + return false, warnings, utilerrors.NewAggregate(errs) + } + + m.Spec.ProviderSpec.Value = &kruntime.RawExtension{Raw: rawBytes} + return true, warnings, nil +} + +func validatePowerVS(m *machinev1beta1.Machine, config *admissionConfig) (bool, []string, utilerrors.Aggregate) { + klog.V(3).Infof("Validating PowerVS providerSpec") + + var errs []error + var warnings []string + providerSpec := new(machinev1.PowerVSMachineProviderConfig) + if err := unmarshalInto(m, providerSpec); err != nil { + errs = append(errs, err) + return false, warnings, utilerrors.NewAggregate(errs) + } + + if providerSpec.KeyPairName == "" { + errs = append(errs, field.Required(field.NewPath("providerSpec", "keyPairName"), "providerSpec.keyPairName must be provided")) + } + + serviceInstanceErrors := validatePowerVSResourceIdentifiers(providerSpec.ServiceInstance, powerVSServiceInstance, field.NewPath("providerSpec", "serviceInstance")) + errs = append(errs, serviceInstanceErrors...) + + imageErrors := validatePowerVSResourceIdentifiers(providerSpec.Image, powerVSImage, field.NewPath("providerSpec", "image")) + errs = append(errs, imageErrors...) + + networkErrors := validatePowerVSResourceIdentifiers(providerSpec.Network, powerVSNetwork, field.NewPath("providerSpec", "network")) + errs = append(errs, networkErrors...) + + if providerSpec.UserDataSecret == nil { + errs = append(errs, field.Required(field.NewPath("providerSpec", "userDataSecret"), "providerSpec.userDataSecret must be provided")) + } else { + if providerSpec.UserDataSecret.Name == "" { + errs = append(errs, field.Required(field.NewPath("providerSpec", "userDataSecret", "name"), "providerSpec.userDataSecret.name must be provided")) + } + } + + if providerSpec.CredentialsSecret == nil { + errs = append(errs, field.Required(field.NewPath("providerSpec", "credentialsSecret"), "providerSpec.credentialsSecret must be provided")) + } else { + if providerSpec.CredentialsSecret.Name == "" { + errs = append(errs, field.Required(field.NewPath("providerSpec", "credentialsSecret", "name"), "providerSpec.credentialsSecret.name must be provided")) + } else { + warnings = append(warnings, credentialsSecretExists(config.client, providerSpec.CredentialsSecret.Name, m.GetNamespace())...) + } + } + + machineConfigWarnings, machineConfigErrors := validateMachineConfigurations(providerSpec, field.NewPath("providerSpec")) + warnings = append(warnings, machineConfigWarnings...) + errs = append(errs, machineConfigErrors...) 
+ + if len(errs) > 0 { + return false, warnings, utilerrors.NewAggregate(errs) + } + return true, warnings, nil +} + +func validatePowerVSResourceIdentifiers(serviceInstance machinev1.PowerVSResource, resourceType string, parentPath *field.Path) (errs []error) { + switch serviceInstance.Type { + case machinev1.PowerVSResourceTypeID: + if serviceInstance.ID == nil { + errs = append(errs, field.Required(parentPath.Child("id"), + fmt.Sprintf("%s identifier is specified as ID but the value is nil", resourceType))) + } + case machinev1.PowerVSResourceTypeName: + if serviceInstance.Name == nil { + errs = append(errs, field.Required(parentPath.Child("name"), + fmt.Sprintf("%s identifier is specified as Name but the value is nil", resourceType))) + } + case machinev1.PowerVSResourceTypeRegEx: + if resourceType == powerVSServiceInstance || resourceType == powerVSImage { + errs = append(errs, field.Invalid(parentPath, serviceInstance.Type, + fmt.Sprintf("%s identifier is specified as %s but only %s and %s are valid resource identifiers", + resourceType, serviceInstance.Type, machinev1.PowerVSResourceTypeID, machinev1.PowerVSResourceTypeName))) + } + if serviceInstance.RegEx == nil { + errs = append(errs, field.Required(parentPath.Child("regex"), + fmt.Sprintf("%s identifier is specified as Regex but the value is nil", resourceType))) + } + case "": + errs = append(errs, field.Required(parentPath, + fmt.Sprintf("%s identifier must be provided", resourceType))) + default: + errs = append(errs, field.Invalid(parentPath, serviceInstance.Type, + fmt.Sprintf("%s identifier is specified as %s but only %s, %s and %s are valid resource identifiers", + resourceType, serviceInstance.Type, machinev1.PowerVSResourceTypeID, machinev1.PowerVSResourceTypeName, machinev1.PowerVSResourceTypeRegEx))) + } + return errs +} + +func validateMachineConfigurations(providerSpec *machinev1.PowerVSMachineProviderConfig, parentPath *field.Path) (warnings []string, errs []error) { + if providerSpec == nil { + errs = append(errs, []error{field.Required(parentPath, "providerSpec must be provided")}...) 
+ return + } + if val, found := powerVSMachineConfigurations[providerSpec.SystemType]; !found { + warnings = append(warnings, fmt.Sprintf("providerSpec.SystemType: %s is not known, currently known system types are %s, %s and %s", providerSpec.SystemType, defaultPowerVSSysType, powerVSSystemTypeE980, powerVSSystemTypeE880)) + } else { + if providerSpec.MemoryGiB > val.maxMemoryGiB { + errs = append(errs, field.Invalid(parentPath.Child("memoryGiB"), providerSpec.MemoryGiB, fmt.Sprintf("for %s systemtype the maximum supported memory value is %d", providerSpec.SystemType, val.maxMemoryGiB))) + } + + if providerSpec.MemoryGiB < val.minMemoryGiB { + warnings = append(warnings, fmt.Sprintf("providerspec.MemoryGiB %d is less than the minimum value %d", providerSpec.MemoryGiB, val.minMemoryGiB)) + } + + processor, err := getPowerVSProcessorValue(providerSpec.Processors) + if err != nil { + errs = append(errs, fmt.Errorf("error while getting processor value %w", err)) + return + } else { + if processor > val.maxProcessor { + errs = append(errs, field.Invalid(parentPath.Child("processor"), processor, fmt.Sprintf("for %s systemtype the maximum supported processor value is %f", providerSpec.SystemType, val.maxProcessor))) + } + } + // validate minimum processor values depending on ProcessorType + if providerSpec.ProcessorType == machinev1.PowerVSProcessorTypeDedicated { + if processor < val.minProcessorDedicated { + warnings = append(warnings, fmt.Sprintf("providerspec.Processor %f is less than the minimum value %f for providerSpec.ProcessorType: %s", processor, val.minProcessorDedicated, providerSpec.ProcessorType)) + } + } else { + if processor < val.minProcessorSharedCapped { + warnings = append(warnings, fmt.Sprintf("providerspec.Processor %f is less than the minimum value %f for providerSpec.ProcessorType: %s", processor, val.minProcessorSharedCapped, providerSpec.ProcessorType)) + } + } + } + return +} + +func getPowerVSProcessorValue(processor intstr.IntOrString) (processors float64, err error) { + switch processor.Type { + case intstr.Int: + processors = float64(processor.IntVal) + case intstr.String: + processors, err = strconv.ParseFloat(processor.StrVal, 64) + if err != nil { + err = fmt.Errorf("failed to convert Processors %s to float64", processor.StrVal) + } + } + return +} + +func isDeleting(obj metav1.Object) bool { + return obj.GetDeletionTimestamp() != nil +} + +// isFinalizerOnlyRemoval checks if the machine update only removes finalizers.
+func isFinalizerOnlyRemoval(m, oldM *machinev1beta1.Machine) (bool, error) { + patchBase := client.MergeFrom(oldM) + data, err := patchBase.Data(m) + if err != nil { + return false, fmt.Errorf("cannot calculate patch data from machine object: %w", err) + } + + return string(data) == `{"metadata":{"finalizers":[""]}}`, nil +} diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/webhooks/machineset_webhook.go b/vendor/github.com/openshift/machine-api-operator/pkg/webhooks/machineset_webhook.go new file mode 100644 index 000000000..ac554717c --- /dev/null +++ b/vendor/github.com/openshift/machine-api-operator/pkg/webhooks/machineset_webhook.go @@ -0,0 +1,186 @@ +package webhooks + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "reflect" + + osconfigv1 "github.com/openshift/api/config/v1" + machinev1beta1 "github.com/openshift/api/machine/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/validation/field" + + "k8s.io/klog/v2" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// machineSetValidatorHandler validates MachineSet API resources. +// implements type Handler interface. +// https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/webhook/admission#Handler +type machineSetValidatorHandler struct { + *admissionHandler +} + +// machineSetDefaulterHandler defaults MachineSet API resources. +// implements type Handler interface. +// https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/webhook/admission#Handler +type machineSetDefaulterHandler struct { + *admissionHandler +} + +// NewMachineSetValidator returns a new machineSetValidatorHandler. +func NewMachineSetValidator(client client.Client) (*machineSetValidatorHandler, error) { + infra, err := getInfra() + if err != nil { + return nil, err + } + + dns, err := getDNS() + if err != nil { + return nil, err + } + + return createMachineSetValidator(infra, client, dns), nil +} + +func createMachineSetValidator(infra *osconfigv1.Infrastructure, client client.Client, dns *osconfigv1.DNS) *machineSetValidatorHandler { + admissionConfig := &admissionConfig{ + dnsDisconnected: dns.Spec.PublicZone == nil, + clusterID: infra.Status.InfrastructureName, + client: client, + } + return &machineSetValidatorHandler{ + admissionHandler: &admissionHandler{ + admissionConfig: admissionConfig, + webhookOperations: getMachineValidatorOperation(infra.Status.PlatformStatus.Type), + }, + } +} + +// NewMachineSetDefaulter returns a new machineSetDefaulterHandler. +func NewMachineSetDefaulter() (*machineSetDefaulterHandler, error) { + infra, err := getInfra() + if err != nil { + return nil, err + } + + return createMachineSetDefaulter(infra.Status.PlatformStatus, infra.Status.InfrastructureName), nil +} + +func createMachineSetDefaulter(platformStatus *osconfigv1.PlatformStatus, clusterID string) *machineSetDefaulterHandler { + return &machineSetDefaulterHandler{ + admissionHandler: &admissionHandler{ + admissionConfig: &admissionConfig{clusterID: clusterID}, + webhookOperations: getMachineDefaulterOperation(platformStatus), + }, + } +} + +// Handle handles HTTP requests for admission webhook servers. 
+func (h *machineSetValidatorHandler) Handle(ctx context.Context, req admission.Request) admission.Response { + ms := &machinev1beta1.MachineSet{} + + if err := h.decoder.Decode(req, ms); err != nil { + return admission.Errored(http.StatusBadRequest, err) + } + + var oldMS *machinev1beta1.MachineSet + if len(req.OldObject.Raw) > 0 { + // oldMS must only be initialised if there is an old object (ie on UPDATE or DELETE). + // It should be nil otherwise to allow skipping certain validations that rely on + // the presence of the old object. + oldMS = &machinev1beta1.MachineSet{} + if err := h.decoder.DecodeRaw(req.OldObject, oldMS); err != nil { + return admission.Errored(http.StatusBadRequest, err) + } + } + + klog.V(3).Infof("Validate webhook called for MachineSet: %s", ms.GetName()) + + ok, warnings, errs := h.validateMachineSet(ms, oldMS) + if !ok { + return admission.Denied(errs.Error()).WithWarnings(warnings...) + } + + return admission.Allowed("MachineSet valid").WithWarnings(warnings...) +} + +// Handle handles HTTP requests for admission webhook servers. +func (h *machineSetDefaulterHandler) Handle(ctx context.Context, req admission.Request) admission.Response { + ms := &machinev1beta1.MachineSet{} + + if err := h.decoder.Decode(req, ms); err != nil { + return admission.Errored(http.StatusBadRequest, err) + } + + klog.V(3).Infof("Mutate webhook called for MachineSet: %s", ms.GetName()) + + ok, warnings, errs := h.defaultMachineSet(ms) + if !ok { + return admission.Denied(errs.Error()).WithWarnings(warnings...) + } + + marshaledMachineSet, err := json.Marshal(ms) + if err != nil { + return admission.Errored(http.StatusInternalServerError, err).WithWarnings(warnings...) + } + return admission.PatchResponseFromRaw(req.Object.Raw, marshaledMachineSet).WithWarnings(warnings...) +} + +func (h *machineSetValidatorHandler) validateMachineSet(ms, oldMS *machinev1beta1.MachineSet) (bool, []string, utilerrors.Aggregate) { + errs := validateMachineSetSpec(ms, oldMS) + + // Create a Machine from the MachineSet and validate the Machine template + m := &machinev1beta1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: ms.GetNamespace(), + }, + Spec: ms.Spec.Template.Spec, + } + ok, warnings, err := h.webhookOperations(m, h.admissionConfig) + if !ok { + errs = append(errs, err.Errors()...) + } + + if len(errs) > 0 { + return false, warnings, utilerrors.NewAggregate(errs) + } + return true, warnings, nil +} + +func (h *machineSetDefaulterHandler) defaultMachineSet(ms *machinev1beta1.MachineSet) (bool, []string, utilerrors.Aggregate) { + // Create a Machine from the MachineSet and default the Machine template + m := &machinev1beta1.Machine{Spec: ms.Spec.Template.Spec} + ok, warnings, err := h.webhookOperations(m, h.admissionConfig) + if !ok { + return false, warnings, utilerrors.NewAggregate(err.Errors()) + } + + // Restore the defaulted template + ms.Spec.Template.Spec = m.Spec + return true, warnings, nil +} + +// validateMachineSetSpec is used to validate any changes to the MachineSet spec outside of +// the providerSpec. Eg it can be used to verify changes to the selector. 
+func validateMachineSetSpec(ms, oldMS *machinev1beta1.MachineSet) []error { + var errs []error + if oldMS != nil && !reflect.DeepEqual(ms.Spec.Selector, oldMS.Spec.Selector) { + errs = append(errs, field.Forbidden(field.NewPath("spec", "selector"), "selector is immutable")) + } + + selector, err := metav1.LabelSelectorAsSelector(&ms.Spec.Selector) + if err != nil { + errs = append(errs, field.Invalid(field.NewPath("spec", "selector"), ms.Spec.Selector, fmt.Sprintf("could not convert label selector to selector: %v", err))) + } + if selector != nil && !selector.Matches(labels.Set(ms.Spec.Template.Labels)) { + errs = append(errs, field.Invalid(field.NewPath("spec", "template", "metadata", "labels"), ms.Spec.Template.Labels, "`selector` does not match template `labels`")) + } + + return errs +} diff --git a/vendor/github.com/openshift/machine-api-provider-openstack/LICENSE b/vendor/github.com/openshift/machine-api-provider-openstack/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/vendor/github.com/openshift/machine-api-provider-openstack/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/openshift/machine-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1/doc.go b/vendor/github.com/openshift/machine-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1/doc.go new file mode 100644 index 000000000..18932092a --- /dev/null +++ b/vendor/github.com/openshift/machine-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/openshift/cluster-api-provider-openstack/pkg/apis/openstackproviderconfig +// +k8s:openapi-gen=true +// +k8s:defaulter-gen=TypeMeta +// +groupName=openstackproviderconfig.openshift.io +package v1alpha1 diff --git a/vendor/github.com/openshift/machine-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1/register.go b/vendor/github.com/openshift/machine-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1/register.go new file mode 100644 index 000000000..631eb870e --- /dev/null +++ b/vendor/github.com/openshift/machine-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1/register.go @@ -0,0 +1,33 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +const GroupName = "openstackproviderconfig" + +var ( + // SchemeGroupVersion is group version used to register these objects + SchemeGroupVersion = schema.GroupVersion{Group: "openstackproviderconfig.openshift.io", Version: "v1alpha1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} +) diff --git a/vendor/github.com/openshift/machine-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1/types.go b/vendor/github.com/openshift/machine-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1/types.go new file mode 100644 index 000000000..a30137a30 --- /dev/null +++ b/vendor/github.com/openshift/machine-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1/types.go @@ -0,0 +1,359 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OpenstackProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field +// for an OpenStack Instance. It is used by the Openstack machine actuator to create a single machine instance. 
+// TODO(cglaubitz): We might consider to change this to OpenstackMachineProviderSpec +// +k8s:openapi-gen=true +type OpenstackProviderSpec struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // The name of the secret containing the openstack credentials + CloudsSecret *corev1.SecretReference `json:"cloudsSecret"` + + // The name of the cloud to use from the clouds secret + CloudName string `json:"cloudName"` + + // The flavor reference for the flavor for your server instance. + Flavor string `json:"flavor"` + + // The name of the image to use for your server instance. + // If the RootVolume is specified, this will be ignored and use rootVolume directly. + Image string `json:"image"` + + // The ssh key to inject in the instance + KeyName string `json:"keyName,omitempty"` + + // The machine ssh username + SshUserName string `json:"sshUserName,omitempty"` + + // A networks object. Required parameter when there are multiple networks defined for the tenant. + // When you do not specify the networks parameter, the server attaches to the only network created for the current tenant. + Networks []NetworkParam `json:"networks,omitempty"` + + // Create and assign additional ports to instances + Ports []PortOpts `json:"ports,omitempty"` + + FloatingIP string `json:"floatingIP,omitempty"` + + // The availability zone from which to launch the server. + AvailabilityZone string `json:"availabilityZone,omitempty"` + + // The names of the security groups to assign to the instance + SecurityGroups []SecurityGroupParam `json:"securityGroups,omitempty"` + + // The name of the secret containing the user data (startup script in most cases) + UserDataSecret *corev1.SecretReference `json:"userDataSecret,omitempty"` + + // Whether the server instance is created on a trunk port or not. + Trunk bool `json:"trunk,omitempty"` + + // Machine tags + // Requires Nova api 2.52 minimum! + Tags []string `json:"tags,omitempty"` + + // Metadata mapping. Allows you to create a map of key value pairs to add to the server instance. + ServerMetadata map[string]string `json:"serverMetadata,omitempty"` + + // Config Drive support + ConfigDrive *bool `json:"configDrive,omitempty"` + + // The volume metadata to boot from + RootVolume *RootVolume `json:"rootVolume,omitempty"` + + // The server group to assign the machine to. + ServerGroupID string `json:"serverGroupID,omitempty"` + + // The server group to assign the machine to. A server group with that + // name will be created if it does not exist. If both ServerGroupID and + // ServerGroupName are non-empty, they must refer to the same OpenStack + // resource. 
+ ServerGroupName string `json:"serverGroupName,omitempty"` + + // The subnet that a set of machines will get ingress/egress traffic from + PrimarySubnet string `json:"primarySubnet,omitempty"` +} + +type SecurityGroupParam struct { + // Security Group UID + UUID string `json:"uuid,omitempty"` + // Security Group name + Name string `json:"name,omitempty"` + // Filters used to query security groups in openstack + Filter SecurityGroupFilter `json:"filter,omitempty"` +} + +type SecurityGroupFilter struct { + ID string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + TenantID string `json:"tenantId,omitempty"` + ProjectID string `json:"projectId,omitempty"` + Limit int `json:"limit,omitempty"` + Marker string `json:"marker,omitempty"` + SortKey string `json:"sortKey,omitempty"` + SortDir string `json:"sortDir,omitempty"` + Tags string `json:"tags,omitempty"` + TagsAny string `json:"tagsAny,omitempty"` + NotTags string `json:"notTags,omitempty"` + NotTagsAny string `json:"notTagsAny,omitempty"` +} + +type NetworkParam struct { + // The UUID of the network. Required if you omit the port attribute. + UUID string `json:"uuid,omitempty"` + // A fixed IPv4 address for the NIC. + FixedIp string `json:"fixedIp,omitempty"` + // Filters for optional network query + Filter Filter `json:"filter,omitempty"` + // Subnet within a network to use + Subnets []SubnetParam `json:"subnets,omitempty"` + // NoAllowedAddressPairs disables creation of allowed address pairs for the network ports + NoAllowedAddressPairs bool `json:"noAllowedAddressPairs,omitempty"` + // PortTags allows users to specify a list of tags to add to ports created in a given network + PortTags []string `json:"portTags,omitempty"` + VNICType string `json:"vnicType,omitempty"` + Profile map[string]string `json:"profile,omitempty"` + // PortSecurity optionally enables or disables security on ports managed by OpenStack + PortSecurity *bool `json:"portSecurity,omitempty"` +} + +type Filter struct { + Status string `json:"status,omitempty"` + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + AdminStateUp *bool `json:"adminStateUp,omitempty"` + TenantID string `json:"tenantId,omitempty"` + ProjectID string `json:"projectId,omitempty"` + Shared *bool `json:"shared,omitempty"` + ID string `json:"id,omitempty"` + Marker string `json:"marker,omitempty"` + Limit int `json:"limit,omitempty"` + SortKey string `json:"sortKey,omitempty"` + SortDir string `json:"sortDir,omitempty"` + Tags string `json:"tags,omitempty"` + TagsAny string `json:"tagsAny,omitempty"` + NotTags string `json:"notTags,omitempty"` + NotTagsAny string `json:"notTagsAny,omitempty"` +} + +type SubnetParam struct { + // The UUID of the network. Required if you omit the port attribute. 
+ UUID string `json:"uuid,omitempty"` + + // Filters for optional network query + Filter SubnetFilter `json:"filter,omitempty"` + + // PortTags are tags that are added to ports created on this subnet + PortTags []string `json:"portTags,omitempty"` + + // PortSecurity optionally enables or disables security on ports managed by OpenStack + PortSecurity *bool `json:"portSecurity,omitempty"` +} + +type SubnetFilter struct { + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + EnableDHCP *bool `json:"enableDhcp,omitempty"` + NetworkID string `json:"networkId,omitempty"` + TenantID string `json:"tenantId,omitempty"` + ProjectID string `json:"projectId,omitempty"` + IPVersion int `json:"ipVersion,omitempty"` + GatewayIP string `json:"gateway_ip,omitempty"` + CIDR string `json:"cidr,omitempty"` + IPv6AddressMode string `json:"ipv6AddressMode,omitempty"` + IPv6RAMode string `json:"ipv6RaMode,omitempty"` + ID string `json:"id,omitempty"` + SubnetPoolID string `json:"subnetpoolId,omitempty"` + Limit int `json:"limit,omitempty"` + Marker string `json:"marker,omitempty"` + SortKey string `json:"sortKey,omitempty"` + SortDir string `json:"sortDir,omitempty"` + Tags string `json:"tags,omitempty"` + TagsAny string `json:"tagsAny,omitempty"` + NotTags string `json:"notTags,omitempty"` + NotTagsAny string `json:"notTagsAny,omitempty"` +} + +type PortOpts struct { + NetworkID string `json:"networkID" required:"true"` + NameSuffix string `json:"nameSuffix" required:"true"` + Description string `json:"description,omitempty"` + AdminStateUp *bool `json:"adminStateUp,omitempty"` + MACAddress string `json:"macAddress,omitempty"` + FixedIPs []FixedIPs `json:"fixedIPs,omitempty"` + TenantID string `json:"tenantID,omitempty"` + ProjectID string `json:"projectID,omitempty"` + SecurityGroups *[]string `json:"securityGroups,omitempty"` + AllowedAddressPairs []AddressPair `json:"allowedAddressPairs,omitempty"` + Tags []string `json:"tags,omitempty"` + + // The ID of the host where the port is allocated + HostID string `json:"hostID,omitempty"` + + // The virtual network interface card (vNIC) type that is bound to the + // neutron port. + VNICType string `json:"vnicType,omitempty"` + + // A dictionary that enables the application running on the specified + // host to pass and receive virtual network interface (VIF) port-specific + // information to the plug-in. + Profile map[string]string `json:"profile,omitempty"` + + // enable or disable security on a given port + // incompatible with securityGroups and allowedAddressPairs + PortSecurity *bool `json:"portSecurity,omitempty"` + + // Enables and disables trunk at port level. If not provided, openStackMachine.Spec.Trunk is inherited. 
+ Trunk *bool `json:"trunk,omitempty"` +} + +type AddressPair struct { + IPAddress string `json:"ipAddress,omitempty"` + MACAddress string `json:"macAddress,omitempty"` +} + +type FixedIPs struct { + SubnetID string `json:"subnetID"` + IPAddress string `json:"ipAddress,omitempty"` +} + +type RootVolume struct { + SourceType string `json:"sourceType,omitempty"` + SourceUUID string `json:"sourceUUID,omitempty"` + DeviceType string `json:"deviceType"` + VolumeType string `json:"volumeType,omitempty"` + Size int `json:"diskSize,omitempty"` + Zone string `json:"availabilityZone,omitempty"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OpenstackClusterProviderSpec is the providerSpec for OpenStack in the cluster object +// +k8s:openapi-gen=true +type OpenstackClusterProviderSpec struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // NodeCIDR is the OpenStack Subnet to be created. Cluster actuator will create a + // network, a subnet with NodeCIDR, and a router connected to this subnet. + // If you leave this empty, no network will be created. + NodeCIDR string `json:"nodeCidr,omitempty"` + // DNSNameservers is the list of nameservers for OpenStack Subnet being created. + DNSNameservers []string `json:"dnsNameservers,omitempty"` + // ExternalNetworkID is the ID of an external OpenStack Network. This is necessary + // to get public internet to the VMs. + ExternalNetworkID string `json:"externalNetworkId,omitempty"` + + // ManagedSecurityGroups defines that kubernetes manages the OpenStack security groups + // for now, that means that we'll create two security groups, one allowing SSH + // and API access from everywhere, and another one that allows all traffic to/from + // machines belonging to that group. In the future, we could make this more flexible. + ManagedSecurityGroups bool `json:"managedSecurityGroups"` + + // Tags for all resources in cluster + Tags []string `json:"tags,omitempty"` + + // Default: True. In case of server tag errors, set to False + DisableServerTags bool `json:"disableServerTags,omitempty"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OpenstackClusterProviderStatus contains the status fields +// relevant to OpenStack in the cluster object. +// +k8s:openapi-gen=true +type OpenstackClusterProviderStatus struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Network contains all information about the created OpenStack Network. + // It includes Subnets and Router. + Network *Network `json:"network,omitempty"` + + // ControlPlaneSecurityGroups contains all the information about the OpenStack + // Security Group that needs to be applied to control plane nodes. + // TODO: Maybe instead of two properties, we add a property to the group? + ControlPlaneSecurityGroup *SecurityGroup `json:"controlPlaneSecurityGroup,omitempty"` + + // GlobalSecurityGroup contains all the information about the OpenStack Security + // Group that needs to be applied to all nodes, both control plane and worker nodes. 
+ GlobalSecurityGroup *SecurityGroup `json:"globalSecurityGroup,omitempty"` +} + +// Network represents basic information about the associated OpenStack Neutron Network +type Network struct { + Name string `json:"name"` + ID string `json:"id"` + + Subnet *Subnet `json:"subnet,omitempty"` + Router *Router `json:"router,omitempty"` +} + +// Subnet represents basic information about the associated OpenStack Neutron Subnet +type Subnet struct { + Name string `json:"name"` + ID string `json:"id"` + + CIDR string `json:"cidr"` +} + +// Router represents basic information about the associated OpenStack Neutron Router +type Router struct { + Name string `json:"name"` + ID string `json:"id"` +} + +// SecurityGroup represents the basic information of the associated +// OpenStack Neutron Security Group. +type SecurityGroup struct { + Name string `json:"name"` + ID string `json:"id"` + Rules []SecurityGroupRule `json:"rules"` +} + +// SecurityGroupRule represents the basic information of the associated OpenStack +// Security Group Rule. +type SecurityGroupRule struct { + ID string `json:"name"` + Direction string `json:"direction"` + EtherType string `json:"etherType"` + SecurityGroupID string `json:"securityGroupID"` + PortRangeMin int `json:"portRangeMin"` + PortRangeMax int `json:"portRangeMax"` + Protocol string `json:"protocol"` + RemoteGroupID string `json:"remoteGroupID"` + RemoteIPPrefix string `json:"remoteIPPrefix"` +} + +func init() { + SchemeBuilder.Register(&OpenstackProviderSpec{}) + SchemeBuilder.Register(&OpenstackClusterProviderSpec{}) + SchemeBuilder.Register(&OpenstackClusterProviderStatus{}) +} diff --git a/vendor/github.com/openshift/machine-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/machine-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..55f37e5fb --- /dev/null +++ b/vendor/github.com/openshift/machine-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,406 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by main. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Filter) DeepCopyInto(out *Filter) { + *out = *in + if in.AdminStateUp != nil { + in, out := &in.AdminStateUp, &out.AdminStateUp + *out = new(bool) + **out = **in + } + if in.Shared != nil { + in, out := &in.Shared, &out.Shared + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Filter.
+func (in *Filter) DeepCopy() *Filter { + if in == nil { + return nil + } + out := new(Filter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Network) DeepCopyInto(out *Network) { + *out = *in + if in.Subnet != nil { + in, out := &in.Subnet, &out.Subnet + *out = new(Subnet) + **out = **in + } + if in.Router != nil { + in, out := &in.Router, &out.Router + *out = new(Router) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Network. +func (in *Network) DeepCopy() *Network { + if in == nil { + return nil + } + out := new(Network) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkParam) DeepCopyInto(out *NetworkParam) { + *out = *in + in.Filter.DeepCopyInto(&out.Filter) + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = make([]SubnetParam, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkParam. +func (in *NetworkParam) DeepCopy() *NetworkParam { + if in == nil { + return nil + } + out := new(NetworkParam) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenstackClusterProviderSpec) DeepCopyInto(out *OpenstackClusterProviderSpec) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.DNSNameservers != nil { + in, out := &in.DNSNameservers, &out.DNSNameservers + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenstackClusterProviderSpec. +func (in *OpenstackClusterProviderSpec) DeepCopy() *OpenstackClusterProviderSpec { + if in == nil { + return nil + } + out := new(OpenstackClusterProviderSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OpenstackClusterProviderSpec) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenstackClusterProviderStatus) DeepCopyInto(out *OpenstackClusterProviderStatus) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Network != nil { + in, out := &in.Network, &out.Network + *out = new(Network) + (*in).DeepCopyInto(*out) + } + if in.ControlPlaneSecurityGroup != nil { + in, out := &in.ControlPlaneSecurityGroup, &out.ControlPlaneSecurityGroup + *out = new(SecurityGroup) + (*in).DeepCopyInto(*out) + } + if in.GlobalSecurityGroup != nil { + in, out := &in.GlobalSecurityGroup, &out.GlobalSecurityGroup + *out = new(SecurityGroup) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenstackClusterProviderStatus. 
+func (in *OpenstackClusterProviderStatus) DeepCopy() *OpenstackClusterProviderStatus { + if in == nil { + return nil + } + out := new(OpenstackClusterProviderStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OpenstackClusterProviderStatus) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenstackProviderSpec) DeepCopyInto(out *OpenstackProviderSpec) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.CloudsSecret != nil { + in, out := &in.CloudsSecret, &out.CloudsSecret + *out = new(v1.SecretReference) + **out = **in + } + if in.Networks != nil { + in, out := &in.Networks, &out.Networks + *out = make([]NetworkParam, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]SecurityGroupParam, len(*in)) + copy(*out, *in) + } + if in.UserDataSecret != nil { + in, out := &in.UserDataSecret, &out.UserDataSecret + *out = new(v1.SecretReference) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ServerMetadata != nil { + in, out := &in.ServerMetadata, &out.ServerMetadata + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ConfigDrive != nil { + in, out := &in.ConfigDrive, &out.ConfigDrive + *out = new(bool) + **out = **in + } + if in.RootVolume != nil { + in, out := &in.RootVolume, &out.RootVolume + *out = new(RootVolume) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenstackProviderSpec. +func (in *OpenstackProviderSpec) DeepCopy() *OpenstackProviderSpec { + if in == nil { + return nil + } + out := new(OpenstackProviderSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OpenstackProviderSpec) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RootVolume) DeepCopyInto(out *RootVolume) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RootVolume. +func (in *RootVolume) DeepCopy() *RootVolume { + if in == nil { + return nil + } + out := new(RootVolume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Router) DeepCopyInto(out *Router) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Router. +func (in *Router) DeepCopy() *Router { + if in == nil { + return nil + } + out := new(Router) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecurityGroup) DeepCopyInto(out *SecurityGroup) { + *out = *in + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]SecurityGroupRule, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityGroup. +func (in *SecurityGroup) DeepCopy() *SecurityGroup { + if in == nil { + return nil + } + out := new(SecurityGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityGroupFilter) DeepCopyInto(out *SecurityGroupFilter) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityGroupFilter. +func (in *SecurityGroupFilter) DeepCopy() *SecurityGroupFilter { + if in == nil { + return nil + } + out := new(SecurityGroupFilter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityGroupParam) DeepCopyInto(out *SecurityGroupParam) { + *out = *in + out.Filter = in.Filter + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityGroupParam. +func (in *SecurityGroupParam) DeepCopy() *SecurityGroupParam { + if in == nil { + return nil + } + out := new(SecurityGroupParam) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityGroupRule) DeepCopyInto(out *SecurityGroupRule) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityGroupRule. +func (in *SecurityGroupRule) DeepCopy() *SecurityGroupRule { + if in == nil { + return nil + } + out := new(SecurityGroupRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Subnet) DeepCopyInto(out *Subnet) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subnet. +func (in *Subnet) DeepCopy() *Subnet { + if in == nil { + return nil + } + out := new(Subnet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubnetFilter) DeepCopyInto(out *SubnetFilter) { + *out = *in + if in.EnableDHCP != nil { + in, out := &in.EnableDHCP, &out.EnableDHCP + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnetFilter. +func (in *SubnetFilter) DeepCopy() *SubnetFilter { + if in == nil { + return nil + } + out := new(SubnetFilter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubnetParam) DeepCopyInto(out *SubnetParam) { + *out = *in + in.Filter.DeepCopyInto(&out.Filter) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnetParam. 
+func (in *SubnetParam) DeepCopy() *SubnetParam { + if in == nil { + return nil + } + out := new(SubnetParam) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/collectors.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/collectors.go new file mode 100644 index 000000000..c4d0f5c35 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/collectors.go @@ -0,0 +1,16 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package collectors provides implementations of prometheus.Collector to +// conveniently collect process and Go-related metrics. +package collectors diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector.go new file mode 100644 index 000000000..e09f149d7 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector.go @@ -0,0 +1,119 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package collectors + +import ( + "database/sql" + + "github.com/prometheus/client_golang/prometheus" +) + +type dbStatsCollector struct { + db *sql.DB + + maxOpenConnections *prometheus.Desc + + openConnections *prometheus.Desc + inUseConnections *prometheus.Desc + idleConnections *prometheus.Desc + + waitCount *prometheus.Desc + waitDuration *prometheus.Desc + maxIdleClosed *prometheus.Desc + maxIdleTimeClosed *prometheus.Desc + maxLifetimeClosed *prometheus.Desc +} + +// NewDBStatsCollector returns a collector that exports metrics about the given *sql.DB. +// See https://golang.org/pkg/database/sql/#DBStats for more information on stats. 
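+//
+// A minimal registration sketch from a caller's point of view (the *sql.DB
+// handle "db" and the label value "app_db" are illustrative placeholders, not
+// names defined by this package):
+//
+//   prometheus.MustRegister(collectors.NewDBStatsCollector(db, "app_db"))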
+func NewDBStatsCollector(db *sql.DB, dbName string) prometheus.Collector { + fqName := func(name string) string { + return "go_sql_" + name + } + return &dbStatsCollector{ + db: db, + maxOpenConnections: prometheus.NewDesc( + fqName("max_open_connections"), + "Maximum number of open connections to the database.", + nil, prometheus.Labels{"db_name": dbName}, + ), + openConnections: prometheus.NewDesc( + fqName("open_connections"), + "The number of established connections both in use and idle.", + nil, prometheus.Labels{"db_name": dbName}, + ), + inUseConnections: prometheus.NewDesc( + fqName("in_use_connections"), + "The number of connections currently in use.", + nil, prometheus.Labels{"db_name": dbName}, + ), + idleConnections: prometheus.NewDesc( + fqName("idle_connections"), + "The number of idle connections.", + nil, prometheus.Labels{"db_name": dbName}, + ), + waitCount: prometheus.NewDesc( + fqName("wait_count_total"), + "The total number of connections waited for.", + nil, prometheus.Labels{"db_name": dbName}, + ), + waitDuration: prometheus.NewDesc( + fqName("wait_duration_seconds_total"), + "The total time blocked waiting for a new connection.", + nil, prometheus.Labels{"db_name": dbName}, + ), + maxIdleClosed: prometheus.NewDesc( + fqName("max_idle_closed_total"), + "The total number of connections closed due to SetMaxIdleConns.", + nil, prometheus.Labels{"db_name": dbName}, + ), + maxIdleTimeClosed: prometheus.NewDesc( + fqName("max_idle_time_closed_total"), + "The total number of connections closed due to SetConnMaxIdleTime.", + nil, prometheus.Labels{"db_name": dbName}, + ), + maxLifetimeClosed: prometheus.NewDesc( + fqName("max_lifetime_closed_total"), + "The total number of connections closed due to SetConnMaxLifetime.", + nil, prometheus.Labels{"db_name": dbName}, + ), + } +} + +// Describe implements Collector. +func (c *dbStatsCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- c.maxOpenConnections + ch <- c.openConnections + ch <- c.inUseConnections + ch <- c.idleConnections + ch <- c.waitCount + ch <- c.waitDuration + ch <- c.maxIdleClosed + ch <- c.maxLifetimeClosed + c.describeNewInGo115(ch) +} + +// Collect implements Collector. 
+func (c *dbStatsCollector) Collect(ch chan<- prometheus.Metric) { + stats := c.db.Stats() + ch <- prometheus.MustNewConstMetric(c.maxOpenConnections, prometheus.GaugeValue, float64(stats.MaxOpenConnections)) + ch <- prometheus.MustNewConstMetric(c.openConnections, prometheus.GaugeValue, float64(stats.OpenConnections)) + ch <- prometheus.MustNewConstMetric(c.inUseConnections, prometheus.GaugeValue, float64(stats.InUse)) + ch <- prometheus.MustNewConstMetric(c.idleConnections, prometheus.GaugeValue, float64(stats.Idle)) + ch <- prometheus.MustNewConstMetric(c.waitCount, prometheus.CounterValue, float64(stats.WaitCount)) + ch <- prometheus.MustNewConstMetric(c.waitDuration, prometheus.CounterValue, stats.WaitDuration.Seconds()) + ch <- prometheus.MustNewConstMetric(c.maxIdleClosed, prometheus.CounterValue, float64(stats.MaxIdleClosed)) + ch <- prometheus.MustNewConstMetric(c.maxLifetimeClosed, prometheus.CounterValue, float64(stats.MaxLifetimeClosed)) + c.collectNewInGo115(ch, stats) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector_go115.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector_go115.go new file mode 100644 index 000000000..6d152fbf1 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector_go115.go @@ -0,0 +1,31 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.15 +// +build go1.15 + +package collectors + +import ( + "database/sql" + + "github.com/prometheus/client_golang/prometheus" +) + +func (c *dbStatsCollector) describeNewInGo115(ch chan<- *prometheus.Desc) { + ch <- c.maxIdleTimeClosed +} + +func (c *dbStatsCollector) collectNewInGo115(ch chan<- prometheus.Metric, stats sql.DBStats) { + ch <- prometheus.MustNewConstMetric(c.maxIdleTimeClosed, prometheus.CounterValue, float64(stats.MaxIdleTimeClosed)) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector_pre_go115.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector_pre_go115.go new file mode 100644 index 000000000..65235069d --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector_pre_go115.go @@ -0,0 +1,27 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build !go1.15 +// +build !go1.15 + +package collectors + +import ( + "database/sql" + + "github.com/prometheus/client_golang/prometheus" +) + +func (c *dbStatsCollector) describeNewInGo115(ch chan<- *prometheus.Desc) {} + +func (c *dbStatsCollector) collectNewInGo115(ch chan<- prometheus.Metric, stats sql.DBStats) {} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go new file mode 100644 index 000000000..3aa8d0590 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go @@ -0,0 +1,57 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package collectors + +import "github.com/prometheus/client_golang/prometheus" + +// NewExpvarCollector returns a newly allocated expvar Collector. +// +// An expvar Collector collects metrics from the expvar interface. It provides a +// quick way to expose numeric values that are already exported via expvar as +// Prometheus metrics. Note that the data models of expvar and Prometheus are +// fundamentally different, and that the expvar Collector is inherently slower +// than native Prometheus metrics. Thus, the expvar Collector is probably great +// for experiments and prototying, but you should seriously consider a more +// direct implementation of Prometheus metrics for monitoring production +// systems. +// +// The exports map has the following meaning: +// +// The keys in the map correspond to expvar keys, i.e. for every expvar key you +// want to export as Prometheus metric, you need an entry in the exports +// map. The descriptor mapped to each key describes how to export the expvar +// value. It defines the name and the help string of the Prometheus metric +// proxying the expvar value. The type will always be Untyped. +// +// For descriptors without variable labels, the expvar value must be a number or +// a bool. The number is then directly exported as the Prometheus sample +// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values +// that are not numbers or bools are silently ignored. +// +// If the descriptor has one variable label, the expvar value must be an expvar +// map. The keys in the expvar map become the various values of the one +// Prometheus label. The values in the expvar map must be numbers or bools again +// as above. +// +// For descriptors with more than one variable label, the expvar must be a +// nested expvar map, i.e. where the values of the topmost map are maps again +// etc. until a depth is reached that corresponds to the number of labels. The +// leaves of that structure must be numbers or bools as above to serve as the +// sample values. +// +// Anything that does not fit into the scheme above is silently ignored. +func NewExpvarCollector(exports map[string]*prometheus.Desc) prometheus.Collector { + //nolint:staticcheck // Ignore SA1019 until v2. 
+ return prometheus.NewExpvarCollector(exports) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector.go new file mode 100644 index 000000000..edaa4e50b --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector.go @@ -0,0 +1,69 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package collectors + +import "github.com/prometheus/client_golang/prometheus" + +// NewGoCollector returns a collector that exports metrics about the current Go +// process. This includes memory stats. To collect those, runtime.ReadMemStats +// is called. This requires to “stop the world”, which usually only happens for +// garbage collection (GC). Take the following implications into account when +// deciding whether to use the Go collector: +// +// 1. The performance impact of stopping the world is the more relevant the more +// frequently metrics are collected. However, with Go1.9 or later the +// stop-the-world time per metrics collection is very short (~25µs) so that the +// performance impact will only matter in rare cases. However, with older Go +// versions, the stop-the-world duration depends on the heap size and can be +// quite significant (~1.7 ms/GiB as per +// https://go-review.googlesource.com/c/go/+/34937). +// +// 2. During an ongoing GC, nothing else can stop the world. Therefore, if the +// metrics collection happens to coincide with GC, it will only complete after +// GC has finished. Usually, GC is fast enough to not cause problems. However, +// with a very large heap, GC might take multiple seconds, which is enough to +// cause scrape timeouts in common setups. To avoid this problem, the Go +// collector will use the memstats from a previous collection if +// runtime.ReadMemStats takes more than 1s. However, if there are no previously +// collected memstats, or their collection is more than 5m ago, the collection +// will block until runtime.ReadMemStats succeeds. +// +// NOTE: The problem is solved in Go 1.15, see +// https://github.com/golang/go/issues/19812 for the related Go issue. +func NewGoCollector() prometheus.Collector { + //nolint:staticcheck // Ignore SA1019 until v2. + return prometheus.NewGoCollector() +} + +// NewBuildInfoCollector returns a collector collecting a single metric +// "go_build_info" with the constant value 1 and three labels "path", "version", +// and "checksum". Their label values contain the main module path, version, and +// checksum, respectively. The labels will only have meaningful values if the +// binary is built with Go module support and from source code retrieved from +// the source repository (rather than the local file system). This is usually +// accomplished by building from outside of GOPATH, specifying the full address +// of the main package, e.g. "GO111MODULE=on go run +// github.com/prometheus/client_golang/examples/random". 
If built without Go +// module support, all label values will be "unknown". If built with Go module +// support but using the source code from the local file system, the "path" will +// be set appropriately, but "checksum" will be empty and "version" will be +// "(devel)". +// +// This collector uses only the build information for the main module. See +// https://github.com/povilasv/prommod for an example of a collector for the +// module dependencies. +func NewBuildInfoCollector() prometheus.Collector { + //nolint:staticcheck // Ignore SA1019 until v2. + return prometheus.NewBuildInfoCollector() +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/process_collector.go new file mode 100644 index 000000000..24558f50a --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/process_collector.go @@ -0,0 +1,56 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package collectors + +import "github.com/prometheus/client_golang/prometheus" + +// ProcessCollectorOpts defines the behavior of a process metrics collector +// created with NewProcessCollector. +type ProcessCollectorOpts struct { + // PidFn returns the PID of the process the collector collects metrics + // for. It is called upon each collection. By default, the PID of the + // current process is used, as determined on construction time by + // calling os.Getpid(). + PidFn func() (int, error) + // If non-empty, each of the collected metrics is prefixed by the + // provided string and an underscore ("_"). + Namespace string + // If true, any error encountered during collection is reported as an + // invalid metric (see NewInvalidMetric). Otherwise, errors are ignored + // and the collected metrics will be incomplete. (Possibly, no metrics + // will be collected at all.) While that's usually not desired, it is + // appropriate for the common "mix-in" of process metrics, where process + // metrics are nice to have, but failing to collect them should not + // disrupt the collection of the remaining metrics. + ReportErrors bool +} + +// NewProcessCollector returns a collector which exports the current state of +// process metrics including CPU, memory and file descriptor usage as well as +// the process start time. The detailed behavior is defined by the provided +// ProcessCollectorOpts. The zero value of ProcessCollectorOpts creates a +// collector for the current process with an empty namespace string and no error +// reporting. +// +// The collector only works on operating systems with a Linux-style proc +// filesystem and on Microsoft Windows. On other operating systems, it will not +// collect any metrics. +func NewProcessCollector(opts ProcessCollectorOpts) prometheus.Collector { + //nolint:staticcheck // Ignore SA1019 until v2. 
+ return prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{ + PidFn: opts.PidFn, + Namespace: opts.Namespace, + ReportErrors: opts.ReportErrors, + }) +} diff --git a/vendor/github.com/vmware/govmomi/.dockerignore b/vendor/github.com/vmware/govmomi/.dockerignore new file mode 100644 index 000000000..9f5fdc6b9 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/.dockerignore @@ -0,0 +1,2 @@ +Dockerfile* +.*ignore \ No newline at end of file diff --git a/vendor/github.com/vmware/govmomi/.gitignore b/vendor/github.com/vmware/govmomi/.gitignore index 9099ba349..a7d3e830e 100644 --- a/vendor/github.com/vmware/govmomi/.gitignore +++ b/vendor/github.com/vmware/govmomi/.gitignore @@ -1,3 +1,12 @@ secrets.yml dist/ .idea/ + +# ignore tools binaries +/git-chglog + +# ignore RELEASE-specific CHANGELOG +/RELEASE_CHANGELOG.md + +# Ignore editor temp files +*~ diff --git a/vendor/github.com/vmware/govmomi/.golangci.yml b/vendor/github.com/vmware/govmomi/.golangci.yml new file mode 100644 index 000000000..d6392fb94 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/.golangci.yml @@ -0,0 +1,18 @@ +linters: + disable-all: true + enable: + - goimports + - govet + # Run with --fast=false for more extensive checks + fast: true +# override defaults +linters-settings: + goimports: + # put imports beginning with prefix after 3rd-party packages; + # it's a comma-separated list of prefixes + local-prefixes: github.com/vmware/govmomi +run: + timeout: 6m + skip-dirs: + - vim25/xml + - cns/types diff --git a/vendor/github.com/vmware/govmomi/.goreleaser.yml b/vendor/github.com/vmware/govmomi/.goreleaser.yml index a15c5d7d7..69b80be38 100644 --- a/vendor/github.com/vmware/govmomi/.goreleaser.yml +++ b/vendor/github.com/vmware/govmomi/.goreleaser.yml @@ -1,57 +1,148 @@ --- -project_name: govc +project_name: govmomi + builds: -- goos: - - linux - - darwin - - windows - - freebsd - goarch: - - amd64 - - 386 - env: - - CGO_ENABLED=0 - main: ./govc/main.go - binary: govc - flags: -compiler gc - ldflags: -X github.com/vmware/govmomi/govc/flags.GitVersion={{.Version}} -archive: - name_template: '{{ .ProjectName }}_{{ .Os }}_{{ .Arch }}' - format: tar.gz - format_overrides: - - goos: windows - format: zip - files: - - none* + - id: govc + goos: &goos-defs + - linux + - darwin + - windows + - freebsd + goarch: &goarch-defs + - amd64 + - arm + - arm64 + - mips64le + env: + - CGO_ENABLED=0 + - PKGPATH=github.com/vmware/govmomi/govc/flags + main: ./govc/main.go + binary: govc + ldflags: + - "-X {{.Env.PKGPATH}}.BuildVersion={{.Version}} -X {{.Env.PKGPATH}}.BuildCommit={{.ShortCommit}} -X {{.Env.PKGPATH}}.BuildDate={{.Date}}" + - id: vcsim + goos: *goos-defs + goarch: *goarch-defs + env: + - CGO_ENABLED=0 + main: ./vcsim/main.go + binary: vcsim + ldflags: + - "-X main.buildVersion={{.Version}} -X main.buildCommit={{.ShortCommit}} -X main.buildDate={{.Date}}" + +archives: + - id: govcbuild + builds: + - govc + name_template: "govc_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" + replacements: &replacements + darwin: Darwin + linux: Linux + windows: Windows + freebsd: FreeBSD + amd64: x86_64 + format_overrides: &overrides + - goos: windows + format: zip + files: &extrafiles + - CHANGELOG.md + - LICENSE.txt + - README.md + + - id: vcsimbuild + builds: + - vcsim + name_template: "vcsim_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" + replacements: *replacements + format_overrides: *overrides + files: *extrafiles + +snapshot: + name_template: "{{ .Tag }}-next" + checksum: - name_template: '{{ 
.ProjectName }}_{{ .Version }}_checksums.txt' + name_template: "checksums.txt" + changelog: sort: asc filters: exclude: - - '^docs:' - - '^test:' + - "^docs:" + - "^test:" - Merge pull request - Merge branch -brew: - github: - owner: govmomi - name: homebrew-tap - commit_author: - name: Alfred the Narwhal - email: cna-alfred@vmware.com - folder: Formula - homepage: "https://github.com/vmware/govmomi/blob/master/govc/README.md" - description: "govc is a vSphere CLI built on top of govmomi." - test: | - system "#{bin}/govc version" + +# upload disabled since it is maintained in homebrew-core +brews: + - name: govc + ids: + - govcbuild + tap: + owner: govmomi + name: homebrew-tap + # TODO: create token in specified tap repo, add as secret to govmomi repo and reference in release workflow + # token: "{{ .Env.HOMEBREW_TAP_GITHUB_TOKEN }}" + # enable once we do fully automated releases + skip_upload: true + commit_author: + name: Alfred the Narwhal + email: cna-alfred@vmware.com + folder: Formula + homepage: "https://github.com/vmware/govmomi/blob/master/govc/README.md" + description: "govc is a vSphere CLI built on top of govmomi." + test: | + system "#{bin}/govc version" + install: | + bin.install "govc" + - name: vcsim + ids: + - vcsimbuild + tap: + owner: govmomi + name: homebrew-tap + # TODO: create token in specified tap repo, add as secret to govmomi repo and reference in release workflow + # token: "{{ .Env.HOMEBREW_TAP_GITHUB_TOKEN }}" + # enable once we do fully automated releases + skip_upload: true + commit_author: + name: Alfred the Narwhal + email: cna-alfred@vmware.com + folder: Formula + homepage: "https://github.com/vmware/govmomi/blob/master/vcsim/README.md" + description: "vcsim is a vSphere API simulator built on top of govmomi." + test: | + system "#{bin}/vcsim -h" + install: | + bin.install "vcsim" + dockers: - - image: vmware/govc - goos: linux - goarch: amd64 - binary: govc - tag_templates: - - "{{ .Tag }}" - - "v{{ .Major }}" - - "v{{ .Major }}.{{ .Minor }}" - - latest + - image_templates: + - "vmware/govc:{{ .Tag }}" + - "vmware/govc:{{ .ShortCommit }}" + - "vmware/govc:latest" + dockerfile: Dockerfile.govc + ids: + - govc + build_flag_templates: + - "--pull" + - "--label=org.opencontainers.image.created={{.Date}}" + - "--label=org.opencontainers.image.title={{.ProjectName}}" + - "--label=org.opencontainers.image.revision={{.FullCommit}}" + - "--label=org.opencontainers.image.version={{.Version}}" + - "--label=org.opencontainers.image.url=https://github.com/vmware/govmomi" + - "--platform=linux/amd64" + - image_templates: + - "vmware/vcsim:{{ .Tag }}" + - "vmware/vcsim:{{ .ShortCommit }}" + - "vmware/vcsim:latest" + dockerfile: Dockerfile.vcsim + ids: + - vcsim + build_flag_templates: + - "--pull" + - "--label=org.opencontainers.image.created={{.Date}}" + - "--label=org.opencontainers.image.title={{.ProjectName}}" + - "--label=org.opencontainers.image.revision={{.FullCommit}}" + - "--label=org.opencontainers.image.version={{.Version}}" + - "--label=org.opencontainers.image.url=https://github.com/vmware/govmomi" + - "--platform=linux/amd64" diff --git a/vendor/github.com/vmware/govmomi/.mailmap b/vendor/github.com/vmware/govmomi/.mailmap index b26a72479..23392c161 100644 --- a/vendor/github.com/vmware/govmomi/.mailmap +++ b/vendor/github.com/vmware/govmomi/.mailmap @@ -1,28 +1,45 @@ +amanpaha amanpaha <84718160+amanpaha@users.noreply.github.com> +Amanda H. L. de Andrade Amanda Hager Lopes de Andrade Katz Amanda H. L. 
de Andrade amandahla Amit Bathla -Andrew Kutz akutz Andrew Kutz +Andrew Kutz akutz Andrew Kutz Andrew Kutz <101085+akutz@users.noreply.github.com> +Anfernee Yongkun Gui +Anfernee Yongkun Gui Yongkun Anfernee Gui +Anna Carrigan Anna +Balu Dontu BaluDontu Bruce Downs -Bruce Downs Bruce Downs +Bruce Downs Clint Greenwood Cédric Blomart Cédric Blomart cedric David Stark +Doug MacEachern dougm Eric Gray Eric Yutao eric Fabio Rapposelli +Faiyaz Ahmed Faiyaz Ahmed +Faiyaz Ahmed Faiyaz Ahmed +Faiyaz Ahmed Faiyaz Ahmed Henrik Hodne +Ian Eyberg Jeremy Canady +Jiatong Wang jiatongw +Lintong Jiang lintongj <55512168+lintongj@users.noreply.github.com> +Michael Gasch Michael Gasch +Mincho Tonev matonev <31008054+matonev@users.noreply.github.com> +Parveen Chahal Pieter Noordhuis +Saad Malik Takaaki Furukawa takaaki.furukawa Takaaki Furukawa tkak +Uwe Bessle Uwe Bessle +Uwe Bessle Uwe Bessle Vadim Egorov -Anfernee Yongkun Gui -Anfernee Yongkun Gui Yongkun Anfernee Gui +William Lam +Yun Zhou <41678287+gh05tn0va@users.noreply.github.com> +Zach G zach96guan Zach Tucker Zee Yang -Jiatong Wang jiatongw -Uwe Bessle Uwe Bessle -Uwe Bessle Uwe Bessle diff --git a/vendor/github.com/vmware/govmomi/.travis.yml b/vendor/github.com/vmware/govmomi/.travis.yml deleted file mode 100644 index 34777d48d..000000000 --- a/vendor/github.com/vmware/govmomi/.travis.yml +++ /dev/null @@ -1,95 +0,0 @@ -# Use the newer Travis-CI build templates based on the -# Debian Linux distribution "Trusty" release. -os: linux -dist: trusty - -# Disable sudo for all builds by default. This ensures all jobs use -# Travis-CI's containerized build environment unless specified otherwise. -# The container builds have *much* shorter queue times than the VM-based -# build environment on which the sudo builds depend. -sudo: false -services: false - -# Set the version of Go. -language: go -go: 1.11 - -# Always set the project's Go import path to ensure that forked -# builds get cloned to the correct location. -go_import_path: github.com/vmware/govmomi - -# Ensure all the jobs know where the temp directory is. -env: - global: TMPDIR=/tmp - -jobs: - include: - - # The "lint" stage runs the various linters against the project. - - &lint-stage - stage: lint - env: LINTER=govet - install: true - script: make "${LINTER}" - - - <<: *lint-stage - env: LINTER=goimports - - # The "build" stage verifies the program can be built against the - # various GOOS and GOARCH combinations found in the Go releaser - # config file, ".goreleaser.yml". - - &build-stage - stage: build - env: GOOS=linux GOARCH=amd64 - install: true - script: make install - - - <<: *build-stage - env: GOOS=linux GOARCH=386 - - - <<: *build-stage - env: GOOS=darwin GOARCH=amd64 - - <<: *build-stage - env: GOOS=darwin GOARCH=386 - - - <<: *build-stage - env: GOOS=freebsd GOARCH=amd64 - - <<: *build-stage - env: GOOS=freebsd GOARCH=386 - - - <<: *build-stage - env: GOOS=windows GOARCH=amd64 - - <<: *build-stage - env: GOOS=windows GOARCH=386 - - # The test stage executes the test target. - - stage: test - install: true - script: make test - - # The deploy stage deploys the build artifacts using goreleaser. - # - # This stage will only be activated when there is an annotated tag present - # or when the text "/ci-deploy" is present in the commit message. However, - # the "deploy" phase of the build will still only be executed on non-PR - # builds as that restriction is baked into Travis-CI. - # - # Finally, this stage requires the Travis-CI VM infrastructure in order to - # leverage Docker. 
This will increase the amount of time the jobs sit - # in the queue, waiting to be built. However, it's a necessity as Travis-CI - # only allows the use of Docker with VM builds. - - stage: deploy - if: tag IS present OR commit_message =~ /\/ci-deploy/ - sudo: required - services: docker - install: true - script: make install - after_success: docker login -u="${DOCKER_USERNAME}" -p="${DOCKER_PASSWORD}" - deploy: - - provider: script - skip_cleanup: true - script: curl -sL http://git.io/goreleaser | bash - addons: - apt: - update: true - packages: xmlstarlet diff --git a/vendor/github.com/vmware/govmomi/CHANGELOG.md b/vendor/github.com/vmware/govmomi/CHANGELOG.md index 97907f796..f39ad8f1d 100644 --- a/vendor/github.com/vmware/govmomi/CHANGELOG.md +++ b/vendor/github.com/vmware/govmomi/CHANGELOG.md @@ -1,331 +1,2960 @@ -# changelog -### 0.20.0 (2018-02-06) + +## [Release v0.26.1](https://github.com/vmware/govmomi/compare/v0.26.0...v0.26.1) -* Add vslm package for managing First Class Disks +> Release Date: 2021-08-16 -* Add LoginByToken to session KeepAliveHandler +### 🐞 Fix -### 0.19.0 (2018-09-30) +- [a366e352] Regenerated interface and type to include BaseAgencyConfigInfo. Closes: [#2545](https://github.com/vmware/govmomi/issues/2545). +- [d66ef551] explicitly import eam/simulator in test -* New vapi/rest and and vapi/tags packages +### 💡 Examples -* Allowing the use of STS for exchanging tokens +- [0c045a63] Add Property Wait example -* Add object.VirtualMachine.UUID method +### 💫 `govc` (CLI) -* SetRootCAs on the soap.Client returns an error for invalid certificates +- [012f5348] support updating items in library.update command +- [5743d5b6] Allow cluster.change to set ClusterDrsConfigInfo.vmotionRate +- [03210c91] Add object.collect type flag alias help -* Add ClusterComputeResource.MoveInto method +### 💫 `vcsim` (Simulator) -### 0.18.0 (2018-05-24) +- [ce6ed634] avoid race when fetching object Locker +- [31821de3] use 'domain-c' prefix for cluster moids +- [3625e6dd] propagate CustomizeVM MacAddress to Virtual NIC +- [389c0382] Take the host parameter into account while cloning a VM on a cluster +- [6fba1da7] Implement VSLM ExtendDisk_Task -* Add VirtualDiskManager wrapper to set UUID +### 🧹 Chore -* Add vmxnet2, pcnet32 and sriov to VirtualDeviceList.EthernetCardTypes +- [ddc2b47a] Include commit details in BREAKING section + +### ⚠️ BREAKING + +### 📖 Commits + +- [a366e352] fix: Regenerated interface and type to include BaseAgencyConfigInfo. Closes: [#2545](https://github.com/vmware/govmomi/issues/2545). 
+- [655f8e5c] testing for lab +- [ce6ed634] vcsim: avoid race when fetching object Locker +- [31821de3] vcsim: use 'domain-c' prefix for cluster moids +- [0aa1de31] make processing of mac addresses case insensitive ([#2510](https://github.com/vmware/govmomi/issues/2510)) +- [012f5348] govc: support updating items in library.update command +- [0c045a63] examples: Add Property Wait example +- [f30cefc3] Add Reauth flag to skip loading cached sessions +- [3625e6dd] vcsim: propagate CustomizeVM MacAddress to Virtual NIC +- [5743d5b6] govc: Allow cluster.change to set ClusterDrsConfigInfo.vmotionRate +- [389c0382] vcsim: Take the host parameter into account while cloning a VM on a cluster +- [7bf48333] Added CNS querySnapshots binding, simulator testcases and client testcases +- [017ab414] Added CreateSnapshots and DeleteSnapshots methods in cns simulator as well as their test cases +- [03210c91] govc: Add object.collect type flag alias help +- [d66ef551] fix: explicitly import eam/simulator in test +- [ddc2b47a] chore: Include commit details in BREAKING section +- [6fba1da7] vcsim: Implement VSLM ExtendDisk_Task + + +## [Release v0.26.0](https://github.com/vmware/govmomi/compare/v0.25.0...v0.26.0) + +> Release Date: 2021-06-03 + +### 🐞 Fix + +- [70b92d6d] Isolate SSO govc tests + +### 💡 Examples + +- [81b1de17] add toolbox Client.Run + +### 💫 `govc` (CLI) + +- [e37e515b] fix default guest.run path for unsupported Windows guests +- [0e7012d0] Add support for getting the VC proxy and no-proxy configuration ([#2435](https://github.com/vmware/govmomi/issues/2435)) +- [6afb8ff9] Change 'Maintenance Mode' printing in host.info + +### 💫 `vcsim` (Simulator) + +- [dff7f6bb] fix panic in QueryPerfCounter method +- [df9dfde1] set VirtualMachine ChangeTrackingSupported property ([#2468](https://github.com/vmware/govmomi/issues/2468)) +- [0c0ed98a] fix race in CloneVM_Task +- [d01d0fa7] add simulator.RunContainer method +- [8ab0c99a] untie datastore capacity from local fs +- [d31941c8] Modify Usage of README. 
(based on v0.25.0) +- [4fea687c] include all namespaces in /about info +- [bd3467d4] avoid edit device panic when DeviceInfo is nil +- [35a42af5] add guest operations process support + +### 📃 Documentation + +- [75eee8e4] update govc/USAGE and CONTRIBUTORS +- [1f795d21] Add blog to vcsim README +- [2719c229] Document linker and GOFLAGS for build vars + +### 🧹 Chore + +- [b4e1f965] Fix CONTRIB link in greeting +- [6f2597be] Update CHANGELOG implementation +- [d3944e17] Add and reorder commits in CHANGELOG +- [a796d3fc] Add make help target +- [8bc8fd28] Add issue and PR templates +- [60e33916] Document commit prefixes +- [cac1d8d7] Add issue greeting +- [0f1c3f89] Add WIP Action +- [921ad37a] Remove dep files +- [1d4ce94a] Clean up documentation +- [991278b9] Remove unused release script +- [16d8add5] Automate CHANGELOG + +### ⚠️ BREAKING + +### 📖 Commits + +- [75eee8e4] docs: update govc/USAGE and CONTRIBUTORS +- [dff7f6bb] vcsim: fix panic in QueryPerfCounter method +- [b4e1f965] chore: Fix CONTRIB link in greeting +- [df9dfde1] vcsim: set VirtualMachine ChangeTrackingSupported property ([#2468](https://github.com/vmware/govmomi/issues/2468)) +- [8cbe64c5] Fix: Protect FileProvider.files to avoid concurrent modification +- [6f2597be] chore: Update CHANGELOG implementation +- [0c0ed98a] vcsim: fix race in CloneVM_Task +- [81b1de17] examples: add toolbox Client.Run +- [d01d0fa7] vcsim: add simulator.RunContainer method +- [9223b5ae] Add toolbox.NewClient method +- [8ab0c99a] vcsim: untie datastore capacity from local fs +- [e37e515b] govc: fix default guest.run path for unsupported Windows guests +- [d3944e17] chore: Add and reorder commits in CHANGELOG +- [83e29c69] Update GitHub Test Action to use Makefile +- [a7f2c47e] Update vslm types vC build 17986435 (7.0U2HP4) +- [067374fd] Update sms types vC build 17986435 (7.0U2HP4) +- [d9f507f0] Update pbm types vC build 17986435 (7.0U2HP4) +- [c89f8dd5] Update eam types vC build 17986435 (7.0U2HP4) +- [b72432ef] Update vim25 types vC build 17986435 (7.0U2HP4) +- [e53716dd] Update gen.sh to vC build 17986435 (7.0U2HP4) +- [1f795d21] docs: Add blog to vcsim README +- [338f5529] Ran "make fix" to correct lint issues +- [23d77ba4] Add support for golangci-lint +- [d31941c8] vcsim: Modify Usage of README. 
(based on v0.25.0) +- [7046a0d3] Support pre-auth handlers in vC Sim +- [2e8860d1] Add CNS Snapshot APIs in govmomi +- [13d4d376] Remove vendor +- [a796d3fc] chore: Add make help target +- [ef824a20] Fix QueryAsyncVolume API test to be invoked only for vSphere 7.0.3 +- [c1900234] Fix data race in simulator.container.id +- [3212351e] install bin doc: permalink to latest version +- [7d779833] Setup CodeQL Analysis +- [566250ff] build(deps): bump nokogiri from 1.11.1 to 1.11.4 in /gen +- [f814a9ca] ESX Agent Manager (EAM) Client and Simulator +- [0e7012d0] govc: Add support for getting the VC proxy and no-proxy configuration ([#2435](https://github.com/vmware/govmomi/issues/2435)) +- [8bc8fd28] chore: Add issue and PR templates +- [4fea687c] vcsim: include all namespaces in /about info +- [bd3467d4] vcsim: avoid edit device panic when DeviceInfo is nil +- [70b92d6d] fix: Isolate SSO govc tests +- [80c9053e] Correcting broken Kubernetes vSphere Cloud Provider links +- [60e33916] chore: Document commit prefixes +- [57a141f3] Update govc test docs with act +- [ff578914] Use "vcsim uuidgen" for bats tests +- [61e12ddb] Only greet unassociated users +- [e39dfdc8] Add chore section to CHANGELOG +- [6afb8ff9] govc: Change 'Maintenance Mode' printing in host.info +- [cac1d8d7] chore: Add issue greeting +- [0f1c3f89] chore: Add WIP Action +- [921ad37a] chore: Remove dep files +- [2719c229] docs: Document linker and GOFLAGS for build vars +- [f3645a96] Clarify SetRootCAs behavior +- [c368e57f] toolbox: add hgfs freebsd stub +- [35a42af5] vcsim: add guest operations process support +- [64e55d81] Set RoundTripper in ssoadmin.NewClient +- [1d4ce94a] chore: Clean up documentation +- [991278b9] chore: Remove unused release script +- [16d8add5] chore: Automate CHANGELOG +- [e8805c92] Add NotFoundFault in cns types +- [8576fe27] Add queryAsyncVolume in simulator +- [4b9e0813] Simplify binary download instructions +- [3062dda9] Remove Travis CI +- [0be5632f] adding rancher to projects and reorganizing in alpha order +- [4a63a28c] Add bindings for CnsQueryAsyncVolume API +- [a8c80b93] Update READMEs with artifacts and Docker images +- [26c9690c] Fix VM Guest test and vet warnings +- [a32cd0b3] Add RELEASE documentation +- [cc660b0e] Increase govc tests timeout +- [d7bfaf4f] toolbox: move process management to its own package +- [e86da96e] Exclude go files in release tarball + + +## [Release v0.25.0](https://github.com/vmware/govmomi/compare/v0.24.1...v0.25.0) + +> Release Date: 2021-04-16 + +### 💫 `govc` (CLI) + +- [1ac314c3] add vm.customize -dns-suffix flag +- [60e0e895] update test images URL +- [cdf3ace6] log invalid NetworkMapping.Name with import.ova command +- [f8b3d8a8] revert pretty print pruning optimization +- [35481467] add library.update command +- [749c2239] add session.ls -S flag +- [93245c1e] add tree command +- [790f9ce6] include sub task fault messages on failure +- [d2a353ba] remove device.boot -firmware default +- [de6032e0] add '-trace' and '-verbose' flags + +### 💫 `vcsim` (Simulator) + +- [27d8d2e4] put verbose logging behind '-trace' flag +- [0ef4ae22] add moid value mapping mappings +- [082f9927] add vsan simulator + +### 📃 Documentation + +- [e18b601f] update for 0.25 release + +### ⚠️ BREAKING + +### 📖 Commits + +- [6fe8d60a] Fix folder write for govc container +- [e18b601f] docs: update for 0.25 release +- [1ac314c3] govc: add vm.customize -dns-suffix flag +- [22d911f6] Add Cron Docker Login Action +- [60e0e895] govc: update test images URL +- [3385b3e0] Add action to automate 
release +- [cdf3ace6] govc: log invalid NetworkMapping.Name with import.ova command +- [27d8d2e4] vcsim: put verbose logging behind '-trace' flag +- [f8b3d8a8] govc: revert pretty print pruning optimization +- [0ef4ae22] vcsim: add moid value mapping mappings +- [df08d4b2] First step towards release automation +- [f9b79a4f] export simulator.task.Wait() +- [917c4ec8] Ensure lock hand-off to simulator.Task goroutine +- [b45b228f] Simulator Task Delay +- [4b59b652] Make Simulator Tasks Async +- [bc52c793] Associate every registry lock with a Context. +- [054971ee] Wait until VM creation completes before adding to folder +- [35481467] govc: add library.update command +- [7403b470] Fix race in simulator's PropertyCollector +- [aadb2082] Add action to block WIP PRs +- [749c2239] govc: add session.ls -S flag +- [bc297330] [3ad0f415] Update Dockerfiles and .goreleaser.yml +- [082f9927] vcsim: add vsan simulator +- [8c38d56d] Add a stretched cluster conversion command. +- [408c531a] gofmt +- [e8a6b126] Update govc/flags/output.go +- [bf54a7c4] Add more badges +- [93245c1e] govc: add tree command +- [790f9ce6] govc: include sub task fault messages on failure +- [07e6e923] Use Github Actions Status Badges +- [d2a353ba] govc: remove device.boot -firmware default +- [4ed615f6] Add chainable RoundTripper support to endpoint clients +- [bab95d26] Add the vSAN stretched cluster reference. +- [6ff33db7] Fix events example +- [de6032e0] govc: add '-trace' and '-verbose' flags +- [7aae8dfb] Add support for calling vCenter for VLSM ExtendDisk and InflateDisk +- [2c57a8a3] Use Github Actions +- [0d155a61] Handling invalid reader size +- [b70542a5] Using progress reader in WriteFile + + +## [Release v0.24.1](https://github.com/vmware/govmomi/compare/v0.24.0...v0.24.1) + +> Release Date: 2021-03-17 + +### 💡 Examples + +- [38da87ff] add NetworkReference.EthernetCardBackingInfo + +### 💫 `govc` (CLI) + +- [63bb5c1e] metric command enhancements and fixes +- [7844a8c2] fix vm.migrate search index flags +- [5dacf627] fix cluster.usage Free field +- [f71bcf25] fix session curl when given a URL query +- [c954c2a5] validate license.remove +- [3b25c3f1] validate required library.clone NAME arg +- [344b7a30] note 'disk.ls -R' in volume.rm help +- [8942055a] add device.info examples to get disk UUID and vmdk +- [1b0af949] fix vm.markasvm examples +- [add8be5a] fix incorrect DeviceID value in device.pci.add +- [1f4f5640] add IPv6 support to vm.customize + +### 💫 `vcsim` (Simulator) + +- [25970530] fix Task.Info.Entity in RevertToSnapshot_Task +- [f0a045ac] set VirtualMachine.Config.CreateDate property +- [e51eb2b9] support EventFilterSpec.Time +- [8e45fa4a] emit CustomizationSucceeded event from CustomizeVM +- [c000bd6e] add DistributedVirtualSwitchManager +- [bcd5fa87] set VirtualDisk backing UUID +- [ccdcbe89] move product suffix in ServiceContent.About +- [393e7330] use linked list for EventHistoryCollector +- [9c4dc1a1] escape datastore name +- [9c2fe70f] record/replay EnvironmentBrowser.QueryConfigOption +- [5fd7e264] fix EventHistoryCollector fixes +- [0b755a59] switch bats tests from esx to vcsim env +- [3f1caf82] fixes for PowerCLI Get-VirtualNetwork + +### ⚠️ BREAKING + +### 📖 Commits + +- [7a276bf6] Add client test file for vslm package to validate register disk and cns create volume +- [dc29aa29] Fix performance.Manager.SampleByName truncation +- [18b53fd2] Added UpdateServiceMessage to Session Manager +- [63bb5c1e] govc: metric command enhancements and fixes +- [7844a8c2] govc: fix vm.migrate search index flags 
+- [7ab111bd] Drop clusterDistribution from vSAN 7.0 update and create spec elements +- [52631496] Marshal soapFaultError as JSON +- [f9e323a6] fix tab indentation +- [ae129ba0] add tests and implement HA Ready Condition +- [f34b3fa2] implement vSphere HA additional delay to VM HA overrides in govc +- [25970530] vcsim: fix Task.Info.Entity in RevertToSnapshot_Task +- [5dacf627] govc: fix cluster.usage Free field +- [b7f9e034] use correct enum for vm restart priority +- [d3d49a36] Add support for snapshot size calculations +- [61bfa072] Use a dash to indicate empty address +- [f0a045ac] vcsim: set VirtualMachine.Config.CreateDate property +- [4d9a9000] vim25: fix race in TemporaryNetworkError retry func +- [2f14e4b2] ovf: add Config and ExtraConfig to VirtualHardwareSection +- [50328780] Add vSAN 7.0U1 release constant +- [886573de] Update .goreleaser.yml +- [1cdb3164] Change the address type to automatic +- [667a3791] Remove duplicate cns bindings from vsan directory +- [f71bcf25] govc: fix session curl when given a URL query +- [d92f41de] Update volume ACL spec to add delete field +- [c954c2a5] govc: validate license.remove +- [2a4f8c8a] Update ConfigureVolumeACLs bindings in cns types +- [3b25c3f1] govc: validate required library.clone NAME arg +- [344b7a30] govc: note 'disk.ls -R' in volume.rm help +- [8942055a] govc: add device.info examples to get disk UUID and vmdk +- [1b0af949] govc: fix vm.markasvm examples +- [543e52ea] govc-env --save default +- [0a5f2a99] Little fix for "govc-env --save without config name" +- [4a7a0b45] gen: require nokogiri 1.11.0 or higher +- [add8be5a] govc: fix incorrect DeviceID value in device.pci.add +- [e51eb2b9] vcsim: support EventFilterSpec.Time +- [1f4f5640] govc: add IPv6 support to vm.customize +- [8e45fa4a] vcsim: emit CustomizationSucceeded event from CustomizeVM +- [c000bd6e] vcsim: add DistributedVirtualSwitchManager +- [bcd5fa87] vcsim: set VirtualDisk backing UUID +- [ccdcbe89] vcsim: move product suffix in ServiceContent.About +- [393e7330] vcsim: use linked list for EventHistoryCollector +- [9c4dc1a1] vcsim: escape datastore name +- [9c2fe70f] vcsim: record/replay EnvironmentBrowser.QueryConfigOption +- [5fd7e264] vcsim: fix EventHistoryCollector fixes +- [40a2cf0b] Skip tests that require docker on TravisCI +- [00ee2911] toolbox: skip tests that require Linux +- [0b755a59] vcsim: switch bats tests from esx to vcsim env +- [c6d5264a] Updated projects to include VMware Event Broker Appliance +- [ae44a547] ExampleCollector_Retrieve: Add missing err return +- [38da87ff] examples: add NetworkReference.EthernetCardBackingInfo +- [3f1caf82] vcsim: fixes for PowerCLI Get-VirtualNetwork +- [041a98b8] Fix DvsNetworkRuleQualifier interface +- [44e05fe4] SHA-1 deprecated in 2011, sha256sum for releases + + +## [Release v0.24.0](https://github.com/vmware/govmomi/compare/v0.23.1...v0.24.0) + +> Release Date: 2020-12-21 + +### 💡 Examples + +- [7178588c] add Folder.CreateVM +- [b4f7243b] add ContainerView retrieve clusters +- [1d21fff9] use session.Cache +- [8af8cef6] add events +- [e153061f] fix simulator.RunContainer on MacOSX + +### 💫 `govc` (CLI) + +- [1ec59a7c] fix build.sh git tag injection +- [31c0836e] add cluster.usage command +- [79514c81] add volume.ls -ds option +- [5e57b3f6] add device.boot -firmware option +- [4d82f0ff] add dvs.portgroup.{add,change} '-auto-expand' flag +- [4a1d05ac] fix object.collect ContainerView updates +- [e84d0d18] document vm.disk.attach -link behavior +- [70a9ced4] fix vm.clone panic when target VM already exists +- 
[a97e6168] support sparse backing in vm.disk.change +- [3380cd30] add CNS volume ls and rm commands +- [f7170fd2] add find -p flag +- [b40cdd8a] add storage.policy commands +- [d0111d28] add vm.console -wss flag +- [86374ea2] support multi value flags in host.esxcli command +- [ebcfa3d4] add namespace.cluster.ls command + +### 💫 `vcsim` (Simulator) + +- [bf80efab] include stderr in log message when volume import fails +- [1f3fb17c] include stderr in log message when container fails to start +- [e1c4b06e] rewrite vmfs path from saved model +- [bcdfb298] QueryConfigOptionEx Spec is optional +- [73e1af55] support inventory updates in ContainerView +- [a76123b2] set VirtualDevice.Connectable default for removable devices +- [b195dd57] add AuthorizationManager methods +- [a71f6c77] set VirtualDisk backing option defaults +- [fbde3866] add CloneVApp_Task support +- [aae78223] fix ListView.Modify +- [9cca13ab] avoid ViewManager.ModifyListView race +- [156b1cb0] add ListView to race test +- [55f6f952] add mechanism for modeling methods +- [69942fe2] fix save/load property collection for VmwareDistributedVirtualSwitch +- [33121b87] Honoring the instance uuid provided in spec by caller ([#2052](https://github.com/vmware/govmomi/issues/2052)) + +### ⚠️ BREAKING + +### 📖 Commits + +- [1ec59a7c] govc: fix build.sh git tag injection +- [164b9217] Update docs for 0.24 release +- [bf80efab] vcsim: include stderr in log message when volume import fails +- [4080e177] Add batch APIs for multiple tags to object +- [31c0836e] govc: add cluster.usage command +- [7178588c] examples: add Folder.CreateVM +- [2b962f3f] Add test for vsan host config +- [165d7cb4] Add function to get vsan host config +- [79514c81] govc: add volume.ls -ds option +- [f7ff79df] Add Configure ACL go bindings +- [1f3fb17c] vcsim: include stderr in log message when container fails to start +- [3b83040a] Add wrappers for retrieving vsan properties +- [12e8969c] Use gofmt +- [6454dbd4] Add vSAN 7.0 API bindings +- [6a216a52] Add vSAN 7.0 API bindings +- [be15ad6c] Regenerate against vSphere 7.0U1 release +- [5e57b3f6] govc: add device.boot -firmware option +- [e1c4b06e] vcsim: rewrite vmfs path from saved model +- [26635452] Change CnsCreateVolume to return PlacementResult for static volume provisioning. Also add unit test for this case. +- [4d82f0ff] govc: add dvs.portgroup.{add,change} '-auto-expand' flag +- [bcdfb298] vcsim: QueryConfigOptionEx Spec is optional +- [8b194c23] Add Placement object in CNS CreateVolume response. Add corresponding test. 
+- [b085fc33] Use available ctx in enable cluster network lookup +- [f6f336ab] Cleanup some redundant code for cluster namespace enabling +- [d04f2b49] change negative one to rand neg int32 +- [f819befd] go binding for CNS RelocateVolume API +- [ed93ea7d] fix the goimports validation error +- [f402c0e1] support trunk mode port group +- [ff575977] change key default from -1 to rand neg int32 vsphere 7 introduced a key collision detection error when adding devices com.vmware.vim.vpxd.vmprov.duplicateDeviceKey which causes -1 keys to return an error of duplicate if you try and add two devices in the same AddDevice call +- [39acef43] Add option to disable secure cookies with non-TLS endpoints +- [ae19e30f] simulator: fix container vm example +- [73e1af55] vcsim: support inventory updates in ContainerView +- [593cd20d] Add namespace.cluster.disable cmd + formatting fixes +- [782ed95c] Add namespace.cluster.enable cmd to govc +- [e7403032] Make ListStorageProfiles public -> for enabling clusters in govc +- [53965796] Adds support for enabling cluster namespaces via API +- [4a1d05ac] govc: fix object.collect ContainerView updates +- [e84d0d18] govc: document vm.disk.attach -link behavior +- [a76123b2] vcsim: set VirtualDevice.Connectable default for removable devices +- [b4f7243b] examples: add ContainerView retrieve clusters +- [b195dd57] vcsim: add AuthorizationManager methods +- [a71f6c77] vcsim: set VirtualDisk backing option defaults +- [1d21fff9] examples: use session.Cache +- [8af8cef6] examples: add events +- [3e2a8071] Add ClusterDistribution field for CNS telemetry and Drop optional fields not known to the prior releases +- [4acfb726] Fix for fatal error: concurrent map iteration and map write +- [01610887] Adding VsanQueryObjectIdentities and QueryVsanObjects +- [fbde3866] vcsim: add CloneVApp_Task support +- [70a9ced4] govc: fix vm.clone panic when target VM already exists +- [a97e6168] govc: support sparse backing in vm.disk.change +- [3380cd30] govc: add CNS volume ls and rm commands +- [f9d7bfdf] sts: fix SignRequest bodyhash for non-empty request body +- [7b4e997b] vapi: add WCP support bundle bindings +- [aae78223] vcsim: fix ListView.Modify +- [0e4bce43] Add AuthorizationManager.HasUserPrivilegeOnEntities wrapper +- [81207eab] vim25/xml: sync with Go 1.15 encoding/xml +- [f7170fd2] govc: add find -p flag +- [d49123c9] Add internal.InventoryPath helper +- [b40cdd8a] govc: add storage.policy commands +- [0c5cdd5d] add / remove pci passthrough device for one VM +- [d0111d28] govc: add vm.console -wss flag +- [94bc8497] Add sms generated types and methods +- [e153061f] examples: fix simulator.RunContainer on MacOSX +- [99fe9954] finder: simplify direct use of InventoryPath func +- [3760bd6c] Added Instant Clone feature Resolves: [#1392](https://github.com/vmware/govmomi/issues/1392) +- [86374ea2] govc: support multi value flags in host.esxcli command +- [9cca13ab] vcsim: avoid ViewManager.ModifyListView race +- [156b1cb0] vcsim: add ListView to race test +- [f903d5da] Add ExtendDisk and InflateDisk wrappers to vlsm/object_manager +- [073cc310] Add AttachDisk and DetachDisk wrappers for the virtualMachine object. 
+- [a0c7e829] vapi: add tags.Manager.GetAttachedTagsOnObjects example +- [378a24c4] Vsan Performance Data Collection API ([#2021](https://github.com/vmware/govmomi/issues/2021)) +- [55f6f952] vcsim: add mechanism for modeling methods +- [69942fe2] vcsim: fix save/load property collection for VmwareDistributedVirtualSwitch +- [fe3becfa] bats: test fixes for running on MacOSX +- [0422a070] Merge branch 'master' into pc/HardwareInfoNotReplicatingInCloning +- [9f12aae4] vapi: add Content Library example +- [33121b87] vcsim: Honoring the instance uuid provided in spec by caller ([#2052](https://github.com/vmware/govmomi/issues/2052)) +- [9a07942b] Setting hardware properties in clone VM spec from template VM +- [ebcfa3d4] govc: add namespace.cluster.ls command +- [11d45e54] vapi: add namespace management client and vcsim support +- [cdc44d5e] vapi: add helper support "/api" endpoint + + +## [Release v0.23.1](https://github.com/vmware/govmomi/compare/v0.23.0...v0.23.1) + +> Release Date: 2020-07-02 + +### 💡 Examples + +- [0bbb6a7d] add property.Collector.Retrieve example + +### 💫 `vcsim` (Simulator) + +- [0697d33f] add HostNetworkSystem.QueryNetworkHint +- [d7f4bba6] use HostNetworkSystem wrapper with -load flag +- [916b12e6] set HostSystem IP in cluster AddHost_Task +- [e63ec002] add PbmQueryAssociatedProfile method + +### ⚠️ BREAKING + +### 📖 Commits + +- [b7add48c] check if config isn't nil before returning an uuid +- [12955a6c] added support for returning array of BaseCnsVolumeOperationResult for QueryVolumeInfo API +- [0697d33f] vcsim: add HostNetworkSystem.QueryNetworkHint +- [a5c9e1f0] Merge branch 'master' into master +- [c14e3bc5] adding in link to OPS +- [d7f4bba6] vcsim: use HostNetworkSystem wrapper with -load flag +- [916b12e6] vcsim: set HostSystem IP in cluster AddHost_Task +- [e63ec002] vcsim: add PbmQueryAssociatedProfile method +- [0bbb6a7d] examples: add property.Collector.Retrieve example + + +## [Release v0.23.0](https://github.com/vmware/govmomi/compare/prerelease-v0.22.1-247-g770fcba2...v0.23.0) + +> Release Date: 2020-06-11 + +### 💫 `govc` (CLI) + +- [4f19eb6d] ipath search flag does not require a Datacenter + +### ⚠️ BREAKING + +### 📖 Commits + +- [b639ab4c] Update docs for 0.23 release +- [be7742f2] vapi: use header authentication in file Upload/Download +- [50846878] provided examples for vm.clone and host.esxcli +- [aa97c4d3] Add appliance log forwarding config handler and govc verb ([#1994](https://github.com/vmware/govmomi/issues/1994)) +- [4f19eb6d] govc: ipath search flag does not require a Datacenter + + +## [Release prerelease-v0.22.1-247-g770fcba2](https://github.com/vmware/govmomi/compare/v0.22.2...prerelease-v0.22.1-247-g770fcba2) + +> Release Date: 2020-05-29 + +### 💡 Examples + +- [0e4b487e] Fixed error is not logging in example.go +- [c17eb769] add ContainerView.Find + +### 💫 `govc` (CLI) + +- [10c22fd1] support raw object references in import.ova NetworkMapping +- [414c548d] support find with -customValue filter +- [0bf0e761] support VirtualApp with -pool flag +- [f1ae45f5] add -version flag to datastore.create command +- [43e4f8c2] add session.login -X flag +- [70b7e1b4] vm.clone ResourcePool is optional when -cluster is specified +- [2c5ff385] add REST support for session.login -cookie flag +- [7d66cf9a] fix host.info CPU usage +- [244a8369] add session.ls -r flag +- [6c68ccf2] add a VM template clone example +- [bb6ae4ab] ignore ManagedObjectNotFound errors in 'find' command +- [210541fe] remove ClientFlag.WithRestClient +- [75e9e80d] do not try to 
start a VM template +- [667e6fbe] add guest directory upload/download examples +- [167f5d83] add vm.change -uuid flag +- [bcd06cee] enable library.checkout and library.checkin by default +- [6f087ded] avoid truncation in object.collect +- [e9bb4772] add import.spec support for remote URLs +- [692c1008] support optional compute.policy.ls argument +- [814e4e5c] add vm.change '-memory-pin' flag +- [56e878a5] support nested groups in sso.group.update +- [84346733] add content library helpers +- [0ccfd912] add cluster.group.ls -l flag +- [ae84c494] use OutputFlag for import.spec +- [2dda4daa] add library.clone -ovf flag +- [519d302d] fix doc for -g flag (guest id) choices +- [e582cbd1] add object.collect -o flag +- [d2e6b7df] output formatting enhancements +- [e64c2423] add find -l flag +- [4db4430c] save sessions using sha256 ID + +### 💫 `vcsim` (Simulator) + +- [c3fe4f84] CreateSnapshotTask now returns moref in result +- [b0af443c] add lookup ServiceRegistration example +- [34734712] add AuthorizationManager.HasPrivilegeOnEntities +- [228e0a8f] traverse configManager.datastoreSystem in object.save +- [8acac02a] traverse configManager.virtualNicManager in object.save +- [8a4ab564] traverse configManager.networkSystem in object.save +- [4b8a5988] add extraConfigAlias table +- [a0fe825a] add EventHistoryCollector.ResetCollector implementation +- [558747b3] fixes for PowerCLI +- [9ae04495] apply ExtraConfig after devices +- [4286d7cd] add another test/example for DVS host member validation +- [7e24bfcb] validate DVS membership +- [853656fd] fix flaky library subscriber test +- [7426e2fd] avoid panic if ovf:capacityAllocationUnits is not present +- [55599668] support QueryConfigOptionEx GuestId param +- [67d593cc] VM templates do not have a ResourcePool +- [469e11b9] validate session key in TerminateSession method +- [88d298ff] unique MAC address for VM NICs +- [c4f820dd] create vmdk directory if needed +- [488205f0] support VMs with the same name +- [68349a27] support Folder in RelocateVM spec +- [ab1298d5] add guest operations support +- [7ffb9255] add HostStorageSystem support +- [77b31b84] avoid possible panic in UnregisterVM_Task +- [617c18e7] support tags with the same name +- [dfcf9437] add docs on generated inventory names +- [4cfc2905] add support for NSX backed networks + +### ⚠️ BREAKING + +### 📖 Commits + +- [7cdad997] Finder: support DistributedVirtualSwitch traversal +- [10c22fd1] govc: support raw object references in import.ova NetworkMapping +- [c3fe4f84] vcsim: CreateSnapshotTask now returns moref in result +- [b0af443c] vcsim: add lookup ServiceRegistration example +- [84f1b733] simulator: fix handling of nil Reference in container walk +- [b5b434b0] Adding sunProfileName in pbm.CapabilityProfileCreateSpec +- [2111324a] providing examples for govc guest.run +- [0eef3b29] Bump to vSphere version 7 +- [b277903e] go binding for CNS QueryVolumeInfo API +- [a048ea52] Move simulator lookupservice registration into ServiceInstance +- [30f1a71a] modify markdown link at simulator.Model +- [7881f541] Add REST session keep alive support +- [3aa9aaba] vapi: sync access to rest.Client.SessionID +- [0a53ac4b] simulator: refactor folder children operations +- [b9152f85] simulator: relax ResourcePool constraint for createVM operation +- [70e9d821] simulator: relax typing condition on RP parent +- [502b7efa] simulator: relax ViewManager typing constraints +- [634fdde1] simulator: remove data race in VM creation flow +- [6eda0169] simulator: protect datastore freespace updates against data races 
+- [414c548d] govc: support find with -customValue filter +- [487ca0d6] Add logic to return default HealthStatus in CnsCreateVolume. +- [0bf0e761] govc: support VirtualApp with -pool flag +- [f1ae45f5] govc: add -version flag to datastore.create command +- [d0751307] Add support for attach-tag-to-multiple-objects +- [5682b1f2] simulator: relax excessive type assertions in SearchIndex +- [39a4da90] Modify parenthesis for markdown link +- [34734712] vcsim: add AuthorizationManager.HasPrivilegeOnEntities +- [92d464b9] 1. Add retry for CNS Create API with backing disk url 2. Fix binding for CnsAlreadyRegisteredFault +- [235582fe] Add sample test for Create CNS API using backing disk Url path +- [b187863a] 1. Add BackingDiskUrlPath and CnsAlreadyFault go bindings to CNS APIs 2. Update CreateVolume CNS Util to include BackingDiskUrlPath +- [409279fa] Add GetProfileNameByID functionality to PBM +- [228e0a8f] vcsim: traverse configManager.datastoreSystem in object.save +- [8acac02a] vcsim: traverse configManager.virtualNicManager in object.save +- [8a4ab564] vcsim: traverse configManager.networkSystem in object.save +- [43e4f8c2] govc: add session.login -X flag +- [70b7e1b4] govc: vm.clone ResourcePool is optional when -cluster is specified +- [2c5ff385] govc: add REST support for session.login -cookie flag +- [6ccaf303] Add guest.FileManager.TransferURL test +- [03c7611e] Avoid possible nil pointer dereference in guest TransferURL +- [44a78f96] Fix delegated Holder-of-Key token signature +- [11b2aa1a] Update to vSphere 7 APIs +- [4b8a5988] vcsim: add extraConfigAlias table +- [a0fe825a] vcsim: add EventHistoryCollector.ResetCollector implementation +- [558747b3] vcsim: fixes for PowerCLI +- [9ae04495] vcsim: apply ExtraConfig after devices +- [7d66cf9a] govc: fix host.info CPU usage +- [4286d7cd] vcsim: add another test/example for DVS host member validation +- [515621d1] Revert to using sha1 for session cache file names +- [f103a87a] Default to separate session cache directories +- [7e24bfcb] vcsim: validate DVS membership +- [244a8369] govc: add session.ls -r flag +- [6c68ccf2] govc: add a VM template clone example +- [bb6ae4ab] govc: ignore ManagedObjectNotFound errors in 'find' command +- [853656fd] vcsim: fix flaky library subscriber test +- [571f64e7] Fix existing goimport issue +- [7426e2fd] vcsim: avoid panic if ovf:capacityAllocationUnits is not present +- [9e57f983] Add non-null HostLicensableResourceInfo to HostSystem +- [210541fe] govc: remove ClientFlag.WithRestClient +- [75e9e80d] govc: do not try to start a VM template +- [d9220e5d] simulator: add interface for VirtualDiskManager +- [55599668] vcsim: support QueryConfigOptionEx GuestId param +- [67d593cc] vcsim: VM templates do not have a ResourcePool +- [667e6fbe] govc: add guest directory upload/download examples +- [167f5d83] govc: add vm.change -uuid flag +- [bcd06cee] govc: enable library.checkout and library.checkin by default +- [9d4faa6d] Refactor govc session persistence into session/cache package +- [6f087ded] govc: avoid truncation in object.collect +- [7a1fef65] Remove Task from function names in Task struct receiver methods +- [dd839655] Add SetTaskState SetTaskDescription UpdateProgress to object package +- [469e11b9] vcsim: validate session key in TerminateSession method +- [af41ae09] Revert compute policy support +- [ad612b3e] Fix the types of errors returned from VSLM tasks to be their originl vim faults rather than just wrappers of localized error msg +- [9e82230f] Remove extra err check +- [e9bb4772] govc: add 
import.spec support for remote URLs +- [273aaf71] skip tests when env is not set +- [159c423c] removing usage of spew package +- [76caec95] vapi: prefer header authn to cookie authn +- [6c04cfa0] Dropping fields in entity metadata for 6.7u3 +- [8d15081f] using right version and namespace from sdk/vsanServiceVersions.xml for cns client. making cns/client.go backward compatible to vsan67u3 by dropping unknown elements +- [8dfb29f5] Add nil check for taskInfo result before typecasting CnsVolumeOperationBatchResult +- [d68bbf9b] fixing CnsFault go binding +- [5482bd07] syncing vmodl changes +- [3bcace84] fixing go binding for CnsVolumeOperationResult and CnsFault +- [3c756cbd] Fixing govmomi binding for CNS as per latest VMODL for CnsVsanFileShareBackingDetails. Also fixed cns/client_test.go accordingly. +- [4254df70] Adding new API to get cluster configuration +- [0eacb4ed] removing space before omitempty tag +- [59ce7e4a] Resolve bug in Simulator regarding BackingObjectDetails +- [6ad7e87d] Change the backingObjectDetails attribute to point to interface BaseCnsBackingObjectDetails +- [601f1ded] Add resize support +- [56049aa4] Updating go binding for vsan fileshare vmodl updates +- [af798c01] Add CnsQuerySelectionNameType and CnsKubernetesEntityType back +- [af2723fd] Add bindings for vSANFS and extend CNS bindings to support file volume +- [4e7b9b00] update taskClientVersion for vsphere 7.0 +- [692c1008] govc: support optional compute.policy.ls argument +- [a7d4a77d] Modified return type for Get policy +- [4007484e] Compute Policy support +- [88d298ff] vcsim: unique MAC address for VM NICs +- [814e4e5c] govc: add vm.change '-memory-pin' flag +- [de8bcf25] reset all for recursive calls fix format error +- [57efe91f] Fixed ContainerView.RetrieveWithFilter fetch all specs if empty list of properties given +- [5af5ac8d] Avoid possible panic in Filter.MatchProperty +- [85889777] Add vAPI create binding for compute policy +- [56e878a5] govc: support nested groups in sso.group.update +- [6f46ef8a] Added prefix toggle parameter to govc export.ovf +- [6d3196e4] Disk mode should override default value in vm.disk.attach +- [4be7a425] Replaced ClassOvfParams with ClassDeploymentOptionParams +- [c4f820dd] vcsim: create vmdk directory if needed +- [1ab6fe09] Add Content Library subscriptions support +- [488205f0] vcsim: support VMs with the same name +- [68349a27] vcsim: support Folder in RelocateVM spec +- [6a6a7875] Update CONTRIBUTING to have more info about running CI tests, checks. +- [a73c0d4f] Expose Soap client default transport (a.k.a. 
its http client default transport) +- [84346733] govc: add content library helpers +- [a225a002] build(deps): bump nokogiri from 1.10.4 to 1.10.8 in /gen +- [b4395d65] Avoid ServiceContent requirement in lookup.NewClient +- [c1e828cb] fix blog links +- [863430ba] toolbox: bump test VM memory for current CoreOS release +- [0ccfd912] govc: add cluster.group.ls -l flag +- [1af6ec1d] Add Namespace support to UseServiceVersion +- [ab1298d5] vcsim: add guest operations support +- [0e4b487e] examples: Fixed error is not logging in example.go +- [f36e13fc] Add Content Library item copy support +- [7ffb9255] vcsim: add HostStorageSystem support +- [ae84c494] govc: use OutputFlag for import.spec +- [2dda4daa] govc: add library.clone -ovf flag +- [77b31b84] vcsim: avoid possible panic in UnregisterVM_Task +- [519d302d] govc: fix doc for -g flag (guest id) choices +- [617c18e7] vcsim: support tags with the same name +- [e582cbd1] govc: add object.collect -o flag +- [0c6eafc1] Apply gomvomi vim25/xml changes +- [4da54375] Simplify ObjectName method +- [d2e6b7df] govc: output formatting enhancements +- [dfcf9437] vcsim: add docs on generated inventory names +- [e64c2423] govc: add find -l flag +- [4db4430c] govc: save sessions using sha256 ID +- [4cfc2905] vcsim: add support for NSX backed networks +- [c17eb769] examples: add ContainerView.Find +- [36056ae6] Import golang/go/src/encoding/xml v1.13.6 +- [346cf59a] Avoid encoding/xml import +- [9cbe57db] fix simulator disk manager fault message. +- [7f685c23] Add permissions for NoCryptoAdmin + + +## [Release v0.22.2](https://github.com/vmware/govmomi/compare/v0.22.1...v0.22.2) + +> Release Date: 2020-02-13 + +### ⚠️ BREAKING + +### 📖 Commits + +- [e7df0c11] Avoid ServiceContent requirement in lookup.NewClient + + +## [Release v0.22.1](https://github.com/vmware/govmomi/compare/v0.22.0...v0.22.1) + +> Release Date: 2020-01-13 + +### ⚠️ BREAKING + +### 📖 Commits + +- [da368950] Release version 0.22.1 +- [a62b12cf] Fix AttributeValue.C14N for 6.7u3 +- [c3d102b1] Add finder example for MultipleFoundError +- [802e5899] vapi: add CreateTag example +- [15630b90] vapi: Add cluster modules client and simulator + + +## [Release v0.22.0](https://github.com/vmware/govmomi/compare/v0.20.3...v0.22.0) + +> Release Date: 2020-01-10 + +### 💡 Examples + +- [72b1cd92] output VM names in performance example +- [f4b3cda7] add Common.Rename +- [dab4ab0d] add VirtualMachine.Customize +- [1828eee9] add VirtualMachine.CreateSnapshot +- [6ff7040e] fix flag parsing +- [cad9a8e2] add ExampleVirtualMachine_Reconfigure +- [9495f0d8] add CustomFieldManager.Set + +### 💫 `govc` (CLI) + +- [aed39212] guest -i flag only applies to ProcessManager +- [704b335f] add 5.0 to vm.create hardware version map +- [965109ae] guest.run improvements +- [ee28fcfd] add vm.customize multiple IP support +- [68b3ea9f] fix library.info output formatting +- [5bb7f391] add optional library.info details +- [d8ac7e51] handle xsd:string responses +- [31d3e357] add library.info details +- [182c84a3] fixup tasks formatting +- [08fb2b02] remove guest.run toolbox dependency +- [a727283f] default to simple esxcli format when hints fields is empty +- [204af3c5] add datacenter create/delete examples +- [f6c57ee7] fix vm.create doc regarding -on flag +- [8debfcc3] add device.boot -secure flag +- [2bb2a6ed] add doc on vm.info -r flag +- [e50368c6] avoid env for -cluster placement flag +- [f16eb276] add default library.create thumbprint +- [d8325f34] add thumbprint flag to library.create +- [0bad2bc2] add vm.power doc +- 
[45d322ea] support vm.customize without a managed spec +- [0a058e0f] fixup usage suggestions +- [3185f7bc] add vm.customize command +- [1b159e27] fix datacenter.info against nested folders +- [149ba7ad] add vm.change -latency flag +- [c35a532d] validate moref argument +- [3fb02b52] add guest.df command +- [fa755779] support library paths in tags.attach commands +- [2ddfb86b] add datastore.info -H flag +- [b3adfff2] add sso.group commands +- [b5372b0c] host.vnic.info -json support +- [4c41c167] add context to LoadX509KeyPair error +- [910dac72] add vm.change hot-add options +- [746c314e] change logs.download -default=false +- [05f946d4] increase guest.ps -X poll interval +- [cc10a075] add -options support to library.deploy +- [fe372923] rename vcenter.deploy to library.deploy +- [436d7a04] move library.item.update commands to library.session +- [e6514757] consolidate library commands +- [f8249ded] export Archive Path field +- [d2ab2782] add vm.change vpmc-enabled flag +- [e7b801c6] fix vm.change against templates +- [8a856429] fix option.set for int32 type values +- [81391309] add datastore.maintenance.{enter,exit} commands +- [18cb9142] FCD workarounds +- [665affe5] add datastore.cluster.info Description +- [7b7f2013] add permission.remove -f flag + +### 💫 `vcsim` (Simulator) + +- [198b97ca] propagate VirtualMachineCloneSpec.Template +- [168a6a04] add -trace-file option +- [32eeeb24] Get IP address on non-default container network +- [1427d581] avoid possible panic in VirtualMachine.Destroy_Task +- [067d58be] automatically set Context.Caller +- [9e8e9a5a] remove container volumes +- [6cc814b8] bind mount BIOS UUID DMI files +- [9aec1386] validate VirtualDisk UnitNumber +- [d7e43b4e] add Floppy Drive support to OVF manager +- [8646dace] properly initialize portgroup portKeys field +- [286bd5e9] add vim25 client helper to vapi simulator +- [c3163247] use VMX_ prefix for guestinfo env vars +- [a3a09c04] don't allow duplicate names for Folder/StoragePod +- [a0a2296e] pass guestinfo vars as env vars to container vms +- [903fe182] add CustomizationSpecManager support +- [eda6bf3b] simplify container vm arguments input +- [0ce9b0a1] update docs +- [7755fbda] add record/playback functionality +- [fe000674] add VirtualMachine.Rename_Task support +- [d87cd5ac] add feature examples +- [2cc33fa8] Ensure that extraConfig from clone spec is added to VM being cloned +- [70ad060e] use exported response helpers in vapi/simulator +- [1e7aa6c2] avoid ViewManager.ViewList +- [9b0db1c2] avoid race in ViewManager +- [28b5fc6c] use TLS in simulator.Run +- [f962095f] rename Example to Run +- [43d69860] add endpoint registration mechanism +- [c183577b] add PlaceVm support ([#1589](https://github.com/vmware/govmomi/issues/1589)) +- [b17f3a51] DefaultDatastoreID is optional in library deploy +- [774f3800] add support to override credentials +- [ecd7312b] fix host uuid +- [c25c41c1] use stable UUIDs for inventory objects +- [1345eeb8] Press any key to exit +- [ee14bd3d] Update NetworkInfo.Portgroup in simulator +- [5b5eaa70] remove httptest.serve flag +- [20c1873e] add library.deploy support +- [0b1ad552] add ovf manager +- [6684016f] fork httptest server package +- [48c1e0a5] add content library support +- [8543ea4f] set guest.toolsRunningStatus property + +### ⏮ Reverts + +- [7914609d] gen: retain omitempty field tag with int pointer types + +### ⚠️ BREAKING + +### 📖 Commits + +- [317707be] Update docs for 0.22 release +- [aed39212] govc: guest -i flag only applies to ProcessManager +- [22308123] Clarify DVS 
EthernetCardBackingInfo error message +- [a1c98f14] Add Content Library synchronization support +- [704b335f] govc: add 5.0 to vm.create hardware version map +- [4e907d99] Clarify System.Read privilege requirement for PortGroup backing +- [554d9284] Fix guest.FileManager.TransferURL cache +- [9b8da88a] Remove toolbox specific guest run implementation +- [965109ae] govc: guest.run improvements +- [ee28fcfd] govc: add vm.customize multiple IP support +- [40001828] Add OVF properties to library deploy ([#1755](https://github.com/vmware/govmomi/issues/1755)) +- [68b3ea9f] govc: fix library.info output formatting +- [198b97ca] vcsim: propagate VirtualMachineCloneSpec.Template +- [5bb7f391] govc: add optional library.info details +- [2509e907] Added the missing RetrieveSnapshotDetails API in VSLM ([#1763](https://github.com/vmware/govmomi/issues/1763)) +- [d8ac7e51] govc: handle xsd:string responses +- [45b3685d] Add library ItemType constants +- [f3e2c3ce] Add retry support for HTTP status codes +- [31d3e357] govc: add library.info details +- [182c84a3] govc: fixup tasks formatting +- [08fb2b02] govc: remove guest.run toolbox dependency +- [b10bcbf3] VSLM: fixed the missing param in the QueryChangedDiskArea API impl +- [168a6a04] vcsim: add -trace-file option +- [72b1cd92] examples: output VM names in performance example +- [32eeeb24] vcsim: Get IP address on non-default container network +- [f9f69237] Move to cs.identity service type for sso admin endpoint +- [1427d581] vcsim: avoid possible panic in VirtualMachine.Destroy_Task +- [067d58be] vcsim: automatically set Context.Caller +- [a727283f] govc: default to simple esxcli format when hints fields is empty +- [08adb5d6] Move to cs.identity service type for sts endpoint +- [9e8e9a5a] vcsim: remove container volumes +- [6cc814b8] vcsim: bind mount BIOS UUID DMI files +- [e793289c] Content Library: add CheckOuts support +- [66c9b10c] Content Library: VM Template support +- [f4b3cda7] examples: add Common.Rename +- [19a726f7] Pass vm.Config.Uuid into the "VM" container via an env var +- [204af3c5] govc: add datacenter create/delete examples +- [dab4ab0d] examples: add VirtualMachine.Customize +- [f6c57ee7] govc: fix vm.create doc regarding -on flag +- [8debfcc3] govc: add device.boot -secure flag +- [9aec1386] vcsim: validate VirtualDisk UnitNumber +- [7914609d] Revert "gen: retain omitempty field tag with int pointer types" +- [9b2c5cc6] Add CustomizationSpecManager.Info method and example +- [d7e43b4e] vcsim: add Floppy Drive support to OVF manager +- [0bf21ec2] Implement some missing methods ("*All*" variants) on SearchIndex MOB +- [2bb2a6ed] govc: add doc on vm.info -r flag +- [8646dace] vcsim: properly initialize portgroup portKeys field +- [e50368c6] govc: avoid env for -cluster placement flag +- [91b1e0a7] Add ability to set DVS discovery protocol on create and change +- [1e130141] Move to Go 1.13 +- [f16eb276] govc: add default library.create thumbprint +- [d8325f34] govc: add thumbprint flag to library.create +- [62c20113] Fix hostsystem ManagementIPs call +- [c4a3908f] Update DVS change to use finder.Network for a single object +- [ee6fe09d] Fix usage instructions +- [5e6f5e3f] gen: retain omitempty field tag with int pointer types +- [286bd5e9] vcsim: add vim25 client helper to vapi simulator +- [841386f1] Add ability to change a vnic on a host +- [391dd80b] Add ability to change the MTU on a DVS that has already been created +- [26a45d61] Change MTU param to use flags.NewInt32 as the type +- [dbcfc3a8] Add MTU flag for DVS creation 
+- [0399353f] Generate pointer type for ResourceReductionToToleratePercent +- [3f6b8ef5] Add nil checks for all HostConfigManager references +- [c3163247] vcsim: use VMX_ prefix for guestinfo env vars +- [5381f171] Add option to follow all struct fields in mo.References +- [04e4835c] Refactor session KeepAlive tests to use vcsim +- [7391c241] Avoid possible deadlock in KeepAliveHandler +- [41422ea4] build(deps): bump nokogiri from 1.6.3.1 to 1.10.4 in /gen +- [a3a09c04] vcsim: don't allow duplicate names for Folder/StoragePod +- [4c72d2e9] Add a method to update ports on a distributed virtual switch +- [0bad2bc2] govc: add vm.power doc +- [45d322ea] govc: support vm.customize without a managed spec +- [0a058e0f] govc: fixup usage suggestions +- [a0a2296e] vcsim: pass guestinfo vars as env vars to container vms +- [903fe182] vcsim: add CustomizationSpecManager support +- [eda6bf3b] vcsim: simplify container vm arguments input +- [0ce9b0a1] vcsim: update docs +- [c538d867] adding managed obj type to table +- [3185f7bc] govc: add vm.customize command +- [b2a7b47e] Include object.save directory in output +- [e8281f87] Initial support for hybrid Model.Load +- [7755fbda] vcsim: add record/playback functionality +- [8a3fa4f2] set stable vsan client version +- [9eaac5cb] Avoid empty principal in HoK token request +- [4a8da68d] Allow sending multiple characters through -c and name the keys +- [3e3d3515] add simple command list filter +- [fe000674] vcsim: add VirtualMachine.Rename_Task support +- [9166bbdb] support two tags with the same name +- [344653c1] added log type and password scrubber +- [d87cd5ac] vcsim: add feature examples +- [30fc2225] Report errors when cdrom.insert fails +- [a94f2d3a] vslm: fix to throw errors on tasks that are completed with error state +- [37054f03] added IsTemplate vm helper +- [d7aeb628] Fix object.collect with moref argument +- [0765aa63] add GetInventoryPath to NetworkReference interface +- [9fb975b0] Fix description of vm.keystrokes +- [234aaf53] vapi: support DeleteLibrary with subscribed libraries +- [2cc33fa8] vcsim: Ensure that extraConfig from clone spec is added to VM being cloned +- [70ad060e] vcsim: use exported response helpers in vapi/simulator +- [b069efc0] vapi: refactor for external API implementations +- [1e7aa6c2] vcsim: avoid ViewManager.ViewList +- [9b0db1c2] vcsim: avoid race in ViewManager +- [bd298f43] a failing testcase that triggers with -race test +- [03422dd2] vapi: expand internal path constants +- [d296a5f8] Support HoK tokens with Interactive Users +- [c6226542] Fix error check in session.Secret +- [28b5fc6c] vcsim: use TLS in simulator.Run +- [f9b4bb05] Replace LoadRetrievePropertiesResponse with LoadObjectContent +- [d84679eb] Add VirtualHardwareSection.StorageItem +- [a23a5cb1] Check whether there's a NIC before updating guest.ipAddress +- [8a069c27] Add interactiveSession flag +- [25526b21] vm.keystrokes -s (Allow spaces) +- [1828eee9] examples: add VirtualMachine.CreateSnapshot +- [ca3763e7] vapi: return info with current session query +- [f962095f] vcsim: rename Example to Run +- [43d69860] vcsim: add endpoint registration mechanism +- [1b159e27] govc: fix datacenter.info against nested folders +- [c183577b] vcsim: add PlaceVm support ([#1589](https://github.com/vmware/govmomi/issues/1589)) +- [3e71d6be] Add ResourcePool.Owner method +- [b17f3a51] vcsim: DefaultDatastoreID is optional in library deploy +- [68980704] Update generated code to vSphere 6.7u3 +- [7416741c] Add VirtualMachine.QueryChangedDiskAreas(). 
+- [8ef87890] Content library: support library ID in Finder +- [e373feb8] Add option to propagate MissingSet faults in property.WaitForUpdates +- [6ff7040e] examples: fix flag parsing +- [149ba7ad] govc: add vm.change -latency flag +- [c35a532d] govc: validate moref argument +- [54df157b] Add content library subscription support +- [b86466b7] Fix deadlock for keep alive handlers that attempt log in +- [9ad64557] CNS go bindings +- [9de3b854] Add simulator.Model.Run example +- [4285b614] Include url in Client.Download error +- [caf0b6b3] vcsa: update to 6.7 U3 +- [7ac56b64] Update vcsim Readme.md +- [48ef35df] Update README.md +- [a40837d8] Use gnu xargs in bats tests on Darwin +- [51ad97e1] Add FetchCapabilityMetadata method to Pbm client +- [d124bece] Add v4 option to VirtualMachine.WaitForIP +- [a5a429c0] Add support for the cis session get method +- [4513735f] Don't limit library.Finder to local libraries +- [cad9a8e2] examples: add ExampleVirtualMachine_Reconfigure +- [3fb02b52] govc: add guest.df command +- [a0fef816] Update docs for 0.21 release +- [a38f6e87] Content library related cleanups +- [e4024e9c] Fix library AddLibraryItemFileFromURI fingerprint +- [fa755779] govc: support library paths in tags.attach commands +- [5e8cb495] Fixed type bug in global_object_manager Task.QueryResult +- [4a67dc73] govcsim: Support Default UplinkTeamingPolicy in DVSPG +- [9da2362d] Added missing field in VslmExtendDisk_Task in ExtendDisk method +- [91377d77] Add Juju to projects using govmomi +- [f9026a84] VSLM FCD Global Object Manager client for 6.7U2+ +- [9495f0d8] examples: add CustomFieldManager.Set +- [bb170705] govcsim: Create datastore as accessible +- [35d0b7d3] Set the InventoryPath of the folder object in DefaultFolder ([#1515](https://github.com/vmware/govmomi/issues/1515)) +- [2d13a357] Add govmomi performance example +- [2ddfb86b] govc: add datastore.info -H flag +- [55da29e5] govcsim: Set datastore status as normal +- [600e9f7c] Add various govmomi client examples +- [5cccd732] Add http source support to library.import +- [99dd5947] Goreleaser update for multiple archives +- [b3adfff2] govc: add sso.group commands +- [5889d091] tags API: add methods for association of multiple tags/objects +- [b5372b0c] govc: host.vnic.info -json support +- [9b7688e0] Add method that sets vim version to the endpoint service version +- [fe3488f5] Fix tls config in soap.NewServiceClient +- [4c41c167] govc: add context to LoadX509KeyPair error +- [d7430825] Support external PSC lookup service +- [774f3800] vcsim: add support to override credentials +- [47c9c070] Fix HostNetworkSystem.QueryNetworkHint return value +- [910dac72] govc: add vm.change hot-add options +- [4606125e] Fix json request tracing +- [746c314e] govc: change logs.download -default=false +- [05f946d4] govc: increase guest.ps -X poll interval +- [77cb9df5] Add library export support +- [cc10a075] govc: add -options support to library.deploy +- [ecd7312b] vcsim: fix host uuid +- [c25c41c1] vcsim: use stable UUIDs for inventory objects +- [322d9629] Fix pbm field type lookup +- [1345eeb8] vcsim: Press any key to exit +- [a4f58ac6] Update examples to use examples.Run method +- [a31db862] Add permanager example +- [384b1b95] Fix port signature in REST endpoint token auth +- [c222666f] Default to running against vcsim in examples +- [199e737b] Add generated vslm types and methods +- [ee14bd3d] vcsim: Update NetworkInfo.Portgroup in simulator +- [dc631a2d] Format import statement +- [f133c9e9] Fix paths in vsan/methods +- [d8e7cc75] Update 
copy rights +- [62412641] Add vsan bindings +- [fc3f0e9d] Support resignature of vmfs snapshots ([#1442](https://github.com/vmware/govmomi/issues/1442)) +- [fe372923] govc: rename vcenter.deploy to library.deploy +- [436d7a04] govc: move library.item.update commands to library.session +- [e6514757] govc: consolidate library commands +- [f8249ded] govc: export Archive Path field +- [8a823c52] vcsa: bump to 6.7u2 +- [5b5eaa70] vcsim: remove httptest.serve flag +- [466dc5b2] Update to vSphere 6.7u2 API +- [e9f80882] Add error check to VirtualMachine.WaitForNetIP +- [5611aaa2] Add ovftool support +- [20c1873e] vcsim: add library.deploy support +- [0b1ad552] vcsim: add ovf manager +- [d2ab2782] govc: add vm.change vpmc-enabled flag +- [e7b801c6] govc: fix vm.change against templates +- [8a856429] govc: fix option.set for int32 type values +- [9155093e] Typo and->an +- [81391309] govc: add datastore.maintenance.{enter,exit} commands +- [1a857b94] Add support to reconcile FCD datastore inventory +- [18cb9142] govc: FCD workarounds +- [499a8828] Fix staticcheck issues value of `XXX` is never used +- [665affe5] govc: add datastore.cluster.info Description +- [546e8897] Add error check for deferred functions +- [367c8743] Fix bug with multiple tags in category +- [7b7f2013] govc: add permission.remove -f flag +- [87bc0c85] Makefile: Fix govet target using go1.12 +- [791e5434] travis.yml: Update from golang 1.11 to 1.12 +- [a86a42a2] travis.yml: Update from Ubuntu Trusty to Xenial +- [d92ee75e] Report local Datastore back as type OTHER +- [6684016f] vcsim: fork httptest server package +- [48c1e0a5] vcsim: add content library support +- [69faa2de] Make PostEvent TaskInfo param optional +- [608ad29f] Omit namespace tag in generated method body response types +- [a7c03228] Fix codespell issues +- [728e77db] Fix a race in NewServer(). +- [8543ea4f] vcsim: set guest.toolsRunningStatus property +- [e3143407] Fix elseif gocritic issues +- [89b53312] Fix gocritic emptyStringTest issues +- [63ba9232] Fix some trivial gocritic issues +- [0b8d0ee7] simulator/host_datastore_browser.go: remove commented out code +- [6c17d66c] Fix some staticcheck issues +- [d45b5f34] Fix some gosimple issues +- [90e501a6] Correct the year in the govc changelog +- [8082a261] Update XDR to use fork +- [e94ec246] govc/USAGE.md: Update documentation +- [3fde3319] snapshot.tree: Show snapshots description +- [1d6f743b] Fix year in changelog +- [39b2c871] support customize vm folder in ovf deploy +- [3ad203d3] Use rest.Client for library uploads +- [5d24c38c] lib/finder: Support filenames with "/" +- [087f09f9] govc library: use govc/flags for Datastore and ResourcePool +- [d1a7f491] Remove nested progress.Tee usage +- [7312711e] govc/vm/*: Fix some gosec Errors unhandled issues +- [88601bb7] vcsim/*: Fix Errors unhandled issues +- [61d04b46] session/*: Fix Errors unhandled issues +- [f9a22349] vmdk/*: Fix gosec Errors unhandled issues +- [ca9b71a9] Fix gosec Expect directory permissions to be 0750 or less issues +- [6083e891] Fix gosec potential file inclusion via variable issues +- [38091bf8] Build changes needed for content library +- [885d4b44] Content library additions/finder +- [3fb72d1a] Add support for content library +- [64f2a5ea] Fix API Version check. 
+- [718331e3] govc/*: Fix some staticcheck issues +- [ba7923ae] Fix all staticcheck "error strings should not be capitalized" issues +- [ed32a917] simulator/*: Fix some staticcheck issues +- [f71d4efb] govc/vm/*: Fix staticcheck issues +- [3d77e2b1] vim25/*: Fix staticcheck issues +- [d711005a] .gitignore: add editor files *~ +- [43ff04f1] Fix [#1173](https://github.com/vmware/govmomi/issues/1173) +- [562aa0db] Go Mod Support + + +## [Release v0.20.3](https://github.com/vmware/govmomi/compare/prerelease-v0.21.0-58-g8d28646...v0.20.3) + +> Release Date: 2019-10-08 + +### ⚠️ BREAKING + +### 📖 Commits + +- [fdd27786] Fix tls config in soap.NewServiceClient +- [bd9cfd18] Set the InventoryPath of the folder object in DefaultFolder ([#1515](https://github.com/vmware/govmomi/issues/1515)) +- [4514987f] Fix port signature in REST endpoint token auth + + +## [Release prerelease-v0.21.0-58-g8d28646](https://github.com/vmware/govmomi/compare/v0.21.0...prerelease-v0.21.0-58-g8d28646) + +> Release Date: 2019-09-08 + +### 💡 Examples + +- [1828eee9] add VirtualMachine.CreateSnapshot +- [6ff7040e] fix flag parsing +- [cad9a8e2] add ExampleVirtualMachine_Reconfigure + +### 💫 `govc` (CLI) + +- [1b159e27] fix datacenter.info against nested folders +- [149ba7ad] add vm.change -latency flag +- [c35a532d] validate moref argument +- [3fb02b52] add guest.df command + +### 💫 `vcsim` (Simulator) + +- [f962095f] rename Example to Run +- [43d69860] add endpoint registration mechanism +- [c183577b] add PlaceVm support ([#1589](https://github.com/vmware/govmomi/issues/1589)) +- [b17f3a51] DefaultDatastoreID is optional in library deploy + +### ⚠️ BREAKING + +### 📖 Commits + +- [1828eee9] examples: add VirtualMachine.CreateSnapshot +- [ca3763e7] vapi: return info with current session query +- [f962095f] vcsim: rename Example to Run +- [43d69860] vcsim: add endpoint registration mechanism +- [1b159e27] govc: fix datacenter.info against nested folders +- [c183577b] vcsim: add PlaceVm support ([#1589](https://github.com/vmware/govmomi/issues/1589)) +- [3e71d6be] Add ResourcePool.Owner method +- [b17f3a51] vcsim: DefaultDatastoreID is optional in library deploy +- [68980704] Update generated code to vSphere 6.7u3 +- [7416741c] Add VirtualMachine.QueryChangedDiskAreas(). 
+- [8ef87890] Content library: support library ID in Finder +- [e373feb8] Add option to propagate MissingSet faults in property.WaitForUpdates +- [6ff7040e] examples: fix flag parsing +- [149ba7ad] govc: add vm.change -latency flag +- [c35a532d] govc: validate moref argument +- [54df157b] Add content library subscription support +- [b86466b7] Fix deadlock for keep alive handlers that attempt log in +- [9ad64557] CNS go bindings +- [9de3b854] Add simulator.Model.Run example +- [4285b614] Include url in Client.Download error +- [caf0b6b3] vcsa: update to 6.7 U3 +- [7ac56b64] Update vcsim Readme.md +- [48ef35df] Update README.md +- [a40837d8] Use gnu xargs in bats tests on Darwin +- [51ad97e1] Add FetchCapabilityMetadata method to Pbm client +- [d124bece] Add v4 option to VirtualMachine.WaitForIP +- [a5a429c0] Add support for the cis session get method +- [4513735f] Don't limit library.Finder to local libraries +- [cad9a8e2] examples: add ExampleVirtualMachine_Reconfigure +- [3fb02b52] govc: add guest.df command + + +## [Release v0.21.0](https://github.com/vmware/govmomi/compare/v0.20.2...v0.21.0) + +> Release Date: 2019-07-24 + +### 💡 Examples + +- [9495f0d8] add CustomFieldManager.Set + +### 💫 `govc` (CLI) + +- [fa755779] support library paths in tags.attach commands +- [2ddfb86b] add datastore.info -H flag +- [b3adfff2] add sso.group commands +- [b5372b0c] host.vnic.info -json support +- [4c41c167] add context to LoadX509KeyPair error +- [910dac72] add vm.change hot-add options +- [746c314e] change logs.download -default=false +- [05f946d4] increase guest.ps -X poll interval +- [cc10a075] add -options support to library.deploy +- [fe372923] rename vcenter.deploy to library.deploy +- [436d7a04] move library.item.update commands to library.session +- [e6514757] consolidate library commands +- [f8249ded] export Archive Path field +- [d2ab2782] add vm.change vpmc-enabled flag +- [e7b801c6] fix vm.change against templates +- [8a856429] fix option.set for int32 type values +- [81391309] add datastore.maintenance.{enter,exit} commands +- [18cb9142] FCD workarounds +- [665affe5] add datastore.cluster.info Description +- [7b7f2013] add permission.remove -f flag + +### 💫 `vcsim` (Simulator) + +- [774f3800] add support to override credentials +- [ecd7312b] fix host uuid +- [c25c41c1] use stable UUIDs for inventory objects +- [1345eeb8] Press any key to exit +- [ee14bd3d] Update NetworkInfo.Portgroup in simulator +- [5b5eaa70] remove httptest.serve flag +- [20c1873e] add library.deploy support +- [0b1ad552] add ovf manager +- [6684016f] fork httptest server package +- [48c1e0a5] add content library support +- [8543ea4f] set guest.toolsRunningStatus property + +### ⚠️ BREAKING + +### 📖 Commits + +- [a0fef816] Update docs for 0.21 release +- [a38f6e87] Content library related cleanups +- [e4024e9c] Fix library AddLibraryItemFileFromURI fingerprint +- [fa755779] govc: support library paths in tags.attach commands +- [5e8cb495] Fixed type bug in global_object_manager Task.QueryResult +- [4a67dc73] govcsim: Support Default UplinkTeamingPolicy in DVSPG +- [9da2362d] Added missing field in VslmExtendDisk_Task in ExtendDisk method +- [91377d77] Add Juju to projects using govmomi +- [f9026a84] VSLM FCD Global Object Manager client for 6.7U2+ +- [9495f0d8] examples: add CustomFieldManager.Set +- [bb170705] govcsim: Create datastore as accessible +- [35d0b7d3] Set the InventoryPath of the folder object in DefaultFolder ([#1515](https://github.com/vmware/govmomi/issues/1515)) +- [2d13a357] Add govmomi performance 
example +- [2ddfb86b] govc: add datastore.info -H flag +- [55da29e5] govcsim: Set datastore status as normal +- [600e9f7c] Add various govmomi client examples +- [5cccd732] Add http source support to library.import +- [99dd5947] Goreleaser update for multiple archives +- [b3adfff2] govc: add sso.group commands +- [5889d091] tags API: add methods for association of multiple tags/objects +- [b5372b0c] govc: host.vnic.info -json support +- [9b7688e0] Add method that sets vim version to the endpoint service version +- [fe3488f5] Fix tls config in soap.NewServiceClient +- [4c41c167] govc: add context to LoadX509KeyPair error +- [d7430825] Support external PSC lookup service +- [774f3800] vcsim: add support to override credentials +- [47c9c070] Fix HostNetworkSystem.QueryNetworkHint return value +- [910dac72] govc: add vm.change hot-add options +- [4606125e] Fix json request tracing +- [746c314e] govc: change logs.download -default=false +- [05f946d4] govc: increase guest.ps -X poll interval +- [77cb9df5] Add library export support +- [cc10a075] govc: add -options support to library.deploy +- [ecd7312b] vcsim: fix host uuid +- [c25c41c1] vcsim: use stable UUIDs for inventory objects +- [322d9629] Fix pbm field type lookup +- [1345eeb8] vcsim: Press any key to exit +- [a4f58ac6] Update examples to use examples.Run method +- [a31db862] Add permanager example +- [384b1b95] Fix port signature in REST endpoint token auth +- [c222666f] Default to running against vcsim in examples +- [199e737b] Add generated vslm types and methods +- [ee14bd3d] vcsim: Update NetworkInfo.Portgroup in simulator +- [dc631a2d] Format import statement +- [f133c9e9] Fix paths in vsan/methods +- [d8e7cc75] Update copy rights +- [62412641] Add vsan bindings +- [fc3f0e9d] Support resignature of vmfs snapshots ([#1442](https://github.com/vmware/govmomi/issues/1442)) +- [fe372923] govc: rename vcenter.deploy to library.deploy +- [436d7a04] govc: move library.item.update commands to library.session +- [e6514757] govc: consolidate library commands +- [f8249ded] govc: export Archive Path field +- [8a823c52] vcsa: bump to 6.7u2 +- [5b5eaa70] vcsim: remove httptest.serve flag +- [466dc5b2] Update to vSphere 6.7u2 API +- [e9f80882] Add error check to VirtualMachine.WaitForNetIP +- [5611aaa2] Add ovftool support +- [20c1873e] vcsim: add library.deploy support +- [0b1ad552] vcsim: add ovf manager +- [d2ab2782] govc: add vm.change vpmc-enabled flag +- [e7b801c6] govc: fix vm.change against templates +- [8a856429] govc: fix option.set for int32 type values +- [9155093e] Typo and->an +- [81391309] govc: add datastore.maintenance.{enter,exit} commands +- [1a857b94] Add support to reconcile FCD datastore inventory +- [18cb9142] govc: FCD workarounds +- [499a8828] Fix staticcheck issues value of `XXX` is never used +- [665affe5] govc: add datastore.cluster.info Description +- [546e8897] Add error check for deferred functions +- [367c8743] Fix bug with multiple tags in category +- [7b7f2013] govc: add permission.remove -f flag +- [87bc0c85] Makefile: Fix govet target using go1.12 +- [791e5434] travis.yml: Update from golang 1.11 to 1.12 +- [a86a42a2] travis.yml: Update from Ubuntu Trusty to Xenial +- [d92ee75e] Report local Datastore back as type OTHER +- [6684016f] vcsim: fork httptest server package +- [48c1e0a5] vcsim: add content library support +- [69faa2de] Make PostEvent TaskInfo param optional +- [608ad29f] Omit namespace tag in generated method body response types +- [a7c03228] Fix codespell issues +- [728e77db] Fix a race in 
NewServer(). +- [8543ea4f] vcsim: set guest.toolsRunningStatus property +- [e3143407] Fix elseif gocritic issues +- [89b53312] Fix gocritic emptyStringTest issues +- [63ba9232] Fix some trivial gocritic issues +- [0b8d0ee7] simulator/host_datastore_browser.go: remove commented out code +- [6c17d66c] Fix some staticcheck issues +- [d45b5f34] Fix some gosimple issues +- [90e501a6] Correct the year in the govc changelog +- [8082a261] Update XDR to use fork +- [e94ec246] govc/USAGE.md: Update documentation +- [3fde3319] snapshot.tree: Show snapshots description +- [1d6f743b] Fix year in changelog +- [39b2c871] support customize vm folder in ovf deploy +- [3ad203d3] Use rest.Client for library uploads +- [5d24c38c] lib/finder: Support filenames with "/" +- [087f09f9] govc library: use govc/flags for Datastore and ResourcePool +- [d1a7f491] Remove nested progress.Tee usage +- [7312711e] govc/vm/*: Fix some gosec Errors unhandled issues +- [88601bb7] vcsim/*: Fix Errors unhandled issues +- [61d04b46] session/*: Fix Errors unhandled issues +- [f9a22349] vmdk/*: Fix gosec Errors unhandled issues +- [ca9b71a9] Fix gosec Expect directory permissions to be 0750 or less issues +- [6083e891] Fix gosec potential file inclusion via variable issues +- [38091bf8] Build changes needed for content library +- [885d4b44] Content library additions/finder +- [3fb72d1a] Add support for content library +- [64f2a5ea] Fix API Version check. +- [718331e3] govc/*: Fix some staticcheck issues +- [ba7923ae] Fix all staticcheck "error strings should not be capitalized" issues +- [ed32a917] simulator/*: Fix some staticcheck issues +- [f71d4efb] govc/vm/*: Fix staticcheck issues +- [3d77e2b1] vim25/*: Fix staticcheck issues +- [d711005a] .gitignore: add editor files *~ +- [43ff04f1] Fix [#1173](https://github.com/vmware/govmomi/issues/1173) +- [562aa0db] Go Mod Support + + +## [Release v0.20.2](https://github.com/vmware/govmomi/compare/v0.20.1...v0.20.2) + +> Release Date: 2019-07-03 + +### ⚠️ BREAKING + +### 📖 Commits + +- [bd9cfd18] Set the InventoryPath of the folder object in DefaultFolder ([#1515](https://github.com/vmware/govmomi/issues/1515)) + + +## [Release v0.20.1](https://github.com/vmware/govmomi/compare/v0.20.0...v0.20.1) + +> Release Date: 2019-05-20 + +### ⚠️ BREAKING + +### 📖 Commits + +- [4514987f] Fix port signature in REST endpoint token auth + + +## [Release v0.20.0](https://github.com/vmware/govmomi/compare/v0.19.0...v0.20.0) + +> Release Date: 2019-02-06 + +### 💫 `govc` (CLI) + +- [308dbf99] fix object.collect error for multiple objects with same path +- [4635c1cc] add device name match support to device.ls and device.remove +- [c36eb50f] add vm.disk.attach -mode flag +- [b234cdbc] add category option to relevant tags commands +- [afe5f42d] add vm.create -version option +- [b733db99] fields.set can now add missing fields +- [cad627a6] add fields.info command + +### 💫 `vcsim` (Simulator) + +- [957ef0f7] require authentication in vapi simulator +- [32148187] Resolve issue making device changes on clone (resolves [#1355](https://github.com/vmware/govmomi/issues/1355)) +- [cbb4abc9] fix SearchDatastore task info entity +- [2682c021] add EnvironmentBrowser support +- [3b9a4c9f] avoid zero IP address in GOVC_URL output +- [1921f73a] avoid panic when event template is not defined +- [d79013aa] implement RefreshStorageInfo method for virtual machine +- [69dfdd77] configure HostSystem port +- [bba50b40] datastore.upload now creates missing directories in destination path. 
+- [d2506759] add option to run container as vm +- [47284860] add SessionIsActive support +- [c5ee00bf] fix fault detail encoding +- [1284300c] support base types in property filter +- [25ae5c67] PropertyCollector should not require PathSet +- [4f1c89e5] allow '.' in vm name +- [b8c04142] populate VM guest.net field +- [223b2a2a] add SearchIndex FindByDnsName support +- [b26e10f0] correct property update in RemoveSnapshotTask +- [693f3fb6] update VM snapshot methods to change VM properties with UpdateObject +- [06e13bbe] support setting vm fields via extraConfig +- [a4330365] update VM configureDevices method to change VM properties with UpdateObject +- [5f8acb7a] update VM device add operation - stricter key generation, new InvalidDeviceSpec condition +- [846ae27a] add PBM support +- [d41d18aa] put VM into registry earlier during CreateVM +- [89b4c2ce] add datastore access check for vm host placement +- [f9f9938e] add task_manager description property templates +- [9bb5bde2] fix defaults when generating vmdk paths +- [0b650fd3] fix custom_fields_manager test +- [588bc224] replace HostSystem template IP with vcsim listen address +- [7066f8dc] Change CustomFieldsManager SetField to use ctx.WithLock and add InvalidArgument fault check. +- [fe070811] update DVS methods to use UpdateObject instead of setting fields directly +- [03939cce] add vslm support +- [c02efc3d] add setCustomValue support +- [94804159] add fault message to PropertyCollector RetrieveProperties +- [36035f5b] add HistoryCollector scrollable view support + +### ⚠️ BREAKING + +### 📖 Commits + +- [da7af247] Fix for govc/build.sh wrong dir +- [90a863be] Update docs for 0.20 release +- [957ef0f7] vcsim: require authentication in vapi simulator +- [32148187] vcsim: Resolve issue making device changes on clone (resolves [#1355](https://github.com/vmware/govmomi/issues/1355)) +- [a7563c4d] Use path id for tag-association requests +- [cbb4abc9] vcsim: fix SearchDatastore task info entity +- [2682c021] vcsim: add EnvironmentBrowser support +- [3b9a4c9f] vcsim: avoid zero IP address in GOVC_URL output +- [b261f25d] Add 2x blog posts about vcsim +- [1921f73a] vcsim: avoid panic when event template is not defined +- [308dbf99] govc: fix object.collect error for multiple objects with same path +- [d79013aa] vcsim: implement RefreshStorageInfo method for virtual machine +- [69dfdd77] vcsim: configure HostSystem port +- [4f50681f] Fix of the missing http body close under soap client upload +- [bba50b40] vcsim: datastore.upload now creates missing directories in destination path. +- [8ac7c5a8] Fixed 64-bit aligment issues with atomic counters +- [7ca12ea2] fix device.info Write output +- [3a82237c] device.ls -json doesn't work for now +- [86f4ba29] ssoadmin:create local group and add users to group ([#1327](https://github.com/vmware/govmomi/issues/1327)) +- [2d8ef2c6] Format with latest version of goimports +- [4635c1cc] govc: add device name match support to device.ls and device.remove +- [d7857a13] Updated the examples for the correct format +- [71e19136] Updated to reflect PR feedback +- [d2506759] vcsim: add option to run container as vm +- [61b7fe3e] Added string support +- [a72a4c42] Initial Support for PutUsbScanCodes +- [47284860] vcsim: add SessionIsActive support +- [c5ee00bf] vcsim: fix fault detail encoding +- [aaf83275] Summary of changes: 1. Changing the pbm client's path as java client is expecting /pbm. 2. Added PbmRetrieveServiceContent method in the unauthorized list. 
+- [c36eb50f] govc: add vm.disk.attach -mode flag +- [1284300c] vcsim: support base types in property filter +- [25ae5c67] vcsim: PropertyCollector should not require PathSet +- [b234cdbc] govc: add category option to relevant tags commands +- [138f30f8] Makefiles for govc/vcsim; updates govc/build.sh +- [4f1c89e5] vcsim: allow '.' in vm name +- [afe5f42d] govc: add vm.create -version option +- [b8c04142] vcsim: populate VM guest.net field +- [223b2a2a] vcsim: add SearchIndex FindByDnsName support +- [b26e10f0] vcsim: correct property update in RemoveSnapshotTask +- [693f3fb6] vcsim: update VM snapshot methods to change VM properties with UpdateObject +- [e5948f44] build: Refactored Travis-CI to use containers +- [06e13bbe] vcsim: support setting vm fields via extraConfig +- [651d4881] Allow pointer values in mo.ApplyPropertyChange +- [546a7df6] Tags support for First Class Disks +- [a4330365] vcsim: update VM configureDevices method to change VM properties with UpdateObject +- [5f8acb7a] vcsim: update VM device add operation - stricter key generation, new InvalidDeviceSpec condition +- [86375ceb] Merge branch 'master' into fields-info +- [bf962f18] Update govc/fields/add.go +- [98575e0c] Update govc/fields/add.go +- [b733db99] govc: fields.set can now add missing fields +- [cad627a6] govc: add fields.info command +- [ed2a4cff] vm.power: Make waiting for op completion optional +- [846ae27a] vcsim: add PBM support +- [d41d18aa] vcsim: put VM into registry earlier during CreateVM +- [1926071e] Datastore Cluster placement support for First Class Disks +- [89b4c2ce] vcsim: add datastore access check for vm host placement +- [f9f9938e] vcsim: add task_manager description property templates +- [9bb5bde2] vcsim: fix defaults when generating vmdk paths +- [0b650fd3] vcsim: fix custom_fields_manager test +- [588bc224] vcsim: replace HostSystem template IP with vcsim listen address +- [7066f8dc] vcsim: Change CustomFieldsManager SetField to use ctx.WithLock and add InvalidArgument fault check. 
+- [ef517cae] Display category name instead of ID in govc tags.info +- [d69c9787] goimports updates +- [fe070811] vcsim: update DVS methods to use UpdateObject instead of setting fields directly +- [03939cce] vcsim: add vslm support +- [accb2863] Add vslm package and govc disk commands +- [478ebae6] [doc] add an example for cpu and memory hotplug +- [c02efc3d] vcsim: add setCustomValue support +- [c3c79d16] goimports updates +- [ce71b6c2] vcsa: bump to 6.7.0 U1 +- [94804159] vcsim: add fault message to PropertyCollector RetrieveProperties +- [1ad0d87d] Removed NewWithDelay (not needed anymore) +- [5900feef] Updated documentation +- [5a87902b] Added delay functionality +- [c0518fd2] Add LoginByToken to session KeepAliveHandler +- [e0736431] Update Ansible link in README +- [36035f5b] vcsim: add HistoryCollector scrollable view support +- [bc2636fe] Move govc tags rest.Client helper to ClientFlag +- [54a181af] Add SSO support for vAPI +- [8817c27b] replace * by client's host+port +- [ac898b50] change hostname only if set to * and still set thumbprint +- [7a5cc6b7] replace hostname only if unset + + +## [Release v0.19.0](https://github.com/vmware/govmomi/compare/v0.18.0...v0.19.0) + +> Release Date: 2018-09-30 + +### 💫 `govc` (CLI) + +- [6b4a62b1] fix test case for new cluster.rule.info command +- [1350eea6] add new command cluster.rule.info + +### 💫 `vcsim` (Simulator) + +- [f3260968] add dvpg networks to HostSystem.Parent +- [17352fce] add support for tags API +- [c29d4b12] Logout should not unregister PropertyCollector singleton +- [11fb0d58] add ResetVM and SuspendVM support +- [39e6592d] add support for PropertyCollector incremental updates +- [619fbe28] do not include DVS in HostSystem.Network + +### ⚠️ BREAKING + +### 📖 Commits + +- [3617f28d] Update docs for 0.19 release +- [4316838a] vcsa: bump to 6.7.0d +- [64d875b9] Added PerformanceManager simulator +- [f3260968] vcsim: add dvpg networks to HostSystem.Parent +- [862da065] Allowing the use of STS for exchanging tokens +- [83ce863a] Handle empty file name in import.spec +- [a99f702d] Bump travis golang version from 1.10 to 1.11 +- [e4e8e2d6] Clean up unit test messaging +- [8e04e3c7] Run goimports on go source files +- [2431ae00] Add mailmap for bruceadowns +- [e805b4ea] Updates per dep ensure integration +- [70589fb6] Add ignore of intellij project settings directory +- [d114fa69] Print action for dvs security groups +- [d458266a] fix double err check +- [3f0e0aa7] remove providerSummary cache +- [cf9c16c4] Avoid use of Finder all param in govc +- [c4face4f] Print DVS rules for dvportgroup +- [91a33dd4] Finalize tags API +- [7d54bf9f] README: Fix path to LICENSE.txt file +- [17352fce] vcsim: add support for tags API +- [c29d4b12] vcsim: Logout should not unregister PropertyCollector singleton +- [8bda0ee1] Fix format in test +- [8be5207c] Add test for WaitOption.MaxWaitSeconds == 0 behaviour in simulator +- [900e1a35] Fix the WaitOption.MaxWaitSeconds == 0 behaviour in simulator +- [056ad0d4] vcsa: bump to 6.7.0c release +- [6b4a62b1] govc: fix test case for new cluster.rule.info command +- [1350eea6] govc: add new command cluster.rule.info +- [a05cd4b0] add output in cluster.rule.ls -name for ClusterVmHostRuleInfo and ClusterDependencyRuleInfo rules, add -l Option to cluster.rule.ls +- [11fb0d58] vcsim: add ResetVM and SuspendVM support +- [3e6b2d6e] Add ability to move multiple hosts into a cluster +- [e9f9920f] Add method to move host into cluster +- [39e6592d] vcsim: add support for PropertyCollector incremental updates +- 
[b7c270c6] Add testing support for govc tags commands +- [619fbe28] vcsim: do not include DVS in HostSystem.Network +- [6b6060dc] show rule details for ClusterVmHostRuleInfo rules in cluster.rule.ls +- [0c28a25d] Use govc find instead of ls to assign licenses +- [c1377063] Only test with Go 1.10 on Travis CI +- [4cfadda5] Avoid panic if fault detail is nil +- [d06874e1] Upgrade for govc tags commands +- [fdfaec9c] Better documentation for VirtualMachine.UUID +- [e1285a03] Add UUID helper for VirtualMachine +- [919b728c] Complete tags management APIs ([#1162](https://github.com/vmware/govmomi/issues/1162)) +- [b3251638] vcsa: bump to 6.7.0a release +- [a1fbb6ef] Optionally check root CAs for validity ([#1154](https://github.com/vmware/govmomi/issues/1154)) +- [add38bed] Fixed govc host.info logical CPU count +- [1ddfb011] Tags Categories cmd available ([#1150](https://github.com/vmware/govmomi/issues/1150)) +- [83ae35fb] default MarkAsTemplate to false in import spec +- [49f0dea7] add option to mark VM as template on OVX import +- [1f9e19f4] example: uniform unit for host memory +- [4cfd1376] fix example output. + + +## [Release v0.18.0](https://github.com/vmware/govmomi/compare/v0.17.1...v0.18.0) + +> Release Date: 2018-05-24 + +### 💫 `govc` (CLI) + +- [b841ae01] import.ovf pool flag should be optional if host is specified +- [f5c84b98] avoid Login() attempt if username is not set +- [d91fcbf4] add json support to find command +- [ba2d2323] fix host.esxcli error handling + +### 💫 `vcsim` (Simulator) + +- [8a5438b0] add STS simulator +- [c0337740] use VirtualDisk CapacityInKB for device summary +- [3d7fbac2] add property collector field type mapping for integer arrays + +### ⚠️ BREAKING + +### 📖 Commits + +- [e4b69fab] Update docs for 0.18 release +- [1dbfb317] Bump versions +- [b841ae01] govc: import.ovf pool flag should be optional if host is specified +- [96a905c1] Add -sharing option to vm.disk.create and vm.disk.attach +- [4b4e2aaa] Add VirtualDiskManager wrapper to set UUID +- [40a565b3] adjust datastore size when vm is added or updated or deleted +- [7f6479ba] update datastore capacity and free space when it is started +- [76dfefd3] Avoid recursive root path search in govc find command +- [623c7fa9] Change key name according to Datacenter object +- [24d0cf1b] added check for `InstanceUuid` when `VmSearch` is true in `FindByUuid` +- [25fc474c] Issue token if needed for govc sso commands +- [822fd1c0] Fixed leading "/" requirement in FindByInventoryPath +- [59d9f6a0] Add devbox scripts +- [fd45d81c] Add -U option to sso.service.ls +- [f5c84b98] govc: avoid Login() attempt if username is not set +- [8a5438b0] vcsim: add STS simulator +- [93f7fbbd] Fix govc vm.clone -annotation flag +- [bcff5383] save CapacityInKB in thousand delimited format +- [db12d4cb] Avoid possible panic in portgroup EthernetCardBackingInfo +- [d120efcb] Add STS support for token renewal +- [76b1ceaf] Add vmxnet2, pcnet32 and sriov to VirtualDeviceList.EthernetCardTypes +- [c0337740] vcsim: use VirtualDisk CapacityInKB for device summary +- [3d7fbac2] vcsim: add property collector field type mapping for integer arrays +- [42b30bb6] Finder.DefaultHostSystem should find hosts in nested folders +- [b8323d6b] Avoid property.Filter matching against unset properties +- [64788667] Update to vSphere 6.7 API +- [d3ae3004] Bump vCenter and ESXi builds to the latest release +- [098fc449] Add ssoadmin client and commands +- [80a9c20e] vm.Snapshot should be 'nil' instead of an empty 'vim.vm.SnapshotInfo' when there are no 
snapshots +- [1b1b428e] added failing tests for when vm.Snapshot should / shouldn't be 'nil' +- [a34ab4ba] Refactor LoginExtensionByCertificate tunnel usage +- [5b36033f] Lookup Service support +- [3f07eb74] add empty fields, but don't return them in the case of 'RetrievePropertiesEx' +- [05bdabe0] added failing test case for issue 1061 +- [903e8644] SAML token authentication support +- [d91fcbf4] govc: add json support to find command +- [ba2d2323] govc: fix host.esxcli error handling +- [ff687746] Dep Support +- [5f701460] Add -firmware parameter to 'govc vm.create' with values bios|efi + + +## [Release v0.17.1](https://github.com/vmware/govmomi/compare/v0.17.0...v0.17.1) + +> Release Date: 2018-03-19 + +### 💫 `vcsim` (Simulator) + +- [0502ee9b] add Destroy method for Folder and Datacenter types +- [0636dc8c] add EventManager.QueryEvents + +### ⚠️ BREAKING + +### 📖 Commits + +- [123ed177] govc release 0.17.1 +- [24d88451] Avoid possible panic in QueryVirtualDiskInfo +- [82129fb7] Add goreleaser to automate release process +- [ce88b296] Fix dvs.portgroup.info filtering +- [0502ee9b] vcsim: add Destroy method for Folder and Datacenter types +- [1620160d] In progress.Reader emit final report on EOF. +- [0636dc8c] vcsim: add EventManager.QueryEvents + + +## [Release v0.17.0](https://github.com/vmware/govmomi/compare/v0.16.0...v0.17.0) + +> Release Date: 2018-02-28 + +### 💫 `govc` (CLI) + +- [29498644] fix vm.clone to use -net flag when source does not have a NIC +- [d12b8f25] object.collect support for raw filters +- [6cb9fef8] fix host.info CPU usage +- [5786e7d2] add -cluster flag to license.assign command +- [d4ee331c] allow columns in guest login password ([#972](https://github.com/vmware/govmomi/issues/972)) + +### 💫 `vcsim` (Simulator) + +- [d2ba47d6] add simulator.Datastore type +- [937998a1] set VirtualMachine summary.config.instanceUuid +- [1c76c63d] update HostSystem.Summary.Host reference +- [274f3d63] add EventManager support +- [cc21a5ab] stats related fixes +- [fa2bee10] avoid data races +- [ca6f5d1d] respect VirtualDeviceConfigSpec FileOperation +- [7811dfce] avoid keeping the VM log file open +- [828ce5ec] add UpdateOptions support +- [d03f38fa] add session support +- [a3c9ed2b] Add VM.MarkAsTemplate support +- [50735461] more input spec honored in ReConfig VM +- [638d972b] Initialize VM fields properly +- [aa0382c1] Honor the input spec in ReConfig VM +- [42f9a133] Add HostLocalAccountManager +- [76f376a3] workaround xml ns issue with pyvsphere ([#958](https://github.com/vmware/govmomi/issues/958)) +- [45c5269b] add MakeDirectoryResponse ([#938](https://github.com/vmware/govmomi/issues/938)) +- [b4e77bd2] copy RoleList for AuthorizationManager ([#932](https://github.com/vmware/govmomi/issues/932)) +- [2a8a5168] apply vm spec NumCoresPerSocket ([#930](https://github.com/vmware/govmomi/issues/930)) +- [3a61d85f] Configure dvs with the dvs config spec +- [5f0f4004] Add VirtualMachine guest ID validation ([#921](https://github.com/vmware/govmomi/issues/921)) +- [ef571547] add QueryVirtualDiskUuid ([#920](https://github.com/vmware/govmomi/issues/920)) +- [27229ab7] update ServiceContent to 6.5 ([#917](https://github.com/vmware/govmomi/issues/917)) + +### ⚠️ BREAKING + +### 📖 Commits + +- [1d63da8d] govc release 0.17 +- [3017acf8] Print Table of Contents in usage.md Found good example of toc using markdown here: https://stackoverflow.com/a/448929/1572363 +- [ce54fe2c] Fix typo +- [201fc601] Implement Destroy task for HostSystem +- [92ce4244] Init PortKeys in 
DistributedVirtualPortgroup +- [795f2cc7] Avoid json encoding error in Go 1.10 +- [e805389e] Add 'Type' field to device.info -json output +- [d622f149] Use VirtualDiskManager in datastore cp and mv commands +- [f219bf3b] object: Return correct helper object for OpaqueNetwork +- [29498644] govc: fix vm.clone to use -net flag when source does not have a NIC +- [43c95b21] Fix build on Windows +- [38124002] Fix session persistence in session.login command +- [144bb1cf] Add support for Datacenter.PowerOnMultiVM +- [d2ba47d6] vcsim: add simulator.Datastore type +- [937998a1] vcsim: set VirtualMachine summary.config.instanceUuid +- [1c76c63d] vcsim: update HostSystem.Summary.Host reference +- [d12b8f25] govc: object.collect support for raw filters +- [274f3d63] vcsim: add EventManager support +- [cc21a5ab] vcsim: stats related fixes +- [2d30cde3] Fix broken datastore link in VM +- [54b160b2] Several context changes: +- [f643f0ae] Leverage contexts in http uploads +- [fa2bee10] vcsim: avoid data races +- [29bd00ec] Remove omitempty tag from AffinitySet field +- [ca6f5d1d] vcsim: respect VirtualDeviceConfigSpec FileOperation +- [7811dfce] vcsim: avoid keeping the VM log file open +- [6cb9fef8] govc: fix host.info CPU usage +- [5786e7d2] govc: add -cluster flag to license.assign command +- [63c86f29] Add datastore.disk.cp command +- [828ce5ec] vcsim: add UpdateOptions support +- [a13ad164] Bump vcsa scripts to use 6.5U1 EP5 +- [c447244d] Add CloneSession support to govc and vcsim +- [d03f38fa] vcsim: add session support +- [44e8d85e] Added AttachScsiLun function ([#987](https://github.com/vmware/govmomi/issues/987)) +- [a3c9ed2b] vcsim: Add VM.MarkAsTemplate support +- [3f8349f3] Add cluster vm override commands ([#977](https://github.com/vmware/govmomi/issues/977)) +- [91fbd1f7] Add option to filter events by type ([#976](https://github.com/vmware/govmomi/issues/976)) +- [1d8b92d9] User server clock in session.ls ([#973](https://github.com/vmware/govmomi/issues/973)) +- [50735461] vcsim: more input spec honored in ReConfig VM +- [638d972b] vcsim: Initialize VM fields properly +- [2892ed50] Add '-rescan-vmfs' option to host.storage.info ([#966](https://github.com/vmware/govmomi/issues/966)) +- [d4ee331c] govc: allow columns in guest login password ([#972](https://github.com/vmware/govmomi/issues/972)) +- [e15ff586] Use IsFileNotFound helper in Datastore.Stat ([#969](https://github.com/vmware/govmomi/issues/969)) +- [aa0382c1] vcsim: Honor the input spec in ReConfig VM +- [465bd948] Hook AccountManager to UserDirectory +- [aef2d795] Destroy event history collectors ([#962](https://github.com/vmware/govmomi/issues/962)) +- [42f9a133] vcsim: Add HostLocalAccountManager +- [76f376a3] vcsim: workaround xml ns issue with pyvsphere ([#958](https://github.com/vmware/govmomi/issues/958)) +- [a1c49292] Ignore AcquireLocalTicket errors ([#955](https://github.com/vmware/govmomi/issues/955)) +- [bb150d50] Add missing dependency in gen script +- [0eacf959] toolbox: validate request offset in ListFiles ([#946](https://github.com/vmware/govmomi/issues/946)) +- [1d6aed22] Corrects datastore.disk usage which had not been generated ([#951](https://github.com/vmware/govmomi/issues/951)) +- [de717389] Corrects vm.info usage with required args ([#950](https://github.com/vmware/govmomi/issues/950)) +- [c5ea3fb2] Add datastore.disk inflate and shrink commands ([#943](https://github.com/vmware/govmomi/issues/943)) +- [adf4530b] Corrects host.shutdown ([#939](https://github.com/vmware/govmomi/issues/939)) +- [45c5269b] 
vcsim: add MakeDirectoryResponse ([#938](https://github.com/vmware/govmomi/issues/938)) +- [b4e77bd2] vcsim: copy RoleList for AuthorizationManager ([#932](https://github.com/vmware/govmomi/issues/932)) +- [426a675a] Fix [#933](https://github.com/vmware/govmomi/issues/933) ([#936](https://github.com/vmware/govmomi/issues/936)) +- [3be5f1d9] Add cluster.group and cluster.rule commands ([#928](https://github.com/vmware/govmomi/issues/928)) +- [2a8a5168] vcsim: apply vm spec NumCoresPerSocket ([#930](https://github.com/vmware/govmomi/issues/930)) +- [3a61d85f] vcsim: Configure dvs with the dvs config spec +- [3b25c720] CreateChildDisk 6.7 support ([#926](https://github.com/vmware/govmomi/issues/926)) +- [933ee3b2] Add VirtualDiskManager.CreateChildDisk ([#925](https://github.com/vmware/govmomi/issues/925)) +- [5f0f4004] vcsim: Add VirtualMachine guest ID validation ([#921](https://github.com/vmware/govmomi/issues/921)) +- [ef571547] vcsim: add QueryVirtualDiskUuid ([#920](https://github.com/vmware/govmomi/issues/920)) +- [0ea3b9bd] Implemented vm.upgrade operation. ([#918](https://github.com/vmware/govmomi/issues/918)) +- [27229ab7] vcsim: update ServiceContent to 6.5 ([#917](https://github.com/vmware/govmomi/issues/917)) +- [46c79c93] Add support for cpu + mem allocation to vm.change command ([#916](https://github.com/vmware/govmomi/issues/916)) + +## [Release v0.16.0](https://github.com/vmware/govmomi/compare/v0.15.0...v0.16.0) + +> Release Date: 2017-11-08 + +### 💫 `govc` (CLI) + +- [0295f1b0] Fix VM clone when source doesn't have vNics +- [4fea6863] add tasks and task.cancel commands +- [ddd32366] add reboot option to host.shutdown + +### 💫 `vcsim` (Simulator) + +- [4543f4b6] preserve order in QueryIpPools ([#914](https://github.com/vmware/govmomi/issues/914)) +- [b385183e] return moref from Task.Run ([#913](https://github.com/vmware/govmomi/issues/913)) +- [e29ab54a] Implement IpPoolManager lifecycle +- [b227a258] add autostart option to power on VMs ([#906](https://github.com/vmware/govmomi/issues/906)) +- [ecde4a89] use soapenv namespace for Fault types +- [b1318195] various property additions +- [c19ec714] Generate similar ref value like VC +- [f3046058] Add moref to vm's summary +- [5f3fba94] validate authz privilege ids +- [c2caa6d7] AuthorizationManager additions +- [2cb741f2] Add IpPoolManager +- [a46ab163] VirtualDisk file backing datastore is optional +- [d347175f] add PerformanceManager +- [df3763d5] Implement add/update/remove roles +- [ed18165d] Generate device filename in CreateVM +- [e8741bf0] add AuthorizationManager +- [8961efc1] populate vm snapshot fields +- [add0245e] Add UpdateNetworkConfig to HostNetworkSystem +- [2aa746c6] Implement virtual machine snapshot +- [104ddfb7] set VirtualDisk backing datastore +- [505b5c65] Implement enter/exit maintenance mode +- [a1f8a328] Implement add/remove license +- [585cf5e1] add portgroup related operations +- [a7e79a7e] add fields support +- [895573a5] remove use of df program for datastore info +- [defe810c] add FileQuery support to datastore search +- [5fcca79e] add HostConfigInfo template +- [920a70c1] add HostSystem hardware property +- [0833484e] Fix merging of default devices +- [f6a734f5] Add cdrom and scsi controller to Model VMs + +### ⚠️ BREAKING + +### 📖 Commits + +- [7d879bac] Doc updates ([#915](https://github.com/vmware/govmomi/issues/915)) +- [4543f4b6] vcsim: preserve order in QueryIpPools ([#914](https://github.com/vmware/govmomi/issues/914)) +- [b385183e] vcsim: return moref from Task.Run
([#913](https://github.com/vmware/govmomi/issues/913)) +- [c8738903] Remove tls-handshake-timeout flag ([#911](https://github.com/vmware/govmomi/issues/911)) +- [e29ab54a] vcsim: Implement IpPoolManager lifecycle +- [3619c1d9] Use ProgressLogger for vm.clone command ([#909](https://github.com/vmware/govmomi/issues/909)) +- [13f2aba4] readme: fix formatting of listing ([#908](https://github.com/vmware/govmomi/issues/908)) +- [b227a258] vcsim: add autostart option to power on VMs ([#906](https://github.com/vmware/govmomi/issues/906)) +- [79934451] Add installation procedure in README.md ([#902](https://github.com/vmware/govmomi/issues/902)) +- [ecde4a89] vcsim: use soapenv namespace for Fault types +- [b1318195] vcsim: various property additions +- [4d8737c9] Switch to kr/pretty package for the -dump flag +- [e050b1b6] Couple of fixes for import.spec result +- [017138ca] import.spec not to assign deploymentOption +- [c19ec714] vcsim: Generate similar ref value like VC +- [0295f1b0] govc: Fix VM clone when source doesn't have vNics +- [f3046058] vcsim: Add moref to vm's summary +- [bfed5eea] [govc] Introduce TLSHandshakeTimeout parameter ([#890](https://github.com/vmware/govmomi/issues/890)) +- [1c1291ca] Support import ova/ovf by URL +- [3cb5cc96] Remove BaseResourceAllocationInfo +- [5f3fba94] vcsim: validate authz privilege ids +- [c91b9605] Add clone methods to session manager +- [c2caa6d7] vcsim: AuthorizationManager additions +- [2cb741f2] vcsim: Add IpPoolManager +- [644c1859] Updates to vm.clone link + snapshot flags +- [cf624f1a] Add linked clone and snapshot support to vm.clone +- [024c09fe] Fix govc events output +- [d4d94f44] govc/events: read -json flag and output events as json +- [24e71ea4] Fix vm.register command template flag +- [5209daf2] Fix object name suffix matching in Finder +- [a46ab163] vcsim: VirtualDisk file backing datastore is optional +- [d347175f] vcsim: add PerformanceManager +- [df3763d5] vcsim: Implement add/update/remove roles +- [8d5c1558] Support clearing vm boot order +- [ed18165d] vcsim: Generate device filename in CreateVM +- [df93050a] Fix CustomFieldsManager.FindKey method signature +- [e8741bf0] vcsim: add AuthorizationManager +- [8961efc1] vcsim: populate vm snapshot fields +- [17fb12a5] Add method to find a CustomFieldDef by Key +- [bc395ef0] vscim: Implement UserDirectory +- [add0245e] vcsim: Add UpdateNetworkConfig to HostNetworkSystem +- [2aa746c6] vcsim: Implement virtual machine snapshot +- [104ddfb7] vcsim: set VirtualDisk backing datastore +- [f3f51c58] Add support for VM export +- [505b5c65] vcsim: Implement enter/exit maintenance mode +- [a1f8a328] vcsim: Implement add/remove license +- [585cf5e1] vcsim: add portgroup related operations +- [a7e79a7e] vcsim: add fields support +- [e2944227] vim25: Move internal stuff to internal package +- [c4cab690] Add support for SOAP request operation ID header +- [895573a5] vcsim: remove use of df program for datastore info +- [4dd9a518] Skip version check when using 6.7-dev API +- [cc2ed7db] Change optional ResourceAllocationInfo fields to pointers +- [3f145230] Use base type for DVS backing info +- [df1c3132] Add vm.console command +- [829b3f99] Fixup recent tasks output +- [c4e473af] Add '-refresh' option to host.storage.info +- [3df440c8] toolbox: avoid race when closing channels on stop +- [badad9a1] toolbox: reset session when invalidated by the vmx +- [a1a96c8f] Include "Name" in device.info -json +- [defe810c] vcsim: add FileQuery support to datastore search +- [93f62ef7] Default vm.migrate 
pool to the host pool +- [5fcca79e] vcsim: add HostConfigInfo template +- [4fea6863] govc: add tasks and task.cancel commands +- [596e51a0] Use ovf to import vmdk +- [920a70c1] vcsim: add HostSystem hardware property +- [9e2f8a78] Add info about maintenance mode in host.info +- [78f3fc19] Avoid panic if ova import task is canceled +- [11827c7a] toolbox: default to tar format for directory archives +- [8811f9bf] toolbox: make gzip optional for directory archive transfer +- [9703fe19] toolbox: avoid blocking the RPC channel when transferring process IO +- [d6f60304] Add view and filter support to object.collect command +- [3527a5f8] Tolerate repeated Close for file follower +- [ddd32366] govc: add reboot option to host.shutdown +- [4d9061ac] toolbox: use host management IP for guest file transfer +- [7d956b6b] toolbox: add Client Upload and Download methods +- [c7111c63] toolbox: support single file download via archive handler +- [ebb77d7c] Use vcsim in bats tests +- [4bb89668] vCenter cluster testbed automation +- [ad960e95] toolbox: SendGuestInfo before the vmx asks us to +- [bdea7ff3] toolbox: update vmw-guestinfo +- [51d12609] toolbox: remove receiver from DefaultStartCommand +- [114329fc] Add host thumbprint for use with guest file transfer +- [5083a277] Add FindByUuid method for Virtual Machine +- [e1ab84af] toolbox: map exec.ErrNotFound to vix.FileNotFound +- [d1091087] toolbox: pass URL to ArchiveHandler Read/Write methods +- [cddc353c] toolbox: make directory archive read/write customizable +- [ba6720ce] toolbox: add http and exec round trippers +- [b35abbc8] Handle object names containing a '/' +- [ac4891fb] toolbox: fix ListFiles when given a symlink +- [60a6510f] Minor correction in README.md +- [0c583dbc] toolbox: support transferring /proc files from guest +- [0833484e] vcsim: Fix merging of default devices +- [c9aaa3fa] Move toolbox from vmware/vic to govmomi +- [f6a734f5] vcsim: Add cdrom and scsi controller to Model VMs +- [9d47dd13] Move vcsim from vmware/vic to govmomi + + +## [Release v0.15.0](https://github.com/vmware/govmomi/compare/v0.14.0...v0.15.0) + +> Release Date: 2017-06-19 + +### ⚠️ BREAKING + +### 📖 Commits + +- [b63044e5] Release 0.15.0 +- [3d357ef2] Add dvs.portgroup.info usage +- [72977afb] Add support for guest.FileManager directory download +- [94837bf7] Update examples +- [e1bbcf52] Update wsdl generator +- [b16a3d81] fix the WaitOptions struct, MaxWaitSeconds is optional, but we can set the value 0 +- [9ca7a2b5] Support removal of ExtraConfig entries +- [86cc210c] Guest command updates +- [9c5f63e9] Doc updates +- [6d714f9e] New examples: datastores, hosts and virtualmachines using view package +- [f48e1151] update spew to be inline with testify +- [6f5c037c] Adjust message slice passed to include +- [48509bc3] Fix package name +- [6f635b73] Add host.shutdown command +- [67b13b52] Add doc on metric.sample instance flag ([#726](https://github.com/vmware/govmomi/issues/726)) +- [8bff8355] Fix tail n=0 case ([#725](https://github.com/vmware/govmomi/issues/725)) +- [10e6ced9] Update copyright ([#723](https://github.com/vmware/govmomi/issues/723)) +- [6f8ebd89] Allow caller to supply custom tail behavior ([#722](https://github.com/vmware/govmomi/issues/722)) +- [35caa01b] Add options to host.autostart.add ([#719](https://github.com/vmware/govmomi/issues/719)) +- [2030458d] Add VC options command ([#717](https://github.com/vmware/govmomi/issues/717)) +- [0ccad10c] Exported FindSnapshot() Method ([#715](https://github.com/vmware/govmomi/issues/715)) +- 
[34202aca] Additional wrapper functions for SPBM +- [c7f718b1] Add AuthorizationManager {Enable,Disable}Methods +- [d5e08cd2] Add PBM client and wrapper methods +- [58019ca9] Add generated types and methods for PBM +- [58960380] Regenerate against current vmodl.db +- [f736458f] Support non-Go clients in xml decoder + + +## [Release v0.14.0](https://github.com/vmware/govmomi/compare/v0.13.0...v0.14.0) + +> Release Date: 2017-04-08 + +### ⚠️ BREAKING + +### 📖 Commits + +- [9bfdc5ce] Release 0.14.0 +- [3ba0eba5] Release 0.13.0 +- [86063832] Add object.find command +- [0391e8eb] Adds FindManagedObject method. +- [796e87c8] Include embedded fields in object.collect output +- [2536e792] Use Duration flag for vm.ip -wait flag +- [3aa64170] Merge commit 'b0b51b50f40da2752c35266b7535b5bbbc8659e3' into marema31/govc-vm-ip-wait +- [59466881] Implement EthernetCardBackingInfo for OpaqueNetwork +- [0d2e1b22] Finder: support changing object root in find mode +- [9ded9d10] Add Bash completion script +- [3bd4ab46] Add QueryVirtualDiskInfo +- [16f6aa4f] Emacs: add metric select +- [3763321e] Add unit conversion to metric CSV +- [b0b51b50] Add -wait option to govc vm.ip to allow non-blocking query +- [f0d4774a] Add json support to metric ls and sample commands +- [c9de0310] Add performance manager and govc metric commands +- [d758f694] Add check for nil envelope +- [ab595fb3] Remove deferred Close() call in follower's Read() + + +## [Release v0.13.0](https://github.com/vmware/govmomi/compare/v0.12.1...v0.13.0) + +> Release Date: 2017-03-02 + +### 💫 `vcsim` (Simulator) + +- [5f7efaf1] esxcli FirewallInfo fixes ([#661](https://github.com/vmware/govmomi/issues/661)) + +### ⚠️ BREAKING + +### 📖 Commits + +- [b4a3f7a1] Release 0.13.0 +- [5bf03cb4] Add vm.guest.tools command +- [b4ef3b73] Host is optional for MarkAsVirtualMachine ([#675](https://github.com/vmware/govmomi/issues/675)) +- [f4a3ffe5] Add vsan and disk commands / helpers ([#672](https://github.com/vmware/govmomi/issues/672)) +- [1f82c282] Handle the case where e.VirtualSystem is nil ([#671](https://github.com/vmware/govmomi/issues/671)) +- [dd346974] Remove object.ListView ([#669](https://github.com/vmware/govmomi/issues/669)) +- [4994038a] Wraps the ContainerView managed object. ([#667](https://github.com/vmware/govmomi/issues/667)) +- [93064c06] Handle nil TaskInfo in task.Wait callback [#2](https://github.com/vmware/govmomi/issues/2) ([#666](https://github.com/vmware/govmomi/issues/666)) +- [f1f5b6cb] Handle nil TaskInfo in task.Wait callback ([#665](https://github.com/vmware/govmomi/issues/665)) +- [f3cf126d] Support alternative './...' 
syntax for finder ([#664](https://github.com/vmware/govmomi/issues/664)) +- [9bda6c3e] Finder: support automatic Folder recursion ([#663](https://github.com/vmware/govmomi/issues/663)) +- [0a28e595] Add a command line option to change an existing disk attached to a VM ([#658](https://github.com/vmware/govmomi/issues/658)) +- [3e95cb11] Attach and list RDM/LUN ([#656](https://github.com/vmware/govmomi/issues/656)) +- [5f7efaf1] vcsim: esxcli FirewallInfo fixes ([#661](https://github.com/vmware/govmomi/issues/661)) +- [17e6545f] Add device option to WaitForNetIP ([#660](https://github.com/vmware/govmomi/issues/660)) +- [ba9e3f44] Fix vm.change test +- [e66c8344] Add the option to describe a VM using the annotation option in ConfigSpec ([#657](https://github.com/vmware/govmomi/issues/657)) +- [505fcf9c] Update doc +- [913c0eb4] Add support for reading and changing SyncTimeWithHost option ([#539](https://github.com/vmware/govmomi/issues/539)) +- [682494e1] Remove _Task suffix from vapp methods +- [733acc9e] Emacs: add govc-command-history +- [ea52d587] Add object.collect command ([#652](https://github.com/vmware/govmomi/issues/652)) +- [f49782a8] Update email address for contributor Bruce Downs + + +## [Release v0.12.1](https://github.com/vmware/govmomi/compare/v0.12.0...v0.12.1) + +> Release Date: 2016-12-19 + +### ⚠️ BREAKING + +### 📖 Commits + +- [6103db21] Release 0.12.1 +- [45a53517] Note 6.5 support +- [fec40b21] Add '-f' flag to logs command ([#643](https://github.com/vmware/govmomi/issues/643)) +- [40cf9f80] govc.el: auth-source integration ([#648](https://github.com/vmware/govmomi/issues/648)) +- [ca99f8de] Add govc-command customization option ([#645](https://github.com/vmware/govmomi/issues/645)) +- [ad6e5634] Avoid Finder panic when SetDatacenter is not called ([#640](https://github.com/vmware/govmomi/issues/640)) +- [b5c807e3] Add storage support to vm.migrate ([#641](https://github.com/vmware/govmomi/issues/641)) +- [1a7dc61e] govc/version: skip first char in git version mismatch error ([#642](https://github.com/vmware/govmomi/issues/642)) +- [6bc730e1] Add Slack links +- [e152c355] Add DatastorePath helper ([#638](https://github.com/vmware/govmomi/issues/638)) +- [5b4d5215] Add support for file backed serialport devices ([#637](https://github.com/vmware/govmomi/issues/637)) +- [f49bd564] Add vm.ip docs ([#636](https://github.com/vmware/govmomi/issues/636)) -* Add new vSphere 6.7 APIs + +## [Release v0.12.0](https://github.com/vmware/govmomi/compare/v0.11.4...v0.12.0) -* Decrease LoginExtensionByCertificate tunnel usage +> Release Date: 2016-12-01 -* SAML token authentication support via SessionManager.LoginByToken +### ⚠️ BREAKING -* New SSO admin client for managing users +### 📖 Commits -* New STS client for issuing and renewing SAML tokens +- [ab40ac73] Release 0.12.0 +- [e702e188] Disable use of service ticket for datastore HTTP access by default ([#635](https://github.com/vmware/govmomi/issues/635)) +- [1fba1af7] Attach context to HTTP requests for cancellations +- [79cb3d93] Support InjectOvfEnv without PowerOn when importing +- [117118a2] Support stdin as import options source +- [b10f20f4] Don't ignore version/manifest for existing sessions +- [82929d3f] Add basic VirtualNVMEController support +- [757a2d6d] re-generate vim25 using 6.5.0 -* New Lookup Service client for discovering endpoints such as STS and ssoadmin + +## [Release v0.11.4](https://github.com/vmware/govmomi/compare/v0.11.3...v0.11.4) -* Switch from gvt to go dep for managing dependencies +> Release Date: 
2016-11-15 -### 0.17.1 (2018-03-19) +### ⚠️ BREAKING -* vcsim: add Destroy method for Folder and Datacenter types +### 📖 Commits -* In progress.Reader emit final report on EOF. +- [b9bcc6f4] Release 0.11.4 +- [dbbf84e8] Add authz role helpers and commands +- [765b34dc] Add folder/pod examples +- [79cb52fd] Add host.account examples +- [2a2cab2a] Add host.portgroup.change examples -* vcsim: add EventManager.QueryEvents + +## [Release v0.11.3](https://github.com/vmware/govmomi/compare/v0.11.2...v0.11.3) -### 0.17.0 (2018-02-28) +> Release Date: 2016-11-08 -* Add HostStorageSystem.AttachScsiLun method +### ⚠️ BREAKING -* Avoid possible panic in Datastore.Stat (#969) +### 📖 Commits -* Destroy event history collectors (#962) +- [e16673dd] Release 0.11.3 +- [629a573f] Add -product-version flag to dvs.create +- [83028634] Allow DatastoreFile follower to drain current body -* Add VirtualDiskManager.CreateChildDisk method + +## [Release v0.11.2](https://github.com/vmware/govmomi/compare/v0.11.1...v0.11.2) + +> Release Date: 2016-11-01 + +### ⚠️ BREAKING + +### 📖 Commits + +- [cd80b8e8] Release 0.11.2 +- [f15dcbdc] Avoid possible NPE in VirtualMachine.Device method +- [128b352e] Add support for OpaqueNetwork type +- [c5b9a266] Add host account manager support for 5.5 + + +## [Release v0.11.1](https://github.com/vmware/govmomi/compare/v0.11.0...v0.11.1) + +> Release Date: 2016-10-27 + +### ⚠️ BREAKING + +### 📖 Commits + +- [1a7df5e3] Release 0.11.1 +- [1ae858d1] Add support for VirtualApp in pool.change command +- [91b2ad48] Release script tweaks + + +## [Release v0.11.0](https://github.com/vmware/govmomi/compare/v0.10.0...v0.11.0) + +> Release Date: 2016-10-25 -### 0.16.0 (2017-11-08) +### ⚠️ BREAKING + +### 📖 Commits + +- [a16901d7] Release 0.11.0 +- [4fc9deb4] Add object destroy and rename commands +- [82634835] Add dvs.portgroup.change command + + +## [Release v0.10.0](https://github.com/vmware/govmomi/compare/v0.9.0...v0.10.0) + +> Release Date: 2016-10-20 + +### ⚠️ BREAKING + +### 📖 Commits + +- [bb498f73] Release 0.10.0 +- [468a15af] Release script updates +- [1c3499c4] Documentation updates +- [1e52d88a] Update contributors +- [e3d59fd9] Fix snapshot.tree on vm with no snapshots +- [711fdd9c] Add host.date info and change commands +- [16d7514a] Add govc session ls and rm commands +- [73c471a9] Add HostConfigManager field checks +- [d7f94557] Improve cluster/host add thumbprint support +- [fea8955b] Add session.Locale var to change default locale +- [eefe6cc1] Add service ticket thumbprint validation +- [3a0a61a6] Set default locale to en_US +- [aa1a9a84] TLS enhancements +- [9f0e9654] Treat DatastoreFile follower Close as "stop" +- [838b2efa] Support typeattr for enum string types +- [dcbc9d56] Make vm.ip esxcli test optional +- [9e20e0ae] Remove vca references +- [7c708b2e] Adding vSPC proxyURI to govc + + +## [Release v0.9.0](https://github.com/vmware/govmomi/compare/v0.8.0...v0.9.0) -* Add support for SOAP request operation ID header - -* Moved ovf helpers from govc import.ovf command to ovf and nfc packages - -* Added guest/toolbox (client) package - -* Added toolbox package and toolbox command - -* Added simulator package and vcsim command - -### 0.15.0 (2017-06-19) - -* WaitOptions.MaxWaitSeconds is now optional - -* Support removal of ExtraConfig entries - -* GuestPosixFileAttributes OwnerId and GroupId fields are now pointers, - rather than omitempty ints to allow chown with root uid:gid - -* Updated examples/ using view package - -* Add DatastoreFile.TailFunc method - -* Export 
VirtualMachine.FindSnapshot method - -* Add AuthorizationManager {Enable,Disable}Methods - -* Add PBM client - -### 0.14.0 (2017-04-08) - -* Add view.ContainerView type and methods - -* Add Collector.RetrieveWithFilter method - -* Add property.Filter type - -* Implement EthernetCardBackingInfo for OpaqueNetwork - -* Finder: support changing object root in find mode - -* Add VirtualDiskManager.QueryVirtualDiskInfo - -* Add performance.Manager APIs - -### 0.13.0 (2017-03-02) - -* Add DatastoreFileManager API wrapper - -* Add HostVsanInternalSystem API wrappers - -* Add Container support to view package - -* Finder supports Folder recursion without specifying a path - -* Add VirtualMachine.QueryConfigTarget method - -* Add device option to VirtualMachine.WaitForNetIP - -* Remove _Task suffix from vapp methods - -### 0.12.1 (2016-12-19) - -* Add DiagnosticLog helper - -* Add DatastorePath helper - -### 0.12.0 (2016-12-01) - -* Disable use of service ticket for datastore HTTP access by default - -* Attach context to HTTP requests for cancellations - -* Update to vim25/6.5 API - -### 0.11.4 (2016-11-15) - -* Add object.AuthorizationManager methods: RetrieveRolePermissions, RetrieveAllPermissions, AddRole, RemoveRole, UpdateRole - -### 0.11.3 (2016-11-08) - -* Allow DatastoreFile.Follow reader to drain current body after stopping - -### 0.11.2 (2016-11-01) - -* Avoid possible NPE in VirtualMachine.Device method - -* Add support for OpaqueNetwork type to Finder - -* Add HostConfigManager.AccountManager support for ESX 5.5 - -### 0.11.1 (2016-10-27) - -* Add Finder.ResourcePoolListAll method - -### 0.11.0 (2016-10-25) - -* Add object.DistributedVirtualPortgroup.Reconfigure method - -### 0.10.0 (2016-10-20) - -* Add option to set soap.Client.UserAgent - -* Add service ticket thumbprint validation - -* Update use of http.DefaultTransport fields to 1.7 - -* Set default locale to en_US (override with GOVMOMI_LOCALE env var) - -* Add object.HostCertificateInfo (types.HostCertificateManagerCertificateInfo helpers) - -* Add object.HostCertificateManager type and HostConfigManager.CertificateManager method - -* Add soap.Client SetRootCAs and SetDialTLS methods - -### 0.9.0 (2016-09-09) - -* Add object.DatastoreFile helpers for streaming and tailing datastore files - -* Add object VirtualMachine.Unregister method - -* Add object.ListView methods: Add, Remove, Reset - -* Update to Go 1.7 - using stdlib's context package - -### 0.8.0 (2016-06-30) - -* Add session.Manager.AcquireLocalTicket - -* Include StoragePod in Finder.FolderList - -* Add Finder methods for finding by ManagedObjectReference: Element, ObjectReference - -* Add mo.ManagedObjectReference methods: Reference, String, FromString - -* Add support using SessionManagerGenericServiceTicket.HostName for Datastore HTTP access - -### 0.7.1 (2016-06-03) - -* Fix object.ObjectName method - -### 0.7.0 (2016-06-02) - -* Move InventoryPath field to object.Common - -* Add HostDatastoreSystem.CreateLocalDatastore method - -* Add DatastoreNamespaceManager methods: CreateDirectory, DeleteDirectory - -* Add HostServiceSystem - -* Add HostStorageSystem methods: MarkAsSdd, MarkAsNonSdd, MarkAsLocal, MarkAsNonLocal - -* Add HostStorageSystem.RescanAllHba method - -### 0.6.2 (2016-05-11) - -* Get complete file details in Datastore.Stat - -* SOAP decoding fixes - -* Add VirtualMachine.RemoveAllSnapshot - -### 0.6.1 (2016-04-30) - -* Fix mo.Entity interface - -### 0.6.0 (2016-04-29) - -* Add Common.Rename method - -* Add mo.Entity interface - -* Add OptionManager - -* 
Add Finder.FolderList method - -* Add VirtualMachine.WaitForNetIP method - -* Add VirtualMachine.RevertToSnapshot method - -* Add Datastore.Download method - -### 0.5.0 (2016-03-30) - -Generated fields using xsd type 'int' change to Go type 'int32' - -VirtualDevice.UnitNumber field changed to pointer type - -### 0.4.0 (2016-02-26) - -* Add method to convert virtual device list to array with virtual device - changes that can be used in the VirtualMachineConfigSpec. - -* Make datastore cluster traversable in lister - -* Add finder.DatastoreCluster methods (also known as storage pods) - -* Add Drone CI check - -* Add object.Datastore Type and AttachedClusterHosts methods - -* Add finder.*OrDefault methods - -### 0.3.0 (2016-01-16) - -* Add object.VirtualNicManager wrapper - -* Add object.HostVsanSystem wrapper - -* Add object.HostSystem methods: EnterMaintenanceMode, ExitMaintenanceMode, Disconnect, Reconnect - -* Add finder.Folder method - -* Add object.Common.Destroy method - -* Add object.ComputeResource.Reconfigure method - -* Add license.AssignmentManager wrapper - -* Add object.HostFirewallSystem wrapper - -* Add object.DiagnosticManager wrapper - -* Add LoginExtensionByCertificate support - -* Add object.ExtensionManager - -... - -### 0.2.0 (2015-09-15) - -* Update to vim25/6.0 API - -* Stop returning children from `ManagedObjectList` - - Change the `ManagedObjectList` function in the `find` package to only - return the managed objects specified by the path argument and not their - children. The original behavior was used by govc's `ls` command and is - now available in the newly added function `ManagedObjectListChildren`. - -* Add retry functionality to vim25 package - -* Change finder functions to no longer take varargs - - The `find` package had functions to return a list of objects, given a - variable number of patterns. This makes it impossible to distinguish which - patterns produced results and which ones didn't. - - In particular for govc, where multiple arguments can be passed from the - command line, it is useful to let the user know which ones produce results - and which ones don't. - - To evaluate multiple patterns, the user should call the find functions - multiple times (either serially or in parallel). - -* Make optional boolean fields pointers (`vim25/types`). - - False is the zero value of a boolean field, which means they are not serialized - if the field is marked "omitempty". If the field is a pointer instead, the zero - value will be the nil pointer, and both true and false values are serialized. - -### 0.1.0 (2015-03-17) - -Prior to this version the API of this library was in flux. - -Notable changes w.r.t. the state of this library before March 2015 are: - -* All functions that may execute a request take a `context.Context` parameter. -* The `vim25` package contains a minimal client implementation. -* The property collector and its convenience functions live in the `property` package. 
+> Release Date: 2016-09-09 + +### ⚠️ BREAKING + +### 📖 Commits + +- [f9184c1d] Release 0.9.0 +- [e050cb6d] Add govc -h flag +- [a4343ea8] Set default ScsiCtlrUnitNumber +- [a920d73d] Add -R option to datastore.ls +- [f517decc] Fix SCSI device unit number selection +- [abaf7597] Add DatastoreFile helpers +- [7cfa7491] Make Datastore ServiceTicket optional +- [9ad57862] Add vm.migrate command +- [c66458f9] Add govc vm.{un}register commands +- [54c0c6e5] Checking result of reflect.TypeOf is not nil before continuing +- [ea0189ea] Fix flags.NewOptionalBool panic +- [a9cdf437] Add govc guest command tests +- [38dee111] Add VirtualMachine.Unregister func +- [98b50d49] make curl follow HTTP redirects +- [8a27691f] make goreportcard happy +- [bf66f750] Add govc vm snapshot commands +- [eb02131a] Validate vm.clone -vm flag value +- [62159d11] Add device.usb.add command +- [27e02431] Remove a bunch of context.TODO() calls. +- [a9cee43a] Fixing tailing for events command +- [4fa7b32a] Bump to 1.7 and start using new context pkg +- [4b7c59bf] Fix missing datastore name with vm.clone -force=false +- [e3642fce] Fix deletion of powered off vApp +- [63d60025] Support stdin/stdout in datastore upload/download +- [e149909e] Emacs: add govc-session-network +- [0ccc1788] Emacs: add govc json diff +- [f1d6e127] Add host.portgroup.change command +- [6f441a84] Add host.portgroup.info command +- [aaf40729] Add HostNetworkPolicy to host.vswitch.info +- [5ccb0572] Add json support to host.vswitch.info command +- [9d19d1f7] Support instance uuid in SearchFlag +- [2d3bfc9f] Add json support to esxcli command +- [bac04959] Support multiple NICs with vm.ip -esxcli +- [b3177d23] Add -unclaimed flag to host.storage.info command +- [b1234a90] govc - populate 'Path' fields in xxx.info output +- [7cab0ab6] Implemented additional ListView methods +- [498cb97d] Add 'Annotation' attribute to importx options. +- [223168f0] Add NetworkMapping section to importx options.
+- [5c708f6b] Remove vendor target from the Makefile +- [f8199eb8] Handle errors in QueryVirtualDiskUUid function ([#548](https://github.com/vmware/govmomi/issues/548)) +- [73dcde2c] vendor github.com/davecgh/go-spew/spew +- [e1e407f7] vendor golang.org/x/net/context +- [e3c3cd0a] Populate network mapping from ovf envelope ([#546](https://github.com/vmware/govmomi/issues/546)) +- [fa6668dc] Add QueryVirtualDiskUuid function ([#545](https://github.com/vmware/govmomi/issues/545)) +- [17682d5b] Fixes panic in govc events + + +## [Release v0.8.0](https://github.com/vmware/govmomi/compare/v0.7.1...v0.8.0) + +> Release Date: 2016-06-30 + +### ⚠️ BREAKING + +### 📖 Commits + +- [c0c7ce63] Release 0.8.0 +- [ce4b0be6] Disable datastore service ticket hostname usage +- [3e44fe88] Add support for login via local ticket +- [acf37905] Add StoragePod support to govc folder.create +- [94d4e2c9] Include StoragePod in Finder.FolderList +- [473f3885] Avoid use of eval with govc env +- [4fb7ad2e] Add datacenter.create folder option +- [77ea6f88] Avoid vm.info panic against vcsim +- [95b2bc4d] Session persistence improvements +- [720bbd10] Add type attribute to soap.Fault Detail +- [ff7b5b0d] Add filtering for use of datastore service ticket +- [fe9d7b52] Add support for Finder lookup via moref +- [c26c7976] Use ticket HostName for Datastore http access +- [bea2a43c] Add govc/vm.markasvm command +- [9101528d] Add govc/vm.markastemplate command +- [982e64b8] Add vm.markastemplate + + +## [Release v0.7.1](https://github.com/vmware/govmomi/compare/v0.7.0...v0.7.1) + +> Release Date: 2016-06-03 + +### ⚠️ BREAKING + +### 📖 Commits + +- [2cad28d0] Fix Datastore upload/download against VC + + +## [Release v0.7.0](https://github.com/vmware/govmomi/compare/v0.6.2...v0.7.0) + +> Release Date: 2016-06-02 + +### ⚠️ BREAKING + +### 📖 Commits + +- [6906d301] Release 0.7.0 +- [558321df] Move InventoryPath field to object.Common +- [4147a6ae] Add -require flag to govc version command +- [d9fd9a4b] Add support for local type in datastore.create +- [650b5800] Fix vm.create disk scsi controller lookup +- [9463b5e5] Update changelog for govc to add datastore -namespace flag +- [4aab41b8] Update changelog with DatastoreNamespaceManager methods +- [4d6ea358] Support mkdir/rm of namespace on vsan +- [bb7e2fd7] InjectOvfEnv() should work with VSphere +- [91ca6bd5] Add host.service command +- [2f369a29] Add host.storage.mark command +- [b001e05b] Add -rescan option to host.storage.info command + + +## [Release v0.6.2](https://github.com/vmware/govmomi/compare/v0.6.1...v0.6.2) + +> Release Date: 2016-05-13 + +### ⚠️ BREAKING + +### 📖 Commits + +- [9051bd6b] Release 0.6.2 +- [3ab0d9b2] Get complete file details in Datastore.Stat +- [0c21607e] Convert types when possible +- [648d945a] Avoid xsi:type overwriting type attribute +- [4e0680c1] adding remove all snapshots to vm objects + + +## [Release v0.6.1](https://github.com/vmware/govmomi/compare/v0.6.0...v0.6.1) + +> Release Date: 2016-04-30 + +### ⚠️ BREAKING + +### 📖 Commits + +- [18154e51] Release 0.6.1 +- [47098806] Fix mo.Entity interface + + +## [Release v0.6.0](https://github.com/vmware/govmomi/compare/v0.5.0...v0.6.0) + +> Release Date: 2016-04-29 + +### ⚠️ BREAKING + +### 📖 Commits + +- [2c1d977a] Release 0.6.0 +- [cc686c51] Add folder.moveinto command +- [8e85a8d2] Add folder.{create,destroy,rename} methods +- [0ba22d24] Add Common.Rename method +- [61792ed3] Fix Finder.FolderList check +- [b6be92a1] Restore optional DatacenterFlag +- [53903a3a] Add OutputFlag support to govc 
about command +- [e66f7793] Add OptionManager and host.option commands +- [9d69fe4b] Add debug xmlformat script +- [f1786bec] Add option to use the same path for debug runs +- [99c8c5eb] Add folder.info command +- [eca4105a] Add datacenter.info command +- [71484c40] Add mo.Entity interface +- [388df2f1] Add helper to wait for multiple VM IPs +- [fc9f58d0] Add RevertToSnapshot +- [a4aca111] Add govc env command +- [ef17f4bd] Update CI config +- [fa91a600] Add host.account commands +- [44bb6d06] Update release install instructions +- [08ba4835] Leave AddressType empty in EthernetCardTypes +- [f9704e39] Add vm clone +- [e6969120] Add datastore.Download method +- [1aca660c] device.remove: add keep option + +## [Release v0.5.0](https://github.com/vmware/govmomi/compare/v0.4.0...v0.5.0) + +> Release Date: 2016-03-30 + +### ⚠️ BREAKING + +### 📖 Commits + +- [c1b29993] Release 0.5.0 +- [b8549681] Use VirtualDeviceList for import.vmdk +- [cf96f70d] Remove debug flags from pool tests +- [f74a896d] Switch to int32 type for xsd int fields +- [074494df] Regenerate against 6.0u2 wsdl +- [ce9314c4] Include license header in generated files +- [957c8827] Add pointer field white list to generator +- [2c1d1950] Change pool recursive destroy to children destroy +- [5d34409f] Add dvs.portgroup.info command +- [216031c3] Update docs +- [f7dfcc98] Remove govc-test pools in teardown hook +- [556a9b17] Simplify pool destroy test +- [4e47b140] Add folder management to vm.create +- [7c33bcb3] Update test ESX IP in Drone secrets file +- [1b6ec477] Regenerate Drone secrets file +- [f64ea833] Implemented the ability to tail the vSphere event stream - govc tail and force flag added to events command +- [fd7d320f] Including github.com/davecgh/go-spew/spew in go get +- [1d4efec0] Including github.com/davecgh/go-spew/spew in go get +- [424d3611] The -dump option now requests a recursive traversal as -json does +- [b45747f3] Added new -dump output flag for pretty printing underlying objects using davecgh/go-spew +- [a243716c] Run govc tests against ESX using Drone +- [fb75c63e] Double quotes network name to prevent space in name from failing the tests +- [564944ba] test_helper.bash updated to conditionally set env variables +- [c9c6e38f] Added new govc vm.disk.create -mode option for selecting one of the VirtualDiskMode types +- [6922c88b] Add -net flag to device.info command +- [dff2c197] Fix VirtualDeviceList.CreateFloppy +- [c7d8cd3e] Ran gofmt on create.go +- [e077bcf5] Fix issue with optional UnitNumber (v2) +- [539ad504] Added arguments to govc vm.disk.create for thick provisioning and eager scrubbing, as requested in issue [#254](https://github.com/vmware/govmomi/issues/254) +- [e66c6df9] Handle import statement for types too +- [265d8bdb] Remove hardcoded urn:vim25 value from vim_wsdl.rb + +## [Release v0.4.0](https://github.com/vmware/govmomi/compare/v0.3.0...v0.4.0) + +> Release Date: 2016-02-26 + +### ⚠️ BREAKING + +### 📖 Commits + +- [b3d202ab] Release 0.4.0 +- [749da321] Fix vm.change's ExtraConfig values being truncated at equal signs +- [13fbc59d] Add switch to specify protocol version in SOAPAction header +- [07013a97] Update CHANGELOG +- [bfe414fe] Allow vm.create to take datastore cluster argument +- [dda71761] Include reference to datastore in CreateDisk +- [855abdb3] Make NewKey function public +- [d0031106] Use custom datastore flags in vm.create +- [306b613d] Modify govc's vm.create to create VM in one shot +- [e96130b4] Add extra datastore arguments to vm.create +- [0a2da16d] Add datastore cluster
methods to finder +- [c69e9bc1] Allow StoragePod type to be traversed +- [4d2ea3f4] added explicit path during clone +- [3d8eb102] Update missing property whitelist +- [779ae0a1] re-generate vim25 using 6.0 Update 1b (vimbase [#3024326](https://github.com/vmware/govmomi/issues/3024326)) +- [53c29f6a] Handle import statements same as include +- [a738f89d] Update govc.el URL +- [da2a249e] Doc updates +- [47e46425] govc.el: minor fixes for distribution as a package +- [8459ceb9] handle GOVC_TEST_URL=user:pass[@IP](https://github.com/IP) pattern +- [3b669760] Add Emacs interface to govc +- [7ec8028d] Update README to include Drone build status and local build instructions +- [2ec65fbe] Add config for Drone CI build +- [5437c466] introduce Datastore.Type() +- [983571af] introduce IsVC method and start using it +- [0732f137] Introduce AttachedClusterHosts +- [18945281] start using new helper functions for govc/flags +- [044d904a] Add some common functions to find/finder.go +- [534dabbd] Support vapp in pool.info command +- [4d9c6c72] Fix bats tests +- [5e04d5ca] Add -p and -a options to govc datastore.ls command +- [33963263] Added check for missing ovf deployment section + + +## [Release v0.3.0](https://github.com/vmware/govmomi/compare/v0.2.0...v0.3.0) + +> Release Date: 2016-01-15 + +### ⚠️ BREAKING + +### 📖 Commits + +- [501f6106] Mark 0.3.0 in change log +- [83a26512] Update contributors +- [995d970f] Print os.Args[0] in error messages +- [0a4c9782] Move stat function to object.Datastore +- [8a0d4217] Support VirtualApp in the lister +- [82734ef3] Support empty folder in SearchFlag.VirtualMachines +- [f64f878f] Add support for custom session keep alive handler +- [2d498658] Use OptionalBool for ExpandableReservation +- [ac9a39b0] Script to capture vpxd traffic on VCSA +- [3f473628] Script to capture and decrypt hostd SOAP traffic +- [eccc3e21] Move govc url.Parse wrapper to soap.ParseURL +- [e1031f44] Don't assume sshClient firewall rule is disabled +- [cd5d8baa] Let the lister recurse into a ComputeHost +- [b601a586] Specify the new entity's name upon import +- [a5e26981] Explicitly instantiate and register flags +- [aca77c67] Parameterize datastore in VM tests +- [37324472] Pass context to command and flag functions +- [6f955173] Minor optimization to encoding usage +- [0f4aee8b] Create VMFS datastore with datastore.create +- [ec724783] Add host storage commands +- [debdd854] Run license script +- [64022512] Fix license script to work for uncommitted files +- [5cb0c344] Remove host reference from HostFirewallSystem +- [4fb4052a] Change the comment that mentions ha-datacenter +- [b76ad0eb] Let the ESXi to figure out datastore name +- [918188dc] Add helper method to get VM power state +- [29a2f027] Add permissions.{ls,set,remove} commands +- [f27787a1] Add DatacenterFlag.ManagedObjects helper +- [0e629647] Option to disable API version check +- [42d899d0] Add commands to add and remove datastores +- [369e0e7f] Check host state in Datastore.AttachedHosts +- [7adf8375] Test that vm.info -r prints mo names +- [3198242e] Change ComputeResource.Hosts to return HostSystem +- [b34f346e] Support property collection for embedded types +- [8035c180] Fix vm nested hv option +- [b1d9d3c2] Update copyright years in code headers +- [c99e7bac] Add dvs commands +- [c30b7f17] Support DVS lookup in finder +- [094fbdfe] Embed Reference interface in NetworkReference +- [0657cf76] Add DVS helpers +- [6e96a1db] Add host.vnic.{service,info} commands +- [ae6b0b77] Add VsanSystem and VirtualNicManager wrappers +- 
[24297494] Add vsan flags to cluster.change command +- [4088502d] Add license.assigned.list id flag +- [d089489e] Add cluster.add license flag +- [31ee6e03] Add vm.change options to set hv/mmu +- [a414852e] Refactor host.add command to use HostConnectFlag +- [51543392] Add cluster.{create,change,add} commands +- [8262e1da] Add cluster related host commands +- [2443b364] Add HostConnectFlag +- [8ae7da82] Add object.HostSystem methods +- [0f630dd9] Add finder.Folder method +- [7cd5fbb5] Add bash function to save/load GOVC environments +- [12f26c21] Add object.Common.Destroy method +- [2ab8aa59] Add ComputeResource.Reconfigure method +- [5f47f155] Add flags.NewOptionalBool +- [25fe42b2] Add -feature flag to license list commands +- [2e6c0476] Add license.InfoList list wrapper +- [ef7371af] Add license assignment commands +- [5005e6e4] Add license.AssignmentManager +- [69a23bd4] Use object.Common in license.Manager +- [dbce3faf] Rename receiver variable for consistency +- [80705c11] Pass pointer to bps uint64 in last progress report +- [26e77c8e] VirtualMachine: Add Customize function on object.VirtualMachine +- [c2a78973] Add license.decode command +- [b3a7e07e] Add DistributedVirtualPortgroup support to vm.info +- [1b11ad02] Fix KeepAlive +- [3ecfd0db] Add HostFirewallSystem wrapper +- [9ded9c1a] KeepAlive support with certificate based login +- [cf2a879b] Add DiagnosticManager and logs commands +- [7b14760a] Update README.md +- [ad694500] Export Datastore.ServiceTicket method +- [76690239] Added a method to create snapshot of a virtual machine +- [6d4932af] Use service ticket for datastore file access +- [5fcc29f6] Fix vcsa ssh config +- [ac390ec8] Retry on empty result from property collector +- [f3041b2c] Add methods for client certificate based auth +- [b9edc663] Add extension manager and govc commands +- [9057659c] Fix key composition in building OVF spec +- [f56f6e80] Move OVF environment related code to env{,test}.go +- [b33c9aef] Add minimal doc to ovf package +- [3d40aefb] Added verbose option to the import.spec feature +- [1df0a81d] change for looking up a VM using instanceUUID +- [5f4d36cd] Introduce govc vapp.{info|destroy|power} +- [88795252] Handle the import.spec case where no spec file is provided +- [bcdc53fb] Add inventory path to govc info commands +- [305371a8] Collect govc host and pool info in one call +- [bfd47026] Relax the convention around importing an ova +- [3742a8aa] don't start goroutine while context is nil + + +## [Release v0.2.0](https://github.com/vmware/govmomi/compare/prerelease-v0.1.0-73-gfc131d4...v0.2.0) + +> Release Date: 2015-09-15 + +### ⏮ Reverts + +- [2900f2ff] Add Host information to vm.info + +### ⚠️ BREAKING + +### 📖 Commits + +- [b3315079] Mark 0.2.0 in change log +- [cc3bcbee] Add mode argument to release script +- [ae4a6e53] Build govc with new cross compilation facilities +- [4708d165] Derive CONTRIBUTORS from commit history +- [00909f48] Move contrib/ -> scripts/ +- [a0f4f799] Capitalization +- [13baa0e4] Split import functionality into independent flags +- [6363d0e2] Added ovf.Property output to import.spec +- [7af121df] Update change log +- [f9deb385] Fix event.Manager category cache +- [7f0a892d] Avoid tabwriter in events command +- [29601b46] Use vm.power force flag for hard shutdown/reboot +- [ea833cf5] Add VirtualDiskManager CreateVirtualDisk wrapper +- [bfabd01d] Interative clean up of bats testing +- [7cba62d9] Clean up of vcsa creation script +- [631d6228] Add serial port URI info to device.info output +- [0b31dcff] Add -json 
support to device.info command +- [54e324d1] Add govc vm.info resources option +- [9cc5d8f5] Add helper method to wait for virtual machine power state. +- [9ddd6337] Remove superfluous math.Pow calculations +- [5272b1e9] Added common method of humanizing byte strings +- [3145d146] Add helper method to check if VMware Tools is running in the guest OS. +- [e4f4c737] Misc clean up +- [01f2aed0] Add host name to vm.info +- [f24ec75a] Use property.Collector.Retrieve() in vm.info +- [a779c3b7] Renamed vm.info VmInfos back to VirtualMachines +- [2900f2ff] Revert "Add Host information to vm.info" +- [2a567478] Add -hints option to host.esxcli command +- [1f0708e2] Add options to importing an ovf or and ova file +- [debde780] Only retrieve "currentSession" property +- [b5187c16] Update CONTRIBUTORS +- [3e4ced8c] Added the ability to specify ovf properties during deployment +- [688a6b18] Introduce more VirtualApp methods +- [b1f0cb0c] Add flag to specify destination folder for import.ovf and import.ova +- [c9fcf1ce] Add check for error reading ova file +- [edb0a2cf] clone vmware/rbvmomi repo if it's missing +- [40c26fc6] use e.Object.Reference().Type as suggested by Doug +- [c1442f95] introduce CreateVApp and CreateChildVM_Task +- [25405362] add VirtualAppList and VirtualApp methods to Finder +- [121f075c] Add CustomFieldsManager wrapper and cli commands +- [dd016de3] include VirtualApp in ls -l output +- [b5db4d6d] Provide ability to override url username and password +- [11d5ae9c] Add OVF unmarshalling +- [135569e7] Update travis.yml for new infra +- [822432eb] Make govet stop complaining +- [baf9149e] Add datastore.info cli command +- [2b93c199] Add serial port matcher to SelectByBackingInfo +- [26ba22de] Merge branch 'gavrie-master' +- [62591576] Add Host information to vm.info +- [a90019ab] Add methods for useful properties to VirtualMachine +- [502963c4] Add Relocate method to VirtualMachine +- [7f4b6d38] Add String method to objects for pretty printing +- [99f57f16] Add events helpers and cli command +- [4c989ac3] Update CONTRIBUTORS +- [ad7d1917] Update to vim25/6.0 API +- [ad39adb9] Add net.address flag +- [1acf418c] Add Usage for host.esxcli +- [2e00fdb1] Modify archive.go bug + + +## [Release prerelease-v0.1.0-73-gfc131d4](https://github.com/vmware/govmomi/compare/prerelease-v0.1.0-62-g7734772...prerelease-v0.1.0-73-gfc131d4) + +> Release Date: 2015-07-13 + +### ⚠️ BREAKING + +### 📖 Commits + +- [e01555f9] Add command to add host to datacenter +- [efbd3293] Stop returning children from `ManagedObjectList` +- [d16670f5] Update CONTRIBUTORS +- [97fbf898] Mention GOVC_USERNAME and GOVC_PASSWORD in CHANGELOG +- [8766bda0] Add test to check for flag name collisions +- [791b3365] Remove flags for overriding username and password +- [85957949] include GOVC_USERNAME and GOVC_PASSWORD in govc README +- [8584259a] Export variables in release script + + +## [Release prerelease-v0.1.0-62-g7734772](https://github.com/vmware/govmomi/compare/prerelease-v0.1.0-52-g871f5d4...prerelease-v0.1.0-62-g7734772) + +> Release Date: 2015-07-06 + +### ⚠️ BREAKING + +### 📖 Commits + +- [14889008] Add test for GOVC_USERNAME and GOVC_PASSWORD +- [c0a984cd] Only run license tests against evaluation license +- [293ac813] Allow override of username and password +- [e053bdf2] Add extraConfig option to vm.change and vm.info +- [1dec0695] Update CONTRIBUTORS +- [985291d5] Add missing types to list.ToElement + + +## [Release 
prerelease-v0.1.0-52-g871f5d4](https://github.com/vmware/govmomi/compare/v0.1.0...prerelease-v0.1.0-52-g871f5d4) + +> Release Date: 2015-06-16 + +### ⏮ Reverts + +- [8bec13f7] Fix git dirty status error in build script + +### ⚠️ BREAKING + +### 📖 Commits + +- [871f5d4f] Add script to create a draft prerelease +- [8bec13f7] Revert "Fix git dirty status error in build script" +- [c825a3c7] Only use annotated tags to describe a version +- [66320cb0] Retry twice on temporary network errors in govc +- [67be5f1d] Add retry functionality to vim25 package +- [fba0548b] Add method to destroy a compute resource +- [2add2f7a] Add methods to add standalone or clustered hosts +- [de297fcb] Add ability to create, read and modify clusters +- [f10480af] Change finder functions to no longer take varargs +- [4bc93a66] Fix resource pool creation/modification +- [b434a9a8] Rename persist flag to persist-session +- [d85ad215] Ignore ManagedObjectNotFound in list results +- [4c497373] Add example that lists datastores +- [5d153787] Update govc CHANGELOG +- [0165e2de] Add flag to toggle persisting session to disk +- [8acb2f28] Add Mevan to CONTRIBUTORS +- [add15217] Ignore missing environmentBrowser field +- [447d18cd] Fix error when using SDRS datastores +- [e85f6d59] Find ComputeResource objects with find package +- [55f984e8] Test package only depends on vim25 +- [dbe47230] Drop omitempty tag from optional pointer fields +- [749f0bfa] Interpret negative values for unsigned fields +- [49a34992] Update CHANGELOG +- [263780f3] Update code to work with bool pointer fields +- [93aad8da] Make optional bool fields pointers +- [b7c51f61] Return errors for unexpected HTTP statuses +- [62ca329a] Abort client tests on errors +- [ae345e7f] Rename LICENSE file +- [a783a8c6] Add govc CHANGELOG +- [ba707586] Add commands to configure the autostart manager +- [af6a188e] Re-enable search index test +- [ceea450c] Update govc README +- [ea5c9a52] Fix git dirty status error in build script + + +## [Release v0.1.0](https://github.com/vmware/govmomi/compare/test...v0.1.0) + +> Release Date: 2015-03-17 + +### ⚠️ BREAKING + +### 📖 Commits + +- [477dcaf9] Cross-compile govc using gox +- [8593d9c7] Add version variable that can be set by the linker +- [fb38ca45] Add CHANGELOG +- [76f8f1a1] Add package docs to client.go +- [27bf35df] Use context.Context in client in root package +- [f3b8162f] Comment out broken test +- [a1d9d1e7] Drop the _gen filename suffix +- [91650a1f] Add context.Context argument to object package +- [1814113a] Use vim25.Client throughout codebase +- [b977114e] Move property retrieval functions to property package +- [8c3243d8] Add lightweight client structure to vim25 package +- [ec4b5b85] Add context.Context argument to find/list packages +- [7eecfbc7] Make Wait function in property package standalone +- [6c1982c8] Add keep alive for soap.RoundTripper +- [1324d1f0] Return nil UserSession when not authenticated +- [ae7ea3dd] Comments for task.Wait +- [a53a6b2c] Add context parameter to object.Task functions +- [f6f44097] Move functionality to wait for task to new package +- [ad2303cf] Move Ancestors function to vim25/mo +- [fb9e1439] Move PropertyCollector to new property package +- [a6618591] Move Reference to vim25/mo +- [bfdb90f1] Bind virtual machine to guest operation wrappers +- [ec0c16a7] Move HasFault to vim25/types +- [683ca537] Move wrappers for managed objects to object package +- [223a07f8] Add GetServiceContent function to vim25/soap +- [25b07674] Decouple factory functions from client +- [b96cf609] 
Move SessionManager to new session package +- [ea8d5d11] Return on error in SessionManager +- [7d58a49e] Mutate copy of parameter instead of parameter itself +- [e158fd95] Marshal soap.Client instead of govmomi.Client +- [1336ad45] Embed soap.Client in govmomi.Client +- [15cfd514] Work with pointer to url.URL +- [be2936f8] Move guest related wrappers to new guest package +- [b772ba28] Move LicenseManager to new license package +- [7ac1477f] Move EventManager to new event package +- [2053e065] Retrieve dependencies before running test +- [2d14321e] Add context.Context argument to RoundTripper +- [64f716b2] Include type of request in summarized debug log +- [40249c87] Store reference to http.Transport +- [ac77f0c5] Move debugging code in soap.Client to own struct +- [c8fab31b] Loosen .ovf match in ova.import +- [9f685e92] And further fixing the merge... go fmt. +- [8dbb438b] Merge remote-tracking branch 'upstream/master' into event_manager +- [e57a557c] created session manager wrapper +- [5525d5c6] Change return pattern in CreateDatacenter +- [8acd5512] Update contributors +- [7138d375] Coding style consistency +- [951e9194] added SessionIsActive to Client +- [2211e73d] Add CreateFolder method +- [eef40cc0] Add Login/Logout functions to client struct +- [3c7dea04] Update contributors +- [9c4a9202] Fixed error when attempting to access datastore +- [05ee0e62] Add PropertiesN function on client struct +- [01ee2fd5] Adding EventManager so that events can be queried for +- [8d10cfc7] Restrict permissions on session file +- [88b5d03c] Key session file off of entire URL +- [9354d314] Error types for getter functions on finder +- [a30287dc] Add description for pool.create +- [77466af0] Prefix option list in help output +- [cbb8d0b2] Create multiple resource pools through pool.create +- [8d4699d8] Add usage and description for pool.destroy +- [2e195a92] Change pool.change to take multiple arguments +- [38e4a2b2] Add usage and description for pool.info +- [2f286768] Add usage and description for pool.create +- [413fa901] Set insert_key = false +- [d6c2b33e] Update travis.yml +- [b878c20a] Adding CustomizationSpecManager +- [7c8f3e56] Add vm mark as vm and mark as template features +- [033d02e9] Update contributors +- [18919172] Add cpu and memory usage to host.info +- [b29f93c1] Adding the RegisterVM task. +- [e6bf8bb5] Add error types for Finder +- [852578b9] Support multiple hosts in host.info command +- [f1899c63] Set InventoryPath field +- [3a5c1cf3] Add InventoryPath field +- [624f21a4] Add resource pool cli commands +- [4c7cd61f] Add ResourcePool wrapper methods +- [761d43e5] Include ResourcePool in ls -l output +- [d2daf706] Support nested resource pools in lister +- [4d9d9a72] Add vm.change cli command +- [e6ebcd7f] bats fixup: destroy datacenter +- [65838131] Disable vcsa box password expiration +- [7a6e737b] Add CONTRIBUTORS file +- [1cbe968d] Issue [#192](https://github.com/vmware/govmomi/issues/192): HostSystem doesn't seem to be returning the correct host. +- [116a4044] fix a problem of ignored https_proxy environment variable with https scheme +- [df423c32] Add create and destroy datacenter to govc. 
+- [035bd12c] Usage for devices.{cdrom,floppy}.* +- [68e50dd3] make storage resource manager +- [b28d6f42] Specify default network in test helper +- [4b388e67] Fix boot order test +- [4414a07e] Expand vm.vnc command +- [e329e6e7] rename the session file for windows naming check +- [706520fa] use filepath for filesystem related path operations +- [ceb35f13] Add -f flag to datastore.rm +- [6498890f] Default VM memory to 1GiB +- [591b74f4] Include description for device.cdrom commands +- [815f0286] Add usage to device.cdrom.insert +- [f2209c2b] Flag description casing +- [5e52668c] Add usage to import commands +- [23cf4d35] Expand datastore.ls +- [bca8ef73] Expose underlying VimFault through Fault() function +- [90edb2bc] Add Usage() function to subset of commands +- [afdc145a] Implement subset of license manager +- [14765d07] Add net.adapter option to network flag +- [18c2cce0] Add CreateEthernetCard method +- [9b2730f0] Don't run vm.destroy if there is no input +- [611ced85] Add new ops to vm.power command +- [6cd9f466] Add VM power ops +- [7918063c] Work on README +- [db17cddd] Check minimum API version from client flag +- [df075430] Don't run datastore.rm if there is no input +- [e49a6d57] Move environment variables names into constants +- [2cfe267f] Add device.scsi command +- [6df44c1a] Support scsi disk.controller type in vm.create +- [39a60bbf] Add CreateSCSIController method +- [136fabe5] Rename vm.create disk.adapter to disk.controller +- [9c51314c] Change disk related commands to use new helpers +- [b0c895e5] Add VirtualDisk support to device helpers +- [a00f4545] Add helpers for creating disks +- [16283936] Add FindDiskController helper +- [dda056dc] Add VirtualDeviceList.FindSCSIController method +- [5402017a] FindByBackingInfo -> SelectByBackingInfo +- [0ff5759c] Add vm disk related bats tests +- [8f1e183a] Output disk file backing in device.info +- [e7cfba4b] Remove datastore test files +- [6b883be5] Use DeviceAdd helper in vm.network.add command +- [eb5881ae] Use device name in vm.network.change command +- [b7503468] Remove vm.network.remove command +- [0b81619a] Add vm.network.change cli command +- [0af5c4cf] Use VirtualDeviceList helpers in vm.network.remove +- [94c62da0] Add VirtualDeviceList FindByBackingInfo method +- [c247b80c] Move govc resource finders to govmomi/find package +- [5f0c8dd4] Add vm.info bats test +- [5d99454d] mv govc/flags/list -> govmomi/list +- [028bd3ff] Fix HostSystem.ResourcePool with cluster parent +- [48e25166] Add ls bats test +- [c5f24bce] Add host bats test +- [f965c9ad] Add default GOVC_HOST to vcsim_env +- [77fc8ade] Add network flag required test +- [b1236bf8] Add wrapper to manually run govc against vcsim +- [68831a1f] Fix network device.remove test +- [4649bf1f] Default vcsim box to 4G memory +- [b3f71333] Simplify vcsim_env helper +- [2ca11cde] Answer pending VM questions from govc +- [b6c3ff31] Move govc/test Vagrant boxes +- [b1b5b26e] Change network flag to use NetworkReference +- [83f49af7] Add network bats test +- [a8ffa576] Add NetworkReference interface +- [6fe62e29] Add vcsim_env helper +- [a616817d] Fix collapse_ws helper +- [0614961e] Add DistributedVirtualPortgroup constructor +- [1ddf6801] Cache esxcli command info +- [c713b974] Add table formatter to esxcli command +- [fd19a011] Include esxcli method info in response +- [3c9a436f] Explicit exit status check in assert_failure +- [5a63bc06] Collapse whitespace in assert_line helper +- [c9bd4312] Change vm.ip -esxcli to wait for ip +- [e97e5604] boot order test fixups +- 
[0e128e0d] 32M is plenty of memory for ttylinux +- [85ded933] Add test cleanup script +- [2bc707e7] Add device.serial cli commands +- [17fb283a] Add serial port device related helpers +- [d9b846d1] Add device.boot tests +- [b5a21e4e] Add device.floppy cli commands +- [d1d39fc3] Add floppy device related helpers +- [1e2c54c0] Refactor disk logic into disk.go +- [9dff8e74] Fix attach disk error checks +- [0f352ec3] Add vm.disk.attach +- [bdd7b37b] Refactor vm.disk.add to vm.disk.create +- [ae2e990e] Add govc functional tests +- [a707fae6] Fix alignment for 32-bit go +- [13274292] Default cli client url.User field +- [17df67ad] Add device.boot cli command +- [3c345ad7] Add device.ls -boot option +- [3b25234c] Add boot order related VirtualDeviceList helpers +- [f996c7d0] Add VirtualMachine BootOptions wrappers +- [4f3b935b] Add some DeviceType constants +- [86f90c52] Add VirtualDeviceList.Type method +- [5f3b95d7] Output MAC Address in device.info +- [58c3c64e] Add VirtualMachineList.PrimaryMacAddress helper +- [67fea291] Fix import.ovf with relative ovf source path +- [22602029] Support non-disk files in import.ovf +- [92175548] Add Upload.Headers field +- [f095536d] Fix import.ova command +- [5093303a] Add device related govc commands +- [18644254] Add device list related helpers +- [6803033e] Add device list helpers +- [4f8cd87c] Switch to BaseOptionValue for vm extra config +- [76662657] Regenerate types +- [46ec389f] Generate interface types for all base types +- [f78df469] Remove Client param from ResourcePool methods +- [ca3cd417] Add Client reference to ResourcePool +- [ffc306cc] Add Client reference to Network +- [c1138fc4] Remove Client param from HttpNfcLease methods +- [6f983a49] Add Client reference to HttpNfcLease +- [d2d566d0] Remove Client param from HostSystem methods +- [60bf1770] Add Client reference to HostSystem +- [e32542c1] Remove Client param from HostDatastoreBrowser methods +- [8956959a] Add Client reference to HostDatastoreBrowser +- [79e7da1d] Remove Client param from Folder methods +- [68b3e6dc] Add Client reference to Folder +- [da5b8ec0] Remove Client param from Datastore methods +- [f89dd25a] Add Client reference to Datastore +- [1b372efa] Remove Client param from Datacenter methods +- [ce320403] Add Client reference to Datacenter +- [b99a9529] Remove Client param from VirtualMachine methods +- [eb700d65] Add Client reference to VirtualMachine +- [673485e4] Remove config check from esxcli.GuestInfo.IpAddress +- [667df16a] Add VCSA Vagrant box +- [66b7daab] Use single consistent pattern to populate FlagSet +- [8fa06b5a] Export NewReference function +- [a4e11a3a] Check if info is nil before using it +- [8bbe7361] Add ManagedObject wrappers +- [9d5df71d] Add vm.ip -esxcli option +- [1818a2a6] Add esxcli helper for guest related info +- [ac6efdc9] Use vim.CLIInfo for esxcli command flags and help +- [5b9b34bc] Remove Cdrom function from disk flag +- [01d201ee] Use new esxcli command parser +- [7531d60e] New esxcli command parser +- [a27c9bd5] Refactor esxcli to esxcli.Executor +- [fdb2d2d0] Refactor unmarshal +- [2dd9910d] Add esxcli related types and methods +- [aad819e8] Add IsoFlag +- [df11fc04] Handle empty values in esxcli +- [6ceff6a4] Fix default network in NetworkFlag +- [bc39649d] Add DistributedVirtualPortgroup wrapper +- [a7eb1d1e] Add DVS support to NetworkFlag +- [71898a73] Support DistributedVirtualPortgroup in lister +- [1cf31f03] Regenerate mo types +- [1e7c1957] Generate mo types regardless of props +- [549a2712] tasks are no longer generated +- 
[fcf2cd94] Remove unused DiskFlag.Copy method +- [e494c312] Add DiskFlag adpater option diff --git a/vendor/github.com/vmware/govmomi/CONTRIBUTING.md b/vendor/github.com/vmware/govmomi/CONTRIBUTING.md index f6645cbf4..40537e9d9 100644 --- a/vendor/github.com/vmware/govmomi/CONTRIBUTING.md +++ b/vendor/github.com/vmware/govmomi/CONTRIBUTING.md @@ -1,101 +1,181 @@ -# Contributing to govmomi +# Contributing to `govmomi` ## Getting started First, fork the repository on GitHub to your personal account. -Note that _GOPATH_ can be any directory, the example below uses _$HOME/govmomi_. -Change _$USER_ below to your github username if they are not the same. - -``` shell -export GOPATH=$HOME/govmomi -go get github.com/vmware/govmomi -cd $GOPATH/src/github.com/vmware/govmomi -git config push.default nothing # anything to avoid pushing to vmware/govmomi by default -git remote rename origin vmware -git remote add $USER git@github.com:$USER/govmomi.git -git fetch $USER -``` - -## Installing from source +Note that `GOPATH` can be any directory, the example below uses `$HOME/govmomi`. +Change `$USER` below to your Github username if they are not the same. -Compile the govmomi libraries and install govc using: +```console +$ export GOPATH=$HOME/govmomi +$ go get github.com/vmware/govmomi -``` shell -go install -v github.com/vmware/govmomi/govc +$ cd $GOPATH/src/github.com/vmware/govmomi +$ git config push.default nothing # anything to avoid pushing to vmware/govmomi by default +$ git remote rename origin vmware +$ git remote add $USER git@github.com:$USER/govmomi.git +$ git fetch $USER ``` -Note that **govc/build.sh** is only used for building release binaries. - -## Contribution flow +## Contribution Flow This is a rough outline of what a contributor's workflow looks like: +- Create an issue describing the feature/fix - Create a topic branch from where you want to base your work. - Make commits of logical units. - Make sure your commit messages are in the proper format (see below). -- Update CHANGELOG.md and/or govc/CHANGELOG.md when appropriate. - Push your changes to a topic branch in your fork of the repository. -- Submit a pull request to vmware/govmomi. +- Submit a pull request to `vmware/govmomi`. + +See [below](#format-of-the-commit-message) for details on commit best practices +and **supported prefixes**, e.g. `govc: `. -Example: +### Example 1 - Fix a Bug in `govmomi` -``` shell -git checkout -b my-new-feature vmware/master -git commit -a -git push $USER my-new-feature +```console +$ git checkout -b issue- vmware/master +$ git add +$ git commit -m "fix: ..." -m "Closes: #" +$ git push $USER issue- ``` -### Stay in sync with upstream +### Example 2 - Add a new (non-breaking) API to `govmomi` -When your branch gets out of sync with the vmware/master branch, use the following to update: +```console +$ git checkout -b issue- vmware/master +$ git add +$ git commit -m "Add API ..." -m "Closes: #" +$ git push $USER issue- +``` + +### Example 3 - Add a Feature to `govc` -``` shell -git checkout my-new-feature -git fetch -a -git rebase vmware/master -git push --force-with-lease $USER my-new-feature +```console +$ git checkout -b issue- vmware/master +$ git add +$ git commit -m "govc: Add feature ..." -m "Closes: #" +$ git push $USER issue- ``` -### Updating pull requests +### Example 4 - Fix a Bug in `vcsim` + +```console +$ git checkout -b issue- vmware/master +$ git add +$ git commit -m "vcsim: Fix ..." 
-m "Closes: #" +$ git push $USER issue- +``` -If your PR fails to pass CI or needs changes based on code review, you'll most likely want to squash these changes into -existing commits. +### Example 5 - Document Breaking (API) Changes -If your pull request contains a single commit or your changes are related to the most recent commit, you can simply -amend the commit. +Breaking changes, e.g. to the `govmomi` APIs, are highlighted in the `CHANGELOG` +and release notes when the keyword `BREAKING:` is used in the commit message +body. -``` shell -git add . -git commit --amend -git push --force-with-lease $USER my-new-feature +The text after `BREAKING:` is used in the corresponding highlighted section. +Thus these details should be stated at the body of the commit message. +Multi-line strings are supported. + +```console +$ git checkout -b issue- vmware/master +$ git add +$ cat << EOF | git commit -F - +Add ctx to funcXYZ + +This commit introduces context.Context to function XYZ +Closes: #1234 + +BREAKING: Add ctx to funcXYZ() +EOF + +$ git push $USER issue- +``` + +### Stay in sync with Upstream + +When your branch gets out of sync with the vmware/master branch, use the +following to update (rebase): + +```console +$ git checkout issue- +$ git fetch -a +$ git rebase vmware/master +$ git push --force-with-lease $USER issue- +``` + +### Updating Pull Requests + +If your PR fails to pass CI or needs changes based on code review, you'll most +likely want to squash these changes into existing commits. + +If your pull request contains a single commit or your changes are related to the +most recent commit, you can simply amend the commit. + +```console +$ git add . +$ git commit --amend +$ git push --force-with-lease $USER issue- ``` If you need to squash changes into an earlier commit, you can use: -``` shell -git add . -git commit --fixup -git rebase -i --autosquash vmware/master -git push --force-with-lease $USER my-new-feature +```console +$ git add . +$ git commit --fixup +$ git rebase -i --autosquash vmware/master +$ git push --force-with-lease $USER issue- ``` -Be sure to add a comment to the PR indicating your new changes are ready to review, as github does not generate a -notification when you git push. +Be sure to add a comment to the PR indicating your new changes are ready to +review, as Github does not generate a notification when you git push. -### Code style +### Code Style -The coding style suggested by the Golang community is used in govmomi. See the +The coding style suggested by the Go community is used in `govmomi`. See the [style doc](https://github.com/golang/go/wiki/CodeReviewComments) for details. -Try to limit column width to 120 characters for both code and markdown documents such as this one. +Try to limit column width to 120 characters for both code and markdown documents +such as this one. ### Format of the Commit Message -We follow the conventions on [How to Write a Git Commit Message](http://chris.beams.io/posts/git-commit/). +We follow the conventions described in [How to Write a Git Commit +Message](http://chris.beams.io/posts/git-commit/). + +Be sure to include any related GitHub issue references in the commit message, +e.g. `Closes: #`. + +The [`CHANGELOG.md`](./CHANGELOG.md) and release page uses **commit message +prefixes** for grouping and highlighting. A commit message that +starts with `[prefix:] ` will place this commit under the respective +section in the `CHANGELOG`. 
+ +The following example creates a commit referencing the `issue: 1234` and puts +the commit message in the `govc` `CHANGELOG` section: + +```console +$ git commit -s -m "govc: Add CLI command X" -m "Closes: #1234" +``` + +Currently the following prefixes are used: + +- `govc:` - Use for changes to `govc` CLI +- `vcsim:` - Use for changes to vCenter Simulator +- `chore:` - Use for repository related activities +- `fix:` - Use for bug fixes +- `docs:` - Use for changes to the documentation +- `examples:` - Use for changes to examples + +### Running CI Checks and Tests +You can run both `make check` and `make test` from the top level of the +repository. -Be sure to include any related GitHub issue references in the commit message. +While `make check` will catch formatting and import errors, it will not apply +any fixes. The developer is expected to do that. ## Reporting Bugs and Creating Issues -When opening a new issue, try to roughly follow the commit message format conventions above. +When opening a new issue, try to roughly follow the commit message format +conventions above. diff --git a/vendor/github.com/vmware/govmomi/CONTRIBUTORS b/vendor/github.com/vmware/govmomi/CONTRIBUTORS index 6a421d099..8a23a1d82 100644 --- a/vendor/github.com/vmware/govmomi/CONTRIBUTORS +++ b/vendor/github.com/vmware/govmomi/CONTRIBUTORS @@ -5,104 +5,213 @@ Abhijeet Kasurde abrarshivani +Adam Chalkley Adam Shannon +Al Biheiri Alessandro Cortiana +Alex Alex Bozhenko Alex Ellis (VMware) +Aligator <8278538+yet-another-aligator@users.noreply.github.com> Alvaro Miranda Amanda H. L. de Andrade +amanpaha Amit Bathla amit bezalel Andrew Andrew Chin Andrew Kutz +Andrey Klimentyev Anfernee Yongkun Gui angystardust aniketGslab +Ankit Vaidya +Ankur Huralikoppi +Anna Carrigan +Ariel Chinn Arran Walker +Artem Anisimov +Arunesh Pandey Aryeh Weinreb +Augy StClair Austin Parker Balu Dontu bastienbc +Ben Corrie +Ben Vickers +Benjamin Davini +Benjamin Peterson +Bhavya Choudhary Bob Killen Brad Fitzpatrick +brian57860 Bruce Downs Cédric Blomart +Cheng Cheng +Chethan Venkatesh Chris Marchesi Christian Höltje Clint Greenwood +cpiment CuiHaozhi +Dan Ilan +Dan Norris +Daniel Frederick Crisman +Daniel Mueller Danny Lockard +Dave Gress +Dave Smith-Uchida Dave Tucker -Davide Agnello +David Gress David Stark +Davide Agnello Davinder Kumar +demarey +dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Deric Crago +Divyen Patel +Dnyanesh Gate Doug MacEachern +East <60801291+houfangdong@users.noreply.github.com> Eloy Coto +Eric Edens +Eric Graham <16710890+Pheric@users.noreply.github.com> Eric Gray Eric Yutao Erik Hollensbe Ethan Kaley +Evan Chu Fabio Rapposelli -Faiyaz Ahmed +Faiyaz Ahmed +Federico Pellegatta <12744504+federico-pellegatta@users.noreply.github.com> forkbomber +François Rigault freebsdly Gavin Gray Gavrie Philipson George Hicken Gerrit Renker gthombare +HakanSunay Hasan Mahmood +Heiko Reese Henrik Hodne +hkumar +hui luo +Ian Eyberg Isaac Rodman +Ivan Mikushin Ivan Porto Carrero James King +James Peach Jason Kincl Jeremy Canady jeremy-clerc Jiatong Wang +jingyizPensando João Pereira Jonas Ausevicius Jorge Sevilla +Julien PILLON +Justin J. 
Novack kayrus Kevin George +Knappek +Leslie Wang leslie-qiwa +Lintong Jiang +Liping Xue Louie Jiang +Luther Monson +Madanagopal Arunachalam maplain Marc Carmier +Marcus Tan Maria Ntalla Marin Atanasov Nikolov +Mario Trangoni +Mark Peek Matt Clay -Matthew Cosgrove +Matt Moore Matt Moriarity +Matthew Cosgrove +mbhadale +Merlijn Sebrechts Mevan Samaratunga +Michael Gasch Michal Jankowski +Mincho Tonev mingwei Nicolas Lamirault +Nikhil Kathare +Nikhil R Deshpande +Nikolas Grottendieck +Om Kumar Omar Kohl Parham Alvani +Parveen Chahal +Paul Martin <25058109+rawstorage@users.noreply.github.com> Pierre Gronlier Pieter Noordhuis prydin +rconde01 +rHermes Rowan Jacobs +Roy Ling +rsikdar runner.mei S.Çağlar Onur +Saad Malik +Sandeep Pissay Srinivasa Rao +Scott Holden Sergey Ignatov +serokles +shahra +Shalini Bhaskara +Shaozhen Ding +Shawn Neal +shylasrinivas +sky-joker +Sten Feldman +Stepan Mazurov Steve Purcell +SUMIT AGRAWAL +Syuparn Takaaki Furukawa Tamas Eger +Tanay Kothari tanishi Ted Zlatanov +Thad Craft Thibaut Ackermann +Tim McNamara +Tjeu Kayim <15987676+TjeuKayim@users.noreply.github.com> +Toomas Pelberg Trevor Dawe +tshihad Uwe Bessle Vadim Egorov Vikram Krishnamurthy +volanja Volodymyr Bobyr +Waldek Maleska +William Lam Witold Krecicki +xing-yang Yang Yang +yangxi +Yann Hodique +Yash Nitin Desai +Yassine TIJANI +yiyingy +ykakarap +Yogesh Sobale <6104071+ysobale@users.noreply.github.com> +Yue Yin +Yun Zhou Yuya Kusakabe -Zacharias Taubert +Zach G Zach Tucker +Zacharias Taubert Zee Yang +zyuxin +Кузаков Евгений diff --git a/vendor/github.com/vmware/govmomi/Dockerfile b/vendor/github.com/vmware/govmomi/Dockerfile deleted file mode 100644 index 4f84fadaa..000000000 --- a/vendor/github.com/vmware/govmomi/Dockerfile +++ /dev/null @@ -1,4 +0,0 @@ -FROM scratch -LABEL maintainer="fabio@vmware.com" -COPY govc / -ENTRYPOINT [ "/govc" ] \ No newline at end of file diff --git a/vendor/github.com/vmware/govmomi/Dockerfile.govc b/vendor/github.com/vmware/govmomi/Dockerfile.govc new file mode 100644 index 000000000..df4b56831 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/Dockerfile.govc @@ -0,0 +1,45 @@ +# Create a builder container +# golang:1.16.0-buster amd64 +FROM golang@sha256:f254180c5defa2653955e963fb0626e3d4fbbb162f7cff6490e94607d1d867ff AS build +WORKDIR /go/src/app + +# Create appuser to isolate potential vulnerabilities +# See https://stackoverflow.com/a/55757473/12429735 +ENV USER=appuser +ENV UID=10001 +RUN adduser \ + --disabled-password \ + --gecos "" \ + --shell "/sbin/nologin" \ + --no-create-home \ + --uid "${UID}" \ + "${USER}" + +# Create a new tmp directory so no bad actors can manipulate it +RUN mkdir /temporary-tmp-directory && chmod 777 /temporary-tmp-directory + +############################################################################### +# Final stage +FROM scratch + +# Allow container to use latest TLS certificates +COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ + +# Copy over appuser to run as non-root +COPY --from=build /etc/passwd /etc/passwd +COPY --from=build /etc/group /etc/group + +# Copy over the /tmp directory for golang/os.TmpDir +COPY --chown=appuser --from=build /temporary-tmp-directory /tmp + +# Copy application from external build +COPY govc /govc + +# Run all commands as non-root +USER appuser:appuser + +# session cache, etc +ENV GOVMOMI_HOME=/tmp + +# Set CMD to application with container defaults +CMD ["/govc"] diff --git a/vendor/github.com/vmware/govmomi/Dockerfile.vcsim b/vendor/github.com/vmware/govmomi/Dockerfile.vcsim 
new file mode 100644 index 000000000..31f48a979 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/Dockerfile.vcsim @@ -0,0 +1,46 @@ +# Create a builder container +# golang:1.16.0-buster amd64 +FROM golang@sha256:f254180c5defa2653955e963fb0626e3d4fbbb162f7cff6490e94607d1d867ff AS build +WORKDIR /go/src/app + +# Create appuser to isolate potential vulnerabilities +# See https://stackoverflow.com/a/55757473/12429735 +ENV USER=appuser +ENV UID=10001 +RUN adduser \ + --disabled-password \ + --gecos "" \ + --home "/nonexistent" \ + --shell "/sbin/nologin" \ + --no-create-home \ + --uid "${UID}" \ + "${USER}" + +# Create a new tmp directory so no bad actors can manipulate it +RUN mkdir /temporary-tmp-directory && chmod 777 /temporary-tmp-directory + +############################################################################### +# Final stage +FROM scratch + +# Run all commands as non-root +USER appuser:appuser + +# Allow container to use latest TLS certificates +COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ + +# Copy over appuser to run as non-root +COPY --from=build /etc/passwd /etc/passwd +COPY --from=build /etc/group /etc/group + +# Copy over the /tmp directory for golang/os.TmpDir +COPY --chown=appuser --from=build /temporary-tmp-directory /tmp + +# Expose application port +EXPOSE 8989 + +# Copy application from external build +COPY vcsim /vcsim + +# Set entrypoint to application with container defaults +CMD ["/vcsim", "-l", "0.0.0.0:8989"] diff --git a/vendor/github.com/vmware/govmomi/Gopkg.lock b/vendor/github.com/vmware/govmomi/Gopkg.lock deleted file mode 100644 index f45057fb2..000000000 --- a/vendor/github.com/vmware/govmomi/Gopkg.lock +++ /dev/null @@ -1,60 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
- - -[[projects]] - branch = "improvements" - digest = "1:b183578c34fabccaf65f1a57d2efeec2086abdce1446978d69ab3a0016cb750c" - name = "github.com/davecgh/go-xdr" - packages = ["xdr2"] - pruneopts = "NUT" - revision = "4930550ba2e22f87187498acfd78348b15f4e7a8" - source = "https://github.com/rasky/go-xdr" - -[[projects]] - digest = "1:1ab18cf8c2084968d6dca0dd46fbda9efba08664ecd7957b63c7ca57bb2455df" - name = "github.com/google/uuid" - packages = ["."] - pruneopts = "NUT" - revision = "6a5e28554805e78ea6141142aba763936c4761c0" - -[[projects]] - branch = "govmomi" - digest = "1:f49ed6cb2129e9a3ce9dde5037cb243b5849c0ec0c7973b9d1e987872d8b8cc6" - name = "github.com/kr/pretty" - packages = ["."] - pruneopts = "NUT" - revision = "2ee9d7453c02ef7fa518a83ae23644eb8872186a" - source = "https://github.com/dougm/pretty" - -[[projects]] - branch = "master" - digest = "1:c3a7836b5904db0f8b609595b619916a6831cb35b8b714aec39f96d00c6155d8" - name = "github.com/kr/text" - packages = ["."] - pruneopts = "NUT" - revision = "7cafcd837844e784b526369c9bce262804aebc60" - -[[projects]] - branch = "master" - digest = "1:4bea31865971675c482ed875caeabe7d2182dcb47d52900b7da5236d66dc9970" - name = "github.com/vmware/vmw-guestinfo" - packages = [ - "bdoor", - "message", - "vmcheck", - ] - pruneopts = "NUT" - revision = "25eff159a728be87e103a0b8045e08273f4dbec4" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - input-imports = [ - "github.com/davecgh/go-xdr/xdr2", - "github.com/google/uuid", - "github.com/kr/pretty", - "github.com/vmware/vmw-guestinfo/message", - "github.com/vmware/vmw-guestinfo/vmcheck", - ] - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/vendor/github.com/vmware/govmomi/Gopkg.toml b/vendor/github.com/vmware/govmomi/Gopkg.toml deleted file mode 100644 index 4c4d6765e..000000000 --- a/vendor/github.com/vmware/govmomi/Gopkg.toml +++ /dev/null @@ -1,19 +0,0 @@ -# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md -# for detailed Gopkg.toml documentation. -# -# Refer to https://github.com/toml-lang/toml for detailed TOML docs. - -[prune] - non-go = true - go-tests = true - unused-packages = true - -[[constraint]] - branch = "improvements" - name = "github.com/davecgh/go-xdr" - source = "https://github.com/rasky/go-xdr" - -[[constraint]] - branch = "govmomi" - name = "github.com/kr/pretty" - source = "https://github.com/dougm/pretty" diff --git a/vendor/github.com/vmware/govmomi/Makefile b/vendor/github.com/vmware/govmomi/Makefile index ee17fe586..1be0f391e 100644 --- a/vendor/github.com/vmware/govmomi/Makefile +++ b/vendor/github.com/vmware/govmomi/Makefile @@ -1,29 +1,158 @@ -.PHONY: test +# Copyright (c) 2021 VMware, Inc. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0 + +# If you update this file, please follow +# https://suva.sh/posts/well-documented-makefiles + +# Ensure Make is run with bash shell as some syntax below is bash-specific +SHELL := /usr/bin/env bash + +# Print the help/usage when make is executed without any other arguments +.DEFAULT_GOAL := help + + +## -------------------------------------- +## Help +## -------------------------------------- + +.PHONY: help +help: ## Display usage + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make [target] \033[36m\033[0m\n\nTargets:\n"} /^[a-zA-Z_-]+:.*?##/ { printf " \033[36m%-20s\033[0m %s\n", $$1, $$2 }' $(MAKEFILE_LIST) + + +## -------------------------------------- +## Locations and programs +## -------------------------------------- + +# Directories +BIN_DIR := bin +TOOLS_DIR := hack/tools +TOOLS_BIN_DIR := $(TOOLS_DIR)/bin + +# Tooling binaries +GO ?= $(shell command -v go 2>/dev/null) +GOLANGCI_LINT := $(TOOLS_BIN_DIR)/golangci-lint + + +## -------------------------------------- +## Prerequisites +## -------------------------------------- + +# Do not proceed unless the go binary is present. +ifeq (,$(strip $(GO))) +$(error The "go" program cannot be found) +endif + + +## -------------------------------------- +## Linting and fixing linter errors +## -------------------------------------- + +.PHONY: lint +lint: ## Run all the lint targets + $(MAKE) lint-go-full + +GOLANGCI_LINT_FLAGS ?= --fast=true +.PHONY: lint-go +lint-go: $(GOLANGCI_LINT) ## Lint codebase + $(GOLANGCI_LINT) run -v $(GOLANGCI_LINT_FLAGS) + +.PHONY: lint-go-full +lint-go-full: GOLANGCI_LINT_FLAGS = --fast=false +lint-go-full: lint-go ## Run slower linters to detect possible issues -all: check test +.PHONY: fix +fix: GOLANGCI_LINT_FLAGS = --fast=false --fix +fix: lint-go ## Tries to fix errors reported by lint-go-full target -check: goimports govet +.PHONY: check +check: lint-go-full +check: ## Run linters -goimports: - @echo checking go imports... - @command -v goimports >/dev/null 2>&1 || go get golang.org/x/tools/cmd/goimports - @! goimports -d . 2>&1 | egrep -v '^$$' -govet: - @echo checking go vet... - @go tool vet -structtags=false -methods=false $$(find . -mindepth 1 -maxdepth 1 -type d -not -name vendor) +## -------------------------------------- +## Tooling Binaries +## -------------------------------------- -install: +TOOLING_BINARIES := $(GOLANGCI_LINT) +tools: $(TOOLING_BINARIES) ## Build tooling binaries +.PHONY: $(TOOLING_BINARIES) +$(TOOLING_BINARIES): + cd $(TOOLS_DIR); make $(@F) + + +## -------------------------------------- +## Build / Install +## -------------------------------------- +.PHONY: install +install: ## Install govc and vcsim $(MAKE) -C govc install $(MAKE) -C vcsim install -go-test: - GORACE=history_size=5 go test -timeout 5m -count 1 -race -v $(TEST_OPTS) ./... -govc-test: install - (cd govc/test && ./vendor/github.com/sstephenson/bats/libexec/bats -t .) 
+## -------------------------------------- +## Generate +## -------------------------------------- + +.PHONY: mod +mod: ## Runs go mod tidy to validate modules + go mod tidy -v -test: go-test govc-test +.PHONY: mod-get +mod-get: ## Downloads and caches the modules + go mod download +.PHONY: doc doc: install +doc: ## Generates govc USAGE.md ./govc/usage.sh > ./govc/USAGE.md + + +## -------------------------------------- +## Tests +## -------------------------------------- + +# Test options +TEST_COUNT ?= 1 +TEST_TIMEOUT ?= 5m +TEST_RACE_HISTORY_SIZE ?= 5 +GORACE ?= history_size=$(TEST_RACE_HISTORY_SIZE) + +ifeq (-count,$(findstring -count,$(TEST_OPTS))) +$(error Use TEST_COUNT to override this option) +endif + +ifeq (-race,$(findstring -race,$(TEST_OPTS))) +$(error The -race flag is enabled by default & cannot be specified in TEST_OPTS) +endif + +ifeq (-timeout,$(findstring -timeout,$(TEST_OPTS))) +$(error Use TEST_TIMEOUT to override this option) +endif + +.PHONY: go-test +go-test: ## Runs go unit tests with race detector enabled + GORACE=$(GORACE) $(GO) test \ + -count $(TEST_COUNT) \ + -race \ + -timeout $(TEST_TIMEOUT) \ + -v $(TEST_OPTS) \ + ./... + +.PHONY: govc-test +govc-test: install +govc-test: ## Runs govc bats tests + ./govc/test/images/update.sh + (cd govc/test && ./vendor/github.com/sstephenson/bats/libexec/bats -t .) + +.PHONY: govc-test-sso +govc-test-sso: install + ./govc/test/images/update.sh + (cd govc/test && SSO_BATS=1 ./vendor/github.com/sstephenson/bats/libexec/bats -t sso.bats) + +.PHONY: govc-test-sso-assert-cert +govc-test-sso-assert-cert: + SSO_BATS_ASSERT_CERT=1 $(MAKE) govc-test-sso + +.PHONY: test +test: go-test govc-test ## Runs go-test and govc-test diff --git a/vendor/github.com/vmware/govmomi/README.md b/vendor/github.com/vmware/govmomi/README.md index 1c31bbdda..7440c627a 100644 --- a/vendor/github.com/vmware/govmomi/README.md +++ b/vendor/github.com/vmware/govmomi/README.md @@ -1,5 +1,9 @@ -[![Build Status](https://travis-ci.org/vmware/govmomi.png?branch=master)](https://travis-ci.org/vmware/govmomi) +[![Build](https://github.com/vmware/govmomi/actions/workflows/govmomi-build.yaml/badge.svg)](https://github.com/vmware/govmomi/actions/workflows/govmomi-build.yaml) +[![Tests](https://github.com/vmware/govmomi/actions/workflows/govmomi-go-tests.yaml/badge.svg)](https://github.com/vmware/govmomi/actions/workflows/govmomi-go-tests.yaml) [![Go Report Card](https://goreportcard.com/badge/github.com/vmware/govmomi)](https://goreportcard.com/report/github.com/vmware/govmomi) +[![Latest Release](https://img.shields.io/github/release/vmware/govmomi.svg?logo=github&style=flat-square)](https://github.com/vmware/govmomi/releases/latest) +[![Go Reference](https://pkg.go.dev/badge/github.com/vmware/govmomi.svg)](https://pkg.go.dev/github.com/vmware/govmomi) +[![go.mod Go version](https://img.shields.io/github/go-mod/go-version/vmware/govmomi)](https://github.com/vmware/govmomi) # govmomi @@ -15,9 +19,9 @@ In addition to the vSphere API client, this repository includes: ## Compatibility -This library is built for and tested against ESXi and vCenter 6.0, 6.5 and 6.7. +This library is built for and tested against ESXi and vCenter 6.5, 6.7 and 7.0. -It may work with versions 5.5 and 5.1, but neither are officially supported. +It may work with versions 5.1, 5.5 and 6.0, but neither are officially supported. 
## Documentation @@ -28,15 +32,23 @@ The code in the `govmomi` package is a wrapper for the code that is generated fr It primarily provides convenience functions for working with the vSphere API. See [godoc.org][godoc] for documentation. -[apiref]:http://pubs.vmware.com/vsphere-6-5/index.jsp#com.vmware.wssdk.apiref.doc/right-pane.html +[apiref]:https://code.vmware.com/apis/968/vsphere [godoc]:http://godoc.org/github.com/vmware/govmomi ## Installation -```sh -go get -u github.com/vmware/govmomi +### govmomi (Package) + +```console +$ go get -u github.com/vmware/govmomi ``` +### Binaries and Docker Images for `govc` and `vcsim` + +Installation instructions, released binaries and Docker images are documented in +the respective README files of [`govc`](govc/README.md) and +[`vcsim`](vcsim/README.md). + ## Discussion Contributors and users are encouraged to collaborate using GitHub issues and/or @@ -50,6 +62,7 @@ Changes to the API are subject to [semantic versioning](http://semver.org). Refer to the [CHANGELOG](CHANGELOG.md) for version to version changes. ## Projects using govmomi +* [collectd-vsphere](https://github.com/travis-ci/collectd-vsphere) * [Docker Machine](https://github.com/docker/machine/tree/master/drivers/vmwarevsphere) @@ -57,31 +70,37 @@ Refer to the [CHANGELOG](CHANGELOG.md) for version to version changes. * [Docker LinuxKit](https://github.com/linuxkit/linuxkit/tree/master/src/cmd/linuxkit) -* [Kubernetes](https://github.com/kubernetes/kubernetes/tree/master/pkg/cloudprovider/providers/vsphere) +* [Gru](https://github.com/dnaeon/gru) + +* [Juju](https://github.com/juju/juju) -* [Kubernetes Cloud Provider](https://github.com/kubernetes/cloud-provider-vsphere) +* [Kubernetes vSphere Cloud Provider](https://github.com/kubernetes/cloud-provider-vsphere) * [Kubernetes Cluster API](https://github.com/kubernetes-sigs/cluster-api-provider-vsphere) * [Kubernetes kops](https://github.com/kubernetes/kops/tree/master/upup/pkg/fi/cloudup/vsphere) -* [Terraform](https://github.com/terraform-providers/terraform-provider-vsphere) +* [Libretto](https://github.com/apcera/libretto/tree/master/virtualmachine/vsphere) -* [Packer](https://github.com/jetbrains-infra/packer-builder-vsphere) +* [Open Storage](https://github.com/libopenstorage/openstorage/tree/master/pkg/storageops/vsphere) -* [VMware VIC Engine](https://github.com/vmware/vic) +* [OPS](https://github.com/nanovms/ops) -* [Travis CI](https://github.com/travis-ci/jupiter-brain) +* [Packer](https://github.com/jetbrains-infra/packer-builder-vsphere) -* [collectd-vsphere](https://github.com/travis-ci/collectd-vsphere) +* [Rancher](https://github.com/rancher/rancher/blob/master/pkg/api/norman/customization/vsphere/listers.go) -* [Gru](https://github.com/dnaeon/gru) +* [Terraform](https://github.com/terraform-providers/terraform-provider-vsphere) -* [Libretto](https://github.com/apcera/libretto/tree/master/virtualmachine/vsphere) +* [Travis CI](https://github.com/travis-ci/jupiter-brain) * [Telegraf](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/vsphere) -* [Open Storage](https://github.com/libopenstorage/openstorage/tree/master/pkg/storageops/vsphere) +* [VMware Event Broker Appliance](https://github.com/vmware-samples/vcenter-event-broker-appliance/tree/development/vmware-event-router) + +* [VMware VIC Engine](https://github.com/vmware/vic) + +* [vSphere 7.0](https://docs.vmware.com/en/VMware-vSphere/7.0/rn/vsphere-esxi-vcenter-server-7-vsphere-with-kubernetes-release-notes.html) ## Related projects @@ -89,6 +108,14 @@ 
Refer to the [CHANGELOG](CHANGELOG.md) for version to version changes. * [pyvmomi](https://github.com/vmware/pyvmomi) +* [go-vmware-nsxt](https://github.com/vmware/go-vmware-nsxt) + ## License govmomi is available under the [Apache 2 license](LICENSE.txt). + +## Name + +Pronounced "go-v-mom-ie" + +Follows pyvmomi and rbvmomi: language prefix + the vSphere acronym "VM Object Management Infrastructure". diff --git a/vendor/github.com/vmware/govmomi/RELEASE.md b/vendor/github.com/vmware/govmomi/RELEASE.md new file mode 100644 index 000000000..45ed3900b --- /dev/null +++ b/vendor/github.com/vmware/govmomi/RELEASE.md @@ -0,0 +1,83 @@ +# How to create a new `govmomi` Release on Github + +On every new tag matching `v*` pushed to the repository a Github Action Release +Workflow is executed. + +The Github Actions release [workflow](.github/workflows/govmomi-release.yaml) +uses [`goreleaser`](http://goreleaser.com/) ([configuration +file](.goreleaser.yml)) and automatically creates/pushes: + +- Release artifacts for `govc` and `vcsim` to the + [release](https://github.com/vmware/govmomi/releases) page, including + `LICENSE.txt`, `README` and `CHANGELOG` +- Docker images for `vmware/govc` and `vmware/vcsim` to Docker Hub +- Source code + +⚠️ **Note:** These steps can only be performed by maintainers or administrators +of this project. + +## Verify `master` branch is up to date with the remote + +```console +git checkout master +git fetch -avp +git diff master origin/master + +# if your local and remote branches diverge run +git pull origin/master +``` + +⚠️ **Note:** These steps assume `origin` to point to the remote +`https://github.com/vmware/govmomi`, respectively +`git@github.com:vmware/govmomi`. + +## Verify `make docs` and `CONTRIBUTORS` are up to date + +⚠️ Run the following commands and commit (PR) any changes before proceeding with +the release. + +```console +make doc +./scripts/contributors.sh +if [ -z "$(git status --porcelain)" ]; then + echo "working directory clean: proceed with release" +else + echo "working directory dirty: please commit changes" +fi +``` + + +## Set `RELEASE_VERSION` variable + +This variable is used and referenced in the subsequent commands. Set it to the +**upcoming** release version, adhering to the [semantic +versioning](https://semver.org/) scheme: + +```console +export RELEASE_VERSION=v0.27.0 +``` + +## Create the Git Tag + +```console +git tag -a ${RELEASE_VERSION} -m "Release ${RELEASE_VERSION}" +``` + +## Push the new Tag + +```console +# Will trigger Github Actions Release Workflow +git push origin refs/tags/${RELEASE_VERSION} +``` + +## Verify Github Action Release Workflow + +After pushing a new release tag, the status of the workflow can be inspected +[here](https://github.com/vmware/govmomi/actions/workflows/govmomi-release.yaml). + +![Release](static/release-workflow.png "Successful Release Run") + +After a successful release, a pull request is automatically created by the +Github Actions bot to update the [CHANGELOG](CHANGELOG.md). This `CHANGELOG.md` +is also generated with `git-chglog` but uses a slightly different template +(`.chglog/CHANGELOG.tpl.md`) for rendering (issue/PR refs are excluded). diff --git a/vendor/github.com/vmware/govmomi/find/finder.go b/vendor/github.com/vmware/govmomi/find/finder.go index 03767fc35..61ac780c4 100644 --- a/vendor/github.com/vmware/govmomi/find/finder.go +++ b/vendor/github.com/vmware/govmomi/find/finder.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2014-2017 VMware, Inc. All Rights Reserved. 
+Copyright (c) 2014-2020 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -22,9 +22,11 @@ import ( "path" "strings" + "github.com/vmware/govmomi/internal" "github.com/vmware/govmomi/list" "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/property" + "github.com/vmware/govmomi/view" "github.com/vmware/govmomi/vim25" "github.com/vmware/govmomi/vim25/mo" "github.com/vmware/govmomi/vim25/types" @@ -38,16 +40,26 @@ type Finder struct { folders *object.DatacenterFolders } -func NewFinder(client *vim25.Client, all bool) *Finder { +func NewFinder(client *vim25.Client, all ...bool) *Finder { + props := false + if len(all) == 1 { + props = all[0] + } + f := &Finder{ client: client, si: object.NewSearchIndex(client), r: recurser{ Collector: property.DefaultCollector(client), - All: all, + All: props, }, } + if len(all) == 0 { + // attempt to avoid SetDatacenter() requirement + f.dc, _ = f.DefaultDatacenter(context.Background()) + } + return f } @@ -57,6 +69,18 @@ func (f *Finder) SetDatacenter(dc *object.Datacenter) *Finder { return f } +// InventoryPath composes the given object's inventory path. +// There is no vSphere property or method that provides an inventory path directly. +// This method uses the ManagedEntity.Parent field to determine the ancestry tree of the object and +// the ManagedEntity.Name field for each ancestor to compose the path. +func InventoryPath(ctx context.Context, client *vim25.Client, obj types.ManagedObjectReference) (string, error) { + entities, err := mo.Ancestors(ctx, client, client.ServiceContent.PropertyCollector, obj) + if err != nil { + return "", err + } + return internal.InventoryPath(entities), nil +} + // findRoot makes it possible to use "find" mode with a different root path. // Example: ResourcePoolList("/dc1/host/cluster1/...") func (f *Finder) findRoot(ctx context.Context, root *list.Element, parts []string) bool { @@ -93,8 +117,8 @@ func (f *Finder) find(ctx context.Context, arg string, s *spec) ([]list.Element, isPath := strings.Contains(arg, "/") root := list.Element{ - Path: "/", Object: object.NewRootFolder(f.client), + Path: "/", } parts := list.ToParts(arg) @@ -109,19 +133,10 @@ func (f *Finder) find(ctx context.Context, arg string, s *spec) ([]list.Element, return nil, err } - mes, err := mo.Ancestors(ctx, f.client, f.client.ServiceContent.PropertyCollector, pivot.Reference()) + root.Path, err = InventoryPath(ctx, f.client, pivot.Reference()) if err != nil { return nil, err } - - for _, me := range mes { - // Skip root entity in building inventory path. - if me.Parent == nil { - continue - } - root.Path = path.Join(root.Path, me.Name) - } - root.Object = pivot parts = parts[1:] } @@ -253,7 +268,7 @@ func (f *Finder) managedObjectList(ctx context.Context, path string, tl bool, in fn = f.dcReference } - if len(path) == 0 { + if path == "" { path = "." } @@ -271,8 +286,7 @@ func (f *Finder) managedObjectList(ctx context.Context, path string, tl bool, in return f.find(ctx, path, s) } -// Element returns an Element for the given ManagedObjectReference -// This method is only useful for looking up the InventoryPath of a ManagedObjectReference. +// Element is deprecated, use InventoryPath() instead. 
func (f *Finder) Element(ctx context.Context, ref types.ManagedObjectReference) (*list.Element, error) { rl := func(_ context.Context) (object.Reference, error) { return ref, nil @@ -301,7 +315,7 @@ func (f *Finder) Element(ctx context.Context, ref types.ManagedObjectReference) // ObjectReference converts the given ManagedObjectReference to a type from the object package via object.NewReference // with the object.Common.InventoryPath field set. func (f *Finder) ObjectReference(ctx context.Context, ref types.ManagedObjectReference) (object.Reference, error) { - e, err := f.Element(ctx, ref) + path, err := InventoryPath(ctx, f.client, ref) if err != nil { return nil, err } @@ -312,7 +326,7 @@ func (f *Finder) ObjectReference(ctx context.Context, ref types.ManagedObjectRef SetInventoryPath(string) } - r.(common).SetInventoryPath(e.Path) + r.(common).SetInventoryPath(path) if f.dc != nil { if ds, ok := r.(*object.Datastore); ok { @@ -776,9 +790,26 @@ func (f *Finder) NetworkList(ctx context.Context, path string) ([]object.Network return ns, nil } +// Network finds a NetworkReference using a Name, Inventory Path, ManagedObject ID, Logical Switch UUID or Segment ID. +// With standard vSphere networking, Portgroups cannot have the same name within the same network folder. +// With NSX, Portgroups can have the same name, even within the same Switch. In this case, using an inventory path +// results in a MultipleFoundError. A MOID, switch UUID or segment ID can be used instead, as both are unique. +// See also: https://kb.vmware.com/s/article/79872#Duplicate_names +// Examples: +// - Name: "dvpg-1" +// - Inventory Path: "vds-1/dvpg-1" +// - ManagedObject ID: "DistributedVirtualPortgroup:dvportgroup-53" +// - Logical Switch UUID: "da2a59b8-2450-4cb2-b5cc-79c4c1d2144c" +// - Segment ID: "/infra/segments/vnet_ce50e69b-1784-4a14-9206-ffd7f1f146f7" func (f *Finder) Network(ctx context.Context, path string) (object.NetworkReference, error) { networks, err := f.NetworkList(ctx, path) if err != nil { + if _, ok := err.(*NotFoundError); ok { + net, nerr := f.networkByID(ctx, path) + if nerr == nil { + return net, nil + } + } return nil, err } @@ -789,6 +820,41 @@ func (f *Finder) Network(ctx context.Context, path string) (object.NetworkRefere return networks[0], nil } +func (f *Finder) networkByID(ctx context.Context, path string) (object.NetworkReference, error) { + if ref := object.ReferenceFromString(path); ref != nil { + // This is a MOID + return object.NewReference(f.client, *ref).(object.NetworkReference), nil + } + + kind := []string{"DistributedVirtualPortgroup"} + + m := view.NewManager(f.client) + v, err := m.CreateContainerView(ctx, f.client.ServiceContent.RootFolder, kind, true) + if err != nil { + return nil, err + } + defer v.Destroy(ctx) + + filter := property.Filter{ + "config.logicalSwitchUuid": path, + "config.segmentId": path, + } + + refs, err := v.FindAny(ctx, kind, filter) + if err != nil { + return nil, err + } + + if len(refs) == 0 { + return nil, &NotFoundError{"network", path} + } + if len(refs) > 1 { + return nil, &MultipleFoundError{"network", path} + } + + return object.NewReference(f.client, refs[0]).(object.NetworkReference), nil +} + func (f *Finder) DefaultNetwork(ctx context.Context) (object.NetworkReference, error) { network, err := f.Network(ctx, "*") if err != nil { diff --git a/vendor/github.com/vmware/govmomi/history/collector.go b/vendor/github.com/vmware/govmomi/history/collector.go new file mode 100644 index 000000000..9b25ea85a --- /dev/null +++ 
b/vendor/github.com/vmware/govmomi/history/collector.go @@ -0,0 +1,91 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package history + +import ( + "context" + + "github.com/vmware/govmomi/property" + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" +) + +type Collector struct { + r types.ManagedObjectReference + c *vim25.Client +} + +func NewCollector(c *vim25.Client, ref types.ManagedObjectReference) *Collector { + return &Collector{ + r: ref, + c: c, + } +} + +// Reference returns the managed object reference of this collector +func (c Collector) Reference() types.ManagedObjectReference { + return c.r +} + +// Client returns the vim25 client used by this collector +func (c Collector) Client() *vim25.Client { + return c.c +} + +// Properties wraps property.DefaultCollector().RetrieveOne() and returns +// properties for the specified managed object reference +func (c Collector) Properties(ctx context.Context, r types.ManagedObjectReference, ps []string, dst interface{}) error { + return property.DefaultCollector(c.c).RetrieveOne(ctx, r, ps, dst) +} + +func (c Collector) Destroy(ctx context.Context) error { + req := types.DestroyCollector{ + This: c.r, + } + + _, err := methods.DestroyCollector(ctx, c.c, &req) + return err +} + +func (c Collector) Reset(ctx context.Context) error { + req := types.ResetCollector{ + This: c.r, + } + + _, err := methods.ResetCollector(ctx, c.c, &req) + return err +} + +func (c Collector) Rewind(ctx context.Context) error { + req := types.RewindCollector{ + This: c.r, + } + + _, err := methods.RewindCollector(ctx, c.c, &req) + return err +} + +func (c Collector) SetPageSize(ctx context.Context, maxCount int32) error { + req := types.SetCollectorPageSize{ + This: c.r, + MaxCount: maxCount, + } + + _, err := methods.SetCollectorPageSize(ctx, c.c, &req) + return err +} diff --git a/vendor/github.com/vmware/govmomi/internal/helpers.go b/vendor/github.com/vmware/govmomi/internal/helpers.go new file mode 100644 index 000000000..b3eafeadf --- /dev/null +++ b/vendor/github.com/vmware/govmomi/internal/helpers.go @@ -0,0 +1,63 @@ +/* +Copyright (c) 2020 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package internal + +import ( + "net" + "path" + + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +// InventoryPath composed of entities by Name +func InventoryPath(entities []mo.ManagedEntity) string { + val := "/" + + for _, entity := range entities { + // Skip root folder in building inventory path. + if entity.Parent == nil { + continue + } + val = path.Join(val, entity.Name) + } + + return val +} + +func HostSystemManagementIPs(config []types.VirtualNicManagerNetConfig) []net.IP { + var ips []net.IP + + for _, nc := range config { + if nc.NicType != string(types.HostVirtualNicManagerNicTypeManagement) { + continue + } + for ix := range nc.CandidateVnic { + for _, selectedVnicKey := range nc.SelectedVnic { + if nc.CandidateVnic[ix].Key != selectedVnicKey { + continue + } + ip := net.ParseIP(nc.CandidateVnic[ix].Spec.Ip.IpAddress) + if ip != nil { + ips = append(ips, ip) + } + } + } + } + + return ips +} diff --git a/vendor/github.com/vmware/govmomi/internal/methods.go b/vendor/github.com/vmware/govmomi/internal/methods.go new file mode 100644 index 000000000..95ccee8d2 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/internal/methods.go @@ -0,0 +1,123 @@ +/* +Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package internal + +import ( + "context" + + "github.com/vmware/govmomi/vim25/soap" +) + +type RetrieveDynamicTypeManagerBody struct { + Req *RetrieveDynamicTypeManagerRequest `xml:"urn:vim25 RetrieveDynamicTypeManager"` + Res *RetrieveDynamicTypeManagerResponse `xml:"urn:vim25 RetrieveDynamicTypeManagerResponse"` + Fault_ *soap.Fault +} + +func (b *RetrieveDynamicTypeManagerBody) Fault() *soap.Fault { return b.Fault_ } + +func RetrieveDynamicTypeManager(ctx context.Context, r soap.RoundTripper, req *RetrieveDynamicTypeManagerRequest) (*RetrieveDynamicTypeManagerResponse, error) { + var reqBody, resBody RetrieveDynamicTypeManagerBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RetrieveManagedMethodExecuterBody struct { + Req *RetrieveManagedMethodExecuterRequest `xml:"urn:vim25 RetrieveManagedMethodExecuter"` + Res *RetrieveManagedMethodExecuterResponse `xml:"urn:vim25 RetrieveManagedMethodExecuterResponse"` + Fault_ *soap.Fault +} + +func (b *RetrieveManagedMethodExecuterBody) Fault() *soap.Fault { return b.Fault_ } + +func RetrieveManagedMethodExecuter(ctx context.Context, r soap.RoundTripper, req *RetrieveManagedMethodExecuterRequest) (*RetrieveManagedMethodExecuterResponse, error) { + var reqBody, resBody RetrieveManagedMethodExecuterBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DynamicTypeMgrQueryMoInstancesBody struct { + Req *DynamicTypeMgrQueryMoInstancesRequest `xml:"urn:vim25 DynamicTypeMgrQueryMoInstances"` + Res *DynamicTypeMgrQueryMoInstancesResponse `xml:"urn:vim25 DynamicTypeMgrQueryMoInstancesResponse"` + Fault_ *soap.Fault +} + +func (b *DynamicTypeMgrQueryMoInstancesBody) Fault() *soap.Fault { return b.Fault_ } + +func DynamicTypeMgrQueryMoInstances(ctx context.Context, r soap.RoundTripper, req *DynamicTypeMgrQueryMoInstancesRequest) (*DynamicTypeMgrQueryMoInstancesResponse, error) { + var reqBody, resBody DynamicTypeMgrQueryMoInstancesBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DynamicTypeMgrQueryTypeInfoBody struct { + Req *DynamicTypeMgrQueryTypeInfoRequest `xml:"urn:vim25 DynamicTypeMgrQueryTypeInfo"` + Res *DynamicTypeMgrQueryTypeInfoResponse `xml:"urn:vim25 DynamicTypeMgrQueryTypeInfoResponse"` + Fault_ *soap.Fault +} + +func (b *DynamicTypeMgrQueryTypeInfoBody) Fault() *soap.Fault { return b.Fault_ } + +func DynamicTypeMgrQueryTypeInfo(ctx context.Context, r soap.RoundTripper, req *DynamicTypeMgrQueryTypeInfoRequest) (*DynamicTypeMgrQueryTypeInfoResponse, error) { + var reqBody, resBody DynamicTypeMgrQueryTypeInfoBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ExecuteSoapBody struct { + Req *ExecuteSoapRequest `xml:"urn:vim25 ExecuteSoap"` + Res *ExecuteSoapResponse `xml:"urn:vim25 ExecuteSoapResponse"` + Fault_ *soap.Fault +} + +func (b *ExecuteSoapBody) Fault() *soap.Fault { return b.Fault_ } + +func ExecuteSoap(ctx context.Context, r soap.RoundTripper, req *ExecuteSoapRequest) (*ExecuteSoapResponse, error) { + var reqBody, resBody ExecuteSoapBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} diff --git 
a/vendor/github.com/vmware/govmomi/internal/types.go b/vendor/github.com/vmware/govmomi/internal/types.go new file mode 100644 index 000000000..b84103b89 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/internal/types.go @@ -0,0 +1,270 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "reflect" + + "github.com/vmware/govmomi/vim25/types" +) + +type DynamicTypeMgrQueryMoInstancesRequest struct { + This types.ManagedObjectReference `xml:"_this"` + FilterSpec BaseDynamicTypeMgrFilterSpec `xml:"filterSpec,omitempty,typeattr"` +} + +type DynamicTypeMgrQueryMoInstancesResponse struct { + Returnval []DynamicTypeMgrMoInstance `xml:"urn:vim25 returnval"` +} + +type DynamicTypeEnumTypeInfo struct { + types.DynamicData + + Name string `xml:"name"` + WsdlName string `xml:"wsdlName"` + Version string `xml:"version"` + Value []string `xml:"value,omitempty"` + Annotation []DynamicTypeMgrAnnotation `xml:"annotation,omitempty"` +} + +func init() { + types.Add("DynamicTypeEnumTypeInfo", reflect.TypeOf((*DynamicTypeEnumTypeInfo)(nil)).Elem()) +} + +type DynamicTypeMgrAllTypeInfoRequest struct { + types.DynamicData + + ManagedTypeInfo []DynamicTypeMgrManagedTypeInfo `xml:"managedTypeInfo,omitempty"` + EnumTypeInfo []DynamicTypeEnumTypeInfo `xml:"enumTypeInfo,omitempty"` + DataTypeInfo []DynamicTypeMgrDataTypeInfo `xml:"dataTypeInfo,omitempty"` +} + +func init() { + types.Add("DynamicTypeMgrAllTypeInfo", reflect.TypeOf((*DynamicTypeMgrAllTypeInfoRequest)(nil)).Elem()) +} + +type DynamicTypeMgrAnnotation struct { + types.DynamicData + + Name string `xml:"name"` + Parameter []string `xml:"parameter,omitempty"` +} + +func init() { + types.Add("DynamicTypeMgrAnnotation", reflect.TypeOf((*DynamicTypeMgrAnnotation)(nil)).Elem()) +} + +type DynamicTypeMgrDataTypeInfo struct { + types.DynamicData + + Name string `xml:"name"` + WsdlName string `xml:"wsdlName"` + Version string `xml:"version"` + Base []string `xml:"base,omitempty"` + Property []DynamicTypeMgrPropertyTypeInfo `xml:"property,omitempty"` + Annotation []DynamicTypeMgrAnnotation `xml:"annotation,omitempty"` +} + +func init() { + types.Add("DynamicTypeMgrDataTypeInfo", reflect.TypeOf((*DynamicTypeMgrDataTypeInfo)(nil)).Elem()) +} + +func (b *DynamicTypeMgrFilterSpec) GetDynamicTypeMgrFilterSpec() *DynamicTypeMgrFilterSpec { return b } + +type BaseDynamicTypeMgrFilterSpec interface { + GetDynamicTypeMgrFilterSpec() *DynamicTypeMgrFilterSpec +} + +type DynamicTypeMgrFilterSpec struct { + types.DynamicData +} + +func init() { + types.Add("DynamicTypeMgrFilterSpec", reflect.TypeOf((*DynamicTypeMgrFilterSpec)(nil)).Elem()) +} + +type DynamicTypeMgrManagedTypeInfo struct { + types.DynamicData + + Name string `xml:"name"` + WsdlName string `xml:"wsdlName"` + Version string `xml:"version"` + Base []string `xml:"base,omitempty"` + Property []DynamicTypeMgrPropertyTypeInfo `xml:"property,omitempty"` + Method []DynamicTypeMgrMethodTypeInfo `xml:"method,omitempty"` + Annotation 
[]DynamicTypeMgrAnnotation `xml:"annotation,omitempty"` +} + +func init() { + types.Add("DynamicTypeMgrManagedTypeInfo", reflect.TypeOf((*DynamicTypeMgrManagedTypeInfo)(nil)).Elem()) +} + +type DynamicTypeMgrMethodTypeInfo struct { + types.DynamicData + + Name string `xml:"name"` + WsdlName string `xml:"wsdlName"` + Version string `xml:"version"` + ParamTypeInfo []DynamicTypeMgrParamTypeInfo `xml:"paramTypeInfo,omitempty"` + ReturnTypeInfo *DynamicTypeMgrParamTypeInfo `xml:"returnTypeInfo,omitempty"` + Fault []string `xml:"fault,omitempty"` + PrivId string `xml:"privId,omitempty"` + Annotation []DynamicTypeMgrAnnotation `xml:"annotation,omitempty"` +} + +func init() { + types.Add("DynamicTypeMgrMethodTypeInfo", reflect.TypeOf((*DynamicTypeMgrMethodTypeInfo)(nil)).Elem()) +} + +type DynamicTypeMgrMoFilterSpec struct { + DynamicTypeMgrFilterSpec + + Id string `xml:"id,omitempty"` + TypeSubstr string `xml:"typeSubstr,omitempty"` +} + +func init() { + types.Add("DynamicTypeMgrMoFilterSpec", reflect.TypeOf((*DynamicTypeMgrMoFilterSpec)(nil)).Elem()) +} + +type DynamicTypeMgrMoInstance struct { + types.DynamicData + + Id string `xml:"id"` + MoType string `xml:"moType"` +} + +func init() { + types.Add("DynamicTypeMgrMoInstance", reflect.TypeOf((*DynamicTypeMgrMoInstance)(nil)).Elem()) +} + +type DynamicTypeMgrParamTypeInfo struct { + types.DynamicData + + Name string `xml:"name"` + Version string `xml:"version"` + Type string `xml:"type"` + PrivId string `xml:"privId,omitempty"` + Annotation []DynamicTypeMgrAnnotation `xml:"annotation,omitempty"` +} + +func init() { + types.Add("DynamicTypeMgrParamTypeInfo", reflect.TypeOf((*DynamicTypeMgrParamTypeInfo)(nil)).Elem()) +} + +type DynamicTypeMgrPropertyTypeInfo struct { + types.DynamicData + + Name string `xml:"name"` + Version string `xml:"version"` + Type string `xml:"type"` + PrivId string `xml:"privId,omitempty"` + MsgIdFormat string `xml:"msgIdFormat,omitempty"` + Annotation []DynamicTypeMgrAnnotation `xml:"annotation,omitempty"` +} + +type DynamicTypeMgrQueryTypeInfoRequest struct { + This types.ManagedObjectReference `xml:"_this"` + FilterSpec BaseDynamicTypeMgrFilterSpec `xml:"filterSpec,omitempty,typeattr"` +} + +type DynamicTypeMgrQueryTypeInfoResponse struct { + Returnval DynamicTypeMgrAllTypeInfoRequest `xml:"urn:vim25 returnval"` +} + +func init() { + types.Add("DynamicTypeMgrPropertyTypeInfo", reflect.TypeOf((*DynamicTypeMgrPropertyTypeInfo)(nil)).Elem()) +} + +type DynamicTypeMgrTypeFilterSpec struct { + DynamicTypeMgrFilterSpec + + TypeSubstr string `xml:"typeSubstr,omitempty"` +} + +func init() { + types.Add("DynamicTypeMgrTypeFilterSpec", reflect.TypeOf((*DynamicTypeMgrTypeFilterSpec)(nil)).Elem()) +} + +type ReflectManagedMethodExecuterSoapArgument struct { + types.DynamicData + + Name string `xml:"name"` + Val string `xml:"val"` +} + +func init() { + types.Add("ReflectManagedMethodExecuterSoapArgument", reflect.TypeOf((*ReflectManagedMethodExecuterSoapArgument)(nil)).Elem()) +} + +type ReflectManagedMethodExecuterSoapFault struct { + types.DynamicData + + FaultMsg string `xml:"faultMsg"` + FaultDetail string `xml:"faultDetail,omitempty"` +} + +func init() { + types.Add("ReflectManagedMethodExecuterSoapFault", reflect.TypeOf((*ReflectManagedMethodExecuterSoapFault)(nil)).Elem()) +} + +type ReflectManagedMethodExecuterSoapResult struct { + types.DynamicData + + Response string `xml:"response,omitempty"` + Fault *ReflectManagedMethodExecuterSoapFault `xml:"fault,omitempty"` +} + +type RetrieveDynamicTypeManagerRequest struct { + 
This types.ManagedObjectReference `xml:"_this"` +} + +type RetrieveDynamicTypeManagerResponse struct { + Returnval *InternalDynamicTypeManager `xml:"urn:vim25 returnval"` +} + +type RetrieveManagedMethodExecuterRequest struct { + This types.ManagedObjectReference `xml:"_this"` +} + +func init() { + types.Add("RetrieveManagedMethodExecuter", reflect.TypeOf((*RetrieveManagedMethodExecuterRequest)(nil)).Elem()) +} + +type RetrieveManagedMethodExecuterResponse struct { + Returnval *ReflectManagedMethodExecuter `xml:"urn:vim25 returnval"` +} + +type InternalDynamicTypeManager struct { + types.ManagedObjectReference +} + +type ReflectManagedMethodExecuter struct { + types.ManagedObjectReference +} + +type ExecuteSoapRequest struct { + This types.ManagedObjectReference `xml:"_this"` + Moid string `xml:"moid"` + Version string `xml:"version"` + Method string `xml:"method"` + Argument []ReflectManagedMethodExecuterSoapArgument `xml:"argument,omitempty"` +} + +type ExecuteSoapResponse struct { + Returnval *ReflectManagedMethodExecuterSoapResult `xml:"urn:vim25 returnval"` +} diff --git a/vendor/github.com/vmware/govmomi/list/lister.go b/vendor/github.com/vmware/govmomi/list/lister.go index 2ee32e6bc..9a4caed68 100644 --- a/vendor/github.com/vmware/govmomi/list/lister.go +++ b/vendor/github.com/vmware/govmomi/list/lister.go @@ -165,6 +165,8 @@ func (l Lister) List(ctx context.Context) ([]Element, error) { return l.ListHostSystem(ctx) case "VirtualApp": return l.ListVirtualApp(ctx) + case "VmwareDistributedVirtualSwitch", "DistributedVirtualSwitch": + return l.ListDistributedVirtualSwitch(ctx) default: return nil, fmt.Errorf("cannot traverse type " + l.Reference.Type) } @@ -497,6 +499,69 @@ func (l Lister) ListHostSystem(ctx context.Context) ([]Element, error) { return es, nil } +func (l Lister) ListDistributedVirtualSwitch(ctx context.Context) ([]Element, error) { + ospec := types.ObjectSpec{ + Obj: l.Reference, + Skip: types.NewBool(true), + } + + fields := []string{ + "portgroup", + } + + for _, f := range fields { + tspec := types.TraversalSpec{ + Path: f, + Skip: types.NewBool(false), + Type: "DistributedVirtualSwitch", + } + + ospec.SelectSet = append(ospec.SelectSet, &tspec) + } + + childTypes := []string{ + "DistributedVirtualPortgroup", + } + + var pspecs []types.PropertySpec + for _, t := range childTypes { + pspec := types.PropertySpec{ + Type: t, + } + + if l.All { + pspec.All = types.NewBool(true) + } else { + pspec.PathSet = []string{"name"} + } + + pspecs = append(pspecs, pspec) + } + + req := types.RetrieveProperties{ + SpecSet: []types.PropertyFilterSpec{ + { + ObjectSet: []types.ObjectSpec{ospec}, + PropSet: pspecs, + }, + }, + } + + var dst []interface{} + + err := l.retrieveProperties(ctx, req, &dst) + if err != nil { + return nil, err + } + + es := []Element{} + for _, v := range dst { + es = append(es, ToElement(v.(mo.Reference), l.Prefix)) + } + + return es, nil +} + func (l Lister) ListVirtualApp(ctx context.Context) ([]Element, error) { ospec := types.ObjectSpec{ Obj: l.Reference, diff --git a/vendor/github.com/vmware/govmomi/lookup/client.go b/vendor/github.com/vmware/govmomi/lookup/client.go index 896909fce..b3c19846a 100644 --- a/vendor/github.com/vmware/govmomi/lookup/client.go +++ b/vendor/github.com/vmware/govmomi/lookup/client.go @@ -25,6 +25,7 @@ import ( "github.com/vmware/govmomi/lookup/methods" "github.com/vmware/govmomi/lookup/types" + "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/vim25" "github.com/vmware/govmomi/vim25/soap" vim 
"github.com/vmware/govmomi/vim25/types" @@ -47,12 +48,28 @@ var ( type Client struct { *soap.Client + RoundTripper soap.RoundTripper + ServiceContent types.LookupServiceContent } // NewClient returns a client targeting the SSO Lookup Service API endpoint. func NewClient(ctx context.Context, c *vim25.Client) (*Client, error) { - sc := c.Client.NewServiceClient(Path, Namespace) + // PSC may be external, attempt to derive from sts.uri + path := &url.URL{Path: Path} + if c.ServiceContent.Setting != nil { + m := object.NewOptionManager(c, *c.ServiceContent.Setting) + opts, err := m.Query(ctx, "config.vpxd.sso.sts.uri") + if err == nil && len(opts) == 1 { + u, err := url.Parse(opts[0].GetOptionValue().Value.(string)) + if err == nil { + path.Scheme = u.Scheme + path.Host = u.Host + } + } + } + + sc := c.Client.NewServiceClient(path.String(), Namespace) sc.Version = Version req := types.RetrieveServiceContent{ @@ -64,7 +81,12 @@ func NewClient(ctx context.Context, c *vim25.Client) (*Client, error) { return nil, err } - return &Client{sc, res.Returnval}, nil + return &Client{sc, sc, res.Returnval}, nil +} + +// RoundTrip dispatches to the RoundTripper field. +func (c *Client) RoundTrip(ctx context.Context, req, res soap.HasFault) error { + return c.RoundTripper.RoundTrip(ctx, req, res) } func (c *Client) List(ctx context.Context, filter *types.LookupServiceRegistrationFilter) ([]types.LookupServiceRegistrationInfo, error) { diff --git a/vendor/github.com/vmware/govmomi/nfc/lease.go b/vendor/github.com/vmware/govmomi/nfc/lease.go index 3fb85ee6f..457568033 100644 --- a/vendor/github.com/vmware/govmomi/nfc/lease.go +++ b/vendor/github.com/vmware/govmomi/nfc/lease.go @@ -18,16 +18,15 @@ package nfc import ( "context" - "errors" "fmt" "io" "path" "github.com/vmware/govmomi/property" + "github.com/vmware/govmomi/task" "github.com/vmware/govmomi/vim25" "github.com/vmware/govmomi/vim25/methods" "github.com/vmware/govmomi/vim25/mo" - "github.com/vmware/govmomi/vim25/progress" "github.com/vmware/govmomi/vim25/soap" "github.com/vmware/govmomi/vim25/types" ) @@ -195,7 +194,7 @@ func (l *Lease) Wait(ctx context.Context, items []types.OvfFileItem) (*LeaseInfo } if lease.Error != nil { - return nil, errors.New(lease.Error.LocalizedMessage) + return nil, &task.Error{LocalizedMethodFault: lease.Error} } return nil, fmt.Errorf("unexpected nfc lease state: %s", lease.State) @@ -208,8 +207,6 @@ func (l *Lease) StartUpdater(ctx context.Context, info *LeaseInfo) *LeaseUpdater func (l *Lease) Upload(ctx context.Context, item FileItem, f io.Reader, opts soap.Upload) error { if opts.Progress == nil { opts.Progress = item - } else { - opts.Progress = progress.Tee(item, opts.Progress) } // Non-disk files (such as .iso) use the PUT method. 
@@ -230,8 +227,6 @@ func (l *Lease) Upload(ctx context.Context, item FileItem, f io.Reader, opts soa func (l *Lease) DownloadFile(ctx context.Context, file string, item FileItem, opts soap.Download) error { if opts.Progress == nil { opts.Progress = item - } else { - opts.Progress = progress.Tee(item, opts.Progress) } return l.c.DownloadFile(ctx, file, item.URL, &opts) diff --git a/vendor/github.com/vmware/govmomi/nfc/lease_updater.go b/vendor/github.com/vmware/govmomi/nfc/lease_updater.go index d77c3596a..02ce9cf53 100644 --- a/vendor/github.com/vmware/govmomi/nfc/lease_updater.go +++ b/vendor/github.com/vmware/govmomi/nfc/lease_updater.go @@ -57,8 +57,8 @@ func (o FileItem) File() types.OvfFile { } type LeaseUpdater struct { - pos int64 // Number of bytes (keep first to ensure 64 bit aligment) - total int64 // Total number of bytes (keep first to ensure 64 bit aligment) + pos int64 // Number of bytes (keep first to ensure 64 bit alignment) + total int64 // Total number of bytes (keep first to ensure 64 bit alignment) lease *Lease diff --git a/vendor/github.com/vmware/govmomi/object/authorization_manager.go b/vendor/github.com/vmware/govmomi/object/authorization_manager.go index b703258fe..5cd6851a8 100644 --- a/vendor/github.com/vmware/govmomi/object/authorization_manager.go +++ b/vendor/github.com/vmware/govmomi/object/authorization_manager.go @@ -172,3 +172,50 @@ func (m AuthorizationManager) UpdateRole(ctx context.Context, id int32, name str _, err := methods.UpdateAuthorizationRole(ctx, m.Client(), &req) return err } + +func (m AuthorizationManager) HasUserPrivilegeOnEntities(ctx context.Context, entities []types.ManagedObjectReference, userName string, privID []string) ([]types.EntityPrivilege, error) { + req := types.HasUserPrivilegeOnEntities{ + This: m.Reference(), + Entities: entities, + UserName: userName, + PrivId: privID, + } + + res, err := methods.HasUserPrivilegeOnEntities(ctx, m.Client(), &req) + if err != nil { + return nil, err + } + + return res.Returnval, nil +} + +func (m AuthorizationManager) HasPrivilegeOnEntity(ctx context.Context, entity types.ManagedObjectReference, sessionID string, privID []string) ([]bool, error) { + req := types.HasPrivilegeOnEntity{ + This: m.Reference(), + Entity: entity, + SessionId: sessionID, + PrivId: privID, + } + + res, err := methods.HasPrivilegeOnEntity(ctx, m.Client(), &req) + if err != nil { + return nil, err + } + + return res.Returnval, nil +} + +func (m AuthorizationManager) FetchUserPrivilegeOnEntities(ctx context.Context, entities []types.ManagedObjectReference, userName string) ([]types.UserPrivilegeResult, error) { + req := types.FetchUserPrivilegeOnEntities{ + This: m.Reference(), + Entities: entities, + UserName: userName, + } + + res, err := methods.FetchUserPrivilegeOnEntities(ctx, m.Client(), &req) + if err != nil { + return nil, err + } + + return res.Returnval, nil +} diff --git a/vendor/github.com/vmware/govmomi/object/cluster_compute_resource.go b/vendor/github.com/vmware/govmomi/object/cluster_compute_resource.go index 24c346825..018372dfe 100644 --- a/vendor/github.com/vmware/govmomi/object/cluster_compute_resource.go +++ b/vendor/github.com/vmware/govmomi/object/cluster_compute_resource.go @@ -87,3 +87,17 @@ func (c ClusterComputeResource) MoveInto(ctx context.Context, hosts ...*HostSyst return NewTask(c.c, res.Returnval), nil } + +func (c ClusterComputeResource) PlaceVm(ctx context.Context, spec types.PlacementSpec) (*types.PlacementResult, error) { + req := types.PlaceVm{ + This: c.Reference(), + 
PlacementSpec: spec, + } + + res, err := methods.PlaceVm(ctx, c.c, &req) + if err != nil { + return nil, err + } + + return &res.Returnval, nil +} diff --git a/vendor/github.com/vmware/govmomi/object/common.go b/vendor/github.com/vmware/govmomi/object/common.go index abb4076c7..88ce78fce 100644 --- a/vendor/github.com/vmware/govmomi/object/common.go +++ b/vendor/github.com/vmware/govmomi/object/common.go @@ -75,29 +75,22 @@ func (c *Common) SetInventoryPath(p string) { c.InventoryPath = p } -// ObjectName returns the base name of the InventoryPath field if set, -// otherwise fetches the mo.ManagedEntity.Name field via the property collector. +// ObjectName fetches the mo.ManagedEntity.Name field via the property collector. func (c Common) ObjectName(ctx context.Context) (string, error) { - var o mo.ManagedEntity + var content []types.ObjectContent - err := c.Properties(ctx, c.Reference(), []string{"name"}, &o) + err := c.Properties(ctx, c.Reference(), []string{"name"}, &content) if err != nil { return "", err } - if o.Name != "" { - return o.Name, nil + for i := range content { + for _, prop := range content[i].PropSet { + return prop.Val.(string), nil + } } - // Network has its own "name" field... - var n mo.Network - - err = c.Properties(ctx, c.Reference(), []string{"name"}, &n) - if err != nil { - return "", err - } - - return n.Name, nil + return "", nil } // Properties is a wrapper for property.DefaultCollector().RetrieveOne() @@ -142,3 +135,14 @@ func (c Common) SetCustomValue(ctx context.Context, key string, value string) er _, err := methods.SetCustomValue(ctx, c.c, &req) return err } + +func ReferenceFromString(s string) *types.ManagedObjectReference { + var ref types.ManagedObjectReference + if !ref.FromString(s) { + return nil + } + if mo.IsManagedObjectType(ref.Type) { + return &ref + } + return nil +} diff --git a/vendor/github.com/vmware/govmomi/object/customization_spec_manager.go b/vendor/github.com/vmware/govmomi/object/customization_spec_manager.go index cb8b965dc..e9a3914d9 100644 --- a/vendor/github.com/vmware/govmomi/object/customization_spec_manager.go +++ b/vendor/github.com/vmware/govmomi/object/customization_spec_manager.go @@ -21,6 +21,7 @@ import ( "github.com/vmware/govmomi/vim25" "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" "github.com/vmware/govmomi/vim25/types" ) @@ -36,6 +37,12 @@ func NewCustomizationSpecManager(c *vim25.Client) *CustomizationSpecManager { return &cs } +func (cs CustomizationSpecManager) Info(ctx context.Context) ([]types.CustomizationSpecInfo, error) { + var m mo.CustomizationSpecManager + err := cs.Properties(ctx, cs.Reference(), []string{"info"}, &m) + return m.Info, err +} + func (cs CustomizationSpecManager) DoesCustomizationSpecExist(ctx context.Context, name string) (bool, error) { req := types.DoesCustomizationSpecExist{ This: cs.Reference(), diff --git a/vendor/github.com/vmware/govmomi/object/datastore.go b/vendor/github.com/vmware/govmomi/object/datastore.go index 46a99950a..65264ae15 100644 --- a/vendor/github.com/vmware/govmomi/object/datastore.go +++ b/vendor/github.com/vmware/govmomi/object/datastore.go @@ -68,6 +68,11 @@ func NewDatastore(c *vim25.Client, ref types.ManagedObjectReference) *Datastore } func (d Datastore) Path(path string) string { + var p DatastorePath + if p.FromString(path) { + return p.String() // already in "[datastore] path" format + } + return (&DatastorePath{ Datastore: d.Name(), Path: path, diff --git a/vendor/github.com/vmware/govmomi/object/datastore_file.go 
b/vendor/github.com/vmware/govmomi/object/datastore_file.go index bc010fc36..86d7d9c72 100644 --- a/vendor/github.com/vmware/govmomi/object/datastore_file.go +++ b/vendor/github.com/vmware/govmomi/object/datastore_file.go @@ -297,10 +297,8 @@ func (f *DatastoreFile) TailFunc(lines int, include func(line int, message strin nread = bsize + remain eof = true - } else { - if pos, err = f.Seek(offset, io.SeekEnd); err != nil { - return err - } + } else if pos, err = f.Seek(offset, io.SeekEnd); err != nil { + return err } if _, err = io.CopyN(buf, f, nread); err != nil { diff --git a/vendor/github.com/vmware/govmomi/object/datastore_path.go b/vendor/github.com/vmware/govmomi/object/datastore_path.go index 1563ee1e1..104c7dfe3 100644 --- a/vendor/github.com/vmware/govmomi/object/datastore_path.go +++ b/vendor/github.com/vmware/govmomi/object/datastore_path.go @@ -31,7 +31,7 @@ type DatastorePath struct { // FromString parses a datastore path. // Returns true if the path could be parsed, false otherwise. func (p *DatastorePath) FromString(s string) bool { - if len(s) == 0 { + if s == "" { return false } diff --git a/vendor/github.com/vmware/govmomi/object/diagnostic_manager.go b/vendor/github.com/vmware/govmomi/object/diagnostic_manager.go index 5baf1ad90..026dc1cb5 100644 --- a/vendor/github.com/vmware/govmomi/object/diagnostic_manager.go +++ b/vendor/github.com/vmware/govmomi/object/diagnostic_manager.go @@ -71,10 +71,8 @@ func (m DiagnosticManager) GenerateLogBundles(ctx context.Context, includeDefaul IncludeDefault: includeDefault, } - if host != nil { - for _, h := range host { - req.Host = append(req.Host, h.Reference()) - } + for _, h := range host { + req.Host = append(req.Host, h.Reference()) } res, err := methods.GenerateLogBundles_Task(ctx, m.c, &req) diff --git a/vendor/github.com/vmware/govmomi/object/distributed_virtual_portgroup.go b/vendor/github.com/vmware/govmomi/object/distributed_virtual_portgroup.go index f8ac5512c..c2abb8fab 100644 --- a/vendor/github.com/vmware/govmomi/object/distributed_virtual_portgroup.go +++ b/vendor/github.com/vmware/govmomi/object/distributed_virtual_portgroup.go @@ -36,6 +36,10 @@ func NewDistributedVirtualPortgroup(c *vim25.Client, ref types.ManagedObjectRefe } } +func (p DistributedVirtualPortgroup) GetInventoryPath() string { + return p.InventoryPath +} + // EthernetCardBackingInfo returns the VirtualDeviceBackingInfo for this DistributedVirtualPortgroup func (p DistributedVirtualPortgroup) EthernetCardBackingInfo(ctx context.Context) (types.BaseVirtualDeviceBackingInfo, error) { var dvp mo.DistributedVirtualPortgroup @@ -46,9 +50,15 @@ func (p DistributedVirtualPortgroup) EthernetCardBackingInfo(ctx context.Context return nil, err } + // From the docs at https://code.vmware.com/apis/196/vsphere/doc/vim.dvs.DistributedVirtualPortgroup.ConfigInfo.html: // "This property should always be set unless the user's setting does not have System.Read privilege on the object referred to by this property." + // Note that "the object" refers to the Switch, not the PortGroup. 
if dvp.Config.DistributedVirtualSwitch == nil { - return nil, fmt.Errorf("no System.Read privilege on: %s.%s", p.Reference(), prop) + name := p.InventoryPath + if name == "" { + name = p.Reference().String() + } + return nil, fmt.Errorf("failed to create EthernetCardBackingInfo for %s: System.Read privilege required for %s", name, prop) } if err := p.Properties(ctx, *dvp.Config.DistributedVirtualSwitch, []string{"uuid"}, &dvs); err != nil { diff --git a/vendor/github.com/vmware/govmomi/object/distributed_virtual_switch.go b/vendor/github.com/vmware/govmomi/object/distributed_virtual_switch.go index 526ce4bf7..cbfc4c3af 100644 --- a/vendor/github.com/vmware/govmomi/object/distributed_virtual_switch.go +++ b/vendor/github.com/vmware/govmomi/object/distributed_virtual_switch.go @@ -18,6 +18,7 @@ package object import ( "context" + "fmt" "github.com/vmware/govmomi/vim25" "github.com/vmware/govmomi/vim25/methods" @@ -34,8 +35,17 @@ func NewDistributedVirtualSwitch(c *vim25.Client, ref types.ManagedObjectReferen } } +func (s DistributedVirtualSwitch) GetInventoryPath() string { + return s.InventoryPath +} + func (s DistributedVirtualSwitch) EthernetCardBackingInfo(ctx context.Context) (types.BaseVirtualDeviceBackingInfo, error) { - return nil, ErrNotSupported // TODO: just to satisfy NetworkReference interface for the finder + ref := s.Reference() + name := s.InventoryPath + if name == "" { + name = ref.String() + } + return nil, fmt.Errorf("type %s (%s) cannot be used for EthernetCardBackingInfo", ref.Type, name) } func (s DistributedVirtualSwitch) Reconfigure(ctx context.Context, spec types.BaseDVSConfigSpec) (*Task, error) { @@ -78,3 +88,17 @@ func (s DistributedVirtualSwitch) FetchDVPorts(ctx context.Context, criteria *ty } return res.Returnval, nil } + +func (s DistributedVirtualSwitch) ReconfigureDVPort(ctx context.Context, spec []types.DVPortConfigSpec) (*Task, error) { + req := types.ReconfigureDVPort_Task{ + This: s.Reference(), + Port: spec, + } + + res, err := methods.ReconfigureDVPort_Task(ctx, s.Client(), &req) + if err != nil { + return nil, err + } + + return NewTask(s.Client(), res.Returnval), nil +} diff --git a/vendor/github.com/vmware/govmomi/object/folder.go b/vendor/github.com/vmware/govmomi/object/folder.go index 7a69949f9..6e0a7649b 100644 --- a/vendor/github.com/vmware/govmomi/object/folder.go +++ b/vendor/github.com/vmware/govmomi/object/folder.go @@ -225,3 +225,17 @@ func (f Folder) MoveInto(ctx context.Context, list []types.ManagedObjectReferenc return NewTask(f.c, res.Returnval), nil } + +func (f Folder) PlaceVmsXCluster(ctx context.Context, spec types.PlaceVmsXClusterSpec) (*types.PlaceVmsXClusterResult, error) { + req := types.PlaceVmsXCluster{ + This: f.Reference(), + PlacementSpec: spec, + } + + res, err := methods.PlaceVmsXCluster(ctx, f.c, &req) + if err != nil { + return nil, err + } + + return &res.Returnval, nil +} diff --git a/vendor/github.com/vmware/govmomi/object/history_collector.go b/vendor/github.com/vmware/govmomi/object/history_collector.go deleted file mode 100644 index afdcab78b..000000000 --- a/vendor/github.com/vmware/govmomi/object/history_collector.go +++ /dev/null @@ -1,72 +0,0 @@ -/* -Copyright (c) 2015 VMware, Inc. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package object - -import ( - "context" - - "github.com/vmware/govmomi/vim25" - "github.com/vmware/govmomi/vim25/methods" - "github.com/vmware/govmomi/vim25/types" -) - -type HistoryCollector struct { - Common -} - -func NewHistoryCollector(c *vim25.Client, ref types.ManagedObjectReference) *HistoryCollector { - return &HistoryCollector{ - Common: NewCommon(c, ref), - } -} - -func (h HistoryCollector) Destroy(ctx context.Context) error { - req := types.DestroyCollector{ - This: h.Reference(), - } - - _, err := methods.DestroyCollector(ctx, h.c, &req) - return err -} - -func (h HistoryCollector) Reset(ctx context.Context) error { - req := types.ResetCollector{ - This: h.Reference(), - } - - _, err := methods.ResetCollector(ctx, h.c, &req) - return err -} - -func (h HistoryCollector) Rewind(ctx context.Context) error { - req := types.RewindCollector{ - This: h.Reference(), - } - - _, err := methods.RewindCollector(ctx, h.c, &req) - return err -} - -func (h HistoryCollector) SetPageSize(ctx context.Context, maxCount int32) error { - req := types.SetCollectorPageSize{ - This: h.Reference(), - MaxCount: maxCount, - } - - _, err := methods.SetCollectorPageSize(ctx, h.c, &req) - return err -} diff --git a/vendor/github.com/vmware/govmomi/object/host_config_manager.go b/vendor/github.com/vmware/govmomi/object/host_config_manager.go index 123227ecc..eac59a32e 100644 --- a/vendor/github.com/vmware/govmomi/object/host_config_manager.go +++ b/vendor/github.com/vmware/govmomi/object/host_config_manager.go @@ -18,9 +18,9 @@ package object import ( "context" + "fmt" "github.com/vmware/govmomi/vim25" - "github.com/vmware/govmomi/vim25/mo" "github.com/vmware/govmomi/vim25/types" ) @@ -34,163 +34,138 @@ func NewHostConfigManager(c *vim25.Client, ref types.ManagedObjectReference) *Ho } } -func (m HostConfigManager) DatastoreSystem(ctx context.Context) (*HostDatastoreSystem, error) { - var h mo.HostSystem +// reference returns the ManagedObjectReference for the given HostConfigManager property name. +// An error is returned if the field is nil, of type ErrNotSupported if versioned is true. +func (m HostConfigManager) reference(ctx context.Context, name string, versioned ...bool) (types.ManagedObjectReference, error) { + prop := "configManager." 
+ name + var content []types.ObjectContent - err := m.Properties(ctx, m.Reference(), []string{"configManager.datastoreSystem"}, &h) + err := m.Properties(ctx, m.Reference(), []string{prop}, &content) if err != nil { - return nil, err + return types.ManagedObjectReference{}, err } - return NewHostDatastoreSystem(m.c, *h.ConfigManager.DatastoreSystem), nil -} + for _, c := range content { + for _, p := range c.PropSet { + if p.Name != prop { + continue + } + if ref, ok := p.Val.(types.ManagedObjectReference); ok { + return ref, nil + } + } + } -func (m HostConfigManager) NetworkSystem(ctx context.Context) (*HostNetworkSystem, error) { - var h mo.HostSystem + err = fmt.Errorf("%s %s is nil", m.Reference(), prop) + if len(versioned) == 1 && versioned[0] { + err = ErrNotSupported + } + return types.ManagedObjectReference{}, err +} - err := m.Properties(ctx, m.Reference(), []string{"configManager.networkSystem"}, &h) +func (m HostConfigManager) DatastoreSystem(ctx context.Context) (*HostDatastoreSystem, error) { + ref, err := m.reference(ctx, "datastoreSystem") if err != nil { return nil, err } + return NewHostDatastoreSystem(m.c, ref), nil +} - return NewHostNetworkSystem(m.c, *h.ConfigManager.NetworkSystem), nil +func (m HostConfigManager) NetworkSystem(ctx context.Context) (*HostNetworkSystem, error) { + ref, err := m.reference(ctx, "networkSystem") + if err != nil { + return nil, err + } + return NewHostNetworkSystem(m.c, ref), nil } func (m HostConfigManager) FirewallSystem(ctx context.Context) (*HostFirewallSystem, error) { - var h mo.HostSystem - - err := m.Properties(ctx, m.Reference(), []string{"configManager.firewallSystem"}, &h) + ref, err := m.reference(ctx, "firewallSystem") if err != nil { return nil, err } - return NewHostFirewallSystem(m.c, *h.ConfigManager.FirewallSystem), nil + return NewHostFirewallSystem(m.c, ref), nil } func (m HostConfigManager) StorageSystem(ctx context.Context) (*HostStorageSystem, error) { - var h mo.HostSystem - - err := m.Properties(ctx, m.Reference(), []string{"configManager.storageSystem"}, &h) + ref, err := m.reference(ctx, "storageSystem") if err != nil { return nil, err } - - return NewHostStorageSystem(m.c, *h.ConfigManager.StorageSystem), nil + return NewHostStorageSystem(m.c, ref), nil } func (m HostConfigManager) VirtualNicManager(ctx context.Context) (*HostVirtualNicManager, error) { - var h mo.HostSystem - - err := m.Properties(ctx, m.Reference(), []string{"configManager.virtualNicManager"}, &h) + ref, err := m.reference(ctx, "virtualNicManager") if err != nil { return nil, err } - - return NewHostVirtualNicManager(m.c, *h.ConfigManager.VirtualNicManager, m.Reference()), nil + return NewHostVirtualNicManager(m.c, ref, m.Reference()), nil } func (m HostConfigManager) VsanSystem(ctx context.Context) (*HostVsanSystem, error) { - var h mo.HostSystem - - err := m.Properties(ctx, m.Reference(), []string{"configManager.vsanSystem"}, &h) + ref, err := m.reference(ctx, "vsanSystem", true) // Added in 5.5 if err != nil { return nil, err } - - // Added in 5.5 - if h.ConfigManager.VsanSystem == nil { - return nil, ErrNotSupported - } - - return NewHostVsanSystem(m.c, *h.ConfigManager.VsanSystem), nil + return NewHostVsanSystem(m.c, ref), nil } func (m HostConfigManager) VsanInternalSystem(ctx context.Context) (*HostVsanInternalSystem, error) { - var h mo.HostSystem - - err := m.Properties(ctx, m.Reference(), []string{"configManager.vsanInternalSystem"}, &h) + ref, err := m.reference(ctx, "vsanInternalSystem", true) // Added in 5.5 if err != nil { return 
nil, err } - - // Added in 5.5 - if h.ConfigManager.VsanInternalSystem == nil { - return nil, ErrNotSupported - } - - return NewHostVsanInternalSystem(m.c, *h.ConfigManager.VsanInternalSystem), nil + return NewHostVsanInternalSystem(m.c, ref), nil } func (m HostConfigManager) AccountManager(ctx context.Context) (*HostAccountManager, error) { - var h mo.HostSystem - - err := m.Properties(ctx, m.Reference(), []string{"configManager.accountManager"}, &h) + ref, err := m.reference(ctx, "accountManager", true) // Added in 5.5 if err != nil { - return nil, err - } - - ref := h.ConfigManager.AccountManager // Added in 6.0 - if ref == nil { - // Versions < 5.5 can use the ServiceContent ref, - // but we can only use it when connected directly to ESX. - c := m.Client() - if !c.IsVC() { - ref = c.ServiceContent.AccountManager - } - - if ref == nil { - return nil, ErrNotSupported + if err == ErrNotSupported { + // Versions < 5.5 can use the ServiceContent ref, + // but only when connected directly to ESX. + if m.c.ServiceContent.AccountManager == nil { + return nil, err + } + ref = *m.c.ServiceContent.AccountManager + } else { + return nil, err } } - return NewHostAccountManager(m.c, *ref), nil + return NewHostAccountManager(m.c, ref), nil } func (m HostConfigManager) OptionManager(ctx context.Context) (*OptionManager, error) { - var h mo.HostSystem - - err := m.Properties(ctx, m.Reference(), []string{"configManager.advancedOption"}, &h) + ref, err := m.reference(ctx, "advancedOption") if err != nil { return nil, err } - - return NewOptionManager(m.c, *h.ConfigManager.AdvancedOption), nil + return NewOptionManager(m.c, ref), nil } func (m HostConfigManager) ServiceSystem(ctx context.Context) (*HostServiceSystem, error) { - var h mo.HostSystem - - err := m.Properties(ctx, m.Reference(), []string{"configManager.serviceSystem"}, &h) + ref, err := m.reference(ctx, "serviceSystem") if err != nil { return nil, err } - - return NewHostServiceSystem(m.c, *h.ConfigManager.ServiceSystem), nil + return NewHostServiceSystem(m.c, ref), nil } func (m HostConfigManager) CertificateManager(ctx context.Context) (*HostCertificateManager, error) { - var h mo.HostSystem - - err := m.Properties(ctx, m.Reference(), []string{"configManager.certificateManager"}, &h) + ref, err := m.reference(ctx, "certificateManager", true) // Added in 6.0 if err != nil { return nil, err } - - // Added in 6.0 - if h.ConfigManager.CertificateManager == nil { - return nil, ErrNotSupported - } - - return NewHostCertificateManager(m.c, *h.ConfigManager.CertificateManager, m.Reference()), nil + return NewHostCertificateManager(m.c, ref, m.Reference()), nil } func (m HostConfigManager) DateTimeSystem(ctx context.Context) (*HostDateTimeSystem, error) { - var h mo.HostSystem - - err := m.Properties(ctx, m.Reference(), []string{"configManager.dateTimeSystem"}, &h) + ref, err := m.reference(ctx, "dateTimeSystem") if err != nil { return nil, err } - - return NewHostDateTimeSystem(m.c, *h.ConfigManager.DateTimeSystem), nil + return NewHostDateTimeSystem(m.c, ref), nil } diff --git a/vendor/github.com/vmware/govmomi/object/host_datastore_system.go b/vendor/github.com/vmware/govmomi/object/host_datastore_system.go index 7b738e611..64f3add91 100644 --- a/vendor/github.com/vmware/govmomi/object/host_datastore_system.go +++ b/vendor/github.com/vmware/govmomi/object/host_datastore_system.go @@ -117,3 +117,19 @@ func (s HostDatastoreSystem) QueryVmfsDatastoreCreateOptions(ctx context.Context return res.Returnval, nil } + +func (s HostDatastoreSystem) 
ResignatureUnresolvedVmfsVolumes(ctx context.Context, devicePaths []string) (*Task, error) { + req := &types.ResignatureUnresolvedVmfsVolume_Task{ + This: s.Reference(), + ResolutionSpec: types.HostUnresolvedVmfsResignatureSpec{ + ExtentDevicePath: devicePaths, + }, + } + + res, err := methods.ResignatureUnresolvedVmfsVolume_Task(ctx, s.Client(), req) + if err != nil { + return nil, err + } + + return NewTask(s.c, res.Returnval), nil +} diff --git a/vendor/github.com/vmware/govmomi/object/host_network_system.go b/vendor/github.com/vmware/govmomi/object/host_network_system.go index c21e1ec35..340b764a5 100644 --- a/vendor/github.com/vmware/govmomi/object/host_network_system.go +++ b/vendor/github.com/vmware/govmomi/object/host_network_system.go @@ -98,18 +98,18 @@ func (o HostNetworkSystem) AddVirtualSwitch(ctx context.Context, vswitchName str } // QueryNetworkHint wraps methods.QueryNetworkHint -func (o HostNetworkSystem) QueryNetworkHint(ctx context.Context, device []string) error { +func (o HostNetworkSystem) QueryNetworkHint(ctx context.Context, device []string) ([]types.PhysicalNicHintInfo, error) { req := types.QueryNetworkHint{ This: o.Reference(), Device: device, } - _, err := methods.QueryNetworkHint(ctx, o.c, &req) + res, err := methods.QueryNetworkHint(ctx, o.c, &req) if err != nil { - return err + return nil, err } - return nil + return res.Returnval, err } // RefreshNetworkSystem wraps methods.RefreshNetworkSystem diff --git a/vendor/github.com/vmware/govmomi/object/host_storage_system.go b/vendor/github.com/vmware/govmomi/object/host_storage_system.go index 5990a1d4e..5c9f08eee 100644 --- a/vendor/github.com/vmware/govmomi/object/host_storage_system.go +++ b/vendor/github.com/vmware/govmomi/object/host_storage_system.go @@ -172,3 +172,29 @@ func (s HostStorageSystem) AttachScsiLun(ctx context.Context, uuid string) error return err } + +func (s HostStorageSystem) QueryUnresolvedVmfsVolumes(ctx context.Context) ([]types.HostUnresolvedVmfsVolume, error) { + req := &types.QueryUnresolvedVmfsVolume{ + This: s.Reference(), + } + + res, err := methods.QueryUnresolvedVmfsVolume(ctx, s.Client(), req) + if err != nil { + return nil, err + } + return res.Returnval, nil +} + +func (s HostStorageSystem) UnmountVmfsVolume(ctx context.Context, vmfsUuid string) error { + req := &types.UnmountVmfsVolume{ + This: s.Reference(), + VmfsUuid: vmfsUuid, + } + + _, err := methods.UnmountVmfsVolume(ctx, s.Client(), req) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/object/host_system.go b/vendor/github.com/vmware/govmomi/object/host_system.go index a350edfde..ddf6cb8f1 100644 --- a/vendor/github.com/vmware/govmomi/object/host_system.go +++ b/vendor/github.com/vmware/govmomi/object/host_system.go @@ -21,6 +21,7 @@ import ( "fmt" "net" + "github.com/vmware/govmomi/internal" "github.com/vmware/govmomi/vim25" "github.com/vmware/govmomi/vim25/methods" "github.com/vmware/govmomi/vim25/mo" @@ -81,17 +82,17 @@ func (h HostSystem) ManagementIPs(ctx context.Context) ([]net.IP, error) { return nil, err } - var ips []net.IP - for _, nc := range mh.Config.VirtualNicManagerInfo.NetConfig { - if nc.NicType == "management" && len(nc.CandidateVnic) > 0 { - ip := net.ParseIP(nc.CandidateVnic[0].Spec.Ip.IpAddress) - if ip != nil { - ips = append(ips, ip) - } - } + config := mh.Config + if config == nil { + return nil, nil } - return ips, nil + info := config.VirtualNicManagerInfo + if info == nil { + return nil, nil + } + + return 
internal.HostSystemManagementIPs(info.NetConfig), nil } func (h HostSystem) Disconnect(ctx context.Context) (*Task, error) { diff --git a/vendor/github.com/vmware/govmomi/object/network.go b/vendor/github.com/vmware/govmomi/object/network.go index d1dc7ce01..f209a7ac4 100644 --- a/vendor/github.com/vmware/govmomi/object/network.go +++ b/vendor/github.com/vmware/govmomi/object/network.go @@ -20,7 +20,6 @@ import ( "context" "github.com/vmware/govmomi/vim25" - "github.com/vmware/govmomi/vim25/mo" "github.com/vmware/govmomi/vim25/types" ) @@ -34,21 +33,20 @@ func NewNetwork(c *vim25.Client, ref types.ManagedObjectReference) *Network { } } +func (n Network) GetInventoryPath() string { + return n.InventoryPath +} + // EthernetCardBackingInfo returns the VirtualDeviceBackingInfo for this Network func (n Network) EthernetCardBackingInfo(ctx context.Context) (types.BaseVirtualDeviceBackingInfo, error) { - var e mo.Network - - // Use Network.Name rather than Common.Name as the latter does not return the complete name if it contains a '/' - // We can't use Common.ObjectName here either as we need the ManagedEntity.Name field is not set since mo.Network - // has its own Name field. - err := n.Properties(ctx, n.Reference(), []string{"name"}, &e) + name, err := n.ObjectName(ctx) if err != nil { return nil, err } backing := &types.VirtualEthernetCardNetworkBackingInfo{ VirtualDeviceDeviceBackingInfo: types.VirtualDeviceDeviceBackingInfo{ - DeviceName: e.Name, + DeviceName: name, }, } diff --git a/vendor/github.com/vmware/govmomi/object/network_reference.go b/vendor/github.com/vmware/govmomi/object/network_reference.go index 7716bdb38..f1a41cd59 100644 --- a/vendor/github.com/vmware/govmomi/object/network_reference.go +++ b/vendor/github.com/vmware/govmomi/object/network_reference.go @@ -26,6 +26,6 @@ import ( // which can be used as the backing for a VirtualEthernetCard. 
type NetworkReference interface { Reference - + GetInventoryPath() string EthernetCardBackingInfo(ctx context.Context) (types.BaseVirtualDeviceBackingInfo, error) } diff --git a/vendor/github.com/vmware/govmomi/object/opaque_network.go b/vendor/github.com/vmware/govmomi/object/opaque_network.go index 47ce4cefe..6797d325d 100644 --- a/vendor/github.com/vmware/govmomi/object/opaque_network.go +++ b/vendor/github.com/vmware/govmomi/object/opaque_network.go @@ -35,19 +35,17 @@ func NewOpaqueNetwork(c *vim25.Client, ref types.ManagedObjectReference) *Opaque } } +func (n OpaqueNetwork) GetInventoryPath() string { + return n.InventoryPath +} + // EthernetCardBackingInfo returns the VirtualDeviceBackingInfo for this Network func (n OpaqueNetwork) EthernetCardBackingInfo(ctx context.Context) (types.BaseVirtualDeviceBackingInfo, error) { - var net mo.OpaqueNetwork - - if err := n.Properties(ctx, n.Reference(), []string{"summary"}, &net); err != nil { + summary, err := n.Summary(ctx) + if err != nil { return nil, err } - summary, ok := net.Summary.(*types.OpaqueNetworkSummary) - if !ok { - return nil, fmt.Errorf("%s unsupported network type: %T", n, net.Summary) - } - backing := &types.VirtualEthernetCardOpaqueNetworkBackingInfo{ OpaqueNetworkId: summary.OpaqueNetworkId, OpaqueNetworkType: summary.OpaqueNetworkType, @@ -55,3 +53,20 @@ func (n OpaqueNetwork) EthernetCardBackingInfo(ctx context.Context) (types.BaseV return backing, nil } + +// Summary returns the mo.OpaqueNetwork.Summary property +func (n OpaqueNetwork) Summary(ctx context.Context) (*types.OpaqueNetworkSummary, error) { + var props mo.OpaqueNetwork + + err := n.Properties(ctx, n.Reference(), []string{"summary"}, &props) + if err != nil { + return nil, err + } + + summary, ok := props.Summary.(*types.OpaqueNetworkSummary) + if !ok { + return nil, fmt.Errorf("%s unsupported network summary type: %T", n, props.Summary) + } + + return summary, nil +} diff --git a/vendor/github.com/vmware/govmomi/object/resource_pool.go b/vendor/github.com/vmware/govmomi/object/resource_pool.go index 55c2e2b2f..e510006b4 100644 --- a/vendor/github.com/vmware/govmomi/object/resource_pool.go +++ b/vendor/github.com/vmware/govmomi/object/resource_pool.go @@ -22,6 +22,7 @@ import ( "github.com/vmware/govmomi/nfc" "github.com/vmware/govmomi/vim25" "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" "github.com/vmware/govmomi/vim25/types" ) @@ -35,6 +36,18 @@ func NewResourcePool(c *vim25.Client, ref types.ManagedObjectReference) *Resourc } } +// Owner returns the ResourcePool owner as a ClusterComputeResource or ComputeResource. 
+func (p ResourcePool) Owner(ctx context.Context) (Reference, error) { + var pool mo.ResourcePool + + err := p.Properties(ctx, p.Reference(), []string{"owner"}, &pool) + if err != nil { + return nil, err + } + + return NewReference(p.Client(), pool.Owner), nil +} + func (p ResourcePool) ImportVApp(ctx context.Context, spec types.BaseImportSpec, folder *Folder, host *HostSystem) (*nfc.Lease, error) { req := types.ImportVApp{ This: p.Reference(), diff --git a/vendor/github.com/vmware/govmomi/object/search_index.go b/vendor/github.com/vmware/govmomi/object/search_index.go index 4b0a525d5..bcf5e29f2 100644 --- a/vendor/github.com/vmware/govmomi/object/search_index.go +++ b/vendor/github.com/vmware/govmomi/object/search_index.go @@ -161,3 +161,88 @@ func (s SearchIndex) FindChild(ctx context.Context, entity Reference, name strin } return NewReference(s.c, *res.Returnval), nil } + +// FindAllByDnsName finds all virtual machines or hosts by DNS name. +func (s SearchIndex) FindAllByDnsName(ctx context.Context, dc *Datacenter, dnsName string, vmSearch bool) ([]Reference, error) { + req := types.FindAllByDnsName{ + This: s.Reference(), + DnsName: dnsName, + VmSearch: vmSearch, + } + if dc != nil { + ref := dc.Reference() + req.Datacenter = &ref + } + + res, err := methods.FindAllByDnsName(ctx, s.c, &req) + if err != nil { + return nil, err + } + + if len(res.Returnval) == 0 { + return nil, nil + } + + var references []Reference + for _, returnval := range res.Returnval { + references = append(references, NewReference(s.c, returnval)) + } + return references, nil +} + +// FindAllByIp finds all virtual machines or hosts by IP address. +func (s SearchIndex) FindAllByIp(ctx context.Context, dc *Datacenter, ip string, vmSearch bool) ([]Reference, error) { + req := types.FindAllByIp{ + This: s.Reference(), + Ip: ip, + VmSearch: vmSearch, + } + if dc != nil { + ref := dc.Reference() + req.Datacenter = &ref + } + + res, err := methods.FindAllByIp(ctx, s.c, &req) + if err != nil { + return nil, err + } + + if len(res.Returnval) == 0 { + return nil, nil + } + + var references []Reference + for _, returnval := range res.Returnval { + references = append(references, NewReference(s.c, returnval)) + } + return references, nil +} + +// FindAllByUuid finds all virtual machines or hosts by UUID. 
+func (s SearchIndex) FindAllByUuid(ctx context.Context, dc *Datacenter, uuid string, vmSearch bool, instanceUuid *bool) ([]Reference, error) { + req := types.FindAllByUuid{ + This: s.Reference(), + Uuid: uuid, + VmSearch: vmSearch, + InstanceUuid: instanceUuid, + } + if dc != nil { + ref := dc.Reference() + req.Datacenter = &ref + } + + res, err := methods.FindAllByUuid(ctx, s.c, &req) + if err != nil { + return nil, err + } + + if len(res.Returnval) == 0 { + return nil, nil + } + + var references []Reference + for _, returnval := range res.Returnval { + references = append(references, NewReference(s.c, returnval)) + } + return references, nil +} diff --git a/vendor/github.com/vmware/govmomi/object/task.go b/vendor/github.com/vmware/govmomi/object/task.go index 2b66aa93b..088dd7f56 100644 --- a/vendor/github.com/vmware/govmomi/object/task.go +++ b/vendor/github.com/vmware/govmomi/object/task.go @@ -48,9 +48,13 @@ func (t *Task) Wait(ctx context.Context) error { return err } -func (t *Task) WaitForResult(ctx context.Context, s progress.Sinker) (*types.TaskInfo, error) { +func (t *Task) WaitForResult(ctx context.Context, s ...progress.Sinker) (*types.TaskInfo, error) { + var pr progress.Sinker + if len(s) == 1 { + pr = s[0] + } p := property.DefaultCollector(t.c) - return task.Wait(ctx, t.Reference(), p, s) + return task.Wait(ctx, t.Reference(), p, pr) } func (t *Task) Cancel(ctx context.Context) error { @@ -60,3 +64,37 @@ func (t *Task) Cancel(ctx context.Context) error { return err } + +// SetState sets task state and optionally sets results or fault, as appropriate for state. +func (t *Task) SetState(ctx context.Context, state types.TaskInfoState, result types.AnyType, fault *types.LocalizedMethodFault) error { + req := types.SetTaskState{ + This: t.Reference(), + State: state, + Result: result, + Fault: fault, + } + _, err := methods.SetTaskState(ctx, t.Common.Client(), &req) + return err +} + +// SetDescription updates task description to describe the current phase of the task. +func (t *Task) SetDescription(ctx context.Context, description types.LocalizableMessage) error { + req := types.SetTaskDescription{ + This: t.Reference(), + Description: description, + } + _, err := methods.SetTaskDescription(ctx, t.Common.Client(), &req) + return err +} + +// UpdateProgress Sets percentage done for this task and recalculates overall percentage done. +// If a percentDone value of less than zero or greater than 100 is specified, +// a value of zero or 100 respectively is used. 
+func (t *Task) UpdateProgress(ctx context.Context, percentDone int) error { + req := types.UpdateProgress{ + This: t.Reference(), + PercentDone: int32(percentDone), + } + _, err := methods.UpdateProgress(ctx, t.Common.Client(), &req) + return err +} diff --git a/vendor/github.com/vmware/govmomi/object/virtual_app.go b/vendor/github.com/vmware/govmomi/object/virtual_app.go index 4811178f1..b7311b3e3 100644 --- a/vendor/github.com/vmware/govmomi/object/virtual_app.go +++ b/vendor/github.com/vmware/govmomi/object/virtual_app.go @@ -103,3 +103,19 @@ func (p VirtualApp) Suspend(ctx context.Context) (*Task, error) { return NewTask(p.c, res.Returnval), nil } + +func (p VirtualApp) Clone(ctx context.Context, name string, target types.ManagedObjectReference, spec types.VAppCloneSpec) (*Task, error) { + req := types.CloneVApp_Task{ + This: p.Reference(), + Name: name, + Target: target, + Spec: spec, + } + + res, err := methods.CloneVApp_Task(ctx, p.c, &req) + if err != nil { + return nil, err + } + + return NewTask(p.c, res.Returnval), nil +} diff --git a/vendor/github.com/vmware/govmomi/object/virtual_device_list.go b/vendor/github.com/vmware/govmomi/object/virtual_device_list.go index e1c35eff8..c45f553ab 100644 --- a/vendor/github.com/vmware/govmomi/object/virtual_device_list.go +++ b/vendor/github.com/vmware/govmomi/object/virtual_device_list.go @@ -19,6 +19,7 @@ package object import ( "errors" "fmt" + "math/rand" "path/filepath" "reflect" "regexp" @@ -67,7 +68,13 @@ func EthernetCardTypes() VirtualDeviceList { &types.VirtualSriovEthernetCard{}, }).Select(func(device types.BaseVirtualDevice) bool { c := device.(types.BaseVirtualEthernetCard).GetVirtualEthernetCard() - c.GetVirtualDevice().Key = -1 + + key := rand.Int31() * -1 + if key == 0 { + key = -1 + } + + c.GetVirtualDevice().Key = key return true }) } @@ -134,6 +141,9 @@ func (l VirtualDeviceList) SelectByBackingInfo(backing types.BaseVirtualDeviceBa b := backing.(*types.VirtualEthernetCardDistributedVirtualPortBackingInfo) return a.Port.SwitchUuid == b.Port.SwitchUuid && a.Port.PortgroupKey == b.Port.PortgroupKey + case *types.VirtualEthernetCardOpaqueNetworkBackingInfo: + b := backing.(*types.VirtualEthernetCardOpaqueNetworkBackingInfo) + return a.OpaqueNetworkId == b.OpaqueNetworkId case *types.VirtualDiskFlatVer2BackingInfo: b := backing.(*types.VirtualDiskFlatVer2BackingInfo) if a.Parent != nil && b.Parent != nil { @@ -229,8 +239,10 @@ func (l VirtualDeviceList) FindSCSIController(name string) (*types.VirtualSCSICo func (l VirtualDeviceList) CreateSCSIController(name string) (types.BaseVirtualDevice, error) { ctypes := SCSIControllerTypes() - if name == "scsi" || name == "" { + if name == "" || name == "scsi" { name = ctypes.Type(ctypes[0]) + } else if name == "virtualscsi" { + name = "pvscsi" // ovf VirtualSCSI mapping } found := ctypes.Select(func(device types.BaseVirtualDevice) bool { @@ -431,7 +443,7 @@ func (l VirtualDeviceList) AssignController(device types.BaseVirtualDevice, c ty d.UnitNumber = new(int32) *d.UnitNumber = l.newUnitNumber(c) if d.Key == 0 { - d.Key = -1 + d.Key = int32(rand.Uint32()) * -1 } } diff --git a/vendor/github.com/vmware/govmomi/object/virtual_machine.go b/vendor/github.com/vmware/govmomi/object/virtual_machine.go index ba22e554f..bb731800c 100644 --- a/vendor/github.com/vmware/govmomi/object/virtual_machine.go +++ b/vendor/github.com/vmware/govmomi/object/virtual_machine.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2015-2017 VMware, Inc. All Rights Reserved. +Copyright (c) 2015-2021 VMware, Inc. 
All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -22,6 +22,7 @@ import ( "fmt" "net" "path" + "strings" "github.com/vmware/govmomi/nfc" "github.com/vmware/govmomi/property" @@ -33,12 +34,41 @@ import ( const ( PropRuntimePowerState = "summary.runtime.powerState" + PropConfigTemplate = "summary.config.template" ) type VirtualMachine struct { Common } +// extractDiskLayoutFiles is a helper function used to extract file keys for +// all disk files attached to the virtual machine at the current point of +// running. +func extractDiskLayoutFiles(diskLayoutList []types.VirtualMachineFileLayoutExDiskLayout) []int { + var result []int + + for _, layoutExDisk := range diskLayoutList { + for _, link := range layoutExDisk.Chain { + for i := range link.FileKey { // diskDescriptor, diskExtent pairs + result = append(result, int(link.FileKey[i])) + } + } + } + + return result +} + +// removeKey is a helper function for removing a specific file key from a list +// of keys associated with disks attached to a virtual machine. +func removeKey(l *[]int, key int) { + for i, k := range *l { + if k == key { + *l = append((*l)[:i], (*l)[i+1:]...) + break + } + } +} + func NewVirtualMachine(c *vim25.Client, ref types.ManagedObjectReference) *VirtualMachine { return &VirtualMachine{ Common: NewCommon(c, ref), @@ -56,6 +86,17 @@ func (v VirtualMachine) PowerState(ctx context.Context) (types.VirtualMachinePow return o.Summary.Runtime.PowerState, nil } +func (v VirtualMachine) IsTemplate(ctx context.Context) (bool, error) { + var o mo.VirtualMachine + + err := v.Properties(ctx, v.Reference(), []string{PropConfigTemplate}, &o) + if err != nil { + return false, err + } + + return o.Summary.Config.Template, nil +} + func (v VirtualMachine) PowerOn(ctx context.Context) (*Task, error) { req := types.PowerOnVM_Task{ This: v.Reference(), @@ -169,6 +210,20 @@ func (v VirtualMachine) Clone(ctx context.Context, folder *Folder, name string, return NewTask(v.c, res.Returnval), nil } +func (v VirtualMachine) InstantClone(ctx context.Context, config types.VirtualMachineInstantCloneSpec) (*Task, error) { + req := types.InstantClone_Task{ + This: v.Reference(), + Spec: config, + } + + res, err := methods.InstantClone_Task(ctx, v.c, &req) + if err != nil { + return nil, err + } + + return NewTask(v.c, res.Returnval), nil +} + func (v VirtualMachine) Customize(ctx context.Context, spec types.CustomizationSpec) (*Task, error) { req := types.CustomizeVM_Task{ This: v.Reference(), @@ -221,7 +276,9 @@ func (v VirtualMachine) RefreshStorageInfo(ctx context.Context) error { return err } -func (v VirtualMachine) WaitForIP(ctx context.Context) (string, error) { +// WaitForIP waits for the VM guest.ipAddress property to report an IP address. +// Waits for an IPv4 address if the v4 param is true. 
+func (v VirtualMachine) WaitForIP(ctx context.Context, v4 ...bool) (string, error) { var ip string p := property.DefaultCollector(v.c) @@ -238,6 +295,11 @@ func (v VirtualMachine) WaitForIP(ctx context.Context) (string, error) { } ip = c.Val.(string) + if len(v4) == 1 && v4[0] { + if net.ParseIP(ip).To4() == nil { + return false + } + } return true } @@ -272,7 +334,9 @@ func (v VirtualMachine) WaitForNetIP(ctx context.Context, v4 bool, device ...str devices := VirtualDeviceList(c.Val.(types.ArrayOfVirtualDevice).VirtualDevice) for _, d := range devices { if nic, ok := d.(types.BaseVirtualEthernetCard); ok { - mac := nic.GetVirtualEthernetCard().MacAddress + // Convert to lower so that e.g. 00:50:56:83:3A:5D is treated the + // same as 00:50:56:83:3a:5d + mac := strings.ToLower(nic.GetVirtualEthernetCard().MacAddress) if mac == "" { return false } @@ -285,6 +349,10 @@ func (v VirtualMachine) WaitForNetIP(ctx context.Context, v4 bool, device ...str return true }) + if err != nil { + return nil, err + } + if len(device) != 0 { // Only wait for specific NIC(s) macs = make(map[string][]string) @@ -304,7 +372,9 @@ func (v VirtualMachine) WaitForNetIP(ctx context.Context, v4 bool, device ...str nics := c.Val.(types.ArrayOfGuestNicInfo).GuestNicInfo for _, nic := range nics { - mac := nic.MacAddress + // Convert to lower so that e.g. 00:50:56:83:3A:5D is treated the + // same as 00:50:56:83:3a:5d + mac := strings.ToLower(nic.MacAddress) if mac == "" || nic.IpConfig == nil { continue } @@ -446,6 +516,41 @@ func (v VirtualMachine) RemoveDevice(ctx context.Context, keepFiles bool, device return v.configureDevice(ctx, types.VirtualDeviceConfigSpecOperationRemove, fop, device...) } +// AttachDisk attaches the given disk to the VirtualMachine +func (v VirtualMachine) AttachDisk(ctx context.Context, id string, datastore *Datastore, controllerKey int32, unitNumber int32) error { + req := types.AttachDisk_Task{ + This: v.Reference(), + DiskId: types.ID{Id: id}, + Datastore: datastore.Reference(), + ControllerKey: controllerKey, + UnitNumber: &unitNumber, + } + + res, err := methods.AttachDisk_Task(ctx, v.c, &req) + if err != nil { + return err + } + + task := NewTask(v.c, res.Returnval) + return task.Wait(ctx) +} + +// DetachDisk detaches the given disk from the VirtualMachine +func (v VirtualMachine) DetachDisk(ctx context.Context, id string) error { + req := types.DetachDisk_Task{ + This: v.Reference(), + DiskId: types.ID{Id: id}, + } + + res, err := methods.DetachDisk_Task(ctx, v.c, &req) + if err != nil { + return err + } + + task := NewTask(v.c, res.Returnval) + return task.Wait(ctx) +} + // BootOptions returns the VirtualMachine's config.bootOptions property. func (v VirtualMachine) BootOptions(ctx context.Context) (*types.VirtualMachineBootOptions, error) { var o mo.VirtualMachine @@ -556,6 +661,63 @@ func (m snapshotMap) add(parent string, tree []types.VirtualMachineSnapshotTree) } } +// SnapshotSize calculates the size of a given snapshot in bytes. If the +// snapshot is current, disk files not associated with any parent snapshot are +// included in size calculations. This allows for measuring and including the +// growth from the last fixed snapshot to the present state. 
+func SnapshotSize(info types.ManagedObjectReference, parent *types.ManagedObjectReference, vmlayout *types.VirtualMachineFileLayoutEx, isCurrent bool) int { + var fileKeyList []int + var parentFiles []int + var allSnapshotFiles []int + + diskFiles := extractDiskLayoutFiles(vmlayout.Disk) + + for _, layout := range vmlayout.Snapshot { + diskLayout := extractDiskLayoutFiles(layout.Disk) + allSnapshotFiles = append(allSnapshotFiles, diskLayout...) + + if layout.Key.Value == info.Value { + fileKeyList = append(fileKeyList, int(layout.DataKey)) // The .vmsn file + fileKeyList = append(fileKeyList, diskLayout...) // The .vmdk files + } else if parent != nil && layout.Key.Value == parent.Value { + parentFiles = append(parentFiles, diskLayout...) + } + } + + for _, parentFile := range parentFiles { + removeKey(&fileKeyList, parentFile) + } + + for _, file := range allSnapshotFiles { + removeKey(&diskFiles, file) + } + + fileKeyMap := make(map[int]types.VirtualMachineFileLayoutExFileInfo) + for _, file := range vmlayout.File { + fileKeyMap[int(file.Key)] = file + } + + size := 0 + + for _, fileKey := range fileKeyList { + file := fileKeyMap[fileKey] + if parent != nil || + (file.Type != string(types.VirtualMachineFileLayoutExFileTypeDiskDescriptor) && + file.Type != string(types.VirtualMachineFileLayoutExFileTypeDiskExtent)) { + size += int(file.Size) + } + } + + if isCurrent { + for _, diskFile := range diskFiles { + file := fileKeyMap[diskFile] + size += int(file.Size) + } + } + + return size +} + // FindSnapshot supports snapshot lookup by name, where name can be: // 1) snapshot ManagedObjectReference.Value (unique) // 2) snapshot name (may not be unique) @@ -569,7 +731,7 @@ func (v VirtualMachine) FindSnapshot(ctx context.Context, name string) (*types.M } if o.Snapshot == nil || len(o.Snapshot.RootSnapshotList) == 0 { - return nil, errors.New("No snapshots for this VM") + return nil, errors.New("no snapshots for this VM") } m := make(snapshotMap) @@ -832,6 +994,72 @@ func (v VirtualMachine) UUID(ctx context.Context) string { if err != nil { return "" } + if o.Config != nil { + return o.Config.Uuid + } + return "" +} + +func (v VirtualMachine) QueryChangedDiskAreas(ctx context.Context, baseSnapshot, curSnapshot *types.ManagedObjectReference, disk *types.VirtualDisk, offset int64) (types.DiskChangeInfo, error) { + var noChange types.DiskChangeInfo + var err error + + if offset > disk.CapacityInBytes { + return noChange, fmt.Errorf("offset is greater than the disk size (%#x and %#x)", offset, disk.CapacityInBytes) + } else if offset == disk.CapacityInBytes { + return types.DiskChangeInfo{StartOffset: offset, Length: 0}, nil + } + + var b mo.VirtualMachineSnapshot + err = v.Properties(ctx, baseSnapshot.Reference(), []string{"config.hardware"}, &b) + if err != nil { + return noChange, fmt.Errorf("failed to fetch config.hardware of snapshot %s: %s", baseSnapshot, err) + } + + var changeId *string + for _, vd := range b.Config.Hardware.Device { + d := vd.GetVirtualDevice() + if d.Key != disk.Key { + continue + } + + // As per VDDK programming guide, these are the four types of disks + // that support CBT, see "Gathering Changed Block Information". 
+ if b, ok := d.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok { + changeId = &b.ChangeId + break + } + if b, ok := d.Backing.(*types.VirtualDiskSparseVer2BackingInfo); ok { + changeId = &b.ChangeId + break + } + if b, ok := d.Backing.(*types.VirtualDiskRawDiskMappingVer1BackingInfo); ok { + changeId = &b.ChangeId + break + } + if b, ok := d.Backing.(*types.VirtualDiskRawDiskVer2BackingInfo); ok { + changeId = &b.ChangeId + break + } + + return noChange, fmt.Errorf("disk %d has backing info without .ChangeId: %t", disk.Key, d.Backing) + } + if changeId == nil || *changeId == "" { + return noChange, fmt.Errorf("CBT is not enabled on disk %d", disk.Key) + } + + req := types.QueryChangedDiskAreas{ + This: v.Reference(), + Snapshot: curSnapshot, + DeviceKey: disk.Key, + StartOffset: offset, + ChangeId: *changeId, + } + + res, err := methods.QueryChangedDiskAreas(ctx, v.Client(), &req) + if err != nil { + return noChange, err + } - return o.Config.Uuid + return res.Returnval, nil } diff --git a/vendor/github.com/vmware/govmomi/object/vmware_distributed_virtual_switch.go b/vendor/github.com/vmware/govmomi/object/vmware_distributed_virtual_switch.go index f6caf9870..8eccba19b 100644 --- a/vendor/github.com/vmware/govmomi/object/vmware_distributed_virtual_switch.go +++ b/vendor/github.com/vmware/govmomi/object/vmware_distributed_virtual_switch.go @@ -19,3 +19,7 @@ package object type VmwareDistributedVirtualSwitch struct { DistributedVirtualSwitch } + +func (s VmwareDistributedVirtualSwitch) GetInventoryPath() string { + return s.InventoryPath +} diff --git a/vendor/github.com/vmware/govmomi/pbm/client.go b/vendor/github.com/vmware/govmomi/pbm/client.go index 3e799ebfb..ba2531ec9 100644 --- a/vendor/github.com/vmware/govmomi/pbm/client.go +++ b/vendor/github.com/vmware/govmomi/pbm/client.go @@ -43,6 +43,8 @@ type Client struct { *soap.Client ServiceContent types.PbmServiceInstanceContent + + RoundTripper soap.RoundTripper } func NewClient(ctx context.Context, c *vim25.Client) (*Client, error) { @@ -57,7 +59,12 @@ func NewClient(ctx context.Context, c *vim25.Client) (*Client, error) { return nil, err } - return &Client{sc, res.Returnval}, nil + return &Client{sc, res.Returnval, sc}, nil +} + +// RoundTrip dispatches to the RoundTripper field. 
+func (c *Client) RoundTrip(ctx context.Context, req, res soap.HasFault) error { + return c.RoundTripper.RoundTrip(ctx, req, res) } func (c *Client) QueryProfile(ctx context.Context, rtype types.PbmProfileResourceType, category string) ([]types.PbmProfileId, error) { @@ -224,3 +231,57 @@ func (c *Client) ProfileIDByName(ctx context.Context, profileName string) (strin } return "", fmt.Errorf("no pbm profile found with name: %q", profileName) } + +func (c *Client) FetchCapabilityMetadata(ctx context.Context, rtype *types.PbmProfileResourceType, vendorUuid string) ([]types.PbmCapabilityMetadataPerCategory, error) { + req := types.PbmFetchCapabilityMetadata{ + This: c.ServiceContent.ProfileManager, + ResourceType: rtype, + VendorUuid: vendorUuid, + } + + res, err := methods.PbmFetchCapabilityMetadata(ctx, c, &req) + if err != nil { + return nil, err + } + + return res.Returnval, nil +} + +func (c *Client) FetchComplianceResult(ctx context.Context, entities []types.PbmServerObjectRef) ([]types.PbmComplianceResult, error) { + req := types.PbmFetchComplianceResult{ + This: c.ServiceContent.ComplianceManager, + Entities: entities, + } + + res, err := methods.PbmFetchComplianceResult(ctx, c, &req) + if err != nil { + return nil, err + } + + return res.Returnval, nil +} + +// GetProfileNameByID gets storage profile name by ID +func (c *Client) GetProfileNameByID(ctx context.Context, profileID string) (string, error) { + resourceType := types.PbmProfileResourceType{ + ResourceType: string(types.PbmProfileResourceTypeEnumSTORAGE), + } + category := types.PbmProfileCategoryEnumREQUIREMENT + ids, err := c.QueryProfile(ctx, resourceType, string(category)) + if err != nil { + return "", err + } + + profiles, err := c.RetrieveContent(ctx, ids) + if err != nil { + return "", err + } + + for i := range profiles { + profile := profiles[i].GetPbmProfile() + if profile.ProfileId.UniqueId == profileID { + return profile.Name, nil + } + } + return "", fmt.Errorf("no pbm profile found with id: %q", profileID) +} diff --git a/vendor/github.com/vmware/govmomi/pbm/methods/methods.go b/vendor/github.com/vmware/govmomi/pbm/methods/methods.go index 177677306..075c0f746 100644 --- a/vendor/github.com/vmware/govmomi/pbm/methods/methods.go +++ b/vendor/github.com/vmware/govmomi/pbm/methods/methods.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2014-2018 VMware, Inc. All Rights Reserved. +Copyright (c) 2014-2021 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vmware/govmomi/pbm/pbm_util.go b/vendor/github.com/vmware/govmomi/pbm/pbm_util.go index 4cc8085f3..d773b8dbb 100644 --- a/vendor/github.com/vmware/govmomi/pbm/pbm_util.go +++ b/vendor/github.com/vmware/govmomi/pbm/pbm_util.go @@ -27,6 +27,7 @@ import ( // A struct to capture pbm create spec details. 
type CapabilityProfileCreateSpec struct { Name string + SubProfileName string Description string Category string CapabilityList []Capability @@ -64,6 +65,7 @@ func CreateCapabilityProfileSpec(pbmCreateSpec CapabilityProfileCreateSpec) (*ty SubProfiles: []types.PbmCapabilitySubProfile{ types.PbmCapabilitySubProfile{ Capability: capabilities, + Name: pbmCreateSpec.SubProfileName, }, }, }, diff --git a/vendor/github.com/vmware/govmomi/pbm/types/enum.go b/vendor/github.com/vmware/govmomi/pbm/types/enum.go index ddd1a5f78..6ebfa3729 100644 --- a/vendor/github.com/vmware/govmomi/pbm/types/enum.go +++ b/vendor/github.com/vmware/govmomi/pbm/types/enum.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2014-2018 VMware, Inc. All Rights Reserved. +Copyright (c) 2014-2021 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -22,6 +22,18 @@ import ( "github.com/vmware/govmomi/vim25/types" ) +type PbmAssociateAndApplyPolicyStatusPolicyStatus string + +const ( + PbmAssociateAndApplyPolicyStatusPolicyStatusSuccess = PbmAssociateAndApplyPolicyStatusPolicyStatus("success") + PbmAssociateAndApplyPolicyStatusPolicyStatusFailed = PbmAssociateAndApplyPolicyStatusPolicyStatus("failed") + PbmAssociateAndApplyPolicyStatusPolicyStatusInvalid = PbmAssociateAndApplyPolicyStatusPolicyStatus("invalid") +) + +func init() { + types.Add("pbm:PbmAssociateAndApplyPolicyStatusPolicyStatus", reflect.TypeOf((*PbmAssociateAndApplyPolicyStatusPolicyStatus)(nil)).Elem()) +} + type PbmBuiltinGenericType string const ( @@ -104,6 +116,19 @@ func init() { types.Add("pbm:PbmComplianceStatus", reflect.TypeOf((*PbmComplianceStatus)(nil)).Elem()) } +type PbmHealthStatusForEntity string + +const ( + PbmHealthStatusForEntityRed = PbmHealthStatusForEntity("red") + PbmHealthStatusForEntityYellow = PbmHealthStatusForEntity("yellow") + PbmHealthStatusForEntityGreen = PbmHealthStatusForEntity("green") + PbmHealthStatusForEntityUnknown = PbmHealthStatusForEntity("unknown") +) + +func init() { + types.Add("pbm:PbmHealthStatusForEntity", reflect.TypeOf((*PbmHealthStatusForEntity)(nil)).Elem()) +} + type PbmIofilterInfoFilterType string const ( @@ -131,6 +156,7 @@ const ( PbmLineOfServiceInfoLineOfServiceEnumPERSISTENCE = PbmLineOfServiceInfoLineOfServiceEnum("PERSISTENCE") PbmLineOfServiceInfoLineOfServiceEnumDATA_PROVIDER = PbmLineOfServiceInfoLineOfServiceEnum("DATA_PROVIDER") PbmLineOfServiceInfoLineOfServiceEnumDATASTORE_IO_CONTROL = PbmLineOfServiceInfoLineOfServiceEnum("DATASTORE_IO_CONTROL") + PbmLineOfServiceInfoLineOfServiceEnumDATA_PROTECTION = PbmLineOfServiceInfoLineOfServiceEnum("DATA_PROTECTION") ) func init() { @@ -145,6 +171,8 @@ const ( PbmObjectTypeVirtualDiskId = PbmObjectType("virtualDiskId") PbmObjectTypeVirtualDiskUUID = PbmObjectType("virtualDiskUUID") PbmObjectTypeDatastore = PbmObjectType("datastore") + PbmObjectTypeVsanObjectId = PbmObjectType("vsanObjectId") + PbmObjectTypeFileShareId = PbmObjectType("fileShareId") PbmObjectTypeUnknown = PbmObjectType("unknown") ) diff --git a/vendor/github.com/vmware/govmomi/pbm/types/if.go b/vendor/github.com/vmware/govmomi/pbm/types/if.go index 832df5e1d..0cc311acb 100644 --- a/vendor/github.com/vmware/govmomi/pbm/types/if.go +++ b/vendor/github.com/vmware/govmomi/pbm/types/if.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2014-2018 VMware, Inc. All Rights Reserved. +Copyright (c) 2014-2021 VMware, Inc. All Rights Reserved. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vmware/govmomi/pbm/types/types.go b/vendor/github.com/vmware/govmomi/pbm/types/types.go index 015482d08..206aa03ea 100644 --- a/vendor/github.com/vmware/govmomi/pbm/types/types.go +++ b/vendor/github.com/vmware/govmomi/pbm/types/types.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2014-2018 VMware, Inc. All Rights Reserved. +Copyright (c) 2014-2021 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -997,6 +997,17 @@ type PbmFetchComplianceResultResponse struct { Returnval []PbmComplianceResult `xml:"returnval,omitempty"` } +type PbmFetchEntityHealthStatusSpec struct { + types.DynamicData + + ObjectRef PbmServerObjectRef `xml:"objectRef"` + BackingId string `xml:"backingId,omitempty"` +} + +func init() { + types.Add("pbm:PbmFetchEntityHealthStatusSpec", reflect.TypeOf((*PbmFetchEntityHealthStatusSpec)(nil)).Elem()) +} + type PbmFetchResourceType PbmFetchResourceTypeRequestType func init() { diff --git a/vendor/github.com/vmware/govmomi/program.mk b/vendor/github.com/vmware/govmomi/program.mk index 9c6996aea..f24939934 100644 --- a/vendor/github.com/vmware/govmomi/program.mk +++ b/vendor/github.com/vmware/govmomi/program.mk @@ -1,3 +1,6 @@ +# Copyright (c) 2021 VMware, Inc. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + ifneq (,$(strip $(GOOS))) ifeq (,$(strip $(GOARCH))) GOARCH := $(shell go env | grep GOARCH | awk -F= '{print $$2}' | tr -d '"') @@ -30,7 +33,7 @@ $(PROGRAM): CGO_ENABLED=0 go build -a $(BUILD_ARGS) -o $@ install: - CGO_ENABLED=0 go install $(BUILD_ARGS) + CGO_ENABLED=0 go install -v $(BUILD_ARGS) ifneq (,$(strip $(BUILD_OS))) ifneq (,$(strip $(BUILD_ARCH))) diff --git a/vendor/github.com/vmware/govmomi/property/collector.go b/vendor/github.com/vmware/govmomi/property/collector.go index b77e60061..8798ceacb 100644 --- a/vendor/github.com/vmware/govmomi/property/collector.go +++ b/vendor/github.com/vmware/govmomi/property/collector.go @@ -97,10 +97,16 @@ func (p *Collector) CreateFilter(ctx context.Context, req types.CreateFilter) er return nil } -func (p *Collector) WaitForUpdates(ctx context.Context, v string) (*types.UpdateSet, error) { +func (p *Collector) WaitForUpdates(ctx context.Context, version string, opts ...*types.WaitOptions) (*types.UpdateSet, error) { req := types.WaitForUpdatesEx{ This: p.Reference(), - Version: v, + Version: version, + } + + if len(opts) == 1 { + req.Options = opts[0] + } else if len(opts) > 1 { + panic("only one option may be specified") } res, err := methods.WaitForUpdatesEx(ctx, p.roundTripper, &req) @@ -143,7 +149,7 @@ func (p *Collector) Retrieve(ctx context.Context, objs []types.ManagedObjectRefe spec := types.PropertySpec{ Type: obj.Type, } - if ps == nil { + if len(ps) == 0 { spec.All = types.NewBool(true) } else { spec.PathSet = ps @@ -179,7 +185,7 @@ func (p *Collector) Retrieve(ctx context.Context, objs []types.ManagedObjectRefe return nil } - return mo.LoadRetrievePropertiesResponse(res, dst) + return mo.LoadObjectContent(res.Returnval, dst) } // RetrieveWithFilter populates dst as Retrieve does, but only for entities matching the given filter. 
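The collector.go hunk above makes WaitForUpdates accept an optional *types.WaitOptions. A minimal caller-side sketch against the patched API follows; the vimClient variable and the 60-second bound are illustrative assumptions, not part of the patch:

import (
	"context"

	"github.com/vmware/govmomi/property"
	"github.com/vmware/govmomi/vim25"
	"github.com/vmware/govmomi/vim25/types"
)

// waitOnce issues a single bounded WaitForUpdatesEx round trip via the property collector.
func waitOnce(ctx context.Context, vimClient *vim25.Client) (*types.UpdateSet, error) {
	pc := property.DefaultCollector(vimClient)
	opts := &types.WaitOptions{
		MaxWaitSeconds: types.NewInt32(60), // return after at most 60s even if nothing changed
	}
	// "" starts from the initial version; pass the previous UpdateSet.Version on later calls.
	return pc.WaitForUpdates(ctx, "", opts)
}

Passing no options preserves the previous single-argument behaviour, which is why the new parameter is variadic.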
diff --git a/vendor/github.com/vmware/govmomi/property/filter.go b/vendor/github.com/vmware/govmomi/property/filter.go index a4bf16d05..2287bbfd9 100644 --- a/vendor/github.com/vmware/govmomi/property/filter.go +++ b/vendor/github.com/vmware/govmomi/property/filter.go @@ -42,6 +42,9 @@ func (f Filter) Keys() []string { // MatchProperty returns true if a Filter entry matches the given prop. func (f Filter) MatchProperty(prop types.DynamicProperty) bool { + if prop.Val == nil { + return false + } match, ok := f[prop.Name] if !ok { return false @@ -74,7 +77,7 @@ func (f Filter) MatchProperty(prop types.DynamicProperty) bool { } // convert if we can - switch prop.Val.(type) { + switch val := prop.Val.(type) { case bool: match, _ = strconv.ParseBool(s) case int16: @@ -91,7 +94,9 @@ func (f Filter) MatchProperty(prop types.DynamicProperty) bool { case float64: match, _ = strconv.ParseFloat(s, 64) case fmt.Stringer: - prop.Val = prop.Val.(fmt.Stringer).String() + prop.Val = val.String() + case *types.CustomFieldStringValue: + prop.Val = fmt.Sprintf("%d:%s", val.Key, val.Value) default: if ptype.Kind() != reflect.String { return false @@ -125,7 +130,7 @@ func (f Filter) MatchPropertyList(props []types.DynamicProperty) bool { return len(f) == len(props) // false if a property such as VM "guest" is unset } -// MatchObjectContent returns a list of ObjectContent.Obj where the ObjectContent.PropSet matches the Filter. +// MatchObjectContent returns a list of ObjectContent.Obj where the ObjectContent.PropSet matches all properties the Filter. func (f Filter) MatchObjectContent(objects []types.ObjectContent) []types.ManagedObjectReference { var refs []types.ManagedObjectReference @@ -137,3 +142,27 @@ func (f Filter) MatchObjectContent(objects []types.ObjectContent) []types.Manage return refs } + +// MatchAnyPropertyList returns true if any given props match the Filter. +func (f Filter) MatchAnyPropertyList(props []types.DynamicProperty) bool { + for _, p := range props { + if f.MatchProperty(p) { + return true + } + } + + return false +} + +// MatchAnyObjectContent returns a list of ObjectContent.Obj where the ObjectContent.PropSet matches any property in the Filter. +func (f Filter) MatchAnyObjectContent(objects []types.ObjectContent) []types.ManagedObjectReference { + var refs []types.ManagedObjectReference + + for _, o := range objects { + if f.MatchAnyPropertyList(o.PropSet) { + refs = append(refs, o.Obj) + } + } + + return refs +} diff --git a/vendor/github.com/vmware/govmomi/property/wait.go b/vendor/github.com/vmware/govmomi/property/wait.go index f730525ca..fbb680771 100644 --- a/vendor/github.com/vmware/govmomi/property/wait.go +++ b/vendor/github.com/vmware/govmomi/property/wait.go @@ -20,13 +20,16 @@ import ( "context" "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/soap" "github.com/vmware/govmomi/vim25/types" ) // WaitFilter provides helpers to construct a types.CreateFilter for use with property.Wait type WaitFilter struct { types.CreateFilter - Options *types.WaitOptions + Options *types.WaitOptions + PropagateMissing bool + Truncated bool } // Add a new ObjectSpec and PropertySpec to the WaitFilter @@ -81,6 +84,8 @@ func Wait(ctx context.Context, c *Collector, obj types.ManagedObjectReference, p // The newly created collector is destroyed before this function returns (both // in case of success or error). 
// +// By default, ObjectUpdate.MissingSet faults are not propagated to the returned error, +// set WaitFilter.PropagateMissing=true to enable MissingSet fault propagation. func WaitForUpdates(ctx context.Context, c *Collector, filter *WaitFilter, f func([]types.ObjectUpdate) bool) error { p, err := c.Create(ctx) if err != nil { @@ -89,7 +94,9 @@ func WaitForUpdates(ctx context.Context, c *Collector, filter *WaitFilter, f fun // Attempt to destroy the collector using the background context, as the // specified context may have timed out or have been canceled. - defer p.Destroy(context.Background()) + defer func() { + _ = p.Destroy(context.Background()) + }() err = p.CreateFilter(ctx, filter.CreateFilter) if err != nil { @@ -121,8 +128,21 @@ func WaitForUpdates(ctx context.Context, c *Collector, filter *WaitFilter, f fun } req.Version = set.Version + filter.Truncated = false + if set.Truncated != nil { + filter.Truncated = *set.Truncated + } for _, fs := range set.FilterSet { + if filter.PropagateMissing { + for i := range fs.ObjectSet { + for _, p := range fs.ObjectSet[i].MissingSet { + // Same behavior as mo.ObjectContentToType() + return soap.WrapVimFault(p.Fault.Fault) + } + } + } + if f(fs.ObjectSet) { return nil } diff --git a/vendor/github.com/vmware/govmomi/session/keep_alive.go b/vendor/github.com/vmware/govmomi/session/keep_alive.go index 3b44f5ffb..7c1bebf91 100644 --- a/vendor/github.com/vmware/govmomi/session/keep_alive.go +++ b/vendor/github.com/vmware/govmomi/session/keep_alive.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2015 VMware, Inc. All Rights Reserved. +Copyright (c) 2015-2020 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,110 +17,24 @@ limitations under the License. package session import ( - "context" - "sync" "time" - "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/session/keepalive" "github.com/vmware/govmomi/vim25/soap" ) -type keepAlive struct { - sync.Mutex - - roundTripper soap.RoundTripper - idleTime time.Duration - notifyRequest chan struct{} - notifyStop chan struct{} - notifyWaitGroup sync.WaitGroup - - // keepAlive executes a request in the background with the purpose of - // keeping the session active. The response for this request is discarded. - keepAlive func(soap.RoundTripper) error -} - -func defaultKeepAlive(roundTripper soap.RoundTripper) error { - _, _ = methods.GetCurrentTime(context.Background(), roundTripper) - return nil -} - -// KeepAlive wraps the specified soap.RoundTripper and executes a meaningless -// API request in the background after the RoundTripper has been idle for the -// specified amount of idle time. The keep alive process only starts once a -// user logs in and runs until the user logs out again. +// KeepAlive is a backward compatible wrapper around KeepAliveHandler. func KeepAlive(roundTripper soap.RoundTripper, idleTime time.Duration) soap.RoundTripper { - return KeepAliveHandler(roundTripper, idleTime, defaultKeepAlive) + return KeepAliveHandler(roundTripper, idleTime, nil) } -// KeepAliveHandler works as KeepAlive() does, but the handler param can decide how to handle errors. -// For example, if connectivity to ESX/VC is down long enough for a session to expire, a handler can choose to -// Login() on a types.NotAuthenticated error. If handler returns non-nil, the keep alive go routine will be stopped. +// KeepAliveHandler is a backward compatible wrapper around keepalive.NewHandlerSOAP. 
func KeepAliveHandler(roundTripper soap.RoundTripper, idleTime time.Duration, handler func(soap.RoundTripper) error) soap.RoundTripper { - k := &keepAlive{ - roundTripper: roundTripper, - idleTime: idleTime, - notifyRequest: make(chan struct{}), - } - - k.keepAlive = handler - - return k -} - -func (k *keepAlive) start() { - k.Lock() - defer k.Unlock() - - if k.notifyStop != nil { - return - } - - // This channel must be closed to terminate idle timer. - k.notifyStop = make(chan struct{}) - k.notifyWaitGroup.Add(1) - - go func() { - defer k.notifyWaitGroup.Done() - - for t := time.NewTimer(k.idleTime); ; { - select { - case <-k.notifyStop: - return - case <-k.notifyRequest: - t.Reset(k.idleTime) - case <-t.C: - if err := k.keepAlive(k.roundTripper); err != nil { - k.stop() - } - t = time.NewTimer(k.idleTime) - } + var f func() error + if handler != nil { + f = func() error { + return handler(roundTripper) } - }() -} - -func (k *keepAlive) stop() { - k.Lock() - defer k.Unlock() - - if k.notifyStop != nil { - close(k.notifyStop) - k.notifyWaitGroup.Wait() - k.notifyStop = nil - } -} - -func (k *keepAlive) RoundTrip(ctx context.Context, req, res soap.HasFault) error { - err := k.roundTripper.RoundTrip(ctx, req, res) - if err != nil { - return err - } - // Start ticker on login, stop ticker on logout. - switch req.(type) { - case *methods.LoginBody, *methods.LoginExtensionByCertificateBody, *methods.LoginByTokenBody: - k.start() - case *methods.LogoutBody: - k.stop() } - - return nil + return keepalive.NewHandlerSOAP(roundTripper, idleTime, f) } diff --git a/vendor/github.com/vmware/govmomi/session/keepalive/handler.go b/vendor/github.com/vmware/govmomi/session/keepalive/handler.go new file mode 100644 index 000000000..3ebf046a5 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/session/keepalive/handler.go @@ -0,0 +1,204 @@ +/* +Copyright (c) 2020 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package keepalive + +import ( + "context" + "errors" + "net/http" + "sync" + "time" + + "github.com/vmware/govmomi/vapi/rest" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/soap" +) + +// handler contains the generic keep alive settings and logic +type handler struct { + mu sync.Mutex + notifyStop chan struct{} + notifyWaitGroup sync.WaitGroup + + idle time.Duration + send func() error +} + +// NewHandlerSOAP returns a soap.RoundTripper for use with a vim25.Client +// The idle time specifies the interval in between send() requests. Defaults to 10 minutes. +// The send func is used to keep a session alive. Defaults to calling vim25 GetCurrentTime(). +// The keep alive goroutine starts when a Login method is called and runs until Logout is called or send returns an error. 
+func NewHandlerSOAP(c soap.RoundTripper, idle time.Duration, send func() error) *HandlerSOAP { + h := &handler{ + idle: idle, + send: send, + } + + if send == nil { + h.send = func() error { + return h.keepAliveSOAP(c) + } + } + + return &HandlerSOAP{h, c} +} + +// NewHandlerREST returns an http.RoundTripper for use with a rest.Client +// The idle time specifies the interval in between send() requests. Defaults to 10 minutes. +// The send func is used to keep a session alive. Defaults to calling the rest.Client.Session() method +// The keep alive goroutine starts when a Login method is called and runs until Logout is called or send returns an error. +func NewHandlerREST(c *rest.Client, idle time.Duration, send func() error) *HandlerREST { + h := &handler{ + idle: idle, + send: send, + } + + if send == nil { + h.send = func() error { + return h.keepAliveREST(c) + } + } + + return &HandlerREST{h, c.Transport} +} + +func (h *handler) keepAliveSOAP(rt soap.RoundTripper) error { + ctx := context.Background() + _, err := methods.GetCurrentTime(ctx, rt) + return err +} + +func (h *handler) keepAliveREST(c *rest.Client) error { + ctx := context.Background() + + s, err := c.Session(ctx) + if err != nil { + return err + } + if s != nil { + return nil + } + return errors.New(http.StatusText(http.StatusUnauthorized)) +} + +// Start explicitly starts the keep alive go routine. +// For use with session cache.Client, as cached sessions may not involve Login/Logout via RoundTripper. +func (h *handler) Start() { + h.mu.Lock() + defer h.mu.Unlock() + + if h.notifyStop != nil { + return + } + + if h.idle == 0 { + h.idle = time.Minute * 10 + } + + // This channel must be closed to terminate idle timer. + h.notifyStop = make(chan struct{}) + h.notifyWaitGroup.Add(1) + + go func() { + for t := time.NewTimer(h.idle); ; { + select { + case <-h.notifyStop: + h.notifyWaitGroup.Done() + t.Stop() + return + case <-t.C: + if err := h.send(); err != nil { + h.notifyWaitGroup.Done() + h.Stop() + return + } + t.Reset(h.idle) + } + } + }() +} + +// Stop explicitly stops the keep alive go routine. +// For use with session cache.Client, as cached sessions may not involve Login/Logout via RoundTripper. +func (h *handler) Stop() { + h.mu.Lock() + defer h.mu.Unlock() + + if h.notifyStop != nil { + close(h.notifyStop) + h.notifyWaitGroup.Wait() + h.notifyStop = nil + } +} + +// HandlerSOAP is a keep alive implementation for use with vim25.Client +type HandlerSOAP struct { + *handler + + roundTripper soap.RoundTripper +} + +// RoundTrip implements soap.RoundTripper +func (h *HandlerSOAP) RoundTrip(ctx context.Context, req, res soap.HasFault) error { + // Stop ticker on logout. + switch req.(type) { + case *methods.LogoutBody: + h.Stop() + } + + err := h.roundTripper.RoundTrip(ctx, req, res) + if err != nil { + return err + } + + // Start ticker on login. 
+ switch req.(type) { + case *methods.LoginBody, *methods.LoginExtensionByCertificateBody, *methods.LoginByTokenBody: + h.Start() + } + + return nil +} + +// HandlerREST is a keep alive implementation for use with rest.Client +type HandlerREST struct { + *handler + + roundTripper http.RoundTripper +} + +// RoundTrip implements http.RoundTripper +func (h *HandlerREST) RoundTrip(req *http.Request) (*http.Response, error) { + if req.URL.Path != "/rest/com/vmware/cis/session" { + return h.roundTripper.RoundTrip(req) + } + + if req.Method == http.MethodDelete { // Logout + h.Stop() + } + + res, err := h.roundTripper.RoundTrip(req) + if err != nil { + return res, err + } + + if req.Method == http.MethodPost { // Login + h.Start() + } + + return res, err +} diff --git a/vendor/github.com/vmware/govmomi/session/manager.go b/vendor/github.com/vmware/govmomi/session/manager.go index ad3219716..8689acd50 100644 --- a/vendor/github.com/vmware/govmomi/session/manager.go +++ b/vendor/github.com/vmware/govmomi/session/manager.go @@ -18,9 +18,10 @@ package session import ( "context" - "net/http" + "io/ioutil" "net/url" "os" + "strings" "github.com/vmware/govmomi/property" "github.com/vmware/govmomi/vim25" @@ -41,6 +42,21 @@ func init() { } } +// Secret returns the contents if a file path value is given, otherwise returns value itself. +func Secret(value string) (string, error) { + if len(value) == 0 { + return value, nil + } + contents, err := ioutil.ReadFile(value) + if err != nil { + if os.IsPermission(err) { + return "", err + } + return value, nil + } + return strings.TrimSpace(string(contents)), nil +} + type Manager struct { client *vim25.Client userSession *types.UserSession @@ -107,7 +123,7 @@ func (sm *Manager) LoginExtensionByCertificate(ctx context.Context, key string) // "Post https://sdkTunnel:8089/sdk: x509: certificate is valid for $vcenter_hostname, not sdkTunnel" // The only easy way around this is to disable verification for the call to LoginExtensionByCertificate(). // TODO: find a way to avoid disabling InsecureSkipVerify. - c.Transport.(*http.Transport).TLSClientConfig.InsecureSkipVerify = true + c.DefaultTransport().TLSClientConfig.InsecureSkipVerify = true } req := types.LoginExtensionByCertificate{ @@ -265,3 +281,14 @@ func (sm *Manager) CloneSession(ctx context.Context, ticket string) error { sm.userSession = &res.Returnval return nil } + +func (sm *Manager) UpdateServiceMessage(ctx context.Context, message string) error { + req := types.UpdateServiceMessage{ + This: sm.Reference(), + Message: message, + } + + _, err := methods.UpdateServiceMessage(ctx, sm.client, &req) + + return err +} diff --git a/vendor/github.com/vmware/govmomi/sts/client.go b/vendor/github.com/vmware/govmomi/sts/client.go index 70e02de09..d98c56095 100644 --- a/vendor/github.com/vmware/govmomi/sts/client.go +++ b/vendor/github.com/vmware/govmomi/sts/client.go @@ -38,6 +38,8 @@ const ( // Client is a soap.Client targeting the STS (Secure Token Service) API endpoint. type Client struct { *soap.Client + + RoundTripper soap.RoundTripper } // NewClient returns a client targeting the STS API endpoint. 
@@ -48,7 +50,7 @@ func NewClient(ctx context.Context, c *vim25.Client) (*Client, error) { filter := &types.LookupServiceRegistrationFilter{ ServiceType: &types.LookupServiceRegistrationServiceType{ Product: "com.vmware.cis", - Type: "sso:sts", + Type: "cs.identity", }, EndpointType: &types.LookupServiceRegistrationEndpointType{ Protocol: "wsTrust", @@ -59,11 +61,19 @@ func NewClient(ctx context.Context, c *vim25.Client) (*Client, error) { url := lookup.EndpointURL(ctx, c, Path, filter) sc := c.Client.NewServiceClient(url, Namespace) - return &Client{sc}, nil + return &Client{sc, sc}, nil +} + +// RoundTrip dispatches to the RoundTripper field. +func (c *Client) RoundTrip(ctx context.Context, req, res soap.HasFault) error { + return c.RoundTripper.RoundTrip(ctx, req, res) } // TokenRequest parameters for issuing a SAML token. // At least one of Userinfo or Certificate must be specified. +// When `TokenRequest.Certificate` is set, the `tls.Certificate.PrivateKey` field must be set as it is required to sign the request. +// When the `tls.Certificate.Certificate` field is not set, the request Assertion header is set to that of the TokenRequest.Token. +// Otherwise `tls.Certificate.Certificate` is used as the BinarySecurityToken in the request. type TokenRequest struct { Userinfo *url.Userinfo // Userinfo when set issues a Bearer token Certificate *tls.Certificate // Certificate when set issues a HoK token diff --git a/vendor/github.com/vmware/govmomi/sts/internal/types.go b/vendor/github.com/vmware/govmomi/sts/internal/types.go index dc8402b01..875dbe505 100644 --- a/vendor/github.com/vmware/govmomi/sts/internal/types.go +++ b/vendor/github.com/vmware/govmomi/sts/internal/types.go @@ -473,12 +473,12 @@ func (a *AttributeStatement) C14N() string { type AttributeValue struct { XMLName xml.Name - Type string `xml:"type,attr"` + Type string `xml:"type,attr,typeattr"` Value string `xml:",innerxml"` } func (a *AttributeValue) C14N() string { - return fmt.Sprintf(`<saml2:AttributeValue xmlns:xsi="%s" xsi:type="xs:string">%s</saml2:AttributeValue>`, XSI, a.Value) + return fmt.Sprintf(`<saml2:AttributeValue xmlns:xsi="%s" xsi:type="%s">%s</saml2:AttributeValue>`, XSI, a.Type, a.Value) } type Attribute struct { @@ -682,7 +682,7 @@ func Marshal(val interface{}) string { // Note that the namespace is required when encoding, but the namespace prefix must not be // present when decoding as Go's decoding does not handle namespace prefix.
func mkns(ns string, obj interface{}, name ...*xml.Name) string { - ns = ns + ":" + ns += ":" for i := range name { name[i].Space = "" if !strings.HasPrefix(name[i].Local, ns) { diff --git a/vendor/github.com/vmware/govmomi/sts/signer.go b/vendor/github.com/vmware/govmomi/sts/signer.go index 6a3b042bc..6ee0ffdeb 100644 --- a/vendor/github.com/vmware/govmomi/sts/signer.go +++ b/vendor/github.com/vmware/govmomi/sts/signer.go @@ -30,12 +30,14 @@ import ( "io" "io/ioutil" mrand "math/rand" + "net" "net/http" "net/url" "strings" "time" "github.com/google/uuid" + "github.com/vmware/govmomi/sts/internal" "github.com/vmware/govmomi/vim25/methods" "github.com/vmware/govmomi/vim25/soap" @@ -126,22 +128,32 @@ func (s *Signer) Sign(env soap.Envelope) ([]byte, error) { req := x.RequestSecurityToken() c14n = req.C14N() body = req.String() - id := newID() - - info.SecurityTokenReference = &internal.SecurityTokenReference{ - Reference: &internal.SecurityReference{ - URI: "#" + id, - ValueType: "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-x509-token-profile-1.0#X509v3", - }, - } - header.BinarySecurityToken = &internal.BinarySecurityToken{ - EncodingType: "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-soap-message-security-1.0#Base64Binary", - ValueType: "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-x509-token-profile-1.0#X509v3", - ID: id, - Value: base64.StdEncoding.EncodeToString(s.Certificate.Certificate[0]), + if len(s.Certificate.Certificate) == 0 { + header.Assertion = s.Token + if err := s.setTokenReference(&info); err != nil { + return nil, err + } + } else { + id := newID() + + header.BinarySecurityToken = &internal.BinarySecurityToken{ + EncodingType: "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-soap-message-security-1.0#Base64Binary", + ValueType: "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-x509-token-profile-1.0#X509v3", + ID: id, + Value: base64.StdEncoding.EncodeToString(s.Certificate.Certificate[0]), + } + + info.SecurityTokenReference = &internal.SecurityTokenReference{ + Reference: &internal.SecurityReference{ + URI: "#" + id, + ValueType: "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-x509-token-profile-1.0#X509v3", + }, + } } - } else { + } + // When requesting HoK token for interactive user, request will have both priv. key and username/password. + if s.user.Username() != "" { header.UsernameToken = &internal.UsernameToken{ Username: s.user.Username(), } @@ -263,7 +275,7 @@ func (s *Signer) SignRequest(req *http.Request) error { return fmt.Errorf("sts: reading http.Request body: %s", rerr) } } - bhash := sha256.New().Sum(body) + bhash := sha256.Sum256(body) port := req.URL.Port() if port == "" { @@ -271,18 +283,25 @@ func (s *Signer) SignRequest(req *http.Request) error { } var buf bytes.Buffer + host := req.URL.Hostname() + + // Check if the host IP is in IPv6 format. If yes, add the opening and closing square brackets. 
+ if isIPv6(host) { + host = fmt.Sprintf("%s%s%s", "[", host, "]") + } + msg := []string{ nonce, req.Method, req.URL.Path, - strings.ToLower(req.URL.Hostname()), + strings.ToLower(host), port, } for i := range msg { buf.WriteString(msg[i]) buf.WriteByte('\n') } - buf.Write(bhash) + buf.Write(bhash[:]) buf.WriteByte('\n') sum := sha256.Sum256(buf.Bytes()) @@ -309,7 +328,7 @@ func (s *Signer) SignRequest(req *http.Request) error { }) add(param{ key: "bodyhash", - val: base64.StdEncoding.EncodeToString(bhash), + val: base64.StdEncoding.EncodeToString(bhash[:]), }) } @@ -326,3 +345,11 @@ func (s *Signer) NewRequest() TokenRequest { KeyID: s.keyID, } } + +func isIPv6(s string) bool { + ip := net.ParseIP(s) + if ip == nil { + return false + } + return ip.To4() == nil +} diff --git a/vendor/github.com/vmware/govmomi/task/error.go b/vendor/github.com/vmware/govmomi/task/error.go index 5f6b8503f..3fff5aa26 100644 --- a/vendor/github.com/vmware/govmomi/task/error.go +++ b/vendor/github.com/vmware/govmomi/task/error.go @@ -20,6 +20,7 @@ import "github.com/vmware/govmomi/vim25/types" type Error struct { *types.LocalizedMethodFault + Description *types.LocalizableMessage } // Error returns the task's localized fault message. diff --git a/vendor/github.com/vmware/govmomi/task/history_collector.go b/vendor/github.com/vmware/govmomi/task/history_collector.go new file mode 100644 index 000000000..dd9b70a1b --- /dev/null +++ b/vendor/github.com/vmware/govmomi/task/history_collector.go @@ -0,0 +1,86 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package task + +import ( + "context" + + "github.com/vmware/govmomi/history" + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +// HistoryCollector provides a mechanism for retrieving historical data and +// updates when the server appends new tasks. +type HistoryCollector struct { + *history.Collector +} + +func newHistoryCollector(c *vim25.Client, ref types.ManagedObjectReference) *HistoryCollector { + return &HistoryCollector{ + Collector: history.NewCollector(c, ref), + } +} + +// RecentTasks returns a list of task managed objects that completed recently, +// that are currently running, or that are queued to run. +func (h HistoryCollector) RecentTasks(ctx context.Context) ([]types.TaskInfo, error) { + var o mo.TaskHistoryCollector + + err := h.Properties(ctx, h.Reference(), []string{"recentTask"}, &o) + if err != nil { + return nil, err + } + + return o.LatestPage, nil +} + +// ReadNextTasks reads the scrollable view from the current position. The +// scrollable position is moved to the next newer page after the read. No item +// is returned when the end of the collector is reached. 
+func (h HistoryCollector) ReadNextTasks(ctx context.Context, maxCount int32) ([]types.TaskInfo, error) { + req := types.ReadNextTasks{ + This: h.Reference(), + MaxCount: maxCount, + } + + res, err := methods.ReadNextTasks(ctx, h.Client(), &req) + if err != nil { + return nil, err + } + + return res.Returnval, nil +} + +// ReadPreviousTasks reads the scrollable view from the current position. The +// scrollable position is then moved to the next older page after the read. No +// item is returned when the head of the collector is reached. +func (h HistoryCollector) ReadPreviousTasks(ctx context.Context, maxCount int32) ([]types.TaskInfo, error) { + req := types.ReadPreviousTasks{ + This: h.Reference(), + MaxCount: maxCount, + } + + res, err := methods.ReadPreviousTasks(ctx, h.Client(), &req) + if err != nil { + return nil, err + } + + return res.Returnval, nil +} diff --git a/vendor/github.com/vmware/govmomi/task/manager.go b/vendor/github.com/vmware/govmomi/task/manager.go new file mode 100644 index 000000000..1fbf9ead2 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/task/manager.go @@ -0,0 +1,45 @@ +package task + +import ( + "context" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" +) + +type Manager struct { + r types.ManagedObjectReference + c *vim25.Client +} + +// NewManager creates a new task manager +func NewManager(c *vim25.Client) *Manager { + m := Manager{ + r: *c.ServiceContent.TaskManager, + c: c, + } + + return &m +} + +// Reference returns the task.Manager MOID +func (m Manager) Reference() types.ManagedObjectReference { + return m.r +} + +// CreateCollectorForTasks returns a task history collector, a specialized +// history collector that gathers TaskInfo data objects. 
+func (m Manager) CreateCollectorForTasks(ctx context.Context, filter types.TaskFilterSpec) (*HistoryCollector, error) { + req := types.CreateCollectorForTasks{ + This: m.r, + Filter: filter, + } + + res, err := methods.CreateCollectorForTasks(ctx, m.c, &req) + if err != nil { + return nil, err + } + + return newHistoryCollector(m.c, res.Returnval), nil +} diff --git a/vendor/github.com/vmware/govmomi/task/wait.go b/vendor/github.com/vmware/govmomi/task/wait.go index 19fee5384..b78f5110d 100644 --- a/vendor/github.com/vmware/govmomi/task/wait.go +++ b/vendor/github.com/vmware/govmomi/task/wait.go @@ -38,7 +38,7 @@ func (t taskProgress) Detail() string { func (t taskProgress) Error() error { if t.info.Error != nil { - return Error{t.info.Error} + return Error{t.info.Error, t.info.Description} } return nil @@ -68,7 +68,7 @@ func (t *taskCallback) fn(pc []types.PropertyChange) bool { t.info = &ti } - // t.info could be nil if pc can't satify the rules above + // t.info could be nil if pc can't satisfy the rules above if t.info == nil { return false } @@ -123,7 +123,18 @@ func Wait(ctx context.Context, ref types.ManagedObjectReference, pc *property.Co defer close(cb.ch) } - err := property.Wait(ctx, pc, ref, []string{"info"}, cb.fn) + filter := &property.WaitFilter{PropagateMissing: true} + filter.Add(ref, ref.Type, []string{"info"}) + + err := property.WaitForUpdates(ctx, pc, filter, func(updates []types.ObjectUpdate) bool { + for _, update := range updates { + if cb.fn(update.ChangeSet) { + return true + } + } + + return false + }) if err != nil { return nil, err } diff --git a/vendor/github.com/vmware/govmomi/vapi/internal/internal.go b/vendor/github.com/vmware/govmomi/vapi/internal/internal.go index 781299947..68fe79751 100644 --- a/vendor/github.com/vmware/govmomi/vapi/internal/internal.go +++ b/vendor/github.com/vmware/govmomi/vapi/internal/internal.go @@ -17,23 +17,34 @@ limitations under the License. 
package internal import ( - "bytes" - "encoding/json" - "io" - "net/http" - "net/url" - "github.com/vmware/govmomi/vim25/mo" "github.com/vmware/govmomi/vim25/types" ) +// VAPI REST Paths const ( - Path = "/rest/com/vmware" - SessionPath = "/cis/session" - CategoryPath = "/cis/tagging/category" - TagPath = "/cis/tagging/tag" - AssociationPath = "/cis/tagging/tag-association" - SessionCookieName = "vmware-api-session-id" + SessionPath = "/com/vmware/cis/session" + CategoryPath = "/com/vmware/cis/tagging/category" + TagPath = "/com/vmware/cis/tagging/tag" + AssociationPath = "/com/vmware/cis/tagging/tag-association" + LibraryPath = "/com/vmware/content/library" + LibraryItemFileData = "/com/vmware/cis/data" + LibraryItemPath = "/com/vmware/content/library/item" + LibraryItemFilePath = "/com/vmware/content/library/item/file" + LibraryItemUpdateSession = "/com/vmware/content/library/item/update-session" + LibraryItemUpdateSessionFile = "/com/vmware/content/library/item/updatesession/file" + LibraryItemDownloadSession = "/com/vmware/content/library/item/download-session" + LibraryItemDownloadSessionFile = "/com/vmware/content/library/item/downloadsession/file" + LocalLibraryPath = "/com/vmware/content/local-library" + SubscribedLibraryPath = "/com/vmware/content/subscribed-library" + SubscribedLibraryItem = "/com/vmware/content/library/subscribed-item" + Subscriptions = "/com/vmware/content/library/subscriptions" + VCenterOVFLibraryItem = "/com/vmware/vcenter/ovf/library-item" + VCenterVMTXLibraryItem = "/vcenter/vm-template/library-items" + VCenterVM = "/vcenter/vm" + SessionCookieName = "vmware-api-session-id" + UseHeaderAuthn = "vmware-use-header-authn" + DebugEcho = "/vc-sim/debug/echo" ) // AssociatedObject is the same structure as types.ManagedObjectReference, @@ -62,63 +73,15 @@ func NewAssociation(ref mo.Reference) Association { } } -type CloneURL interface { - URL() *url.URL -} - -// Resource wraps url.URL with helpers -type Resource struct { - u *url.URL -} - -func URL(c CloneURL, path string) *Resource { - r := &Resource{u: c.URL()} - r.u.Path = Path + path - return r -} - -// WithID appends id to the URL.Path -func (r *Resource) WithID(id string) *Resource { - r.u.Path += "/id:" + id - return r -} - -// WithAction sets adds action to the URL.RawQuery -func (r *Resource) WithAction(action string) *Resource { - r.u.RawQuery = url.Values{ - "~action": []string{action}, - }.Encode() - return r -} - -// Request returns a new http.Request for the given method. -// An optional body can be provided for POST and PATCH methods. -func (r *Resource) Request(method string, body ...interface{}) *http.Request { - rdr := io.MultiReader() // empty body by default - if len(body) != 0 { - rdr = encode(body[0]) - } - req, err := http.NewRequest(method, r.u.String(), rdr) - if err != nil { - panic(err) - } - return req -} - -type errorReader struct { - e error +type SubscriptionDestination struct { + ID string `json:"subscription"` } -func (e errorReader) Read([]byte) (int, error) { - return -1, e.e +type SubscriptionDestinationSpec struct { + Subscriptions []SubscriptionDestination `json:"subscriptions,omitempty"` } -// encode body as JSON, deferring any errors until io.Reader is used. 
-func encode(body interface{}) io.Reader { - var b bytes.Buffer - err := json.NewEncoder(&b).Encode(body) - if err != nil { - return errorReader{err} - } - return &b +type SubscriptionItemDestinationSpec struct { + Force bool `json:"force_sync_content"` + Subscriptions []SubscriptionDestination `json:"subscriptions,omitempty"` } diff --git a/vendor/github.com/vmware/govmomi/vapi/rest/client.go b/vendor/github.com/vmware/govmomi/vapi/rest/client.go index 152928ee2..3c4b148d4 100644 --- a/vendor/github.com/vmware/govmomi/vapi/rest/client.go +++ b/vendor/github.com/vmware/govmomi/vapi/rest/client.go @@ -25,6 +25,9 @@ import ( "io/ioutil" "net/http" "net/url" + "strings" + "sync" + "time" "github.com/vmware/govmomi/vapi/internal" "github.com/vmware/govmomi/vim25" @@ -33,14 +36,93 @@ import ( // Client extends soap.Client to support JSON encoding, while inheriting security features, debug tracing and session persistence. type Client struct { + mu sync.Mutex + *soap.Client + sessionID string +} + +// Session information +type Session struct { + User string `json:"user"` + Created time.Time `json:"created_time"` + LastAccessed time.Time `json:"last_accessed_time"` +} + +// LocalizableMessage represents a localizable error +type LocalizableMessage struct { + Args []string `json:"args,omitempty"` + DefaultMessage string `json:"default_message,omitempty"` + ID string `json:"id,omitempty"` +} + +func (m *LocalizableMessage) Error() string { + return m.DefaultMessage } // NewClient creates a new Client instance. func NewClient(c *vim25.Client) *Client { - sc := c.Client.NewServiceClient(internal.Path, "") + sc := c.Client.NewServiceClient(Path, "") + + return &Client{Client: sc} +} + +// SessionID is set by calling Login() or optionally with the given id param +func (c *Client) SessionID(id ...string) string { + c.mu.Lock() + defer c.mu.Unlock() + if len(id) != 0 { + c.sessionID = id[0] + } + return c.sessionID +} + +type marshaledClient struct { + SoapClient *soap.Client + SessionID string +} + +func (c *Client) MarshalJSON() ([]byte, error) { + m := marshaledClient{ + SoapClient: c.Client, + SessionID: c.sessionID, + } + + return json.Marshal(m) +} + +func (c *Client) UnmarshalJSON(b []byte) error { + var m marshaledClient - return &Client{sc} + err := json.Unmarshal(b, &m) + if err != nil { + return err + } + + *c = Client{ + Client: m.SoapClient, + sessionID: m.SessionID, + } + + return nil +} + +// isAPI returns true if path starts with "/api" +// This hack allows helpers to support both endpoints: +// "/rest" - value wrapped responses and structured error responses +// "/api" - raw responses and no structured error responses +func isAPI(path string) bool { + return strings.HasPrefix(path, "/api") +} + +// Resource helper for the given path. +func (c *Client) Resource(path string) *Resource { + r := &Resource{u: c.URL()} + if !isAPI(path) { + path = Path + path + } + r.u.Path = path + return r } type Signer interface { @@ -53,6 +135,32 @@ func (c *Client) WithSigner(ctx context.Context, s Signer) context.Context { return context.WithValue(ctx, signerContext{}, s) } +type headersContext struct{} + +// WithHeader returns a new Context populated with the provided headers map. +// Calls to a VAPI REST client with this context will populate the HTTP headers +// map using the provided headers. 
+func (c *Client) WithHeader( + ctx context.Context, + headers http.Header) context.Context { + + return context.WithValue(ctx, headersContext{}, headers) +} + +type statusError struct { + res *http.Response +} + +func (e *statusError) Error() string { + return fmt.Sprintf("%s %s: %s", e.res.Request.Method, e.res.Request.URL, e.res.Status) +} + +// RawResponse may be used with the Do method as the resBody argument in order +// to capture the raw response data. +type RawResponse struct { + bytes.Buffer +} + // Do sends the http.Request, decoding resBody if provided. func (c *Client) Do(ctx context.Context, req *http.Request, resBody interface{}) error { switch req.Method { @@ -62,15 +170,29 @@ func (c *Client) Do(ctx context.Context, req *http.Request, resBody interface{}) req.Header.Set("Accept", "application/json") + if id := c.SessionID(); id != "" { + req.Header.Set(internal.SessionCookieName, id) + } + if s, ok := ctx.Value(signerContext{}).(Signer); ok { if err := s.SignRequest(req); err != nil { return err } } + if headers, ok := ctx.Value(headersContext{}).(http.Header); ok { + for k, v := range headers { + for _, v := range v { + req.Header.Add(k, v) + } + } + } + return c.Client.Do(ctx, req, func(res *http.Response) error { switch res.StatusCode { case http.StatusOK: + case http.StatusCreated: + case http.StatusNoContent: case http.StatusBadRequest: // TODO: structured error types detail, err := ioutil.ReadAll(res.Body) @@ -79,7 +201,7 @@ func (c *Client) Do(ctx context.Context, req *http.Request, resBody interface{}) } return fmt.Errorf("%s: %s", res.Status, bytes.TrimSpace(detail)) default: - return fmt.Errorf("%s %s: %s", req.Method, req.URL, res.Status) + return &statusError{res} } if resBody == nil { @@ -87,23 +209,68 @@ func (c *Client) Do(ctx context.Context, req *http.Request, resBody interface{}) } switch b := resBody.(type) { + case *RawResponse: + return res.Write(b) case io.Writer: _, err := io.Copy(b, res.Body) return err default: + d := json.NewDecoder(res.Body) + if isAPI(req.URL.Path) { + // Responses from the /api endpoint are not wrapped + return d.Decode(resBody) + } + // Responses from the /rest endpoint are wrapped in this structure val := struct { Value interface{} `json:"value,omitempty"` }{ resBody, } - return json.NewDecoder(res.Body).Decode(&val) + return d.Decode(&val) } }) } +// authHeaders ensures the given map contains a REST auth header +func (c *Client) authHeaders(h map[string]string) map[string]string { + if _, exists := h[internal.SessionCookieName]; exists { + return h + } + if h == nil { + h = make(map[string]string) + } + + h[internal.SessionCookieName] = c.SessionID() + + return h +} + +// Download wraps soap.Client.Download, adding the REST authentication header +func (c *Client) Download(ctx context.Context, u *url.URL, param *soap.Download) (io.ReadCloser, int64, error) { + p := *param + p.Headers = c.authHeaders(p.Headers) + return c.Client.Download(ctx, u, &p) +} + +// DownloadFile wraps soap.Client.DownloadFile, adding the REST authentication header +func (c *Client) DownloadFile(ctx context.Context, file string, u *url.URL, param *soap.Download) error { + p := *param + p.Headers = c.authHeaders(p.Headers) + return c.Client.DownloadFile(ctx, file, u, &p) +} + +// Upload wraps soap.Client.Upload, adding the REST authentication header +func (c *Client) Upload(ctx context.Context, f io.Reader, u *url.URL, param *soap.Upload) error { + p := *param + p.Headers = c.authHeaders(p.Headers) + return c.Client.Upload(ctx, f, u, &p) +} + // Login 
creates a new session via Basic Authentication with the given url.Userinfo. func (c *Client) Login(ctx context.Context, user *url.Userinfo) error { - req := internal.URL(c, internal.SessionPath).Request(http.MethodPost) + req := c.Resource(internal.SessionPath).Request(http.MethodPost) + + req.Header.Set(internal.UseHeaderAuthn, "true") if user != nil { if password, ok := user.Password(); ok { @@ -111,15 +278,59 @@ func (c *Client) Login(ctx context.Context, user *url.Userinfo) error { } } - return c.Do(ctx, req, nil) + var id string + err := c.Do(ctx, req, &id) + if err != nil { + return err + } + + c.SessionID(id) + + return nil } func (c *Client) LoginByToken(ctx context.Context) error { return c.Login(ctx, nil) } +// Session returns the user's current session. +// Nil is returned if the session is not authenticated. +func (c *Client) Session(ctx context.Context) (*Session, error) { + var s Session + req := c.Resource(internal.SessionPath).WithAction("get").Request(http.MethodPost) + err := c.Do(ctx, req, &s) + if err != nil { + if e, ok := err.(*statusError); ok { + if e.res.StatusCode == http.StatusUnauthorized { + return nil, nil + } + } + return nil, err + } + return &s, nil +} + // Logout deletes the current session. func (c *Client) Logout(ctx context.Context) error { - req := internal.URL(c, internal.SessionPath).Request(http.MethodDelete) + req := c.Resource(internal.SessionPath).Request(http.MethodDelete) return c.Do(ctx, req, nil) } + +// Valid returns whether or not the client is valid and ready for use. +// This should be called after unmarshalling the client. +func (c *Client) Valid() bool { + if c == nil { + return false + } + + if c.Client == nil { + return false + } + + return true +} + +// Path returns rest.Path (see cache.Client) +func (c *Client) Path() string { + return Path +} diff --git a/vendor/github.com/vmware/govmomi/vapi/rest/resource.go b/vendor/github.com/vmware/govmomi/vapi/rest/resource.go new file mode 100644 index 000000000..3004abb85 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vapi/rest/resource.go @@ -0,0 +1,90 @@ +/* +Copyright (c) 2019 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
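The rest.Client changes above make the client session-aware: Login stores the session ID returned by the session endpoint, SessionID reads (or overrides) it, Session reports the current session or nil when unauthenticated, and the stored ID is replayed on every request inside Do. A minimal connection sketch follows; the URL and credentials are placeholders and the error handling is illustrative, not anything prescribed by this patch.

    package main

    import (
        "context"
        "fmt"
        "log"
        "net/url"

        "github.com/vmware/govmomi/vapi/rest"
        "github.com/vmware/govmomi/vim25"
        "github.com/vmware/govmomi/vim25/soap"
    )

    func main() {
        ctx := context.Background()

        // Placeholder endpoint and credentials.
        u, err := soap.ParseURL("https://vcenter.example.com/sdk")
        if err != nil {
            log.Fatal(err)
        }

        vc, err := vim25.NewClient(ctx, soap.NewClient(u, true)) // true skips TLS verification
        if err != nil {
            log.Fatal(err)
        }

        rc := rest.NewClient(vc)
        if err := rc.Login(ctx, url.UserPassword("user", "secret")); err != nil {
            log.Fatal(err)
        }
        defer rc.Logout(ctx)

        // Session returns nil (with a nil error) when the client is not authenticated.
        if s, err := rc.Session(ctx); err == nil && s != nil {
            fmt.Println("logged in as", s.User, "session id:", rc.SessionID())
        }
    }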
+*/ + +package rest + +import ( + "bytes" + "encoding/json" + "io" + "net/http" + "net/url" +) + +const ( + Path = "/rest" +) + +// Resource wraps url.URL with helpers +type Resource struct { + u *url.URL +} + +func (r *Resource) String() string { + return r.u.String() +} + +// WithID appends id to the URL.Path +func (r *Resource) WithID(id string) *Resource { + r.u.Path += "/id:" + id + return r +} + +// WithAction sets adds action to the URL.RawQuery +func (r *Resource) WithAction(action string) *Resource { + return r.WithParam("~action", action) +} + +// WithParam adds one parameter on the URL.RawQuery +func (r *Resource) WithParam(name string, value string) *Resource { + // ParseQuery handles empty case, and we control access to query string so shouldn't encounter an error case + params, _ := url.ParseQuery(r.u.RawQuery) + params[name] = append(params[name], value) + r.u.RawQuery = params.Encode() + return r +} + +// Request returns a new http.Request for the given method. +// An optional body can be provided for POST and PATCH methods. +func (r *Resource) Request(method string, body ...interface{}) *http.Request { + rdr := io.MultiReader() // empty body by default + if len(body) != 0 { + rdr = encode(body[0]) + } + req, err := http.NewRequest(method, r.u.String(), rdr) + if err != nil { + panic(err) + } + return req +} + +type errorReader struct { + e error +} + +func (e errorReader) Read([]byte) (int, error) { + return -1, e.e +} + +// encode body as JSON, deferring any errors until io.Reader is used. +func encode(body interface{}) io.Reader { + var b bytes.Buffer + err := json.NewEncoder(&b).Encode(body) + if err != nil { + return errorReader{err} + } + return &b +} diff --git a/vendor/github.com/vmware/govmomi/vapi/tags/categories.go b/vendor/github.com/vmware/govmomi/vapi/tags/categories.go index 119b96506..64d0b66c1 100644 --- a/vendor/github.com/vmware/govmomi/vapi/tags/categories.go +++ b/vendor/github.com/vmware/govmomi/vapi/tags/categories.go @@ -20,11 +20,13 @@ import ( "context" "fmt" "net/http" + "strings" "github.com/vmware/govmomi/vapi/internal" ) -// Category provides methods to create, read, update, delete, and enumerate categories. +// Category provides methods to create, read, update, delete, and enumerate +// categories. type Category struct { ID string `json:"id,omitempty"` Name string `json:"name,omitempty"` @@ -87,12 +89,13 @@ func (c *Manager) CreateCategory(ctx context.Context, category *Category) (strin // otherwise create fails with invalid_argument spec.Category.AssociableTypes = []string{} } - url := internal.URL(c, internal.CategoryPath) + url := c.Resource(internal.CategoryPath) var res string return res, c.Do(ctx, url.Request(http.MethodPost, spec), &res) } -// UpdateCategory can update one or more of the AssociableTypes, Cardinality, Description and Name fields. +// UpdateCategory updates one or more of the AssociableTypes, Cardinality, +// Description and Name fields. func (c *Manager) UpdateCategory(ctx context.Context, category *Category) error { spec := struct { Category Category `json:"update_spec"` @@ -104,13 +107,13 @@ func (c *Manager) UpdateCategory(ctx context.Context, category *Category) error Name: category.Name, }, } - url := internal.URL(c, internal.CategoryPath).WithID(category.ID) + url := c.Resource(internal.CategoryPath).WithID(category.ID) return c.Do(ctx, url.Request(http.MethodPatch, spec), nil) } -// DeleteCategory deletes an existing category. +// DeleteCategory deletes a category. 
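resource.go above centralizes URL construction for the client: Client.Resource prefixes any path that does not start with "/api" with the "/rest" root, WithID appends an "/id:<value>" segment, WithAction and WithParam add query parameters, and Request builds the http.Request, JSON-encoding an optional body. A rough sketch of how the pieces compose, assuming an authenticated *rest.Client named rc; the tagging path and spec value are illustrative placeholders, not taken from this patch.

    import (
        "context"
        "net/http"

        "github.com/vmware/govmomi/vapi/rest"
    )

    // attachSketch shows the URL helpers only; real callers use the vapi/tags package.
    func attachSketch(ctx context.Context, rc *rest.Client, tagID string, spec interface{}) error {
        // The path has no "/api" prefix, so Resource serves it under "/rest";
        // the path gains "/id:<tagID>" and the query gains "~action=attach".
        r := rc.Resource("/com/vmware/cis/tagging/tag-association").WithID(tagID).WithAction("attach")

        // Request JSON-encodes spec; Do unwraps the value-wrapped "/rest" response.
        return rc.Do(ctx, r.Request(http.MethodPost, spec), nil)
    }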
func (c *Manager) DeleteCategory(ctx context.Context, category *Category) error { - url := internal.URL(c, internal.CategoryPath).WithID(category.ID) + url := c.Resource(internal.CategoryPath).WithID(category.ID) return c.Do(ctx, url.Request(http.MethodDelete), nil) } @@ -129,19 +132,19 @@ func (c *Manager) GetCategory(ctx context.Context, id string) (*Category, error) } } } - url := internal.URL(c, internal.CategoryPath).WithID(id) + url := c.Resource(internal.CategoryPath).WithID(id) var res Category return &res, c.Do(ctx, url.Request(http.MethodGet), &res) } // ListCategories returns all category IDs in the system. func (c *Manager) ListCategories(ctx context.Context) ([]string, error) { - url := internal.URL(c, internal.CategoryPath) + url := c.Resource(internal.CategoryPath) var res []string return res, c.Do(ctx, url.Request(http.MethodGet), &res) } -// GetCategories fetches an array of category information in the system. +// GetCategories fetches a list of category information in the system. func (c *Manager) GetCategories(ctx context.Context) ([]Category, error) { ids, err := c.ListCategories(ctx) if err != nil { @@ -152,11 +155,13 @@ func (c *Manager) GetCategories(ctx context.Context) ([]Category, error) { for _, id := range ids { category, err := c.GetCategory(ctx, id) if err != nil { - return nil, fmt.Errorf("get category %s: %s", id, err) + if strings.Contains(err.Error(), http.StatusText(http.StatusNotFound)) { + continue // deleted since last fetch + } + return nil, fmt.Errorf("get category %s: %v", id, err) } - categories = append(categories, *category) - } + return categories, nil } diff --git a/vendor/github.com/vmware/govmomi/vapi/tags/errors.go b/vendor/github.com/vmware/govmomi/vapi/tags/errors.go new file mode 100644 index 000000000..b3f84f842 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vapi/tags/errors.go @@ -0,0 +1,55 @@ +/* +Copyright (c) 2020 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
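The categories.go hunks above switch the manager to the new Resource helper and make GetCategories tolerant of races with deletion: an individual GetCategory that returns 404 is now skipped instead of failing the whole listing. A short sketch of the category calls, assuming an authenticated *rest.Client named rc; the category name and cardinality are example values.

    import (
        "context"
        "fmt"
        "log"

        "github.com/vmware/govmomi/vapi/rest"
        "github.com/vmware/govmomi/vapi/tags"
    )

    func categorySketch(ctx context.Context, rc *rest.Client) {
        m := tags.NewManager(rc)

        // CreateCategory returns the URN of the new category.
        id, err := m.CreateCategory(ctx, &tags.Category{
            Name:        "env",
            Cardinality: "SINGLE",
        })
        if err != nil {
            log.Fatal(err)
        }

        // Categories deleted between ListCategories and GetCategory are skipped.
        cats, err := m.GetCategories(ctx)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("created %s; %d categories total\n", id, len(cats))
    }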
+*/ + +package tags + +import ( + "fmt" +) + +const ( + errFormat = "[error: %d type: %s reason: %s]" + separator = "," // concat multiple error strings +) + +// BatchError is an error returned for a single item which failed in a batch +// operation +type BatchError struct { + Type string `json:"id"` + Message string `json:"default_message"` +} + +// BatchErrors contains all errors which occurred in a batch operation +type BatchErrors []BatchError + +func (b BatchErrors) Error() string { + if len(b) == 0 { + return "" + } + + var errString string + for i := range b { + errType := b[i].Type + reason := b[i].Message + errString += fmt.Sprintf(errFormat, i, errType, reason) + + // no separator after last item + if i+1 < len(b) { + errString += separator + } + } + return errString +} diff --git a/vendor/github.com/vmware/govmomi/vapi/tags/tag_association.go b/vendor/github.com/vmware/govmomi/vapi/tags/tag_association.go index 1e1434186..33b220936 100644 --- a/vendor/github.com/vmware/govmomi/vapi/tags/tag_association.go +++ b/vendor/github.com/vmware/govmomi/vapi/tags/tag_association.go @@ -18,6 +18,7 @@ package tags import ( "context" + "encoding/json" "fmt" "net/http" @@ -43,7 +44,7 @@ func (c *Manager) AttachTag(ctx context.Context, tagID string, ref mo.Reference) return err } spec := internal.NewAssociation(ref) - url := internal.URL(c, internal.AssociationPath).WithID(id).WithAction("attach") + url := c.Resource(internal.AssociationPath).WithID(id).WithAction("attach") return c.Do(ctx, url.Request(http.MethodPost, spec), nil) } @@ -55,14 +56,146 @@ func (c *Manager) DetachTag(ctx context.Context, tagID string, ref mo.Reference) return err } spec := internal.NewAssociation(ref) - url := internal.URL(c, internal.AssociationPath).WithID(id).WithAction("detach") + url := c.Resource(internal.AssociationPath).WithID(id).WithAction("detach") return c.Do(ctx, url.Request(http.MethodPost, spec), nil) } +// batchResponse is the response type used by attach/detach operations which +// take multiple tagIDs or moRefs as input. On failure Success will be false and +// Errors contains information about all failed operations +type batchResponse struct { + Success bool `json:"success"` + Errors BatchErrors `json:"error_messages,omitempty"` +} + +// AttachTagToMultipleObjects attaches a tag ID to multiple managed objects. +// This operation is idempotent, i.e. if a tag is already attached to the +// object, then the individual operation is a no-op and no error will be thrown. +// +// This operation was added in vSphere API 6.5. +func (c *Manager) AttachTagToMultipleObjects(ctx context.Context, tagID string, refs []mo.Reference) error { + id, err := c.tagID(ctx, tagID) + if err != nil { + return err + } + + var ids []internal.AssociatedObject + for i := range refs { + ids = append(ids, internal.AssociatedObject(refs[i].Reference())) + } + + spec := struct { + ObjectIDs []internal.AssociatedObject `json:"object_ids"` + }{ids} + + url := c.Resource(internal.AssociationPath).WithID(id).WithAction("attach-tag-to-multiple-objects") + return c.Do(ctx, url.Request(http.MethodPost, spec), nil) +} + +// AttachMultipleTagsToObject attaches multiple tag IDs to a managed object. +// This operation is idempotent. If a tag is already attached to the object, +// then the individual operation is a no-op and no error will be thrown. This +// operation is not atomic. 
If the underlying call fails with one or more tags +// not successfully attached to the managed object reference it might leave the +// managed object reference in a partially tagged state and needs to be resolved +// by the caller. In this case BatchErrors is returned and can be used to +// analyse failure reasons on each failed tag. +// +// Specified tagIDs must use URN-notation instead of display names or a generic +// error will be returned and no tagging operation will be performed. If the +// managed object reference does not exist a generic 403 Forbidden error will be +// returned. +// +// This operation was added in vSphere API 6.5. +func (c *Manager) AttachMultipleTagsToObject(ctx context.Context, tagIDs []string, ref mo.Reference) error { + for _, id := range tagIDs { + // URN enforced to avoid unnecessary round-trips due to invalid tags or display + // name lookups + if isName(id) { + return fmt.Errorf("specified tag is not a URN: %q", id) + } + } + + obj := internal.AssociatedObject(ref.Reference()) + spec := struct { + ObjectID internal.AssociatedObject `json:"object_id"` + TagIDs []string `json:"tag_ids"` + }{ + ObjectID: obj, + TagIDs: tagIDs, + } + + var res batchResponse + url := c.Resource(internal.AssociationPath).WithAction("attach-multiple-tags-to-object") + err := c.Do(ctx, url.Request(http.MethodPost, spec), &res) + if err != nil { + return err + } + + if !res.Success { + if len(res.Errors) != 0 { + return res.Errors + } + panic("invalid batch error") + } + + return nil +} + +// DetachMultipleTagsFromObject detaches multiple tag IDs from a managed object. +// This operation is idempotent. If a tag is already detached from the object, +// then the individual operation is a no-op and no error will be thrown. This +// operation is not atomic. If the underlying call fails with one or more tags +// not successfully detached from the managed object reference it might leave +// the managed object reference in a partially tagged state and needs to be +// resolved by the caller. In this case BatchErrors is returned and can be used +// to analyse failure reasons on each failed tag. +// +// Specified tagIDs must use URN-notation instead of display names or a generic +// error will be returned and no tagging operation will be performed. If the +// managed object reference does not exist a generic 403 Forbidden error will be +// returned. +// +// This operation was added in vSphere API 6.5. +func (c *Manager) DetachMultipleTagsFromObject(ctx context.Context, tagIDs []string, ref mo.Reference) error { + for _, id := range tagIDs { + // URN enforced to avoid unnecessary round-trips due to invalid tags or display + // name lookups + if isName(id) { + return fmt.Errorf("specified tag is not a URN: %q", id) + } + } + + obj := internal.AssociatedObject(ref.Reference()) + spec := struct { + ObjectID internal.AssociatedObject `json:"object_id"` + TagIDs []string `json:"tag_ids"` + }{ + ObjectID: obj, + TagIDs: tagIDs, + } + + var res batchResponse + url := c.Resource(internal.AssociationPath).WithAction("detach-multiple-tags-from-object") + err := c.Do(ctx, url.Request(http.MethodPost, spec), &res) + if err != nil { + return err + } + + if !res.Success { + if len(res.Errors) != 0 { + return res.Errors + } + panic("invalid batch error") + } + + return nil +} + // ListAttachedTags fetches the array of tag IDs attached to the given object. 
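AttachMultipleTagsToObject and DetachMultipleTagsFromObject above are non-atomic batch operations: tag IDs must be URNs (display names are rejected up front), and a partial failure is reported as tags.BatchErrors with one entry per failed tag. A hedged sketch of inspecting such a failure; tagURNs and vm stand in for values obtained elsewhere.

    import (
        "context"
        "fmt"
        "log"

        "github.com/vmware/govmomi/vapi/rest"
        "github.com/vmware/govmomi/vapi/tags"
        "github.com/vmware/govmomi/vim25/mo"
    )

    func attachBatchSketch(ctx context.Context, rc *rest.Client, tagURNs []string, vm mo.Reference) {
        m := tags.NewManager(rc)

        if err := m.AttachMultipleTagsToObject(ctx, tagURNs, vm); err != nil {
            // A partially failed batch surfaces as BatchErrors; anything else is
            // a transport or argument error.
            if berrs, ok := err.(tags.BatchErrors); ok {
                for _, be := range berrs {
                    fmt.Printf("attach failed: type=%s reason=%s\n", be.Type, be.Message)
                }
                return
            }
            log.Fatal(err)
        }
    }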
func (c *Manager) ListAttachedTags(ctx context.Context, ref mo.Reference) ([]string, error) { spec := internal.NewAssociation(ref) - url := internal.URL(c, internal.AssociationPath).WithAction("list-attached-tags") + url := c.Resource(internal.AssociationPath).WithAction("list-attached-tags") var res []string return res, c.Do(ctx, url.Request(http.MethodPost, spec), &res) } @@ -91,7 +224,7 @@ func (c *Manager) ListAttachedObjects(ctx context.Context, tagID string) ([]mo.R if err != nil { return nil, err } - url := internal.URL(c, internal.AssociationPath).WithID(id).WithAction("list-attached-objects") + url := c.Resource(internal.AssociationPath).WithID(id).WithAction("list-attached-objects") var res []internal.AssociatedObject if err := c.Do(ctx, url.Request(http.MethodPost, nil), &res); err != nil { return nil, err @@ -103,3 +236,142 @@ func (c *Manager) ListAttachedObjects(ctx context.Context, tagID string) ([]mo.R } return refs, nil } + +// AttachedObjects is the response type used by ListAttachedObjectsOnTags. +type AttachedObjects struct { + TagID string `json:"tag_id"` + Tag *Tag `json:"tag,omitempty"` + ObjectIDs []mo.Reference `json:"object_ids"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (t *AttachedObjects) UnmarshalJSON(b []byte) error { + var o struct { + TagID string `json:"tag_id"` + ObjectIDs []internal.AssociatedObject `json:"object_ids"` + } + err := json.Unmarshal(b, &o) + if err != nil { + return err + } + + t.TagID = o.TagID + t.ObjectIDs = make([]mo.Reference, len(o.ObjectIDs)) + for i := range o.ObjectIDs { + t.ObjectIDs[i] = o.ObjectIDs[i] + } + + return nil +} + +// ListAttachedObjectsOnTags fetches the array of attached objects for the given tag IDs. +func (c *Manager) ListAttachedObjectsOnTags(ctx context.Context, tagID []string) ([]AttachedObjects, error) { + var ids []string + for i := range tagID { + id, err := c.tagID(ctx, tagID[i]) + if err != nil { + return nil, err + } + ids = append(ids, id) + } + + spec := struct { + TagIDs []string `json:"tag_ids"` + }{ids} + + url := c.Resource(internal.AssociationPath).WithAction("list-attached-objects-on-tags") + var res []AttachedObjects + return res, c.Do(ctx, url.Request(http.MethodPost, spec), &res) +} + +// GetAttachedObjectsOnTags combines ListAttachedObjectsOnTags and populates each Tag field. +func (c *Manager) GetAttachedObjectsOnTags(ctx context.Context, tagID []string) ([]AttachedObjects, error) { + objs, err := c.ListAttachedObjectsOnTags(ctx, tagID) + if err != nil { + return nil, fmt.Errorf("list attached objects %s: %s", tagID, err) + } + + tags := make(map[string]*Tag) + + for i := range objs { + var err error + id := objs[i].TagID + tag, ok := tags[id] + if !ok { + tag, err = c.GetTag(ctx, id) + if err != nil { + return nil, fmt.Errorf("get tag %s: %s", id, err) + } + objs[i].Tag = tag + } + } + + return objs, nil +} + +// AttachedTags is the response type used by ListAttachedTagsOnObjects. +type AttachedTags struct { + ObjectID mo.Reference `json:"object_id"` + TagIDs []string `json:"tag_ids"` + Tags []Tag `json:"tags,omitempty"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (t *AttachedTags) UnmarshalJSON(b []byte) error { + var o struct { + ObjectID internal.AssociatedObject `json:"object_id"` + TagIDs []string `json:"tag_ids"` + } + err := json.Unmarshal(b, &o) + if err != nil { + return err + } + + t.ObjectID = o.ObjectID + t.TagIDs = o.TagIDs + + return nil +} + +// ListAttachedTagsOnObjects fetches the array of attached tag IDs for the given object IDs. 
+func (c *Manager) ListAttachedTagsOnObjects(ctx context.Context, objectID []mo.Reference) ([]AttachedTags, error) { + var ids []internal.AssociatedObject + for i := range objectID { + ids = append(ids, internal.AssociatedObject(objectID[i].Reference())) + } + + spec := struct { + ObjectIDs []internal.AssociatedObject `json:"object_ids"` + }{ids} + + url := c.Resource(internal.AssociationPath).WithAction("list-attached-tags-on-objects") + var res []AttachedTags + return res, c.Do(ctx, url.Request(http.MethodPost, spec), &res) +} + +// GetAttachedTagsOnObjects calls ListAttachedTagsOnObjects and populates each Tags field. +func (c *Manager) GetAttachedTagsOnObjects(ctx context.Context, objectID []mo.Reference) ([]AttachedTags, error) { + objs, err := c.ListAttachedTagsOnObjects(ctx, objectID) + if err != nil { + return nil, fmt.Errorf("list attached tags %s: %s", objectID, err) + } + + tags := make(map[string]*Tag) + + for i := range objs { + for _, id := range objs[i].TagIDs { + var err error + tag, ok := tags[id] + if !ok { + tag, err = c.GetTag(ctx, id) + if err != nil { + return nil, fmt.Errorf("get tag %s: %s", id, err) + } + tags[id] = tag + } + objs[i].Tags = append(objs[i].Tags, *tag) + } + } + + return objs, nil +} diff --git a/vendor/github.com/vmware/govmomi/vapi/tags/tags.go b/vendor/github.com/vmware/govmomi/vapi/tags/tags.go index 8e3c0a6ca..b9024f998 100644 --- a/vendor/github.com/vmware/govmomi/vapi/tags/tags.go +++ b/vendor/github.com/vmware/govmomi/vapi/tags/tags.go @@ -90,7 +90,7 @@ func (c *Manager) CreateTag(ctx context.Context, tag *Tag) (string, error) { } spec.Tag.CategoryID = cat.ID } - url := internal.URL(c, internal.TagPath) + url := c.Resource(internal.TagPath) var res string return res, c.Do(ctx, url.Request(http.MethodPost, spec), &res) } @@ -105,13 +105,13 @@ func (c *Manager) UpdateTag(ctx context.Context, tag *Tag) error { Description: tag.Description, }, } - url := internal.URL(c, internal.TagPath).WithID(tag.ID) + url := c.Resource(internal.TagPath).WithID(tag.ID) return c.Do(ctx, url.Request(http.MethodPatch, spec), nil) } // DeleteTag deletes an existing tag. func (c *Manager) DeleteTag(ctx context.Context, tag *Tag) error { - url := internal.URL(c, internal.TagPath).WithID(tag.ID) + url := c.Resource(internal.TagPath).WithID(tag.ID) return c.Do(ctx, url.Request(http.MethodDelete), nil) } @@ -131,7 +131,7 @@ func (c *Manager) GetTag(ctx context.Context, id string) (*Tag, error) { } } - url := internal.URL(c, internal.TagPath).WithID(id) + url := c.Resource(internal.TagPath).WithID(id) var res Tag return &res, c.Do(ctx, url.Request(http.MethodGet), &res) @@ -148,10 +148,10 @@ func (c *Manager) GetTagForCategory(ctx context.Context, id, category string) (* return nil, err } - for _, id := range ids { - tag, err := c.GetTag(ctx, id) + for _, tagid := range ids { + tag, err := c.GetTag(ctx, tagid) if err != nil { - return nil, fmt.Errorf("get tag for category %s %s: %s", category, id, err) + return nil, fmt.Errorf("get tag for category %s %s: %s", category, tagid, err) } if tag.ID == id || tag.Name == id { return tag, nil @@ -163,7 +163,7 @@ func (c *Manager) GetTagForCategory(ctx context.Context, id, category string) (* // ListTags returns all tag IDs in the system. 
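The bulk association helpers ending above pair a List call (ids only) with a Get call that also resolves tags: GetAttachedTagsOnObjects looks up each attached tag once and fills the Tags field per object. A sketch that prints the tags attached to a set of objects; refs stands for managed object references gathered elsewhere, for example with the view package vendored later in this patch.

    import (
        "context"
        "fmt"
        "log"

        "github.com/vmware/govmomi/vapi/rest"
        "github.com/vmware/govmomi/vapi/tags"
        "github.com/vmware/govmomi/vim25/mo"
    )

    func printAttachedTags(ctx context.Context, rc *rest.Client, refs []mo.Reference) {
        m := tags.NewManager(rc)

        attached, err := m.GetAttachedTagsOnObjects(ctx, refs)
        if err != nil {
            log.Fatal(err)
        }

        for _, obj := range attached {
            fmt.Println(obj.ObjectID.Reference())
            for _, t := range obj.Tags {
                fmt.Printf("  %s (category %s)\n", t.Name, t.CategoryID)
            }
        }
    }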
func (c *Manager) ListTags(ctx context.Context) ([]string, error) { - url := internal.URL(c, internal.TagPath) + url := c.Resource(internal.TagPath) var res []string return res, c.Do(ctx, url.Request(http.MethodGet), &res) } @@ -201,7 +201,7 @@ func (c *Manager) ListTagsForCategory(ctx context.Context, id string) ([]string, body := struct { ID string `json:"category_id"` }{id} - url := internal.URL(c, internal.TagPath).WithID(id).WithAction("list-tags-for-category") + url := c.Resource(internal.TagPath).WithID(id).WithAction("list-tags-for-category") var res []string return res, c.Do(ctx, url.Request(http.MethodPost, body), &res) } diff --git a/vendor/github.com/vmware/govmomi/view/container_view.go b/vendor/github.com/vmware/govmomi/view/container_view.go new file mode 100644 index 000000000..39041c41f --- /dev/null +++ b/vendor/github.com/vmware/govmomi/view/container_view.go @@ -0,0 +1,145 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package view + +import ( + "context" + + "github.com/vmware/govmomi/property" + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +type ContainerView struct { + ManagedObjectView +} + +func NewContainerView(c *vim25.Client, ref types.ManagedObjectReference) *ContainerView { + return &ContainerView{ + ManagedObjectView: *NewManagedObjectView(c, ref), + } +} + +// Retrieve populates dst as property.Collector.Retrieve does, for all entities in the view of types specified by kind. +func (v ContainerView) Retrieve(ctx context.Context, kind []string, ps []string, dst interface{}, pspec ...types.PropertySpec) error { + pc := property.DefaultCollector(v.Client()) + + ospec := types.ObjectSpec{ + Obj: v.Reference(), + Skip: types.NewBool(true), + SelectSet: []types.BaseSelectionSpec{ + &types.TraversalSpec{ + Type: v.Reference().Type, + Path: "view", + }, + }, + } + + if len(kind) == 0 { + kind = []string{"ManagedEntity"} + } + + for _, t := range kind { + spec := types.PropertySpec{ + Type: t, + } + + if len(ps) == 0 { + spec.All = types.NewBool(true) + } else { + spec.PathSet = ps + } + + pspec = append(pspec, spec) + } + + req := types.RetrieveProperties{ + SpecSet: []types.PropertyFilterSpec{ + { + ObjectSet: []types.ObjectSpec{ospec}, + PropSet: pspec, + }, + }, + } + + res, err := pc.RetrieveProperties(ctx, req) + if err != nil { + return err + } + + if d, ok := dst.(*[]types.ObjectContent); ok { + *d = res.Returnval + return nil + } + + return mo.LoadObjectContent(res.Returnval, dst) +} + +// RetrieveWithFilter populates dst as Retrieve does, but only for entities matching the given filter. 
+func (v ContainerView) RetrieveWithFilter(ctx context.Context, kind []string, ps []string, dst interface{}, filter property.Filter) error { + if len(filter) == 0 { + return v.Retrieve(ctx, kind, ps, dst) + } + + var content []types.ObjectContent + + err := v.Retrieve(ctx, kind, filter.Keys(), &content) + if err != nil { + return err + } + + objs := filter.MatchObjectContent(content) + + pc := property.DefaultCollector(v.Client()) + + return pc.Retrieve(ctx, objs, ps, dst) +} + +// Find returns object references for entities of type kind, matching the given filter. +func (v ContainerView) Find(ctx context.Context, kind []string, filter property.Filter) ([]types.ManagedObjectReference, error) { + if len(filter) == 0 { + // Ensure we have at least 1 filter to avoid retrieving all properties. + filter = property.Filter{"name": "*"} + } + + var content []types.ObjectContent + + err := v.Retrieve(ctx, kind, filter.Keys(), &content) + if err != nil { + return nil, err + } + + return filter.MatchObjectContent(content), nil +} + +// FindAny returns object references for entities of type kind, matching any property the given filter. +func (v ContainerView) FindAny(ctx context.Context, kind []string, filter property.Filter) ([]types.ManagedObjectReference, error) { + if len(filter) == 0 { + // Ensure we have at least 1 filter to avoid retrieving all properties. + filter = property.Filter{"name": "*"} + } + + var content []types.ObjectContent + + err := v.Retrieve(ctx, kind, filter.Keys(), &content) + if err != nil { + return nil, err + } + + return filter.MatchAnyObjectContent(content), nil +} diff --git a/vendor/github.com/vmware/govmomi/view/list_view.go b/vendor/github.com/vmware/govmomi/view/list_view.go new file mode 100644 index 000000000..e8cfb5043 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/view/list_view.go @@ -0,0 +1,62 @@ +/* +Copyright (c) 2015-2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
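container_view.go above is the inventory enumeration entry point: Retrieve collects properties for every object of the requested types under the view, RetrieveWithFilter and Find narrow the result with a property.Filter, and the view Manager added later in this patch creates the view itself. A minimal sketch, assuming a connected *vim25.Client named vc; the property paths are standard VirtualMachine properties.

    import (
        "context"
        "fmt"
        "log"

        "github.com/vmware/govmomi/view"
        "github.com/vmware/govmomi/vim25"
        "github.com/vmware/govmomi/vim25/mo"
    )

    func listVMs(ctx context.Context, vc *vim25.Client) {
        m := view.NewManager(vc)

        // Recursive container view rooted at the inventory root folder.
        v, err := m.CreateContainerView(ctx, vc.ServiceContent.RootFolder, []string{"VirtualMachine"}, true)
        if err != nil {
            log.Fatal(err)
        }
        defer v.Destroy(ctx)

        // Retrieve only the properties we need into mo.VirtualMachine values.
        var vms []mo.VirtualMachine
        if err := v.Retrieve(ctx, []string{"VirtualMachine"}, []string{"name", "runtime.powerState"}, &vms); err != nil {
            log.Fatal(err)
        }

        for _, vm := range vms {
            fmt.Println(vm.Name, vm.Runtime.PowerState)
        }
    }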
+*/ + +package view + +import ( + "context" + + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" +) + +type ListView struct { + ManagedObjectView +} + +func NewListView(c *vim25.Client, ref types.ManagedObjectReference) *ListView { + return &ListView{ + ManagedObjectView: *NewManagedObjectView(c, ref), + } +} + +func (v ListView) Add(ctx context.Context, refs []types.ManagedObjectReference) error { + req := types.ModifyListView{ + This: v.Reference(), + Add: refs, + } + _, err := methods.ModifyListView(ctx, v.Client(), &req) + return err +} + +func (v ListView) Remove(ctx context.Context, refs []types.ManagedObjectReference) error { + req := types.ModifyListView{ + This: v.Reference(), + Remove: refs, + } + _, err := methods.ModifyListView(ctx, v.Client(), &req) + return err +} + +func (v ListView) Reset(ctx context.Context, refs []types.ManagedObjectReference) error { + req := types.ResetListView{ + This: v.Reference(), + Obj: refs, + } + _, err := methods.ResetListView(ctx, v.Client(), &req) + return err +} diff --git a/vendor/github.com/vmware/govmomi/view/managed_object_view.go b/vendor/github.com/vmware/govmomi/view/managed_object_view.go new file mode 100644 index 000000000..805c86431 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/view/managed_object_view.go @@ -0,0 +1,52 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package view + +import ( + "context" + + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" +) + +type ManagedObjectView struct { + object.Common +} + +func NewManagedObjectView(c *vim25.Client, ref types.ManagedObjectReference) *ManagedObjectView { + return &ManagedObjectView{ + Common: object.NewCommon(c, ref), + } +} + +func (v *ManagedObjectView) TraversalSpec() *types.TraversalSpec { + return &types.TraversalSpec{ + Path: "view", + Type: v.Reference().Type, + } +} + +func (v *ManagedObjectView) Destroy(ctx context.Context) error { + req := types.DestroyView{ + This: v.Reference(), + } + + _, err := methods.DestroyView(ctx, v.Client(), &req) + return err +} diff --git a/vendor/github.com/vmware/govmomi/view/manager.go b/vendor/github.com/vmware/govmomi/view/manager.go new file mode 100644 index 000000000..d44def0cd --- /dev/null +++ b/vendor/github.com/vmware/govmomi/view/manager.go @@ -0,0 +1,69 @@ +/* +Copyright (c) 2015 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package view + +import ( + "context" + + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" +) + +type Manager struct { + object.Common +} + +func NewManager(c *vim25.Client) *Manager { + m := Manager{ + object.NewCommon(c, *c.ServiceContent.ViewManager), + } + + return &m +} + +func (m Manager) CreateListView(ctx context.Context, objects []types.ManagedObjectReference) (*ListView, error) { + req := types.CreateListView{ + This: m.Common.Reference(), + Obj: objects, + } + + res, err := methods.CreateListView(ctx, m.Client(), &req) + if err != nil { + return nil, err + } + + return NewListView(m.Client(), res.Returnval), nil +} + +func (m Manager) CreateContainerView(ctx context.Context, container types.ManagedObjectReference, managedObjectTypes []string, recursive bool) (*ContainerView, error) { + + req := types.CreateContainerView{ + This: m.Common.Reference(), + Container: container, + Recursive: recursive, + Type: managedObjectTypes, + } + + res, err := methods.CreateContainerView(ctx, m.Client(), &req) + if err != nil { + return nil, err + } + + return NewContainerView(m.Client(), res.Returnval), nil +} diff --git a/vendor/github.com/vmware/govmomi/view/task_view.go b/vendor/github.com/vmware/govmomi/view/task_view.go new file mode 100644 index 000000000..68f62f8d1 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/view/task_view.go @@ -0,0 +1,125 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package view + +import ( + "context" + + "github.com/vmware/govmomi/property" + "github.com/vmware/govmomi/vim25/types" +) + +// TaskView extends ListView such that it can follow a ManagedEntity's recentTask updates. +type TaskView struct { + *ListView + + Follow bool + + Watch *types.ManagedObjectReference +} + +// CreateTaskView creates a new ListView that optionally watches for a ManagedEntity's recentTask updates. +func (m Manager) CreateTaskView(ctx context.Context, watch *types.ManagedObjectReference) (*TaskView, error) { + l, err := m.CreateListView(ctx, nil) + if err != nil { + return nil, err + } + + tv := &TaskView{ + ListView: l, + Watch: watch, + } + + return tv, nil +} + +// Collect calls function f for each Task update. +func (v TaskView) Collect(ctx context.Context, f func([]types.TaskInfo)) error { + // Using TaskHistoryCollector would be less clunky, but it isn't supported on ESX at all. 
+ ref := v.Reference() + filter := new(property.WaitFilter).Add(ref, "Task", []string{"info"}, v.TraversalSpec()) + + if v.Watch != nil { + filter.Add(*v.Watch, v.Watch.Type, []string{"recentTask"}) + } + + pc := property.DefaultCollector(v.Client()) + + completed := make(map[string]bool) + + return property.WaitForUpdates(ctx, pc, filter, func(updates []types.ObjectUpdate) bool { + var infos []types.TaskInfo + var prune []types.ManagedObjectReference + var tasks []types.ManagedObjectReference + var reset func() + + for _, update := range updates { + for _, change := range update.ChangeSet { + if change.Name == "recentTask" { + tasks = change.Val.(types.ArrayOfManagedObjectReference).ManagedObjectReference + if len(tasks) != 0 { + reset = func() { + _ = v.Reset(ctx, tasks) + + // Remember any tasks we've reported as complete already, + // to avoid reporting multiple times when Reset is triggered. + rtasks := make(map[string]bool) + for i := range tasks { + if _, ok := completed[tasks[i].Value]; ok { + rtasks[tasks[i].Value] = true + } + } + completed = rtasks + } + } + + continue + } + + info, ok := change.Val.(types.TaskInfo) + if !ok { + continue + } + + if !completed[info.Task.Value] { + infos = append(infos, info) + } + + if v.Follow && info.CompleteTime != nil { + prune = append(prune, info.Task) + completed[info.Task.Value] = true + } + } + } + + if len(infos) != 0 { + f(infos) + } + + if reset != nil { + reset() + } else if len(prune) != 0 { + _ = v.Remove(ctx, prune) + } + + if len(tasks) != 0 && len(infos) == 0 { + return false + } + + return !v.Follow + }) +} diff --git a/vendor/github.com/vmware/govmomi/vim25/client.go b/vendor/github.com/vmware/govmomi/vim25/client.go index 1d26fb8b6..b14cea852 100644 --- a/vendor/github.com/vmware/govmomi/vim25/client.go +++ b/vendor/github.com/vmware/govmomi/vim25/client.go @@ -19,16 +19,20 @@ package vim25 import ( "context" "encoding/json" + "fmt" + "net/http" + "path" "strings" "github.com/vmware/govmomi/vim25/methods" "github.com/vmware/govmomi/vim25/soap" "github.com/vmware/govmomi/vim25/types" + "github.com/vmware/govmomi/vim25/xml" ) const ( Namespace = "vim25" - Version = "6.7" + Version = "7.0" Path = "/sdk" ) @@ -54,7 +58,7 @@ type Client struct { RoundTripper soap.RoundTripper } -// NewClient creates and returns a new client wirh the ServiceContent field +// NewClient creates and returns a new client with the ServiceContent field // filled in. 
func NewClient(ctx context.Context, rt soap.RoundTripper) (*Client, error) { c := Client{ @@ -67,7 +71,7 @@ func NewClient(ctx context.Context, rt soap.RoundTripper) (*Client, error) { if c.Namespace == "" { c.Namespace = "urn:" + Namespace - } else if strings.Index(c.Namespace, ":") < 0 { + } else if !strings.Contains(c.Namespace, ":") { c.Namespace = "urn:" + c.Namespace // ensure valid URI format } if c.Version == "" { @@ -84,6 +88,42 @@ func NewClient(ctx context.Context, rt soap.RoundTripper) (*Client, error) { return &c, nil } +// UseServiceVersion sets soap.Client.Version to the current version of the service endpoint via /sdk/vimServiceVersions.xml +func (c *Client) UseServiceVersion(kind ...string) error { + ns := "vim" + if len(kind) != 0 { + ns = kind[0] + } + + u := c.URL() + u.Path = path.Join(Path, ns+"ServiceVersions.xml") + + res, err := c.Get(u.String()) + if err != nil { + return err + } + + if res.StatusCode != http.StatusOK { + return fmt.Errorf("http.Get(%s): %s", u.Path, err) + } + + v := struct { + Namespace *string `xml:"namespace>name"` + Version *string `xml:"namespace>version"` + }{ + &c.Namespace, + &c.Version, + } + + err = xml.NewDecoder(res.Body).Decode(&v) + _ = res.Body.Close() + if err != nil { + return fmt.Errorf("xml.Decode(%s): %s", u.Path, err) + } + + return nil +} + // RoundTrip dispatches to the RoundTripper field. func (c *Client) RoundTrip(ctx context.Context, req, res soap.HasFault) error { return c.RoundTripper.RoundTrip(ctx, req, res) @@ -140,6 +180,11 @@ func (c *Client) Valid() bool { return true } +// Path returns vim25.Path (see cache.Client) +func (c *Client) Path() string { + return Path +} + // IsVC returns true if we are connected to a vCenter func (c *Client) IsVC() bool { return c.ServiceContent.About.ApiType == "VirtualCenter" diff --git a/vendor/github.com/vmware/govmomi/vim25/debug/debug.go b/vendor/github.com/vmware/govmomi/vim25/debug/debug.go index 22d547178..048062825 100644 --- a/vendor/github.com/vmware/govmomi/vim25/debug/debug.go +++ b/vendor/github.com/vmware/govmomi/vim25/debug/debug.go @@ -18,8 +18,7 @@ package debug import ( "io" - "os" - "path" + "regexp" ) // Provider specified the interface types must implement to be used as a @@ -30,7 +29,22 @@ type Provider interface { Flush() } +// ReadCloser is a struct that satisfies the io.ReadCloser interface +type ReadCloser struct { + io.Reader + io.Closer +} + +// NewTeeReader wraps io.TeeReader and patches through the Close() function. +func NewTeeReader(rc io.ReadCloser, w io.Writer) io.ReadCloser { + return ReadCloser{ + Reader: io.TeeReader(rc, w), + Closer: rc, + } +} + var currentProvider Provider = nil +var scrubPassword = regexp.MustCompile(`(.*)`) func SetProvider(p Provider) { if currentProvider != nil { @@ -54,28 +68,6 @@ func Flush() { currentProvider.Flush() } -// FileProvider implements a debugging provider that creates a real file for -// every call to NewFile. It maintains a list of all files that it creates, -// such that it can close them when its Flush function is called. 
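The vim25 client changes above raise the default Version to "7.0" and add UseServiceVersion, which fetches the ServiceVersions.xml document for the given namespace kind (vimServiceVersions.xml by default) and overwrites the client's Namespace and Version with the advertised values. A connection sketch with the optional negotiation step; the URL is a placeholder and the fallback behaviour is a choice of this example, not of the library.

    import (
        "context"
        "fmt"
        "log"

        "github.com/vmware/govmomi/vim25"
        "github.com/vmware/govmomi/vim25/soap"
    )

    func connectSketch(ctx context.Context) (*vim25.Client, error) {
        u, err := soap.ParseURL("https://vcenter.example.com/sdk") // placeholder
        if err != nil {
            return nil, err
        }

        vc, err := vim25.NewClient(ctx, soap.NewClient(u, true))
        if err != nil {
            return nil, err
        }

        // Prefer the namespace/version the endpoint advertises; keep the built-in
        // default if the versions document is unavailable.
        if err := vc.UseServiceVersion(); err != nil {
            log.Printf("using default version %s: %v", vc.Version, err)
        }

        fmt.Println("namespace:", vc.Namespace, "version:", vc.Version, "vCenter:", vc.IsVC())
        return vc, nil
    }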
-type FileProvider struct { - Path string - - files []*os.File -} - -func (fp *FileProvider) NewFile(p string) io.WriteCloser { - f, err := os.Create(path.Join(fp.Path, p)) - if err != nil { - panic(err) - } - - fp.files = append(fp.files, f) - - return f -} - -func (fp *FileProvider) Flush() { - for _, f := range fp.files { - f.Close() - } +func Scrub(in []byte) []byte { + return scrubPassword.ReplaceAll(in, []byte(`********`)) } diff --git a/vendor/github.com/vmware/govmomi/vim25/debug/file.go b/vendor/github.com/vmware/govmomi/vim25/debug/file.go new file mode 100644 index 000000000..4290a2916 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/debug/file.go @@ -0,0 +1,75 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package debug + +import ( + "io" + "os" + "path" + "sync" +) + +// FileProvider implements a debugging provider that creates a real file for +// every call to NewFile. It maintains a list of all files that it creates, +// such that it can close them when its Flush function is called. +type FileProvider struct { + Path string + + mu sync.Mutex + files []*os.File +} + +func (fp *FileProvider) NewFile(p string) io.WriteCloser { + f, err := os.Create(path.Join(fp.Path, p)) + if err != nil { + panic(err) + } + + fp.mu.Lock() + defer fp.mu.Unlock() + fp.files = append(fp.files, f) + + return NewFileWriterCloser(f, p) +} + +func (fp *FileProvider) Flush() { + fp.mu.Lock() + defer fp.mu.Unlock() + for _, f := range fp.files { + f.Close() + } +} + +type FileWriterCloser struct { + f *os.File + p string +} + +func NewFileWriterCloser(f *os.File, p string) *FileWriterCloser { + return &FileWriterCloser{ + f, + p, + } +} + +func (fwc *FileWriterCloser) Write(p []byte) (n int, err error) { + return fwc.f.Write(Scrub(p)) +} + +func (fwc *FileWriterCloser) Close() error { + return fwc.f.Close() +} diff --git a/vendor/github.com/vmware/govmomi/vim25/debug/log.go b/vendor/github.com/vmware/govmomi/vim25/debug/log.go new file mode 100644 index 000000000..183c606a3 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/debug/log.go @@ -0,0 +1,49 @@ +/* +Copyright (c) 2014 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
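debug/file.go above reintroduces FileProvider with a mutex guarding the file list, and wraps every trace file in a FileWriterCloser so output passes through Scrub, which masks password elements before they reach disk; log.go (next) does the same for stderr output. A small sketch of enabling file-based tracing; the directory name is a placeholder and is assumed to exist, since FileProvider panics when os.Create fails, and the provider is typically installed before any SOAP client is constructed so tracing is picked up.

    import (
        "github.com/vmware/govmomi/vim25/debug"
    )

    func withTracing(run func() error) error {
        // Install the provider before creating SOAP/REST clients; trace output
        // is scrubbed of passwords by debug.Scrub on every write.
        debug.SetProvider(&debug.FileProvider{Path: "./govmomi-trace"}) // directory must exist

        defer debug.Flush() // close every trace file the provider opened
        return run()
    }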
+*/ + +package debug + +import ( + "fmt" + "io" + "os" +) + +type LogWriterCloser struct { +} + +func NewLogWriterCloser() *LogWriterCloser { + return &LogWriterCloser{} +} + +func (lwc *LogWriterCloser) Write(p []byte) (n int, err error) { + fmt.Fprint(os.Stderr, string(Scrub(p))) + return len(p), nil +} + +func (lwc *LogWriterCloser) Close() error { + return nil +} + +type LogProvider struct { +} + +func (s *LogProvider) NewFile(p string) io.WriteCloser { + return NewLogWriterCloser() +} + +func (s *LogProvider) Flush() { +} diff --git a/vendor/github.com/vmware/govmomi/vim25/methods/methods.go b/vendor/github.com/vmware/govmomi/vim25/methods/methods.go index c3ad23eef..7c7635e07 100644 --- a/vendor/github.com/vmware/govmomi/vim25/methods/methods.go +++ b/vendor/github.com/vmware/govmomi/vim25/methods/methods.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2014-2018 VMware, Inc. All Rights Reserved. +Copyright (c) 2014-2021 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -23,9 +23,29 @@ import ( "github.com/vmware/govmomi/vim25/types" ) +type AbandonHciWorkflowBody struct { + Req *types.AbandonHciWorkflow `xml:"urn:vim25 AbandonHciWorkflow,omitempty"` + Res *types.AbandonHciWorkflowResponse `xml:"AbandonHciWorkflowResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AbandonHciWorkflowBody) Fault() *soap.Fault { return b.Fault_ } + +func AbandonHciWorkflow(ctx context.Context, r soap.RoundTripper, req *types.AbandonHciWorkflow) (*types.AbandonHciWorkflowResponse, error) { + var reqBody, resBody AbandonHciWorkflowBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type AbdicateDomOwnershipBody struct { Req *types.AbdicateDomOwnership `xml:"urn:vim25 AbdicateDomOwnership,omitempty"` - Res *types.AbdicateDomOwnershipResponse `xml:"urn:vim25 AbdicateDomOwnershipResponse,omitempty"` + Res *types.AbdicateDomOwnershipResponse `xml:"AbdicateDomOwnershipResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -43,9 +63,29 @@ func AbdicateDomOwnership(ctx context.Context, r soap.RoundTripper, req *types.A return resBody.Res, nil } +type AbortCustomization_TaskBody struct { + Req *types.AbortCustomization_Task `xml:"urn:vim25 AbortCustomization_Task,omitempty"` + Res *types.AbortCustomization_TaskResponse `xml:"AbortCustomization_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *AbortCustomization_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func AbortCustomization_Task(ctx context.Context, r soap.RoundTripper, req *types.AbortCustomization_Task) (*types.AbortCustomization_TaskResponse, error) { + var reqBody, resBody AbortCustomization_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type AcknowledgeAlarmBody struct { Req *types.AcknowledgeAlarm `xml:"urn:vim25 AcknowledgeAlarm,omitempty"` - Res *types.AcknowledgeAlarmResponse `xml:"urn:vim25 AcknowledgeAlarmResponse,omitempty"` + Res *types.AcknowledgeAlarmResponse `xml:"AcknowledgeAlarmResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -65,7 +105,7 @@ func AcknowledgeAlarm(ctx 
context.Context, r soap.RoundTripper, req *types.Ackno type AcquireCimServicesTicketBody struct { Req *types.AcquireCimServicesTicket `xml:"urn:vim25 AcquireCimServicesTicket,omitempty"` - Res *types.AcquireCimServicesTicketResponse `xml:"urn:vim25 AcquireCimServicesTicketResponse,omitempty"` + Res *types.AcquireCimServicesTicketResponse `xml:"AcquireCimServicesTicketResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -85,7 +125,7 @@ func AcquireCimServicesTicket(ctx context.Context, r soap.RoundTripper, req *typ type AcquireCloneTicketBody struct { Req *types.AcquireCloneTicket `xml:"urn:vim25 AcquireCloneTicket,omitempty"` - Res *types.AcquireCloneTicketResponse `xml:"urn:vim25 AcquireCloneTicketResponse,omitempty"` + Res *types.AcquireCloneTicketResponse `xml:"AcquireCloneTicketResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -105,7 +145,7 @@ func AcquireCloneTicket(ctx context.Context, r soap.RoundTripper, req *types.Acq type AcquireCredentialsInGuestBody struct { Req *types.AcquireCredentialsInGuest `xml:"urn:vim25 AcquireCredentialsInGuest,omitempty"` - Res *types.AcquireCredentialsInGuestResponse `xml:"urn:vim25 AcquireCredentialsInGuestResponse,omitempty"` + Res *types.AcquireCredentialsInGuestResponse `xml:"AcquireCredentialsInGuestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -125,7 +165,7 @@ func AcquireCredentialsInGuest(ctx context.Context, r soap.RoundTripper, req *ty type AcquireGenericServiceTicketBody struct { Req *types.AcquireGenericServiceTicket `xml:"urn:vim25 AcquireGenericServiceTicket,omitempty"` - Res *types.AcquireGenericServiceTicketResponse `xml:"urn:vim25 AcquireGenericServiceTicketResponse,omitempty"` + Res *types.AcquireGenericServiceTicketResponse `xml:"AcquireGenericServiceTicketResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -145,7 +185,7 @@ func AcquireGenericServiceTicket(ctx context.Context, r soap.RoundTripper, req * type AcquireLocalTicketBody struct { Req *types.AcquireLocalTicket `xml:"urn:vim25 AcquireLocalTicket,omitempty"` - Res *types.AcquireLocalTicketResponse `xml:"urn:vim25 AcquireLocalTicketResponse,omitempty"` + Res *types.AcquireLocalTicketResponse `xml:"AcquireLocalTicketResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -165,7 +205,7 @@ func AcquireLocalTicket(ctx context.Context, r soap.RoundTripper, req *types.Acq type AcquireMksTicketBody struct { Req *types.AcquireMksTicket `xml:"urn:vim25 AcquireMksTicket,omitempty"` - Res *types.AcquireMksTicketResponse `xml:"urn:vim25 AcquireMksTicketResponse,omitempty"` + Res *types.AcquireMksTicketResponse `xml:"AcquireMksTicketResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -185,7 +225,7 @@ func AcquireMksTicket(ctx context.Context, r soap.RoundTripper, req *types.Acqui type AcquireTicketBody struct { Req *types.AcquireTicket `xml:"urn:vim25 AcquireTicket,omitempty"` - Res *types.AcquireTicketResponse `xml:"urn:vim25 AcquireTicketResponse,omitempty"` + Res *types.AcquireTicketResponse `xml:"AcquireTicketResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -205,7 +245,7 @@ func AcquireTicket(ctx context.Context, r soap.RoundTripper, req *types.AcquireT type 
AddAuthorizationRoleBody struct { Req *types.AddAuthorizationRole `xml:"urn:vim25 AddAuthorizationRole,omitempty"` - Res *types.AddAuthorizationRoleResponse `xml:"urn:vim25 AddAuthorizationRoleResponse,omitempty"` + Res *types.AddAuthorizationRoleResponse `xml:"AddAuthorizationRoleResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -225,7 +265,7 @@ func AddAuthorizationRole(ctx context.Context, r soap.RoundTripper, req *types.A type AddCustomFieldDefBody struct { Req *types.AddCustomFieldDef `xml:"urn:vim25 AddCustomFieldDef,omitempty"` - Res *types.AddCustomFieldDefResponse `xml:"urn:vim25 AddCustomFieldDefResponse,omitempty"` + Res *types.AddCustomFieldDefResponse `xml:"AddCustomFieldDefResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -245,7 +285,7 @@ func AddCustomFieldDef(ctx context.Context, r soap.RoundTripper, req *types.AddC type AddDVPortgroup_TaskBody struct { Req *types.AddDVPortgroup_Task `xml:"urn:vim25 AddDVPortgroup_Task,omitempty"` - Res *types.AddDVPortgroup_TaskResponse `xml:"urn:vim25 AddDVPortgroup_TaskResponse,omitempty"` + Res *types.AddDVPortgroup_TaskResponse `xml:"AddDVPortgroup_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -265,7 +305,7 @@ func AddDVPortgroup_Task(ctx context.Context, r soap.RoundTripper, req *types.Ad type AddDisks_TaskBody struct { Req *types.AddDisks_Task `xml:"urn:vim25 AddDisks_Task,omitempty"` - Res *types.AddDisks_TaskResponse `xml:"urn:vim25 AddDisks_TaskResponse,omitempty"` + Res *types.AddDisks_TaskResponse `xml:"AddDisks_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -285,7 +325,7 @@ func AddDisks_Task(ctx context.Context, r soap.RoundTripper, req *types.AddDisks type AddFilterBody struct { Req *types.AddFilter `xml:"urn:vim25 AddFilter,omitempty"` - Res *types.AddFilterResponse `xml:"urn:vim25 AddFilterResponse,omitempty"` + Res *types.AddFilterResponse `xml:"AddFilterResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -305,7 +345,7 @@ func AddFilter(ctx context.Context, r soap.RoundTripper, req *types.AddFilter) ( type AddFilterEntitiesBody struct { Req *types.AddFilterEntities `xml:"urn:vim25 AddFilterEntities,omitempty"` - Res *types.AddFilterEntitiesResponse `xml:"urn:vim25 AddFilterEntitiesResponse,omitempty"` + Res *types.AddFilterEntitiesResponse `xml:"AddFilterEntitiesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -325,7 +365,7 @@ func AddFilterEntities(ctx context.Context, r soap.RoundTripper, req *types.AddF type AddGuestAliasBody struct { Req *types.AddGuestAlias `xml:"urn:vim25 AddGuestAlias,omitempty"` - Res *types.AddGuestAliasResponse `xml:"urn:vim25 AddGuestAliasResponse,omitempty"` + Res *types.AddGuestAliasResponse `xml:"AddGuestAliasResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -345,7 +385,7 @@ func AddGuestAlias(ctx context.Context, r soap.RoundTripper, req *types.AddGuest type AddHost_TaskBody struct { Req *types.AddHost_Task `xml:"urn:vim25 AddHost_Task,omitempty"` - Res *types.AddHost_TaskResponse `xml:"urn:vim25 AddHost_TaskResponse,omitempty"` + Res *types.AddHost_TaskResponse `xml:"AddHost_TaskResponse,omitempty"` Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -365,7 +405,7 @@ func AddHost_Task(ctx context.Context, r soap.RoundTripper, req *types.AddHost_T type AddInternetScsiSendTargetsBody struct { Req *types.AddInternetScsiSendTargets `xml:"urn:vim25 AddInternetScsiSendTargets,omitempty"` - Res *types.AddInternetScsiSendTargetsResponse `xml:"urn:vim25 AddInternetScsiSendTargetsResponse,omitempty"` + Res *types.AddInternetScsiSendTargetsResponse `xml:"AddInternetScsiSendTargetsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -385,7 +425,7 @@ func AddInternetScsiSendTargets(ctx context.Context, r soap.RoundTripper, req *t type AddInternetScsiStaticTargetsBody struct { Req *types.AddInternetScsiStaticTargets `xml:"urn:vim25 AddInternetScsiStaticTargets,omitempty"` - Res *types.AddInternetScsiStaticTargetsResponse `xml:"urn:vim25 AddInternetScsiStaticTargetsResponse,omitempty"` + Res *types.AddInternetScsiStaticTargetsResponse `xml:"AddInternetScsiStaticTargetsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -405,7 +445,7 @@ func AddInternetScsiStaticTargets(ctx context.Context, r soap.RoundTripper, req type AddKeyBody struct { Req *types.AddKey `xml:"urn:vim25 AddKey,omitempty"` - Res *types.AddKeyResponse `xml:"urn:vim25 AddKeyResponse,omitempty"` + Res *types.AddKeyResponse `xml:"AddKeyResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -425,7 +465,7 @@ func AddKey(ctx context.Context, r soap.RoundTripper, req *types.AddKey) (*types type AddKeysBody struct { Req *types.AddKeys `xml:"urn:vim25 AddKeys,omitempty"` - Res *types.AddKeysResponse `xml:"urn:vim25 AddKeysResponse,omitempty"` + Res *types.AddKeysResponse `xml:"AddKeysResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -445,7 +485,7 @@ func AddKeys(ctx context.Context, r soap.RoundTripper, req *types.AddKeys) (*typ type AddLicenseBody struct { Req *types.AddLicense `xml:"urn:vim25 AddLicense,omitempty"` - Res *types.AddLicenseResponse `xml:"urn:vim25 AddLicenseResponse,omitempty"` + Res *types.AddLicenseResponse `xml:"AddLicenseResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -465,7 +505,7 @@ func AddLicense(ctx context.Context, r soap.RoundTripper, req *types.AddLicense) type AddMonitoredEntitiesBody struct { Req *types.AddMonitoredEntities `xml:"urn:vim25 AddMonitoredEntities,omitempty"` - Res *types.AddMonitoredEntitiesResponse `xml:"urn:vim25 AddMonitoredEntitiesResponse,omitempty"` + Res *types.AddMonitoredEntitiesResponse `xml:"AddMonitoredEntitiesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -485,7 +525,7 @@ func AddMonitoredEntities(ctx context.Context, r soap.RoundTripper, req *types.A type AddNetworkResourcePoolBody struct { Req *types.AddNetworkResourcePool `xml:"urn:vim25 AddNetworkResourcePool,omitempty"` - Res *types.AddNetworkResourcePoolResponse `xml:"urn:vim25 AddNetworkResourcePoolResponse,omitempty"` + Res *types.AddNetworkResourcePoolResponse `xml:"AddNetworkResourcePoolResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -505,7 +545,7 @@ func AddNetworkResourcePool(ctx context.Context, r soap.RoundTripper, req *types type AddPortGroupBody struct { Req *types.AddPortGroup 
`xml:"urn:vim25 AddPortGroup,omitempty"` - Res *types.AddPortGroupResponse `xml:"urn:vim25 AddPortGroupResponse,omitempty"` + Res *types.AddPortGroupResponse `xml:"AddPortGroupResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -525,7 +565,7 @@ func AddPortGroup(ctx context.Context, r soap.RoundTripper, req *types.AddPortGr type AddServiceConsoleVirtualNicBody struct { Req *types.AddServiceConsoleVirtualNic `xml:"urn:vim25 AddServiceConsoleVirtualNic,omitempty"` - Res *types.AddServiceConsoleVirtualNicResponse `xml:"urn:vim25 AddServiceConsoleVirtualNicResponse,omitempty"` + Res *types.AddServiceConsoleVirtualNicResponse `xml:"AddServiceConsoleVirtualNicResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -545,7 +585,7 @@ func AddServiceConsoleVirtualNic(ctx context.Context, r soap.RoundTripper, req * type AddStandaloneHost_TaskBody struct { Req *types.AddStandaloneHost_Task `xml:"urn:vim25 AddStandaloneHost_Task,omitempty"` - Res *types.AddStandaloneHost_TaskResponse `xml:"urn:vim25 AddStandaloneHost_TaskResponse,omitempty"` + Res *types.AddStandaloneHost_TaskResponse `xml:"AddStandaloneHost_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -565,7 +605,7 @@ func AddStandaloneHost_Task(ctx context.Context, r soap.RoundTripper, req *types type AddVirtualNicBody struct { Req *types.AddVirtualNic `xml:"urn:vim25 AddVirtualNic,omitempty"` - Res *types.AddVirtualNicResponse `xml:"urn:vim25 AddVirtualNicResponse,omitempty"` + Res *types.AddVirtualNicResponse `xml:"AddVirtualNicResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -585,7 +625,7 @@ func AddVirtualNic(ctx context.Context, r soap.RoundTripper, req *types.AddVirtu type AddVirtualSwitchBody struct { Req *types.AddVirtualSwitch `xml:"urn:vim25 AddVirtualSwitch,omitempty"` - Res *types.AddVirtualSwitchResponse `xml:"urn:vim25 AddVirtualSwitchResponse,omitempty"` + Res *types.AddVirtualSwitchResponse `xml:"AddVirtualSwitchResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -605,7 +645,7 @@ func AddVirtualSwitch(ctx context.Context, r soap.RoundTripper, req *types.AddVi type AllocateIpv4AddressBody struct { Req *types.AllocateIpv4Address `xml:"urn:vim25 AllocateIpv4Address,omitempty"` - Res *types.AllocateIpv4AddressResponse `xml:"urn:vim25 AllocateIpv4AddressResponse,omitempty"` + Res *types.AllocateIpv4AddressResponse `xml:"AllocateIpv4AddressResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -625,7 +665,7 @@ func AllocateIpv4Address(ctx context.Context, r soap.RoundTripper, req *types.Al type AllocateIpv6AddressBody struct { Req *types.AllocateIpv6Address `xml:"urn:vim25 AllocateIpv6Address,omitempty"` - Res *types.AllocateIpv6AddressResponse `xml:"urn:vim25 AllocateIpv6AddressResponse,omitempty"` + Res *types.AllocateIpv6AddressResponse `xml:"AllocateIpv6AddressResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -645,7 +685,7 @@ func AllocateIpv6Address(ctx context.Context, r soap.RoundTripper, req *types.Al type AnswerVMBody struct { Req *types.AnswerVM `xml:"urn:vim25 AnswerVM,omitempty"` - Res *types.AnswerVMResponse `xml:"urn:vim25 AnswerVMResponse,omitempty"` + Res *types.AnswerVMResponse 
`xml:"AnswerVMResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -665,7 +705,7 @@ func AnswerVM(ctx context.Context, r soap.RoundTripper, req *types.AnswerVM) (*t type ApplyEntitiesConfig_TaskBody struct { Req *types.ApplyEntitiesConfig_Task `xml:"urn:vim25 ApplyEntitiesConfig_Task,omitempty"` - Res *types.ApplyEntitiesConfig_TaskResponse `xml:"urn:vim25 ApplyEntitiesConfig_TaskResponse,omitempty"` + Res *types.ApplyEntitiesConfig_TaskResponse `xml:"ApplyEntitiesConfig_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -685,7 +725,7 @@ func ApplyEntitiesConfig_Task(ctx context.Context, r soap.RoundTripper, req *typ type ApplyEvcModeVM_TaskBody struct { Req *types.ApplyEvcModeVM_Task `xml:"urn:vim25 ApplyEvcModeVM_Task,omitempty"` - Res *types.ApplyEvcModeVM_TaskResponse `xml:"urn:vim25 ApplyEvcModeVM_TaskResponse,omitempty"` + Res *types.ApplyEvcModeVM_TaskResponse `xml:"ApplyEvcModeVM_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -705,7 +745,7 @@ func ApplyEvcModeVM_Task(ctx context.Context, r soap.RoundTripper, req *types.Ap type ApplyHostConfig_TaskBody struct { Req *types.ApplyHostConfig_Task `xml:"urn:vim25 ApplyHostConfig_Task,omitempty"` - Res *types.ApplyHostConfig_TaskResponse `xml:"urn:vim25 ApplyHostConfig_TaskResponse,omitempty"` + Res *types.ApplyHostConfig_TaskResponse `xml:"ApplyHostConfig_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -725,7 +765,7 @@ func ApplyHostConfig_Task(ctx context.Context, r soap.RoundTripper, req *types.A type ApplyRecommendationBody struct { Req *types.ApplyRecommendation `xml:"urn:vim25 ApplyRecommendation,omitempty"` - Res *types.ApplyRecommendationResponse `xml:"urn:vim25 ApplyRecommendationResponse,omitempty"` + Res *types.ApplyRecommendationResponse `xml:"ApplyRecommendationResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -745,7 +785,7 @@ func ApplyRecommendation(ctx context.Context, r soap.RoundTripper, req *types.Ap type ApplyStorageDrsRecommendationToPod_TaskBody struct { Req *types.ApplyStorageDrsRecommendationToPod_Task `xml:"urn:vim25 ApplyStorageDrsRecommendationToPod_Task,omitempty"` - Res *types.ApplyStorageDrsRecommendationToPod_TaskResponse `xml:"urn:vim25 ApplyStorageDrsRecommendationToPod_TaskResponse,omitempty"` + Res *types.ApplyStorageDrsRecommendationToPod_TaskResponse `xml:"ApplyStorageDrsRecommendationToPod_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -765,7 +805,7 @@ func ApplyStorageDrsRecommendationToPod_Task(ctx context.Context, r soap.RoundTr type ApplyStorageDrsRecommendation_TaskBody struct { Req *types.ApplyStorageDrsRecommendation_Task `xml:"urn:vim25 ApplyStorageDrsRecommendation_Task,omitempty"` - Res *types.ApplyStorageDrsRecommendation_TaskResponse `xml:"urn:vim25 ApplyStorageDrsRecommendation_TaskResponse,omitempty"` + Res *types.ApplyStorageDrsRecommendation_TaskResponse `xml:"ApplyStorageDrsRecommendation_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -785,7 +825,7 @@ func ApplyStorageDrsRecommendation_Task(ctx context.Context, r soap.RoundTripper type AreAlarmActionsEnabledBody struct { Req *types.AreAlarmActionsEnabled `xml:"urn:vim25 
AreAlarmActionsEnabled,omitempty"` - Res *types.AreAlarmActionsEnabledResponse `xml:"urn:vim25 AreAlarmActionsEnabledResponse,omitempty"` + Res *types.AreAlarmActionsEnabledResponse `xml:"AreAlarmActionsEnabledResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -805,7 +845,7 @@ func AreAlarmActionsEnabled(ctx context.Context, r soap.RoundTripper, req *types type AssignUserToGroupBody struct { Req *types.AssignUserToGroup `xml:"urn:vim25 AssignUserToGroup,omitempty"` - Res *types.AssignUserToGroupResponse `xml:"urn:vim25 AssignUserToGroupResponse,omitempty"` + Res *types.AssignUserToGroupResponse `xml:"AssignUserToGroupResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -825,7 +865,7 @@ func AssignUserToGroup(ctx context.Context, r soap.RoundTripper, req *types.Assi type AssociateProfileBody struct { Req *types.AssociateProfile `xml:"urn:vim25 AssociateProfile,omitempty"` - Res *types.AssociateProfileResponse `xml:"urn:vim25 AssociateProfileResponse,omitempty"` + Res *types.AssociateProfileResponse `xml:"AssociateProfileResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -845,7 +885,7 @@ func AssociateProfile(ctx context.Context, r soap.RoundTripper, req *types.Assoc type AttachDisk_TaskBody struct { Req *types.AttachDisk_Task `xml:"urn:vim25 AttachDisk_Task,omitempty"` - Res *types.AttachDisk_TaskResponse `xml:"urn:vim25 AttachDisk_TaskResponse,omitempty"` + Res *types.AttachDisk_TaskResponse `xml:"AttachDisk_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -865,7 +905,7 @@ func AttachDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.Attach type AttachScsiLunBody struct { Req *types.AttachScsiLun `xml:"urn:vim25 AttachScsiLun,omitempty"` - Res *types.AttachScsiLunResponse `xml:"urn:vim25 AttachScsiLunResponse,omitempty"` + Res *types.AttachScsiLunResponse `xml:"AttachScsiLunResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -885,7 +925,7 @@ func AttachScsiLun(ctx context.Context, r soap.RoundTripper, req *types.AttachSc type AttachScsiLunEx_TaskBody struct { Req *types.AttachScsiLunEx_Task `xml:"urn:vim25 AttachScsiLunEx_Task,omitempty"` - Res *types.AttachScsiLunEx_TaskResponse `xml:"urn:vim25 AttachScsiLunEx_TaskResponse,omitempty"` + Res *types.AttachScsiLunEx_TaskResponse `xml:"AttachScsiLunEx_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -905,7 +945,7 @@ func AttachScsiLunEx_Task(ctx context.Context, r soap.RoundTripper, req *types.A type AttachTagToVStorageObjectBody struct { Req *types.AttachTagToVStorageObject `xml:"urn:vim25 AttachTagToVStorageObject,omitempty"` - Res *types.AttachTagToVStorageObjectResponse `xml:"urn:vim25 AttachTagToVStorageObjectResponse,omitempty"` + Res *types.AttachTagToVStorageObjectResponse `xml:"AttachTagToVStorageObjectResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -925,7 +965,7 @@ func AttachTagToVStorageObject(ctx context.Context, r soap.RoundTripper, req *ty type AttachVmfsExtentBody struct { Req *types.AttachVmfsExtent `xml:"urn:vim25 AttachVmfsExtent,omitempty"` - Res *types.AttachVmfsExtentResponse `xml:"urn:vim25 AttachVmfsExtentResponse,omitempty"` + Res *types.AttachVmfsExtentResponse 
`xml:"AttachVmfsExtentResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -945,7 +985,7 @@ func AttachVmfsExtent(ctx context.Context, r soap.RoundTripper, req *types.Attac type AutoStartPowerOffBody struct { Req *types.AutoStartPowerOff `xml:"urn:vim25 AutoStartPowerOff,omitempty"` - Res *types.AutoStartPowerOffResponse `xml:"urn:vim25 AutoStartPowerOffResponse,omitempty"` + Res *types.AutoStartPowerOffResponse `xml:"AutoStartPowerOffResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -965,7 +1005,7 @@ func AutoStartPowerOff(ctx context.Context, r soap.RoundTripper, req *types.Auto type AutoStartPowerOnBody struct { Req *types.AutoStartPowerOn `xml:"urn:vim25 AutoStartPowerOn,omitempty"` - Res *types.AutoStartPowerOnResponse `xml:"urn:vim25 AutoStartPowerOnResponse,omitempty"` + Res *types.AutoStartPowerOnResponse `xml:"AutoStartPowerOnResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -985,7 +1025,7 @@ func AutoStartPowerOn(ctx context.Context, r soap.RoundTripper, req *types.AutoS type BackupFirmwareConfigurationBody struct { Req *types.BackupFirmwareConfiguration `xml:"urn:vim25 BackupFirmwareConfiguration,omitempty"` - Res *types.BackupFirmwareConfigurationResponse `xml:"urn:vim25 BackupFirmwareConfigurationResponse,omitempty"` + Res *types.BackupFirmwareConfigurationResponse `xml:"BackupFirmwareConfigurationResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1003,9 +1043,69 @@ func BackupFirmwareConfiguration(ctx context.Context, r soap.RoundTripper, req * return resBody.Res, nil } +type BatchAddHostsToCluster_TaskBody struct { + Req *types.BatchAddHostsToCluster_Task `xml:"urn:vim25 BatchAddHostsToCluster_Task,omitempty"` + Res *types.BatchAddHostsToCluster_TaskResponse `xml:"BatchAddHostsToCluster_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *BatchAddHostsToCluster_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func BatchAddHostsToCluster_Task(ctx context.Context, r soap.RoundTripper, req *types.BatchAddHostsToCluster_Task) (*types.BatchAddHostsToCluster_TaskResponse, error) { + var reqBody, resBody BatchAddHostsToCluster_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type BatchAddStandaloneHosts_TaskBody struct { + Req *types.BatchAddStandaloneHosts_Task `xml:"urn:vim25 BatchAddStandaloneHosts_Task,omitempty"` + Res *types.BatchAddStandaloneHosts_TaskResponse `xml:"BatchAddStandaloneHosts_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *BatchAddStandaloneHosts_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func BatchAddStandaloneHosts_Task(ctx context.Context, r soap.RoundTripper, req *types.BatchAddStandaloneHosts_Task) (*types.BatchAddStandaloneHosts_TaskResponse, error) { + var reqBody, resBody BatchAddStandaloneHosts_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type BatchQueryConnectInfoBody struct { + Req *types.BatchQueryConnectInfo `xml:"urn:vim25 BatchQueryConnectInfo,omitempty"` + Res *types.BatchQueryConnectInfoResponse 
`xml:"BatchQueryConnectInfoResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *BatchQueryConnectInfoBody) Fault() *soap.Fault { return b.Fault_ } + +func BatchQueryConnectInfo(ctx context.Context, r soap.RoundTripper, req *types.BatchQueryConnectInfo) (*types.BatchQueryConnectInfoResponse, error) { + var reqBody, resBody BatchQueryConnectInfoBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type BindVnicBody struct { Req *types.BindVnic `xml:"urn:vim25 BindVnic,omitempty"` - Res *types.BindVnicResponse `xml:"urn:vim25 BindVnicResponse,omitempty"` + Res *types.BindVnicResponse `xml:"BindVnicResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1025,7 +1125,7 @@ func BindVnic(ctx context.Context, r soap.RoundTripper, req *types.BindVnic) (*t type BrowseDiagnosticLogBody struct { Req *types.BrowseDiagnosticLog `xml:"urn:vim25 BrowseDiagnosticLog,omitempty"` - Res *types.BrowseDiagnosticLogResponse `xml:"urn:vim25 BrowseDiagnosticLogResponse,omitempty"` + Res *types.BrowseDiagnosticLogResponse `xml:"BrowseDiagnosticLogResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1045,7 +1145,7 @@ func BrowseDiagnosticLog(ctx context.Context, r soap.RoundTripper, req *types.Br type CanProvisionObjectsBody struct { Req *types.CanProvisionObjects `xml:"urn:vim25 CanProvisionObjects,omitempty"` - Res *types.CanProvisionObjectsResponse `xml:"urn:vim25 CanProvisionObjectsResponse,omitempty"` + Res *types.CanProvisionObjectsResponse `xml:"CanProvisionObjectsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1065,7 +1165,7 @@ func CanProvisionObjects(ctx context.Context, r soap.RoundTripper, req *types.Ca type CancelRecommendationBody struct { Req *types.CancelRecommendation `xml:"urn:vim25 CancelRecommendation,omitempty"` - Res *types.CancelRecommendationResponse `xml:"urn:vim25 CancelRecommendationResponse,omitempty"` + Res *types.CancelRecommendationResponse `xml:"CancelRecommendationResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1085,7 +1185,7 @@ func CancelRecommendation(ctx context.Context, r soap.RoundTripper, req *types.C type CancelRetrievePropertiesExBody struct { Req *types.CancelRetrievePropertiesEx `xml:"urn:vim25 CancelRetrievePropertiesEx,omitempty"` - Res *types.CancelRetrievePropertiesExResponse `xml:"urn:vim25 CancelRetrievePropertiesExResponse,omitempty"` + Res *types.CancelRetrievePropertiesExResponse `xml:"CancelRetrievePropertiesExResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1105,7 +1205,7 @@ func CancelRetrievePropertiesEx(ctx context.Context, r soap.RoundTripper, req *t type CancelStorageDrsRecommendationBody struct { Req *types.CancelStorageDrsRecommendation `xml:"urn:vim25 CancelStorageDrsRecommendation,omitempty"` - Res *types.CancelStorageDrsRecommendationResponse `xml:"urn:vim25 CancelStorageDrsRecommendationResponse,omitempty"` + Res *types.CancelStorageDrsRecommendationResponse `xml:"CancelStorageDrsRecommendationResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1125,7 +1225,7 @@ func CancelStorageDrsRecommendation(ctx context.Context, r 
soap.RoundTripper, re type CancelTaskBody struct { Req *types.CancelTask `xml:"urn:vim25 CancelTask,omitempty"` - Res *types.CancelTaskResponse `xml:"urn:vim25 CancelTaskResponse,omitempty"` + Res *types.CancelTaskResponse `xml:"CancelTaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1145,7 +1245,7 @@ func CancelTask(ctx context.Context, r soap.RoundTripper, req *types.CancelTask) type CancelWaitForUpdatesBody struct { Req *types.CancelWaitForUpdates `xml:"urn:vim25 CancelWaitForUpdates,omitempty"` - Res *types.CancelWaitForUpdatesResponse `xml:"urn:vim25 CancelWaitForUpdatesResponse,omitempty"` + Res *types.CancelWaitForUpdatesResponse `xml:"CancelWaitForUpdatesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1165,7 +1265,7 @@ func CancelWaitForUpdates(ctx context.Context, r soap.RoundTripper, req *types.C type CertMgrRefreshCACertificatesAndCRLs_TaskBody struct { Req *types.CertMgrRefreshCACertificatesAndCRLs_Task `xml:"urn:vim25 CertMgrRefreshCACertificatesAndCRLs_Task,omitempty"` - Res *types.CertMgrRefreshCACertificatesAndCRLs_TaskResponse `xml:"urn:vim25 CertMgrRefreshCACertificatesAndCRLs_TaskResponse,omitempty"` + Res *types.CertMgrRefreshCACertificatesAndCRLs_TaskResponse `xml:"CertMgrRefreshCACertificatesAndCRLs_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1185,7 +1285,7 @@ func CertMgrRefreshCACertificatesAndCRLs_Task(ctx context.Context, r soap.RoundT type CertMgrRefreshCertificates_TaskBody struct { Req *types.CertMgrRefreshCertificates_Task `xml:"urn:vim25 CertMgrRefreshCertificates_Task,omitempty"` - Res *types.CertMgrRefreshCertificates_TaskResponse `xml:"urn:vim25 CertMgrRefreshCertificates_TaskResponse,omitempty"` + Res *types.CertMgrRefreshCertificates_TaskResponse `xml:"CertMgrRefreshCertificates_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1205,7 +1305,7 @@ func CertMgrRefreshCertificates_Task(ctx context.Context, r soap.RoundTripper, r type CertMgrRevokeCertificates_TaskBody struct { Req *types.CertMgrRevokeCertificates_Task `xml:"urn:vim25 CertMgrRevokeCertificates_Task,omitempty"` - Res *types.CertMgrRevokeCertificates_TaskResponse `xml:"urn:vim25 CertMgrRevokeCertificates_TaskResponse,omitempty"` + Res *types.CertMgrRevokeCertificates_TaskResponse `xml:"CertMgrRevokeCertificates_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1225,7 +1325,7 @@ func CertMgrRevokeCertificates_Task(ctx context.Context, r soap.RoundTripper, re type ChangeAccessModeBody struct { Req *types.ChangeAccessMode `xml:"urn:vim25 ChangeAccessMode,omitempty"` - Res *types.ChangeAccessModeResponse `xml:"urn:vim25 ChangeAccessModeResponse,omitempty"` + Res *types.ChangeAccessModeResponse `xml:"ChangeAccessModeResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1245,7 +1345,7 @@ func ChangeAccessMode(ctx context.Context, r soap.RoundTripper, req *types.Chang type ChangeFileAttributesInGuestBody struct { Req *types.ChangeFileAttributesInGuest `xml:"urn:vim25 ChangeFileAttributesInGuest,omitempty"` - Res *types.ChangeFileAttributesInGuestResponse `xml:"urn:vim25 ChangeFileAttributesInGuestResponse,omitempty"` + Res *types.ChangeFileAttributesInGuestResponse 
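For the plain (non-task) operations, a call through any soap.RoundTripper is a one-liner. A minimal sketch using CancelTask from the hunk above (cancelTask is a hypothetical helper; the request type carries only the target reference in This):

import (
	"context"

	"github.com/vmware/govmomi/vim25/methods"
	"github.com/vmware/govmomi/vim25/soap"
	"github.com/vmware/govmomi/vim25/types"
)

// cancelTask asks the endpoint to cancel the task identified by ref.
func cancelTask(ctx context.Context, rt soap.RoundTripper, ref types.ManagedObjectReference) error {
	_, err := methods.CancelTask(ctx, rt, &types.CancelTask{This: ref})
	return err
}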
`xml:"ChangeFileAttributesInGuestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1265,7 +1365,7 @@ func ChangeFileAttributesInGuest(ctx context.Context, r soap.RoundTripper, req * type ChangeKey_TaskBody struct { Req *types.ChangeKey_Task `xml:"urn:vim25 ChangeKey_Task,omitempty"` - Res *types.ChangeKey_TaskResponse `xml:"urn:vim25 ChangeKey_TaskResponse,omitempty"` + Res *types.ChangeKey_TaskResponse `xml:"ChangeKey_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1285,7 +1385,7 @@ func ChangeKey_Task(ctx context.Context, r soap.RoundTripper, req *types.ChangeK type ChangeLockdownModeBody struct { Req *types.ChangeLockdownMode `xml:"urn:vim25 ChangeLockdownMode,omitempty"` - Res *types.ChangeLockdownModeResponse `xml:"urn:vim25 ChangeLockdownModeResponse,omitempty"` + Res *types.ChangeLockdownModeResponse `xml:"ChangeLockdownModeResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1305,7 +1405,7 @@ func ChangeLockdownMode(ctx context.Context, r soap.RoundTripper, req *types.Cha type ChangeNFSUserPasswordBody struct { Req *types.ChangeNFSUserPassword `xml:"urn:vim25 ChangeNFSUserPassword,omitempty"` - Res *types.ChangeNFSUserPasswordResponse `xml:"urn:vim25 ChangeNFSUserPasswordResponse,omitempty"` + Res *types.ChangeNFSUserPasswordResponse `xml:"ChangeNFSUserPasswordResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1325,7 +1425,7 @@ func ChangeNFSUserPassword(ctx context.Context, r soap.RoundTripper, req *types. type ChangeOwnerBody struct { Req *types.ChangeOwner `xml:"urn:vim25 ChangeOwner,omitempty"` - Res *types.ChangeOwnerResponse `xml:"urn:vim25 ChangeOwnerResponse,omitempty"` + Res *types.ChangeOwnerResponse `xml:"ChangeOwnerResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1343,9 +1443,29 @@ func ChangeOwner(ctx context.Context, r soap.RoundTripper, req *types.ChangeOwne return resBody.Res, nil } +type ChangePasswordBody struct { + Req *types.ChangePassword `xml:"urn:vim25 ChangePassword,omitempty"` + Res *types.ChangePasswordResponse `xml:"ChangePasswordResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ChangePasswordBody) Fault() *soap.Fault { return b.Fault_ } + +func ChangePassword(ctx context.Context, r soap.RoundTripper, req *types.ChangePassword) (*types.ChangePasswordResponse, error) { + var reqBody, resBody ChangePasswordBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type CheckAddHostEvc_TaskBody struct { Req *types.CheckAddHostEvc_Task `xml:"urn:vim25 CheckAddHostEvc_Task,omitempty"` - Res *types.CheckAddHostEvc_TaskResponse `xml:"urn:vim25 CheckAddHostEvc_TaskResponse,omitempty"` + Res *types.CheckAddHostEvc_TaskResponse `xml:"CheckAddHostEvc_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1365,7 +1485,7 @@ func CheckAddHostEvc_Task(ctx context.Context, r soap.RoundTripper, req *types.C type CheckAnswerFileStatus_TaskBody struct { Req *types.CheckAnswerFileStatus_Task `xml:"urn:vim25 CheckAnswerFileStatus_Task,omitempty"` - Res *types.CheckAnswerFileStatus_TaskResponse `xml:"urn:vim25 
CheckAnswerFileStatus_TaskResponse,omitempty"` + Res *types.CheckAnswerFileStatus_TaskResponse `xml:"CheckAnswerFileStatus_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1385,7 +1505,7 @@ func CheckAnswerFileStatus_Task(ctx context.Context, r soap.RoundTripper, req *t type CheckClone_TaskBody struct { Req *types.CheckClone_Task `xml:"urn:vim25 CheckClone_Task,omitempty"` - Res *types.CheckClone_TaskResponse `xml:"urn:vim25 CheckClone_TaskResponse,omitempty"` + Res *types.CheckClone_TaskResponse `xml:"CheckClone_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1405,7 +1525,7 @@ func CheckClone_Task(ctx context.Context, r soap.RoundTripper, req *types.CheckC type CheckCompatibility_TaskBody struct { Req *types.CheckCompatibility_Task `xml:"urn:vim25 CheckCompatibility_Task,omitempty"` - Res *types.CheckCompatibility_TaskResponse `xml:"urn:vim25 CheckCompatibility_TaskResponse,omitempty"` + Res *types.CheckCompatibility_TaskResponse `xml:"CheckCompatibility_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1425,7 +1545,7 @@ func CheckCompatibility_Task(ctx context.Context, r soap.RoundTripper, req *type type CheckCompliance_TaskBody struct { Req *types.CheckCompliance_Task `xml:"urn:vim25 CheckCompliance_Task,omitempty"` - Res *types.CheckCompliance_TaskResponse `xml:"urn:vim25 CheckCompliance_TaskResponse,omitempty"` + Res *types.CheckCompliance_TaskResponse `xml:"CheckCompliance_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1445,7 +1565,7 @@ func CheckCompliance_Task(ctx context.Context, r soap.RoundTripper, req *types.C type CheckConfigureEvcMode_TaskBody struct { Req *types.CheckConfigureEvcMode_Task `xml:"urn:vim25 CheckConfigureEvcMode_Task,omitempty"` - Res *types.CheckConfigureEvcMode_TaskResponse `xml:"urn:vim25 CheckConfigureEvcMode_TaskResponse,omitempty"` + Res *types.CheckConfigureEvcMode_TaskResponse `xml:"CheckConfigureEvcMode_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1465,7 +1585,7 @@ func CheckConfigureEvcMode_Task(ctx context.Context, r soap.RoundTripper, req *t type CheckCustomizationResourcesBody struct { Req *types.CheckCustomizationResources `xml:"urn:vim25 CheckCustomizationResources,omitempty"` - Res *types.CheckCustomizationResourcesResponse `xml:"urn:vim25 CheckCustomizationResourcesResponse,omitempty"` + Res *types.CheckCustomizationResourcesResponse `xml:"CheckCustomizationResourcesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1485,7 +1605,7 @@ func CheckCustomizationResources(ctx context.Context, r soap.RoundTripper, req * type CheckCustomizationSpecBody struct { Req *types.CheckCustomizationSpec `xml:"urn:vim25 CheckCustomizationSpec,omitempty"` - Res *types.CheckCustomizationSpecResponse `xml:"urn:vim25 CheckCustomizationSpecResponse,omitempty"` + Res *types.CheckCustomizationSpecResponse `xml:"CheckCustomizationSpecResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1505,7 +1625,7 @@ func CheckCustomizationSpec(ctx context.Context, r soap.RoundTripper, req *types type CheckForUpdatesBody struct { Req *types.CheckForUpdates `xml:"urn:vim25 CheckForUpdates,omitempty"` - Res 
*types.CheckForUpdatesResponse `xml:"urn:vim25 CheckForUpdatesResponse,omitempty"` + Res *types.CheckForUpdatesResponse `xml:"CheckForUpdatesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1525,7 +1645,7 @@ func CheckForUpdates(ctx context.Context, r soap.RoundTripper, req *types.CheckF type CheckHostPatch_TaskBody struct { Req *types.CheckHostPatch_Task `xml:"urn:vim25 CheckHostPatch_Task,omitempty"` - Res *types.CheckHostPatch_TaskResponse `xml:"urn:vim25 CheckHostPatch_TaskResponse,omitempty"` + Res *types.CheckHostPatch_TaskResponse `xml:"CheckHostPatch_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1545,7 +1665,7 @@ func CheckHostPatch_Task(ctx context.Context, r soap.RoundTripper, req *types.Ch type CheckInstantClone_TaskBody struct { Req *types.CheckInstantClone_Task `xml:"urn:vim25 CheckInstantClone_Task,omitempty"` - Res *types.CheckInstantClone_TaskResponse `xml:"urn:vim25 CheckInstantClone_TaskResponse,omitempty"` + Res *types.CheckInstantClone_TaskResponse `xml:"CheckInstantClone_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1565,7 +1685,7 @@ func CheckInstantClone_Task(ctx context.Context, r soap.RoundTripper, req *types type CheckLicenseFeatureBody struct { Req *types.CheckLicenseFeature `xml:"urn:vim25 CheckLicenseFeature,omitempty"` - Res *types.CheckLicenseFeatureResponse `xml:"urn:vim25 CheckLicenseFeatureResponse,omitempty"` + Res *types.CheckLicenseFeatureResponse `xml:"CheckLicenseFeatureResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1585,7 +1705,7 @@ func CheckLicenseFeature(ctx context.Context, r soap.RoundTripper, req *types.Ch type CheckMigrate_TaskBody struct { Req *types.CheckMigrate_Task `xml:"urn:vim25 CheckMigrate_Task,omitempty"` - Res *types.CheckMigrate_TaskResponse `xml:"urn:vim25 CheckMigrate_TaskResponse,omitempty"` + Res *types.CheckMigrate_TaskResponse `xml:"CheckMigrate_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1605,7 +1725,7 @@ func CheckMigrate_Task(ctx context.Context, r soap.RoundTripper, req *types.Chec type CheckPowerOn_TaskBody struct { Req *types.CheckPowerOn_Task `xml:"urn:vim25 CheckPowerOn_Task,omitempty"` - Res *types.CheckPowerOn_TaskResponse `xml:"urn:vim25 CheckPowerOn_TaskResponse,omitempty"` + Res *types.CheckPowerOn_TaskResponse `xml:"CheckPowerOn_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1625,7 +1745,7 @@ func CheckPowerOn_Task(ctx context.Context, r soap.RoundTripper, req *types.Chec type CheckProfileCompliance_TaskBody struct { Req *types.CheckProfileCompliance_Task `xml:"urn:vim25 CheckProfileCompliance_Task,omitempty"` - Res *types.CheckProfileCompliance_TaskResponse `xml:"urn:vim25 CheckProfileCompliance_TaskResponse,omitempty"` + Res *types.CheckProfileCompliance_TaskResponse `xml:"CheckProfileCompliance_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1645,7 +1765,7 @@ func CheckProfileCompliance_Task(ctx context.Context, r soap.RoundTripper, req * type CheckRelocate_TaskBody struct { Req *types.CheckRelocate_Task `xml:"urn:vim25 CheckRelocate_Task,omitempty"` - Res *types.CheckRelocate_TaskResponse `xml:"urn:vim25 
CheckRelocate_TaskResponse,omitempty"` + Res *types.CheckRelocate_TaskResponse `xml:"CheckRelocate_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1665,7 +1785,7 @@ func CheckRelocate_Task(ctx context.Context, r soap.RoundTripper, req *types.Che type CheckVmConfig_TaskBody struct { Req *types.CheckVmConfig_Task `xml:"urn:vim25 CheckVmConfig_Task,omitempty"` - Res *types.CheckVmConfig_TaskResponse `xml:"urn:vim25 CheckVmConfig_TaskResponse,omitempty"` + Res *types.CheckVmConfig_TaskResponse `xml:"CheckVmConfig_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1685,7 +1805,7 @@ func CheckVmConfig_Task(ctx context.Context, r soap.RoundTripper, req *types.Che type ClearComplianceStatusBody struct { Req *types.ClearComplianceStatus `xml:"urn:vim25 ClearComplianceStatus,omitempty"` - Res *types.ClearComplianceStatusResponse `xml:"urn:vim25 ClearComplianceStatusResponse,omitempty"` + Res *types.ClearComplianceStatusResponse `xml:"ClearComplianceStatusResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1705,7 +1825,7 @@ func ClearComplianceStatus(ctx context.Context, r soap.RoundTripper, req *types. type ClearNFSUserBody struct { Req *types.ClearNFSUser `xml:"urn:vim25 ClearNFSUser,omitempty"` - Res *types.ClearNFSUserResponse `xml:"urn:vim25 ClearNFSUserResponse,omitempty"` + Res *types.ClearNFSUserResponse `xml:"ClearNFSUserResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1725,7 +1845,7 @@ func ClearNFSUser(ctx context.Context, r soap.RoundTripper, req *types.ClearNFSU type ClearSystemEventLogBody struct { Req *types.ClearSystemEventLog `xml:"urn:vim25 ClearSystemEventLog,omitempty"` - Res *types.ClearSystemEventLogResponse `xml:"urn:vim25 ClearSystemEventLogResponse,omitempty"` + Res *types.ClearSystemEventLogResponse `xml:"ClearSystemEventLogResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1745,7 +1865,7 @@ func ClearSystemEventLog(ctx context.Context, r soap.RoundTripper, req *types.Cl type ClearTriggeredAlarmsBody struct { Req *types.ClearTriggeredAlarms `xml:"urn:vim25 ClearTriggeredAlarms,omitempty"` - Res *types.ClearTriggeredAlarmsResponse `xml:"urn:vim25 ClearTriggeredAlarmsResponse,omitempty"` + Res *types.ClearTriggeredAlarmsResponse `xml:"ClearTriggeredAlarmsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1765,7 +1885,7 @@ func ClearTriggeredAlarms(ctx context.Context, r soap.RoundTripper, req *types.C type ClearVStorageObjectControlFlagsBody struct { Req *types.ClearVStorageObjectControlFlags `xml:"urn:vim25 ClearVStorageObjectControlFlags,omitempty"` - Res *types.ClearVStorageObjectControlFlagsResponse `xml:"urn:vim25 ClearVStorageObjectControlFlagsResponse,omitempty"` + Res *types.ClearVStorageObjectControlFlagsResponse `xml:"ClearVStorageObjectControlFlagsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1785,7 +1905,7 @@ func ClearVStorageObjectControlFlags(ctx context.Context, r soap.RoundTripper, r type CloneSessionBody struct { Req *types.CloneSession `xml:"urn:vim25 CloneSession,omitempty"` - Res *types.CloneSessionResponse `xml:"urn:vim25 CloneSessionResponse,omitempty"` + Res *types.CloneSessionResponse 
`xml:"CloneSessionResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1805,7 +1925,7 @@ func CloneSession(ctx context.Context, r soap.RoundTripper, req *types.CloneSess type CloneVApp_TaskBody struct { Req *types.CloneVApp_Task `xml:"urn:vim25 CloneVApp_Task,omitempty"` - Res *types.CloneVApp_TaskResponse `xml:"urn:vim25 CloneVApp_TaskResponse,omitempty"` + Res *types.CloneVApp_TaskResponse `xml:"CloneVApp_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1825,7 +1945,7 @@ func CloneVApp_Task(ctx context.Context, r soap.RoundTripper, req *types.CloneVA type CloneVM_TaskBody struct { Req *types.CloneVM_Task `xml:"urn:vim25 CloneVM_Task,omitempty"` - Res *types.CloneVM_TaskResponse `xml:"urn:vim25 CloneVM_TaskResponse,omitempty"` + Res *types.CloneVM_TaskResponse `xml:"CloneVM_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1845,7 +1965,7 @@ func CloneVM_Task(ctx context.Context, r soap.RoundTripper, req *types.CloneVM_T type CloneVStorageObject_TaskBody struct { Req *types.CloneVStorageObject_Task `xml:"urn:vim25 CloneVStorageObject_Task,omitempty"` - Res *types.CloneVStorageObject_TaskResponse `xml:"urn:vim25 CloneVStorageObject_TaskResponse,omitempty"` + Res *types.CloneVStorageObject_TaskResponse `xml:"CloneVStorageObject_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1865,7 +1985,7 @@ func CloneVStorageObject_Task(ctx context.Context, r soap.RoundTripper, req *typ type CloseInventoryViewFolderBody struct { Req *types.CloseInventoryViewFolder `xml:"urn:vim25 CloseInventoryViewFolder,omitempty"` - Res *types.CloseInventoryViewFolderResponse `xml:"urn:vim25 CloseInventoryViewFolderResponse,omitempty"` + Res *types.CloseInventoryViewFolderResponse `xml:"CloseInventoryViewFolderResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1885,7 +2005,7 @@ func CloseInventoryViewFolder(ctx context.Context, r soap.RoundTripper, req *typ type ClusterEnterMaintenanceModeBody struct { Req *types.ClusterEnterMaintenanceMode `xml:"urn:vim25 ClusterEnterMaintenanceMode,omitempty"` - Res *types.ClusterEnterMaintenanceModeResponse `xml:"urn:vim25 ClusterEnterMaintenanceModeResponse,omitempty"` + Res *types.ClusterEnterMaintenanceModeResponse `xml:"ClusterEnterMaintenanceModeResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1905,7 +2025,7 @@ func ClusterEnterMaintenanceMode(ctx context.Context, r soap.RoundTripper, req * type CompositeHostProfile_TaskBody struct { Req *types.CompositeHostProfile_Task `xml:"urn:vim25 CompositeHostProfile_Task,omitempty"` - Res *types.CompositeHostProfile_TaskResponse `xml:"urn:vim25 CompositeHostProfile_TaskResponse,omitempty"` + Res *types.CompositeHostProfile_TaskResponse `xml:"CompositeHostProfile_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1925,7 +2045,7 @@ func CompositeHostProfile_Task(ctx context.Context, r soap.RoundTripper, req *ty type ComputeDiskPartitionInfoBody struct { Req *types.ComputeDiskPartitionInfo `xml:"urn:vim25 ComputeDiskPartitionInfo,omitempty"` - Res *types.ComputeDiskPartitionInfoResponse `xml:"urn:vim25 ComputeDiskPartitionInfoResponse,omitempty"` + Res *types.ComputeDiskPartitionInfoResponse 
`xml:"ComputeDiskPartitionInfoResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1945,7 +2065,7 @@ func ComputeDiskPartitionInfo(ctx context.Context, r soap.RoundTripper, req *typ type ComputeDiskPartitionInfoForResizeBody struct { Req *types.ComputeDiskPartitionInfoForResize `xml:"urn:vim25 ComputeDiskPartitionInfoForResize,omitempty"` - Res *types.ComputeDiskPartitionInfoForResizeResponse `xml:"urn:vim25 ComputeDiskPartitionInfoForResizeResponse,omitempty"` + Res *types.ComputeDiskPartitionInfoForResizeResponse `xml:"ComputeDiskPartitionInfoForResizeResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1965,7 +2085,7 @@ func ComputeDiskPartitionInfoForResize(ctx context.Context, r soap.RoundTripper, type ConfigureCryptoKeyBody struct { Req *types.ConfigureCryptoKey `xml:"urn:vim25 ConfigureCryptoKey,omitempty"` - Res *types.ConfigureCryptoKeyResponse `xml:"urn:vim25 ConfigureCryptoKeyResponse,omitempty"` + Res *types.ConfigureCryptoKeyResponse `xml:"ConfigureCryptoKeyResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -1985,7 +2105,7 @@ func ConfigureCryptoKey(ctx context.Context, r soap.RoundTripper, req *types.Con type ConfigureDatastoreIORM_TaskBody struct { Req *types.ConfigureDatastoreIORM_Task `xml:"urn:vim25 ConfigureDatastoreIORM_Task,omitempty"` - Res *types.ConfigureDatastoreIORM_TaskResponse `xml:"urn:vim25 ConfigureDatastoreIORM_TaskResponse,omitempty"` + Res *types.ConfigureDatastoreIORM_TaskResponse `xml:"ConfigureDatastoreIORM_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2005,7 +2125,7 @@ func ConfigureDatastoreIORM_Task(ctx context.Context, r soap.RoundTripper, req * type ConfigureDatastorePrincipalBody struct { Req *types.ConfigureDatastorePrincipal `xml:"urn:vim25 ConfigureDatastorePrincipal,omitempty"` - Res *types.ConfigureDatastorePrincipalResponse `xml:"urn:vim25 ConfigureDatastorePrincipalResponse,omitempty"` + Res *types.ConfigureDatastorePrincipalResponse `xml:"ConfigureDatastorePrincipalResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2025,7 +2145,7 @@ func ConfigureDatastorePrincipal(ctx context.Context, r soap.RoundTripper, req * type ConfigureEvcMode_TaskBody struct { Req *types.ConfigureEvcMode_Task `xml:"urn:vim25 ConfigureEvcMode_Task,omitempty"` - Res *types.ConfigureEvcMode_TaskResponse `xml:"urn:vim25 ConfigureEvcMode_TaskResponse,omitempty"` + Res *types.ConfigureEvcMode_TaskResponse `xml:"ConfigureEvcMode_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2043,9 +2163,29 @@ func ConfigureEvcMode_Task(ctx context.Context, r soap.RoundTripper, req *types. 
return resBody.Res, nil } +type ConfigureHCI_TaskBody struct { + Req *types.ConfigureHCI_Task `xml:"urn:vim25 ConfigureHCI_Task,omitempty"` + Res *types.ConfigureHCI_TaskResponse `xml:"ConfigureHCI_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ConfigureHCI_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ConfigureHCI_Task(ctx context.Context, r soap.RoundTripper, req *types.ConfigureHCI_Task) (*types.ConfigureHCI_TaskResponse, error) { + var reqBody, resBody ConfigureHCI_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type ConfigureHostCache_TaskBody struct { Req *types.ConfigureHostCache_Task `xml:"urn:vim25 ConfigureHostCache_Task,omitempty"` - Res *types.ConfigureHostCache_TaskResponse `xml:"urn:vim25 ConfigureHostCache_TaskResponse,omitempty"` + Res *types.ConfigureHostCache_TaskResponse `xml:"ConfigureHostCache_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2065,7 +2205,7 @@ func ConfigureHostCache_Task(ctx context.Context, r soap.RoundTripper, req *type type ConfigureLicenseSourceBody struct { Req *types.ConfigureLicenseSource `xml:"urn:vim25 ConfigureLicenseSource,omitempty"` - Res *types.ConfigureLicenseSourceResponse `xml:"urn:vim25 ConfigureLicenseSourceResponse,omitempty"` + Res *types.ConfigureLicenseSourceResponse `xml:"ConfigureLicenseSourceResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2085,7 +2225,7 @@ func ConfigureLicenseSource(ctx context.Context, r soap.RoundTripper, req *types type ConfigurePowerPolicyBody struct { Req *types.ConfigurePowerPolicy `xml:"urn:vim25 ConfigurePowerPolicy,omitempty"` - Res *types.ConfigurePowerPolicyResponse `xml:"urn:vim25 ConfigurePowerPolicyResponse,omitempty"` + Res *types.ConfigurePowerPolicyResponse `xml:"ConfigurePowerPolicyResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2105,7 +2245,7 @@ func ConfigurePowerPolicy(ctx context.Context, r soap.RoundTripper, req *types.C type ConfigureStorageDrsForPod_TaskBody struct { Req *types.ConfigureStorageDrsForPod_Task `xml:"urn:vim25 ConfigureStorageDrsForPod_Task,omitempty"` - Res *types.ConfigureStorageDrsForPod_TaskResponse `xml:"urn:vim25 ConfigureStorageDrsForPod_TaskResponse,omitempty"` + Res *types.ConfigureStorageDrsForPod_TaskResponse `xml:"ConfigureStorageDrsForPod_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2125,7 +2265,7 @@ func ConfigureStorageDrsForPod_Task(ctx context.Context, r soap.RoundTripper, re type ConfigureVFlashResourceEx_TaskBody struct { Req *types.ConfigureVFlashResourceEx_Task `xml:"urn:vim25 ConfigureVFlashResourceEx_Task,omitempty"` - Res *types.ConfigureVFlashResourceEx_TaskResponse `xml:"urn:vim25 ConfigureVFlashResourceEx_TaskResponse,omitempty"` + Res *types.ConfigureVFlashResourceEx_TaskResponse `xml:"ConfigureVFlashResourceEx_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2143,9 +2283,49 @@ func ConfigureVFlashResourceEx_Task(ctx context.Context, r soap.RoundTripper, re return resBody.Res, nil } +type ConnectNvmeControllerBody struct { + Req *types.ConnectNvmeController `xml:"urn:vim25 ConnectNvmeController,omitempty"` + Res 
*types.ConnectNvmeControllerResponse `xml:"ConnectNvmeControllerResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ConnectNvmeControllerBody) Fault() *soap.Fault { return b.Fault_ } + +func ConnectNvmeController(ctx context.Context, r soap.RoundTripper, req *types.ConnectNvmeController) (*types.ConnectNvmeControllerResponse, error) { + var reqBody, resBody ConnectNvmeControllerBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type ConnectNvmeControllerEx_TaskBody struct { + Req *types.ConnectNvmeControllerEx_Task `xml:"urn:vim25 ConnectNvmeControllerEx_Task,omitempty"` + Res *types.ConnectNvmeControllerEx_TaskResponse `xml:"ConnectNvmeControllerEx_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ConnectNvmeControllerEx_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ConnectNvmeControllerEx_Task(ctx context.Context, r soap.RoundTripper, req *types.ConnectNvmeControllerEx_Task) (*types.ConnectNvmeControllerEx_TaskResponse, error) { + var reqBody, resBody ConnectNvmeControllerEx_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type ConsolidateVMDisks_TaskBody struct { Req *types.ConsolidateVMDisks_Task `xml:"urn:vim25 ConsolidateVMDisks_Task,omitempty"` - Res *types.ConsolidateVMDisks_TaskResponse `xml:"urn:vim25 ConsolidateVMDisks_TaskResponse,omitempty"` + Res *types.ConsolidateVMDisks_TaskResponse `xml:"ConsolidateVMDisks_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2165,7 +2345,7 @@ func ConsolidateVMDisks_Task(ctx context.Context, r soap.RoundTripper, req *type type ContinueRetrievePropertiesExBody struct { Req *types.ContinueRetrievePropertiesEx `xml:"urn:vim25 ContinueRetrievePropertiesEx,omitempty"` - Res *types.ContinueRetrievePropertiesExResponse `xml:"urn:vim25 ContinueRetrievePropertiesExResponse,omitempty"` + Res *types.ContinueRetrievePropertiesExResponse `xml:"ContinueRetrievePropertiesExResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2185,7 +2365,7 @@ func ContinueRetrievePropertiesEx(ctx context.Context, r soap.RoundTripper, req type ConvertNamespacePathToUuidPathBody struct { Req *types.ConvertNamespacePathToUuidPath `xml:"urn:vim25 ConvertNamespacePathToUuidPath,omitempty"` - Res *types.ConvertNamespacePathToUuidPathResponse `xml:"urn:vim25 ConvertNamespacePathToUuidPathResponse,omitempty"` + Res *types.ConvertNamespacePathToUuidPathResponse `xml:"ConvertNamespacePathToUuidPathResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2205,7 +2385,7 @@ func ConvertNamespacePathToUuidPath(ctx context.Context, r soap.RoundTripper, re type CopyDatastoreFile_TaskBody struct { Req *types.CopyDatastoreFile_Task `xml:"urn:vim25 CopyDatastoreFile_Task,omitempty"` - Res *types.CopyDatastoreFile_TaskResponse `xml:"urn:vim25 CopyDatastoreFile_TaskResponse,omitempty"` + Res *types.CopyDatastoreFile_TaskResponse `xml:"CopyDatastoreFile_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2225,7 +2405,7 @@ func CopyDatastoreFile_Task(ctx context.Context, r soap.RoundTripper, req *types 
type CopyVirtualDisk_TaskBody struct { Req *types.CopyVirtualDisk_Task `xml:"urn:vim25 CopyVirtualDisk_Task,omitempty"` - Res *types.CopyVirtualDisk_TaskResponse `xml:"urn:vim25 CopyVirtualDisk_TaskResponse,omitempty"` + Res *types.CopyVirtualDisk_TaskResponse `xml:"CopyVirtualDisk_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2245,7 +2425,7 @@ func CopyVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.C type CreateAlarmBody struct { Req *types.CreateAlarm `xml:"urn:vim25 CreateAlarm,omitempty"` - Res *types.CreateAlarmResponse `xml:"urn:vim25 CreateAlarmResponse,omitempty"` + Res *types.CreateAlarmResponse `xml:"CreateAlarmResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2265,7 +2445,7 @@ func CreateAlarm(ctx context.Context, r soap.RoundTripper, req *types.CreateAlar type CreateChildVM_TaskBody struct { Req *types.CreateChildVM_Task `xml:"urn:vim25 CreateChildVM_Task,omitempty"` - Res *types.CreateChildVM_TaskResponse `xml:"urn:vim25 CreateChildVM_TaskResponse,omitempty"` + Res *types.CreateChildVM_TaskResponse `xml:"CreateChildVM_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2285,7 +2465,7 @@ func CreateChildVM_Task(ctx context.Context, r soap.RoundTripper, req *types.Cre type CreateClusterBody struct { Req *types.CreateCluster `xml:"urn:vim25 CreateCluster,omitempty"` - Res *types.CreateClusterResponse `xml:"urn:vim25 CreateClusterResponse,omitempty"` + Res *types.CreateClusterResponse `xml:"CreateClusterResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2305,7 +2485,7 @@ func CreateCluster(ctx context.Context, r soap.RoundTripper, req *types.CreateCl type CreateClusterExBody struct { Req *types.CreateClusterEx `xml:"urn:vim25 CreateClusterEx,omitempty"` - Res *types.CreateClusterExResponse `xml:"urn:vim25 CreateClusterExResponse,omitempty"` + Res *types.CreateClusterExResponse `xml:"CreateClusterExResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2325,7 +2505,7 @@ func CreateClusterEx(ctx context.Context, r soap.RoundTripper, req *types.Create type CreateCollectorForEventsBody struct { Req *types.CreateCollectorForEvents `xml:"urn:vim25 CreateCollectorForEvents,omitempty"` - Res *types.CreateCollectorForEventsResponse `xml:"urn:vim25 CreateCollectorForEventsResponse,omitempty"` + Res *types.CreateCollectorForEventsResponse `xml:"CreateCollectorForEventsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2345,7 +2525,7 @@ func CreateCollectorForEvents(ctx context.Context, r soap.RoundTripper, req *typ type CreateCollectorForTasksBody struct { Req *types.CreateCollectorForTasks `xml:"urn:vim25 CreateCollectorForTasks,omitempty"` - Res *types.CreateCollectorForTasksResponse `xml:"urn:vim25 CreateCollectorForTasksResponse,omitempty"` + Res *types.CreateCollectorForTasksResponse `xml:"CreateCollectorForTasksResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2365,7 +2545,7 @@ func CreateCollectorForTasks(ctx context.Context, r soap.RoundTripper, req *type type CreateContainerViewBody struct { Req *types.CreateContainerView `xml:"urn:vim25 CreateContainerView,omitempty"` - Res *types.CreateContainerViewResponse `xml:"urn:vim25 
CreateContainerViewResponse,omitempty"` + Res *types.CreateContainerViewResponse `xml:"CreateContainerViewResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2385,7 +2565,7 @@ func CreateContainerView(ctx context.Context, r soap.RoundTripper, req *types.Cr type CreateCustomizationSpecBody struct { Req *types.CreateCustomizationSpec `xml:"urn:vim25 CreateCustomizationSpec,omitempty"` - Res *types.CreateCustomizationSpecResponse `xml:"urn:vim25 CreateCustomizationSpecResponse,omitempty"` + Res *types.CreateCustomizationSpecResponse `xml:"CreateCustomizationSpecResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2405,7 +2585,7 @@ func CreateCustomizationSpec(ctx context.Context, r soap.RoundTripper, req *type type CreateDVPortgroup_TaskBody struct { Req *types.CreateDVPortgroup_Task `xml:"urn:vim25 CreateDVPortgroup_Task,omitempty"` - Res *types.CreateDVPortgroup_TaskResponse `xml:"urn:vim25 CreateDVPortgroup_TaskResponse,omitempty"` + Res *types.CreateDVPortgroup_TaskResponse `xml:"CreateDVPortgroup_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2425,7 +2605,7 @@ func CreateDVPortgroup_Task(ctx context.Context, r soap.RoundTripper, req *types type CreateDVS_TaskBody struct { Req *types.CreateDVS_Task `xml:"urn:vim25 CreateDVS_Task,omitempty"` - Res *types.CreateDVS_TaskResponse `xml:"urn:vim25 CreateDVS_TaskResponse,omitempty"` + Res *types.CreateDVS_TaskResponse `xml:"CreateDVS_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2445,7 +2625,7 @@ func CreateDVS_Task(ctx context.Context, r soap.RoundTripper, req *types.CreateD type CreateDatacenterBody struct { Req *types.CreateDatacenter `xml:"urn:vim25 CreateDatacenter,omitempty"` - Res *types.CreateDatacenterResponse `xml:"urn:vim25 CreateDatacenterResponse,omitempty"` + Res *types.CreateDatacenterResponse `xml:"CreateDatacenterResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2465,7 +2645,7 @@ func CreateDatacenter(ctx context.Context, r soap.RoundTripper, req *types.Creat type CreateDefaultProfileBody struct { Req *types.CreateDefaultProfile `xml:"urn:vim25 CreateDefaultProfile,omitempty"` - Res *types.CreateDefaultProfileResponse `xml:"urn:vim25 CreateDefaultProfileResponse,omitempty"` + Res *types.CreateDefaultProfileResponse `xml:"CreateDefaultProfileResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2485,7 +2665,7 @@ func CreateDefaultProfile(ctx context.Context, r soap.RoundTripper, req *types.C type CreateDescriptorBody struct { Req *types.CreateDescriptor `xml:"urn:vim25 CreateDescriptor,omitempty"` - Res *types.CreateDescriptorResponse `xml:"urn:vim25 CreateDescriptorResponse,omitempty"` + Res *types.CreateDescriptorResponse `xml:"CreateDescriptorResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2505,7 +2685,7 @@ func CreateDescriptor(ctx context.Context, r soap.RoundTripper, req *types.Creat type CreateDiagnosticPartitionBody struct { Req *types.CreateDiagnosticPartition `xml:"urn:vim25 CreateDiagnosticPartition,omitempty"` - Res *types.CreateDiagnosticPartitionResponse `xml:"urn:vim25 CreateDiagnosticPartitionResponse,omitempty"` + Res *types.CreateDiagnosticPartitionResponse 
`xml:"CreateDiagnosticPartitionResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2525,7 +2705,7 @@ func CreateDiagnosticPartition(ctx context.Context, r soap.RoundTripper, req *ty type CreateDirectoryBody struct { Req *types.CreateDirectory `xml:"urn:vim25 CreateDirectory,omitempty"` - Res *types.CreateDirectoryResponse `xml:"urn:vim25 CreateDirectoryResponse,omitempty"` + Res *types.CreateDirectoryResponse `xml:"CreateDirectoryResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2545,7 +2725,7 @@ func CreateDirectory(ctx context.Context, r soap.RoundTripper, req *types.Create type CreateDiskFromSnapshot_TaskBody struct { Req *types.CreateDiskFromSnapshot_Task `xml:"urn:vim25 CreateDiskFromSnapshot_Task,omitempty"` - Res *types.CreateDiskFromSnapshot_TaskResponse `xml:"urn:vim25 CreateDiskFromSnapshot_TaskResponse,omitempty"` + Res *types.CreateDiskFromSnapshot_TaskResponse `xml:"CreateDiskFromSnapshot_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2565,7 +2745,7 @@ func CreateDiskFromSnapshot_Task(ctx context.Context, r soap.RoundTripper, req * type CreateDisk_TaskBody struct { Req *types.CreateDisk_Task `xml:"urn:vim25 CreateDisk_Task,omitempty"` - Res *types.CreateDisk_TaskResponse `xml:"urn:vim25 CreateDisk_TaskResponse,omitempty"` + Res *types.CreateDisk_TaskResponse `xml:"CreateDisk_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2585,7 +2765,7 @@ func CreateDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.Create type CreateFilterBody struct { Req *types.CreateFilter `xml:"urn:vim25 CreateFilter,omitempty"` - Res *types.CreateFilterResponse `xml:"urn:vim25 CreateFilterResponse,omitempty"` + Res *types.CreateFilterResponse `xml:"CreateFilterResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2605,7 +2785,7 @@ func CreateFilter(ctx context.Context, r soap.RoundTripper, req *types.CreateFil type CreateFolderBody struct { Req *types.CreateFolder `xml:"urn:vim25 CreateFolder,omitempty"` - Res *types.CreateFolderResponse `xml:"urn:vim25 CreateFolderResponse,omitempty"` + Res *types.CreateFolderResponse `xml:"CreateFolderResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2625,7 +2805,7 @@ func CreateFolder(ctx context.Context, r soap.RoundTripper, req *types.CreateFol type CreateGroupBody struct { Req *types.CreateGroup `xml:"urn:vim25 CreateGroup,omitempty"` - Res *types.CreateGroupResponse `xml:"urn:vim25 CreateGroupResponse,omitempty"` + Res *types.CreateGroupResponse `xml:"CreateGroupResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2645,7 +2825,7 @@ func CreateGroup(ctx context.Context, r soap.RoundTripper, req *types.CreateGrou type CreateImportSpecBody struct { Req *types.CreateImportSpec `xml:"urn:vim25 CreateImportSpec,omitempty"` - Res *types.CreateImportSpecResponse `xml:"urn:vim25 CreateImportSpecResponse,omitempty"` + Res *types.CreateImportSpecResponse `xml:"CreateImportSpecResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2665,7 +2845,7 @@ func CreateImportSpec(ctx context.Context, r soap.RoundTripper, req *types.Creat type CreateInventoryViewBody struct { Req 
*types.CreateInventoryView `xml:"urn:vim25 CreateInventoryView,omitempty"` - Res *types.CreateInventoryViewResponse `xml:"urn:vim25 CreateInventoryViewResponse,omitempty"` + Res *types.CreateInventoryViewResponse `xml:"CreateInventoryViewResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2685,7 +2865,7 @@ func CreateInventoryView(ctx context.Context, r soap.RoundTripper, req *types.Cr type CreateIpPoolBody struct { Req *types.CreateIpPool `xml:"urn:vim25 CreateIpPool,omitempty"` - Res *types.CreateIpPoolResponse `xml:"urn:vim25 CreateIpPoolResponse,omitempty"` + Res *types.CreateIpPoolResponse `xml:"CreateIpPoolResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2705,7 +2885,7 @@ func CreateIpPool(ctx context.Context, r soap.RoundTripper, req *types.CreateIpP type CreateListViewBody struct { Req *types.CreateListView `xml:"urn:vim25 CreateListView,omitempty"` - Res *types.CreateListViewResponse `xml:"urn:vim25 CreateListViewResponse,omitempty"` + Res *types.CreateListViewResponse `xml:"CreateListViewResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2725,7 +2905,7 @@ func CreateListView(ctx context.Context, r soap.RoundTripper, req *types.CreateL type CreateListViewFromViewBody struct { Req *types.CreateListViewFromView `xml:"urn:vim25 CreateListViewFromView,omitempty"` - Res *types.CreateListViewFromViewResponse `xml:"urn:vim25 CreateListViewFromViewResponse,omitempty"` + Res *types.CreateListViewFromViewResponse `xml:"CreateListViewFromViewResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2745,7 +2925,7 @@ func CreateListViewFromView(ctx context.Context, r soap.RoundTripper, req *types type CreateLocalDatastoreBody struct { Req *types.CreateLocalDatastore `xml:"urn:vim25 CreateLocalDatastore,omitempty"` - Res *types.CreateLocalDatastoreResponse `xml:"urn:vim25 CreateLocalDatastoreResponse,omitempty"` + Res *types.CreateLocalDatastoreResponse `xml:"CreateLocalDatastoreResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2765,7 +2945,7 @@ func CreateLocalDatastore(ctx context.Context, r soap.RoundTripper, req *types.C type CreateNasDatastoreBody struct { Req *types.CreateNasDatastore `xml:"urn:vim25 CreateNasDatastore,omitempty"` - Res *types.CreateNasDatastoreResponse `xml:"urn:vim25 CreateNasDatastoreResponse,omitempty"` + Res *types.CreateNasDatastoreResponse `xml:"CreateNasDatastoreResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2785,7 +2965,7 @@ func CreateNasDatastore(ctx context.Context, r soap.RoundTripper, req *types.Cre type CreateNvdimmNamespace_TaskBody struct { Req *types.CreateNvdimmNamespace_Task `xml:"urn:vim25 CreateNvdimmNamespace_Task,omitempty"` - Res *types.CreateNvdimmNamespace_TaskResponse `xml:"urn:vim25 CreateNvdimmNamespace_TaskResponse,omitempty"` + Res *types.CreateNvdimmNamespace_TaskResponse `xml:"CreateNvdimmNamespace_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2803,9 +2983,49 @@ func CreateNvdimmNamespace_Task(ctx context.Context, r soap.RoundTripper, req *t return resBody.Res, nil } +type CreateNvdimmPMemNamespace_TaskBody struct { + Req *types.CreateNvdimmPMemNamespace_Task `xml:"urn:vim25 
CreateNvdimmPMemNamespace_Task,omitempty"` + Res *types.CreateNvdimmPMemNamespace_TaskResponse `xml:"CreateNvdimmPMemNamespace_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateNvdimmPMemNamespace_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateNvdimmPMemNamespace_Task(ctx context.Context, r soap.RoundTripper, req *types.CreateNvdimmPMemNamespace_Task) (*types.CreateNvdimmPMemNamespace_TaskResponse, error) { + var reqBody, resBody CreateNvdimmPMemNamespace_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type CreateNvmeOverRdmaAdapterBody struct { + Req *types.CreateNvmeOverRdmaAdapter `xml:"urn:vim25 CreateNvmeOverRdmaAdapter,omitempty"` + Res *types.CreateNvmeOverRdmaAdapterResponse `xml:"CreateNvmeOverRdmaAdapterResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateNvmeOverRdmaAdapterBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateNvmeOverRdmaAdapter(ctx context.Context, r soap.RoundTripper, req *types.CreateNvmeOverRdmaAdapter) (*types.CreateNvmeOverRdmaAdapterResponse, error) { + var reqBody, resBody CreateNvmeOverRdmaAdapterBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type CreateObjectScheduledTaskBody struct { Req *types.CreateObjectScheduledTask `xml:"urn:vim25 CreateObjectScheduledTask,omitempty"` - Res *types.CreateObjectScheduledTaskResponse `xml:"urn:vim25 CreateObjectScheduledTaskResponse,omitempty"` + Res *types.CreateObjectScheduledTaskResponse `xml:"CreateObjectScheduledTaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2825,7 +3045,7 @@ func CreateObjectScheduledTask(ctx context.Context, r soap.RoundTripper, req *ty type CreatePerfIntervalBody struct { Req *types.CreatePerfInterval `xml:"urn:vim25 CreatePerfInterval,omitempty"` - Res *types.CreatePerfIntervalResponse `xml:"urn:vim25 CreatePerfIntervalResponse,omitempty"` + Res *types.CreatePerfIntervalResponse `xml:"CreatePerfIntervalResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2845,7 +3065,7 @@ func CreatePerfInterval(ctx context.Context, r soap.RoundTripper, req *types.Cre type CreateProfileBody struct { Req *types.CreateProfile `xml:"urn:vim25 CreateProfile,omitempty"` - Res *types.CreateProfileResponse `xml:"urn:vim25 CreateProfileResponse,omitempty"` + Res *types.CreateProfileResponse `xml:"CreateProfileResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2865,7 +3085,7 @@ func CreateProfile(ctx context.Context, r soap.RoundTripper, req *types.CreatePr type CreatePropertyCollectorBody struct { Req *types.CreatePropertyCollector `xml:"urn:vim25 CreatePropertyCollector,omitempty"` - Res *types.CreatePropertyCollectorResponse `xml:"urn:vim25 CreatePropertyCollectorResponse,omitempty"` + Res *types.CreatePropertyCollectorResponse `xml:"CreatePropertyCollectorResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2885,7 +3105,7 @@ func CreatePropertyCollector(ctx context.Context, r soap.RoundTripper, req *type type CreateRegistryKeyInGuestBody struct { Req *types.CreateRegistryKeyInGuest 
`xml:"urn:vim25 CreateRegistryKeyInGuest,omitempty"` - Res *types.CreateRegistryKeyInGuestResponse `xml:"urn:vim25 CreateRegistryKeyInGuestResponse,omitempty"` + Res *types.CreateRegistryKeyInGuestResponse `xml:"CreateRegistryKeyInGuestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2905,7 +3125,7 @@ func CreateRegistryKeyInGuest(ctx context.Context, r soap.RoundTripper, req *typ type CreateResourcePoolBody struct { Req *types.CreateResourcePool `xml:"urn:vim25 CreateResourcePool,omitempty"` - Res *types.CreateResourcePoolResponse `xml:"urn:vim25 CreateResourcePoolResponse,omitempty"` + Res *types.CreateResourcePoolResponse `xml:"CreateResourcePoolResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2925,7 +3145,7 @@ func CreateResourcePool(ctx context.Context, r soap.RoundTripper, req *types.Cre type CreateScheduledTaskBody struct { Req *types.CreateScheduledTask `xml:"urn:vim25 CreateScheduledTask,omitempty"` - Res *types.CreateScheduledTaskResponse `xml:"urn:vim25 CreateScheduledTaskResponse,omitempty"` + Res *types.CreateScheduledTaskResponse `xml:"CreateScheduledTaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2945,7 +3165,7 @@ func CreateScheduledTask(ctx context.Context, r soap.RoundTripper, req *types.Cr type CreateScreenshot_TaskBody struct { Req *types.CreateScreenshot_Task `xml:"urn:vim25 CreateScreenshot_Task,omitempty"` - Res *types.CreateScreenshot_TaskResponse `xml:"urn:vim25 CreateScreenshot_TaskResponse,omitempty"` + Res *types.CreateScreenshot_TaskResponse `xml:"CreateScreenshot_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2965,7 +3185,7 @@ func CreateScreenshot_Task(ctx context.Context, r soap.RoundTripper, req *types. type CreateSecondaryVMEx_TaskBody struct { Req *types.CreateSecondaryVMEx_Task `xml:"urn:vim25 CreateSecondaryVMEx_Task,omitempty"` - Res *types.CreateSecondaryVMEx_TaskResponse `xml:"urn:vim25 CreateSecondaryVMEx_TaskResponse,omitempty"` + Res *types.CreateSecondaryVMEx_TaskResponse `xml:"CreateSecondaryVMEx_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -2985,7 +3205,7 @@ func CreateSecondaryVMEx_Task(ctx context.Context, r soap.RoundTripper, req *typ type CreateSecondaryVM_TaskBody struct { Req *types.CreateSecondaryVM_Task `xml:"urn:vim25 CreateSecondaryVM_Task,omitempty"` - Res *types.CreateSecondaryVM_TaskResponse `xml:"urn:vim25 CreateSecondaryVM_TaskResponse,omitempty"` + Res *types.CreateSecondaryVM_TaskResponse `xml:"CreateSecondaryVM_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3005,7 +3225,7 @@ func CreateSecondaryVM_Task(ctx context.Context, r soap.RoundTripper, req *types type CreateSnapshotEx_TaskBody struct { Req *types.CreateSnapshotEx_Task `xml:"urn:vim25 CreateSnapshotEx_Task,omitempty"` - Res *types.CreateSnapshotEx_TaskResponse `xml:"urn:vim25 CreateSnapshotEx_TaskResponse,omitempty"` + Res *types.CreateSnapshotEx_TaskResponse `xml:"CreateSnapshotEx_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3025,7 +3245,7 @@ func CreateSnapshotEx_Task(ctx context.Context, r soap.RoundTripper, req *types. 
type CreateSnapshot_TaskBody struct { Req *types.CreateSnapshot_Task `xml:"urn:vim25 CreateSnapshot_Task,omitempty"` - Res *types.CreateSnapshot_TaskResponse `xml:"urn:vim25 CreateSnapshot_TaskResponse,omitempty"` + Res *types.CreateSnapshot_TaskResponse `xml:"CreateSnapshot_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3043,9 +3263,29 @@ func CreateSnapshot_Task(ctx context.Context, r soap.RoundTripper, req *types.Cr return resBody.Res, nil } +type CreateSoftwareAdapterBody struct { + Req *types.CreateSoftwareAdapter `xml:"urn:vim25 CreateSoftwareAdapter,omitempty"` + Res *types.CreateSoftwareAdapterResponse `xml:"CreateSoftwareAdapterResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateSoftwareAdapterBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateSoftwareAdapter(ctx context.Context, r soap.RoundTripper, req *types.CreateSoftwareAdapter) (*types.CreateSoftwareAdapterResponse, error) { + var reqBody, resBody CreateSoftwareAdapterBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type CreateStoragePodBody struct { Req *types.CreateStoragePod `xml:"urn:vim25 CreateStoragePod,omitempty"` - Res *types.CreateStoragePodResponse `xml:"urn:vim25 CreateStoragePodResponse,omitempty"` + Res *types.CreateStoragePodResponse `xml:"CreateStoragePodResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3065,7 +3305,7 @@ func CreateStoragePod(ctx context.Context, r soap.RoundTripper, req *types.Creat type CreateTaskBody struct { Req *types.CreateTask `xml:"urn:vim25 CreateTask,omitempty"` - Res *types.CreateTaskResponse `xml:"urn:vim25 CreateTaskResponse,omitempty"` + Res *types.CreateTaskResponse `xml:"CreateTaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3085,7 +3325,7 @@ func CreateTask(ctx context.Context, r soap.RoundTripper, req *types.CreateTask) type CreateTemporaryDirectoryInGuestBody struct { Req *types.CreateTemporaryDirectoryInGuest `xml:"urn:vim25 CreateTemporaryDirectoryInGuest,omitempty"` - Res *types.CreateTemporaryDirectoryInGuestResponse `xml:"urn:vim25 CreateTemporaryDirectoryInGuestResponse,omitempty"` + Res *types.CreateTemporaryDirectoryInGuestResponse `xml:"CreateTemporaryDirectoryInGuestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3105,7 +3345,7 @@ func CreateTemporaryDirectoryInGuest(ctx context.Context, r soap.RoundTripper, r type CreateTemporaryFileInGuestBody struct { Req *types.CreateTemporaryFileInGuest `xml:"urn:vim25 CreateTemporaryFileInGuest,omitempty"` - Res *types.CreateTemporaryFileInGuestResponse `xml:"urn:vim25 CreateTemporaryFileInGuestResponse,omitempty"` + Res *types.CreateTemporaryFileInGuestResponse `xml:"CreateTemporaryFileInGuestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3125,7 +3365,7 @@ func CreateTemporaryFileInGuest(ctx context.Context, r soap.RoundTripper, req *t type CreateUserBody struct { Req *types.CreateUser `xml:"urn:vim25 CreateUser,omitempty"` - Res *types.CreateUserResponse `xml:"urn:vim25 CreateUserResponse,omitempty"` + Res *types.CreateUserResponse `xml:"CreateUserResponse,omitempty"` Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3145,7 +3385,7 @@ func CreateUser(ctx context.Context, r soap.RoundTripper, req *types.CreateUser) type CreateVAppBody struct { Req *types.CreateVApp `xml:"urn:vim25 CreateVApp,omitempty"` - Res *types.CreateVAppResponse `xml:"urn:vim25 CreateVAppResponse,omitempty"` + Res *types.CreateVAppResponse `xml:"CreateVAppResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3165,7 +3405,7 @@ func CreateVApp(ctx context.Context, r soap.RoundTripper, req *types.CreateVApp) type CreateVM_TaskBody struct { Req *types.CreateVM_Task `xml:"urn:vim25 CreateVM_Task,omitempty"` - Res *types.CreateVM_TaskResponse `xml:"urn:vim25 CreateVM_TaskResponse,omitempty"` + Res *types.CreateVM_TaskResponse `xml:"CreateVM_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3185,7 +3425,7 @@ func CreateVM_Task(ctx context.Context, r soap.RoundTripper, req *types.CreateVM type CreateVirtualDisk_TaskBody struct { Req *types.CreateVirtualDisk_Task `xml:"urn:vim25 CreateVirtualDisk_Task,omitempty"` - Res *types.CreateVirtualDisk_TaskResponse `xml:"urn:vim25 CreateVirtualDisk_TaskResponse,omitempty"` + Res *types.CreateVirtualDisk_TaskResponse `xml:"CreateVirtualDisk_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3205,7 +3445,7 @@ func CreateVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *types type CreateVmfsDatastoreBody struct { Req *types.CreateVmfsDatastore `xml:"urn:vim25 CreateVmfsDatastore,omitempty"` - Res *types.CreateVmfsDatastoreResponse `xml:"urn:vim25 CreateVmfsDatastoreResponse,omitempty"` + Res *types.CreateVmfsDatastoreResponse `xml:"CreateVmfsDatastoreResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3225,7 +3465,7 @@ func CreateVmfsDatastore(ctx context.Context, r soap.RoundTripper, req *types.Cr type CreateVvolDatastoreBody struct { Req *types.CreateVvolDatastore `xml:"urn:vim25 CreateVvolDatastore,omitempty"` - Res *types.CreateVvolDatastoreResponse `xml:"urn:vim25 CreateVvolDatastoreResponse,omitempty"` + Res *types.CreateVvolDatastoreResponse `xml:"CreateVvolDatastoreResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3243,9 +3483,29 @@ func CreateVvolDatastore(ctx context.Context, r soap.RoundTripper, req *types.Cr return resBody.Res, nil } +type CryptoManagerHostDisableBody struct { + Req *types.CryptoManagerHostDisable `xml:"urn:vim25 CryptoManagerHostDisable,omitempty"` + Res *types.CryptoManagerHostDisableResponse `xml:"CryptoManagerHostDisableResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CryptoManagerHostDisableBody) Fault() *soap.Fault { return b.Fault_ } + +func CryptoManagerHostDisable(ctx context.Context, r soap.RoundTripper, req *types.CryptoManagerHostDisable) (*types.CryptoManagerHostDisableResponse, error) { + var reqBody, resBody CryptoManagerHostDisableBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type CryptoManagerHostEnableBody struct { Req *types.CryptoManagerHostEnable `xml:"urn:vim25 CryptoManagerHostEnable,omitempty"` - Res *types.CryptoManagerHostEnableResponse `xml:"urn:vim25 
CryptoManagerHostEnableResponse,omitempty"` + Res *types.CryptoManagerHostEnableResponse `xml:"CryptoManagerHostEnableResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3265,7 +3525,7 @@ func CryptoManagerHostEnable(ctx context.Context, r soap.RoundTripper, req *type type CryptoManagerHostPrepareBody struct { Req *types.CryptoManagerHostPrepare `xml:"urn:vim25 CryptoManagerHostPrepare,omitempty"` - Res *types.CryptoManagerHostPrepareResponse `xml:"urn:vim25 CryptoManagerHostPrepareResponse,omitempty"` + Res *types.CryptoManagerHostPrepareResponse `xml:"CryptoManagerHostPrepareResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3285,7 +3545,7 @@ func CryptoManagerHostPrepare(ctx context.Context, r soap.RoundTripper, req *typ type CryptoUnlock_TaskBody struct { Req *types.CryptoUnlock_Task `xml:"urn:vim25 CryptoUnlock_Task,omitempty"` - Res *types.CryptoUnlock_TaskResponse `xml:"urn:vim25 CryptoUnlock_TaskResponse,omitempty"` + Res *types.CryptoUnlock_TaskResponse `xml:"CryptoUnlock_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3305,7 +3565,7 @@ func CryptoUnlock_Task(ctx context.Context, r soap.RoundTripper, req *types.Cryp type CurrentTimeBody struct { Req *types.CurrentTime `xml:"urn:vim25 CurrentTime,omitempty"` - Res *types.CurrentTimeResponse `xml:"urn:vim25 CurrentTimeResponse,omitempty"` + Res *types.CurrentTimeResponse `xml:"CurrentTimeResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3325,7 +3585,7 @@ func CurrentTime(ctx context.Context, r soap.RoundTripper, req *types.CurrentTim type CustomizationSpecItemToXmlBody struct { Req *types.CustomizationSpecItemToXml `xml:"urn:vim25 CustomizationSpecItemToXml,omitempty"` - Res *types.CustomizationSpecItemToXmlResponse `xml:"urn:vim25 CustomizationSpecItemToXmlResponse,omitempty"` + Res *types.CustomizationSpecItemToXmlResponse `xml:"CustomizationSpecItemToXmlResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3343,9 +3603,29 @@ func CustomizationSpecItemToXml(ctx context.Context, r soap.RoundTripper, req *t return resBody.Res, nil } +type CustomizeGuest_TaskBody struct { + Req *types.CustomizeGuest_Task `xml:"urn:vim25 CustomizeGuest_Task,omitempty"` + Res *types.CustomizeGuest_TaskResponse `xml:"CustomizeGuest_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CustomizeGuest_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func CustomizeGuest_Task(ctx context.Context, r soap.RoundTripper, req *types.CustomizeGuest_Task) (*types.CustomizeGuest_TaskResponse, error) { + var reqBody, resBody CustomizeGuest_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type CustomizeVM_TaskBody struct { Req *types.CustomizeVM_Task `xml:"urn:vim25 CustomizeVM_Task,omitempty"` - Res *types.CustomizeVM_TaskResponse `xml:"urn:vim25 CustomizeVM_TaskResponse,omitempty"` + Res *types.CustomizeVM_TaskResponse `xml:"CustomizeVM_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3365,7 +3645,7 @@ func CustomizeVM_Task(ctx context.Context, r soap.RoundTripper, req *types.Custo type DVPortgroupRollback_TaskBody 
struct { Req *types.DVPortgroupRollback_Task `xml:"urn:vim25 DVPortgroupRollback_Task,omitempty"` - Res *types.DVPortgroupRollback_TaskResponse `xml:"urn:vim25 DVPortgroupRollback_TaskResponse,omitempty"` + Res *types.DVPortgroupRollback_TaskResponse `xml:"DVPortgroupRollback_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3385,7 +3665,7 @@ func DVPortgroupRollback_Task(ctx context.Context, r soap.RoundTripper, req *typ type DVSManagerExportEntity_TaskBody struct { Req *types.DVSManagerExportEntity_Task `xml:"urn:vim25 DVSManagerExportEntity_Task,omitempty"` - Res *types.DVSManagerExportEntity_TaskResponse `xml:"urn:vim25 DVSManagerExportEntity_TaskResponse,omitempty"` + Res *types.DVSManagerExportEntity_TaskResponse `xml:"DVSManagerExportEntity_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3405,7 +3685,7 @@ func DVSManagerExportEntity_Task(ctx context.Context, r soap.RoundTripper, req * type DVSManagerImportEntity_TaskBody struct { Req *types.DVSManagerImportEntity_Task `xml:"urn:vim25 DVSManagerImportEntity_Task,omitempty"` - Res *types.DVSManagerImportEntity_TaskResponse `xml:"urn:vim25 DVSManagerImportEntity_TaskResponse,omitempty"` + Res *types.DVSManagerImportEntity_TaskResponse `xml:"DVSManagerImportEntity_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3425,7 +3705,7 @@ func DVSManagerImportEntity_Task(ctx context.Context, r soap.RoundTripper, req * type DVSManagerLookupDvPortGroupBody struct { Req *types.DVSManagerLookupDvPortGroup `xml:"urn:vim25 DVSManagerLookupDvPortGroup,omitempty"` - Res *types.DVSManagerLookupDvPortGroupResponse `xml:"urn:vim25 DVSManagerLookupDvPortGroupResponse,omitempty"` + Res *types.DVSManagerLookupDvPortGroupResponse `xml:"DVSManagerLookupDvPortGroupResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3445,7 +3725,7 @@ func DVSManagerLookupDvPortGroup(ctx context.Context, r soap.RoundTripper, req * type DVSRollback_TaskBody struct { Req *types.DVSRollback_Task `xml:"urn:vim25 DVSRollback_Task,omitempty"` - Res *types.DVSRollback_TaskResponse `xml:"urn:vim25 DVSRollback_TaskResponse,omitempty"` + Res *types.DVSRollback_TaskResponse `xml:"DVSRollback_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3465,7 +3745,7 @@ func DVSRollback_Task(ctx context.Context, r soap.RoundTripper, req *types.DVSRo type DatastoreEnterMaintenanceModeBody struct { Req *types.DatastoreEnterMaintenanceMode `xml:"urn:vim25 DatastoreEnterMaintenanceMode,omitempty"` - Res *types.DatastoreEnterMaintenanceModeResponse `xml:"urn:vim25 DatastoreEnterMaintenanceModeResponse,omitempty"` + Res *types.DatastoreEnterMaintenanceModeResponse `xml:"DatastoreEnterMaintenanceModeResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3485,7 +3765,7 @@ func DatastoreEnterMaintenanceMode(ctx context.Context, r soap.RoundTripper, req type DatastoreExitMaintenanceMode_TaskBody struct { Req *types.DatastoreExitMaintenanceMode_Task `xml:"urn:vim25 DatastoreExitMaintenanceMode_Task,omitempty"` - Res *types.DatastoreExitMaintenanceMode_TaskResponse `xml:"urn:vim25 DatastoreExitMaintenanceMode_TaskResponse,omitempty"` + Res *types.DatastoreExitMaintenanceMode_TaskResponse 
`xml:"DatastoreExitMaintenanceMode_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3505,7 +3785,7 @@ func DatastoreExitMaintenanceMode_Task(ctx context.Context, r soap.RoundTripper, type DecodeLicenseBody struct { Req *types.DecodeLicense `xml:"urn:vim25 DecodeLicense,omitempty"` - Res *types.DecodeLicenseResponse `xml:"urn:vim25 DecodeLicenseResponse,omitempty"` + Res *types.DecodeLicenseResponse `xml:"DecodeLicenseResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3525,7 +3805,7 @@ func DecodeLicense(ctx context.Context, r soap.RoundTripper, req *types.DecodeLi type DefragmentAllDisksBody struct { Req *types.DefragmentAllDisks `xml:"urn:vim25 DefragmentAllDisks,omitempty"` - Res *types.DefragmentAllDisksResponse `xml:"urn:vim25 DefragmentAllDisksResponse,omitempty"` + Res *types.DefragmentAllDisksResponse `xml:"DefragmentAllDisksResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3545,7 +3825,7 @@ func DefragmentAllDisks(ctx context.Context, r soap.RoundTripper, req *types.Def type DefragmentVirtualDisk_TaskBody struct { Req *types.DefragmentVirtualDisk_Task `xml:"urn:vim25 DefragmentVirtualDisk_Task,omitempty"` - Res *types.DefragmentVirtualDisk_TaskResponse `xml:"urn:vim25 DefragmentVirtualDisk_TaskResponse,omitempty"` + Res *types.DefragmentVirtualDisk_TaskResponse `xml:"DefragmentVirtualDisk_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3565,7 +3845,7 @@ func DefragmentVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *t type DeleteCustomizationSpecBody struct { Req *types.DeleteCustomizationSpec `xml:"urn:vim25 DeleteCustomizationSpec,omitempty"` - Res *types.DeleteCustomizationSpecResponse `xml:"urn:vim25 DeleteCustomizationSpecResponse,omitempty"` + Res *types.DeleteCustomizationSpecResponse `xml:"DeleteCustomizationSpecResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3585,7 +3865,7 @@ func DeleteCustomizationSpec(ctx context.Context, r soap.RoundTripper, req *type type DeleteDatastoreFile_TaskBody struct { Req *types.DeleteDatastoreFile_Task `xml:"urn:vim25 DeleteDatastoreFile_Task,omitempty"` - Res *types.DeleteDatastoreFile_TaskResponse `xml:"urn:vim25 DeleteDatastoreFile_TaskResponse,omitempty"` + Res *types.DeleteDatastoreFile_TaskResponse `xml:"DeleteDatastoreFile_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3605,7 +3885,7 @@ func DeleteDatastoreFile_Task(ctx context.Context, r soap.RoundTripper, req *typ type DeleteDirectoryBody struct { Req *types.DeleteDirectory `xml:"urn:vim25 DeleteDirectory,omitempty"` - Res *types.DeleteDirectoryResponse `xml:"urn:vim25 DeleteDirectoryResponse,omitempty"` + Res *types.DeleteDirectoryResponse `xml:"DeleteDirectoryResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3625,7 +3905,7 @@ func DeleteDirectory(ctx context.Context, r soap.RoundTripper, req *types.Delete type DeleteDirectoryInGuestBody struct { Req *types.DeleteDirectoryInGuest `xml:"urn:vim25 DeleteDirectoryInGuest,omitempty"` - Res *types.DeleteDirectoryInGuestResponse `xml:"urn:vim25 DeleteDirectoryInGuestResponse,omitempty"` + Res *types.DeleteDirectoryInGuestResponse `xml:"DeleteDirectoryInGuestResponse,omitempty"` 
Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3645,7 +3925,7 @@ func DeleteDirectoryInGuest(ctx context.Context, r soap.RoundTripper, req *types type DeleteFileBody struct { Req *types.DeleteFile `xml:"urn:vim25 DeleteFile,omitempty"` - Res *types.DeleteFileResponse `xml:"urn:vim25 DeleteFileResponse,omitempty"` + Res *types.DeleteFileResponse `xml:"DeleteFileResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3665,7 +3945,7 @@ func DeleteFile(ctx context.Context, r soap.RoundTripper, req *types.DeleteFile) type DeleteFileInGuestBody struct { Req *types.DeleteFileInGuest `xml:"urn:vim25 DeleteFileInGuest,omitempty"` - Res *types.DeleteFileInGuestResponse `xml:"urn:vim25 DeleteFileInGuestResponse,omitempty"` + Res *types.DeleteFileInGuestResponse `xml:"DeleteFileInGuestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3685,7 +3965,7 @@ func DeleteFileInGuest(ctx context.Context, r soap.RoundTripper, req *types.Dele type DeleteHostSpecificationBody struct { Req *types.DeleteHostSpecification `xml:"urn:vim25 DeleteHostSpecification,omitempty"` - Res *types.DeleteHostSpecificationResponse `xml:"urn:vim25 DeleteHostSpecificationResponse,omitempty"` + Res *types.DeleteHostSpecificationResponse `xml:"DeleteHostSpecificationResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3705,7 +3985,7 @@ func DeleteHostSpecification(ctx context.Context, r soap.RoundTripper, req *type type DeleteHostSubSpecificationBody struct { Req *types.DeleteHostSubSpecification `xml:"urn:vim25 DeleteHostSubSpecification,omitempty"` - Res *types.DeleteHostSubSpecificationResponse `xml:"urn:vim25 DeleteHostSubSpecificationResponse,omitempty"` + Res *types.DeleteHostSubSpecificationResponse `xml:"DeleteHostSubSpecificationResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3725,7 +4005,7 @@ func DeleteHostSubSpecification(ctx context.Context, r soap.RoundTripper, req *t type DeleteNvdimmBlockNamespaces_TaskBody struct { Req *types.DeleteNvdimmBlockNamespaces_Task `xml:"urn:vim25 DeleteNvdimmBlockNamespaces_Task,omitempty"` - Res *types.DeleteNvdimmBlockNamespaces_TaskResponse `xml:"urn:vim25 DeleteNvdimmBlockNamespaces_TaskResponse,omitempty"` + Res *types.DeleteNvdimmBlockNamespaces_TaskResponse `xml:"DeleteNvdimmBlockNamespaces_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3745,7 +4025,7 @@ func DeleteNvdimmBlockNamespaces_Task(ctx context.Context, r soap.RoundTripper, type DeleteNvdimmNamespace_TaskBody struct { Req *types.DeleteNvdimmNamespace_Task `xml:"urn:vim25 DeleteNvdimmNamespace_Task,omitempty"` - Res *types.DeleteNvdimmNamespace_TaskResponse `xml:"urn:vim25 DeleteNvdimmNamespace_TaskResponse,omitempty"` + Res *types.DeleteNvdimmNamespace_TaskResponse `xml:"DeleteNvdimmNamespace_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3765,7 +4045,7 @@ func DeleteNvdimmNamespace_Task(ctx context.Context, r soap.RoundTripper, req *t type DeleteRegistryKeyInGuestBody struct { Req *types.DeleteRegistryKeyInGuest `xml:"urn:vim25 DeleteRegistryKeyInGuest,omitempty"` - Res *types.DeleteRegistryKeyInGuestResponse `xml:"urn:vim25 DeleteRegistryKeyInGuestResponse,omitempty"` + Res 
*types.DeleteRegistryKeyInGuestResponse `xml:"DeleteRegistryKeyInGuestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3785,7 +4065,7 @@ func DeleteRegistryKeyInGuest(ctx context.Context, r soap.RoundTripper, req *typ type DeleteRegistryValueInGuestBody struct { Req *types.DeleteRegistryValueInGuest `xml:"urn:vim25 DeleteRegistryValueInGuest,omitempty"` - Res *types.DeleteRegistryValueInGuestResponse `xml:"urn:vim25 DeleteRegistryValueInGuestResponse,omitempty"` + Res *types.DeleteRegistryValueInGuestResponse `xml:"DeleteRegistryValueInGuestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3805,7 +4085,7 @@ func DeleteRegistryValueInGuest(ctx context.Context, r soap.RoundTripper, req *t type DeleteScsiLunStateBody struct { Req *types.DeleteScsiLunState `xml:"urn:vim25 DeleteScsiLunState,omitempty"` - Res *types.DeleteScsiLunStateResponse `xml:"urn:vim25 DeleteScsiLunStateResponse,omitempty"` + Res *types.DeleteScsiLunStateResponse `xml:"DeleteScsiLunStateResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3825,7 +4105,7 @@ func DeleteScsiLunState(ctx context.Context, r soap.RoundTripper, req *types.Del type DeleteSnapshot_TaskBody struct { Req *types.DeleteSnapshot_Task `xml:"urn:vim25 DeleteSnapshot_Task,omitempty"` - Res *types.DeleteSnapshot_TaskResponse `xml:"urn:vim25 DeleteSnapshot_TaskResponse,omitempty"` + Res *types.DeleteSnapshot_TaskResponse `xml:"DeleteSnapshot_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3843,9 +4123,29 @@ func DeleteSnapshot_Task(ctx context.Context, r soap.RoundTripper, req *types.De return resBody.Res, nil } +type DeleteVStorageObjectEx_TaskBody struct { + Req *types.DeleteVStorageObjectEx_Task `xml:"urn:vim25 DeleteVStorageObjectEx_Task,omitempty"` + Res *types.DeleteVStorageObjectEx_TaskResponse `xml:"DeleteVStorageObjectEx_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DeleteVStorageObjectEx_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func DeleteVStorageObjectEx_Task(ctx context.Context, r soap.RoundTripper, req *types.DeleteVStorageObjectEx_Task) (*types.DeleteVStorageObjectEx_TaskResponse, error) { + var reqBody, resBody DeleteVStorageObjectEx_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type DeleteVStorageObject_TaskBody struct { Req *types.DeleteVStorageObject_Task `xml:"urn:vim25 DeleteVStorageObject_Task,omitempty"` - Res *types.DeleteVStorageObject_TaskResponse `xml:"urn:vim25 DeleteVStorageObject_TaskResponse,omitempty"` + Res *types.DeleteVStorageObject_TaskResponse `xml:"DeleteVStorageObject_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3865,7 +4165,7 @@ func DeleteVStorageObject_Task(ctx context.Context, r soap.RoundTripper, req *ty type DeleteVffsVolumeStateBody struct { Req *types.DeleteVffsVolumeState `xml:"urn:vim25 DeleteVffsVolumeState,omitempty"` - Res *types.DeleteVffsVolumeStateResponse `xml:"urn:vim25 DeleteVffsVolumeStateResponse,omitempty"` + Res *types.DeleteVffsVolumeStateResponse `xml:"DeleteVffsVolumeStateResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ 
-3885,7 +4185,7 @@ func DeleteVffsVolumeState(ctx context.Context, r soap.RoundTripper, req *types. type DeleteVirtualDisk_TaskBody struct { Req *types.DeleteVirtualDisk_Task `xml:"urn:vim25 DeleteVirtualDisk_Task,omitempty"` - Res *types.DeleteVirtualDisk_TaskResponse `xml:"urn:vim25 DeleteVirtualDisk_TaskResponse,omitempty"` + Res *types.DeleteVirtualDisk_TaskResponse `xml:"DeleteVirtualDisk_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3905,7 +4205,7 @@ func DeleteVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *types type DeleteVmfsVolumeStateBody struct { Req *types.DeleteVmfsVolumeState `xml:"urn:vim25 DeleteVmfsVolumeState,omitempty"` - Res *types.DeleteVmfsVolumeStateResponse `xml:"urn:vim25 DeleteVmfsVolumeStateResponse,omitempty"` + Res *types.DeleteVmfsVolumeStateResponse `xml:"DeleteVmfsVolumeStateResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3925,7 +4225,7 @@ func DeleteVmfsVolumeState(ctx context.Context, r soap.RoundTripper, req *types. type DeleteVsanObjectsBody struct { Req *types.DeleteVsanObjects `xml:"urn:vim25 DeleteVsanObjects,omitempty"` - Res *types.DeleteVsanObjectsResponse `xml:"urn:vim25 DeleteVsanObjectsResponse,omitempty"` + Res *types.DeleteVsanObjectsResponse `xml:"DeleteVsanObjectsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3945,7 +4245,7 @@ func DeleteVsanObjects(ctx context.Context, r soap.RoundTripper, req *types.Dele type DeselectVnicBody struct { Req *types.DeselectVnic `xml:"urn:vim25 DeselectVnic,omitempty"` - Res *types.DeselectVnicResponse `xml:"urn:vim25 DeselectVnicResponse,omitempty"` + Res *types.DeselectVnicResponse `xml:"DeselectVnicResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3965,7 +4265,7 @@ func DeselectVnic(ctx context.Context, r soap.RoundTripper, req *types.DeselectV type DeselectVnicForNicTypeBody struct { Req *types.DeselectVnicForNicType `xml:"urn:vim25 DeselectVnicForNicType,omitempty"` - Res *types.DeselectVnicForNicTypeResponse `xml:"urn:vim25 DeselectVnicForNicTypeResponse,omitempty"` + Res *types.DeselectVnicForNicTypeResponse `xml:"DeselectVnicForNicTypeResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -3985,7 +4285,7 @@ func DeselectVnicForNicType(ctx context.Context, r soap.RoundTripper, req *types type DestroyChildrenBody struct { Req *types.DestroyChildren `xml:"urn:vim25 DestroyChildren,omitempty"` - Res *types.DestroyChildrenResponse `xml:"urn:vim25 DestroyChildrenResponse,omitempty"` + Res *types.DestroyChildrenResponse `xml:"DestroyChildrenResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4005,7 +4305,7 @@ func DestroyChildren(ctx context.Context, r soap.RoundTripper, req *types.Destro type DestroyCollectorBody struct { Req *types.DestroyCollector `xml:"urn:vim25 DestroyCollector,omitempty"` - Res *types.DestroyCollectorResponse `xml:"urn:vim25 DestroyCollectorResponse,omitempty"` + Res *types.DestroyCollectorResponse `xml:"DestroyCollectorResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4025,7 +4325,7 @@ func DestroyCollector(ctx context.Context, r soap.RoundTripper, req *types.Destr type DestroyDatastoreBody struct { Req *types.DestroyDatastore 
`xml:"urn:vim25 DestroyDatastore,omitempty"` - Res *types.DestroyDatastoreResponse `xml:"urn:vim25 DestroyDatastoreResponse,omitempty"` + Res *types.DestroyDatastoreResponse `xml:"DestroyDatastoreResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4045,7 +4345,7 @@ func DestroyDatastore(ctx context.Context, r soap.RoundTripper, req *types.Destr type DestroyIpPoolBody struct { Req *types.DestroyIpPool `xml:"urn:vim25 DestroyIpPool,omitempty"` - Res *types.DestroyIpPoolResponse `xml:"urn:vim25 DestroyIpPoolResponse,omitempty"` + Res *types.DestroyIpPoolResponse `xml:"DestroyIpPoolResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4065,7 +4365,7 @@ func DestroyIpPool(ctx context.Context, r soap.RoundTripper, req *types.DestroyI type DestroyNetworkBody struct { Req *types.DestroyNetwork `xml:"urn:vim25 DestroyNetwork,omitempty"` - Res *types.DestroyNetworkResponse `xml:"urn:vim25 DestroyNetworkResponse,omitempty"` + Res *types.DestroyNetworkResponse `xml:"DestroyNetworkResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4085,7 +4385,7 @@ func DestroyNetwork(ctx context.Context, r soap.RoundTripper, req *types.Destroy type DestroyProfileBody struct { Req *types.DestroyProfile `xml:"urn:vim25 DestroyProfile,omitempty"` - Res *types.DestroyProfileResponse `xml:"urn:vim25 DestroyProfileResponse,omitempty"` + Res *types.DestroyProfileResponse `xml:"DestroyProfileResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4105,7 +4405,7 @@ func DestroyProfile(ctx context.Context, r soap.RoundTripper, req *types.Destroy type DestroyPropertyCollectorBody struct { Req *types.DestroyPropertyCollector `xml:"urn:vim25 DestroyPropertyCollector,omitempty"` - Res *types.DestroyPropertyCollectorResponse `xml:"urn:vim25 DestroyPropertyCollectorResponse,omitempty"` + Res *types.DestroyPropertyCollectorResponse `xml:"DestroyPropertyCollectorResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4125,7 +4425,7 @@ func DestroyPropertyCollector(ctx context.Context, r soap.RoundTripper, req *typ type DestroyPropertyFilterBody struct { Req *types.DestroyPropertyFilter `xml:"urn:vim25 DestroyPropertyFilter,omitempty"` - Res *types.DestroyPropertyFilterResponse `xml:"urn:vim25 DestroyPropertyFilterResponse,omitempty"` + Res *types.DestroyPropertyFilterResponse `xml:"DestroyPropertyFilterResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4145,7 +4445,7 @@ func DestroyPropertyFilter(ctx context.Context, r soap.RoundTripper, req *types. 
type DestroyVffsBody struct { Req *types.DestroyVffs `xml:"urn:vim25 DestroyVffs,omitempty"` - Res *types.DestroyVffsResponse `xml:"urn:vim25 DestroyVffsResponse,omitempty"` + Res *types.DestroyVffsResponse `xml:"DestroyVffsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4165,7 +4465,7 @@ func DestroyVffs(ctx context.Context, r soap.RoundTripper, req *types.DestroyVff type DestroyViewBody struct { Req *types.DestroyView `xml:"urn:vim25 DestroyView,omitempty"` - Res *types.DestroyViewResponse `xml:"urn:vim25 DestroyViewResponse,omitempty"` + Res *types.DestroyViewResponse `xml:"DestroyViewResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4185,7 +4485,7 @@ func DestroyView(ctx context.Context, r soap.RoundTripper, req *types.DestroyVie type Destroy_TaskBody struct { Req *types.Destroy_Task `xml:"urn:vim25 Destroy_Task,omitempty"` - Res *types.Destroy_TaskResponse `xml:"urn:vim25 Destroy_TaskResponse,omitempty"` + Res *types.Destroy_TaskResponse `xml:"Destroy_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4205,7 +4505,7 @@ func Destroy_Task(ctx context.Context, r soap.RoundTripper, req *types.Destroy_T type DetachDisk_TaskBody struct { Req *types.DetachDisk_Task `xml:"urn:vim25 DetachDisk_Task,omitempty"` - Res *types.DetachDisk_TaskResponse `xml:"urn:vim25 DetachDisk_TaskResponse,omitempty"` + Res *types.DetachDisk_TaskResponse `xml:"DetachDisk_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4225,7 +4525,7 @@ func DetachDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.Detach type DetachScsiLunBody struct { Req *types.DetachScsiLun `xml:"urn:vim25 DetachScsiLun,omitempty"` - Res *types.DetachScsiLunResponse `xml:"urn:vim25 DetachScsiLunResponse,omitempty"` + Res *types.DetachScsiLunResponse `xml:"DetachScsiLunResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4245,7 +4545,7 @@ func DetachScsiLun(ctx context.Context, r soap.RoundTripper, req *types.DetachSc type DetachScsiLunEx_TaskBody struct { Req *types.DetachScsiLunEx_Task `xml:"urn:vim25 DetachScsiLunEx_Task,omitempty"` - Res *types.DetachScsiLunEx_TaskResponse `xml:"urn:vim25 DetachScsiLunEx_TaskResponse,omitempty"` + Res *types.DetachScsiLunEx_TaskResponse `xml:"DetachScsiLunEx_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4265,7 +4565,7 @@ func DetachScsiLunEx_Task(ctx context.Context, r soap.RoundTripper, req *types.D type DetachTagFromVStorageObjectBody struct { Req *types.DetachTagFromVStorageObject `xml:"urn:vim25 DetachTagFromVStorageObject,omitempty"` - Res *types.DetachTagFromVStorageObjectResponse `xml:"urn:vim25 DetachTagFromVStorageObjectResponse,omitempty"` + Res *types.DetachTagFromVStorageObjectResponse `xml:"DetachTagFromVStorageObjectResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4283,9 +4583,49 @@ func DetachTagFromVStorageObject(ctx context.Context, r soap.RoundTripper, req * return resBody.Res, nil } +type DisableAlarmBody struct { + Req *types.DisableAlarm `xml:"urn:vim25 DisableAlarm,omitempty"` + Res *types.DisableAlarmResponse `xml:"DisableAlarmResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` 
+} + +func (b *DisableAlarmBody) Fault() *soap.Fault { return b.Fault_ } + +func DisableAlarm(ctx context.Context, r soap.RoundTripper, req *types.DisableAlarm) (*types.DisableAlarmResponse, error) { + var reqBody, resBody DisableAlarmBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DisableClusteredVmdkSupportBody struct { + Req *types.DisableClusteredVmdkSupport `xml:"urn:vim25 DisableClusteredVmdkSupport,omitempty"` + Res *types.DisableClusteredVmdkSupportResponse `xml:"DisableClusteredVmdkSupportResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DisableClusteredVmdkSupportBody) Fault() *soap.Fault { return b.Fault_ } + +func DisableClusteredVmdkSupport(ctx context.Context, r soap.RoundTripper, req *types.DisableClusteredVmdkSupport) (*types.DisableClusteredVmdkSupportResponse, error) { + var reqBody, resBody DisableClusteredVmdkSupportBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type DisableEvcMode_TaskBody struct { Req *types.DisableEvcMode_Task `xml:"urn:vim25 DisableEvcMode_Task,omitempty"` - Res *types.DisableEvcMode_TaskResponse `xml:"urn:vim25 DisableEvcMode_TaskResponse,omitempty"` + Res *types.DisableEvcMode_TaskResponse `xml:"DisableEvcMode_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4305,7 +4645,7 @@ func DisableEvcMode_Task(ctx context.Context, r soap.RoundTripper, req *types.Di type DisableFeatureBody struct { Req *types.DisableFeature `xml:"urn:vim25 DisableFeature,omitempty"` - Res *types.DisableFeatureResponse `xml:"urn:vim25 DisableFeatureResponse,omitempty"` + Res *types.DisableFeatureResponse `xml:"DisableFeatureResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4325,7 +4665,7 @@ func DisableFeature(ctx context.Context, r soap.RoundTripper, req *types.Disable type DisableHyperThreadingBody struct { Req *types.DisableHyperThreading `xml:"urn:vim25 DisableHyperThreading,omitempty"` - Res *types.DisableHyperThreadingResponse `xml:"urn:vim25 DisableHyperThreadingResponse,omitempty"` + Res *types.DisableHyperThreadingResponse `xml:"DisableHyperThreadingResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4345,7 +4685,7 @@ func DisableHyperThreading(ctx context.Context, r soap.RoundTripper, req *types. 
type DisableMultipathPathBody struct { Req *types.DisableMultipathPath `xml:"urn:vim25 DisableMultipathPath,omitempty"` - Res *types.DisableMultipathPathResponse `xml:"urn:vim25 DisableMultipathPathResponse,omitempty"` + Res *types.DisableMultipathPathResponse `xml:"DisableMultipathPathResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4365,7 +4705,7 @@ func DisableMultipathPath(ctx context.Context, r soap.RoundTripper, req *types.D type DisableRulesetBody struct { Req *types.DisableRuleset `xml:"urn:vim25 DisableRuleset,omitempty"` - Res *types.DisableRulesetResponse `xml:"urn:vim25 DisableRulesetResponse,omitempty"` + Res *types.DisableRulesetResponse `xml:"DisableRulesetResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4385,7 +4725,7 @@ func DisableRuleset(ctx context.Context, r soap.RoundTripper, req *types.Disable type DisableSecondaryVM_TaskBody struct { Req *types.DisableSecondaryVM_Task `xml:"urn:vim25 DisableSecondaryVM_Task,omitempty"` - Res *types.DisableSecondaryVM_TaskResponse `xml:"urn:vim25 DisableSecondaryVM_TaskResponse,omitempty"` + Res *types.DisableSecondaryVM_TaskResponse `xml:"DisableSecondaryVM_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4405,7 +4745,7 @@ func DisableSecondaryVM_Task(ctx context.Context, r soap.RoundTripper, req *type type DisableSmartCardAuthenticationBody struct { Req *types.DisableSmartCardAuthentication `xml:"urn:vim25 DisableSmartCardAuthentication,omitempty"` - Res *types.DisableSmartCardAuthenticationResponse `xml:"urn:vim25 DisableSmartCardAuthenticationResponse,omitempty"` + Res *types.DisableSmartCardAuthenticationResponse `xml:"DisableSmartCardAuthenticationResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4425,7 +4765,7 @@ func DisableSmartCardAuthentication(ctx context.Context, r soap.RoundTripper, re type DisconnectHost_TaskBody struct { Req *types.DisconnectHost_Task `xml:"urn:vim25 DisconnectHost_Task,omitempty"` - Res *types.DisconnectHost_TaskResponse `xml:"urn:vim25 DisconnectHost_TaskResponse,omitempty"` + Res *types.DisconnectHost_TaskResponse `xml:"DisconnectHost_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4443,16 +4783,16 @@ func DisconnectHost_Task(ctx context.Context, r soap.RoundTripper, req *types.Di return resBody.Res, nil } -type DiscoverFcoeHbasBody struct { - Req *types.DiscoverFcoeHbas `xml:"urn:vim25 DiscoverFcoeHbas,omitempty"` - Res *types.DiscoverFcoeHbasResponse `xml:"urn:vim25 DiscoverFcoeHbasResponse,omitempty"` - Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +type DisconnectNvmeControllerBody struct { + Req *types.DisconnectNvmeController `xml:"urn:vim25 DisconnectNvmeController,omitempty"` + Res *types.DisconnectNvmeControllerResponse `xml:"DisconnectNvmeControllerResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } -func (b *DiscoverFcoeHbasBody) Fault() *soap.Fault { return b.Fault_ } +func (b *DisconnectNvmeControllerBody) Fault() *soap.Fault { return b.Fault_ } -func DiscoverFcoeHbas(ctx context.Context, r soap.RoundTripper, req *types.DiscoverFcoeHbas) (*types.DiscoverFcoeHbasResponse, error) { - var reqBody, resBody DiscoverFcoeHbasBody +func DisconnectNvmeController(ctx context.Context, 
r soap.RoundTripper, req *types.DisconnectNvmeController) (*types.DisconnectNvmeControllerResponse, error) { + var reqBody, resBody DisconnectNvmeControllerBody reqBody.Req = req @@ -4463,16 +4803,16 @@ func DiscoverFcoeHbas(ctx context.Context, r soap.RoundTripper, req *types.Disco return resBody.Res, nil } -type DissociateProfileBody struct { - Req *types.DissociateProfile `xml:"urn:vim25 DissociateProfile,omitempty"` - Res *types.DissociateProfileResponse `xml:"urn:vim25 DissociateProfileResponse,omitempty"` - Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +type DisconnectNvmeControllerEx_TaskBody struct { + Req *types.DisconnectNvmeControllerEx_Task `xml:"urn:vim25 DisconnectNvmeControllerEx_Task,omitempty"` + Res *types.DisconnectNvmeControllerEx_TaskResponse `xml:"DisconnectNvmeControllerEx_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } -func (b *DissociateProfileBody) Fault() *soap.Fault { return b.Fault_ } +func (b *DisconnectNvmeControllerEx_TaskBody) Fault() *soap.Fault { return b.Fault_ } -func DissociateProfile(ctx context.Context, r soap.RoundTripper, req *types.DissociateProfile) (*types.DissociateProfileResponse, error) { - var reqBody, resBody DissociateProfileBody +func DisconnectNvmeControllerEx_Task(ctx context.Context, r soap.RoundTripper, req *types.DisconnectNvmeControllerEx_Task) (*types.DisconnectNvmeControllerEx_TaskResponse, error) { + var reqBody, resBody DisconnectNvmeControllerEx_TaskBody reqBody.Req = req @@ -4483,11 +4823,71 @@ func DissociateProfile(ctx context.Context, r soap.RoundTripper, req *types.Diss return resBody.Res, nil } -type DoesCustomizationSpecExistBody struct { - Req *types.DoesCustomizationSpecExist `xml:"urn:vim25 DoesCustomizationSpecExist,omitempty"` - Res *types.DoesCustomizationSpecExistResponse `xml:"urn:vim25 DoesCustomizationSpecExistResponse,omitempty"` - Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` -} +type DiscoverFcoeHbasBody struct { + Req *types.DiscoverFcoeHbas `xml:"urn:vim25 DiscoverFcoeHbas,omitempty"` + Res *types.DiscoverFcoeHbasResponse `xml:"DiscoverFcoeHbasResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DiscoverFcoeHbasBody) Fault() *soap.Fault { return b.Fault_ } + +func DiscoverFcoeHbas(ctx context.Context, r soap.RoundTripper, req *types.DiscoverFcoeHbas) (*types.DiscoverFcoeHbasResponse, error) { + var reqBody, resBody DiscoverFcoeHbasBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DiscoverNvmeControllersBody struct { + Req *types.DiscoverNvmeControllers `xml:"urn:vim25 DiscoverNvmeControllers,omitempty"` + Res *types.DiscoverNvmeControllersResponse `xml:"DiscoverNvmeControllersResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DiscoverNvmeControllersBody) Fault() *soap.Fault { return b.Fault_ } + +func DiscoverNvmeControllers(ctx context.Context, r soap.RoundTripper, req *types.DiscoverNvmeControllers) (*types.DiscoverNvmeControllersResponse, error) { + var reqBody, resBody DiscoverNvmeControllersBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DissociateProfileBody struct { + Req *types.DissociateProfile `xml:"urn:vim25 
DissociateProfile,omitempty"` + Res *types.DissociateProfileResponse `xml:"DissociateProfileResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DissociateProfileBody) Fault() *soap.Fault { return b.Fault_ } + +func DissociateProfile(ctx context.Context, r soap.RoundTripper, req *types.DissociateProfile) (*types.DissociateProfileResponse, error) { + var reqBody, resBody DissociateProfileBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DoesCustomizationSpecExistBody struct { + Req *types.DoesCustomizationSpecExist `xml:"urn:vim25 DoesCustomizationSpecExist,omitempty"` + Res *types.DoesCustomizationSpecExistResponse `xml:"DoesCustomizationSpecExistResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} func (b *DoesCustomizationSpecExistBody) Fault() *soap.Fault { return b.Fault_ } @@ -4503,9 +4903,49 @@ func DoesCustomizationSpecExist(ctx context.Context, r soap.RoundTripper, req *t return resBody.Res, nil } +type DownloadDescriptionTreeBody struct { + Req *types.DownloadDescriptionTree `xml:"urn:vim25 DownloadDescriptionTree,omitempty"` + Res *types.DownloadDescriptionTreeResponse `xml:"DownloadDescriptionTreeResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DownloadDescriptionTreeBody) Fault() *soap.Fault { return b.Fault_ } + +func DownloadDescriptionTree(ctx context.Context, r soap.RoundTripper, req *types.DownloadDescriptionTree) (*types.DownloadDescriptionTreeResponse, error) { + var reqBody, resBody DownloadDescriptionTreeBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type DropConnectionsBody struct { + Req *types.DropConnections `xml:"urn:vim25 DropConnections,omitempty"` + Res *types.DropConnectionsResponse `xml:"DropConnectionsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *DropConnectionsBody) Fault() *soap.Fault { return b.Fault_ } + +func DropConnections(ctx context.Context, r soap.RoundTripper, req *types.DropConnections) (*types.DropConnectionsResponse, error) { + var reqBody, resBody DropConnectionsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type DuplicateCustomizationSpecBody struct { Req *types.DuplicateCustomizationSpec `xml:"urn:vim25 DuplicateCustomizationSpec,omitempty"` - Res *types.DuplicateCustomizationSpecResponse `xml:"urn:vim25 DuplicateCustomizationSpecResponse,omitempty"` + Res *types.DuplicateCustomizationSpecResponse `xml:"DuplicateCustomizationSpecResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4525,7 +4965,7 @@ func DuplicateCustomizationSpec(ctx context.Context, r soap.RoundTripper, req *t type DvsReconfigureVmVnicNetworkResourcePool_TaskBody struct { Req *types.DvsReconfigureVmVnicNetworkResourcePool_Task `xml:"urn:vim25 DvsReconfigureVmVnicNetworkResourcePool_Task,omitempty"` - Res *types.DvsReconfigureVmVnicNetworkResourcePool_TaskResponse `xml:"urn:vim25 DvsReconfigureVmVnicNetworkResourcePool_TaskResponse,omitempty"` + Res *types.DvsReconfigureVmVnicNetworkResourcePool_TaskResponse 
`xml:"DvsReconfigureVmVnicNetworkResourcePool_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4545,7 +4985,7 @@ func DvsReconfigureVmVnicNetworkResourcePool_Task(ctx context.Context, r soap.Ro type EagerZeroVirtualDisk_TaskBody struct { Req *types.EagerZeroVirtualDisk_Task `xml:"urn:vim25 EagerZeroVirtualDisk_Task,omitempty"` - Res *types.EagerZeroVirtualDisk_TaskResponse `xml:"urn:vim25 EagerZeroVirtualDisk_TaskResponse,omitempty"` + Res *types.EagerZeroVirtualDisk_TaskResponse `xml:"EagerZeroVirtualDisk_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4563,9 +5003,29 @@ func EagerZeroVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *ty return resBody.Res, nil } +type EnableAlarmBody struct { + Req *types.EnableAlarm `xml:"urn:vim25 EnableAlarm,omitempty"` + Res *types.EnableAlarmResponse `xml:"EnableAlarmResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *EnableAlarmBody) Fault() *soap.Fault { return b.Fault_ } + +func EnableAlarm(ctx context.Context, r soap.RoundTripper, req *types.EnableAlarm) (*types.EnableAlarmResponse, error) { + var reqBody, resBody EnableAlarmBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type EnableAlarmActionsBody struct { Req *types.EnableAlarmActions `xml:"urn:vim25 EnableAlarmActions,omitempty"` - Res *types.EnableAlarmActionsResponse `xml:"urn:vim25 EnableAlarmActionsResponse,omitempty"` + Res *types.EnableAlarmActionsResponse `xml:"EnableAlarmActionsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4583,9 +5043,29 @@ func EnableAlarmActions(ctx context.Context, r soap.RoundTripper, req *types.Ena return resBody.Res, nil } +type EnableClusteredVmdkSupportBody struct { + Req *types.EnableClusteredVmdkSupport `xml:"urn:vim25 EnableClusteredVmdkSupport,omitempty"` + Res *types.EnableClusteredVmdkSupportResponse `xml:"EnableClusteredVmdkSupportResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *EnableClusteredVmdkSupportBody) Fault() *soap.Fault { return b.Fault_ } + +func EnableClusteredVmdkSupport(ctx context.Context, r soap.RoundTripper, req *types.EnableClusteredVmdkSupport) (*types.EnableClusteredVmdkSupportResponse, error) { + var reqBody, resBody EnableClusteredVmdkSupportBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type EnableCryptoBody struct { Req *types.EnableCrypto `xml:"urn:vim25 EnableCrypto,omitempty"` - Res *types.EnableCryptoResponse `xml:"urn:vim25 EnableCryptoResponse,omitempty"` + Res *types.EnableCryptoResponse `xml:"EnableCryptoResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4605,7 +5085,7 @@ func EnableCrypto(ctx context.Context, r soap.RoundTripper, req *types.EnableCry type EnableFeatureBody struct { Req *types.EnableFeature `xml:"urn:vim25 EnableFeature,omitempty"` - Res *types.EnableFeatureResponse `xml:"urn:vim25 EnableFeatureResponse,omitempty"` + Res *types.EnableFeatureResponse `xml:"EnableFeatureResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4625,7 +5105,7 @@ 
func EnableFeature(ctx context.Context, r soap.RoundTripper, req *types.EnableFe type EnableHyperThreadingBody struct { Req *types.EnableHyperThreading `xml:"urn:vim25 EnableHyperThreading,omitempty"` - Res *types.EnableHyperThreadingResponse `xml:"urn:vim25 EnableHyperThreadingResponse,omitempty"` + Res *types.EnableHyperThreadingResponse `xml:"EnableHyperThreadingResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4645,7 +5125,7 @@ func EnableHyperThreading(ctx context.Context, r soap.RoundTripper, req *types.E type EnableMultipathPathBody struct { Req *types.EnableMultipathPath `xml:"urn:vim25 EnableMultipathPath,omitempty"` - Res *types.EnableMultipathPathResponse `xml:"urn:vim25 EnableMultipathPathResponse,omitempty"` + Res *types.EnableMultipathPathResponse `xml:"EnableMultipathPathResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4665,7 +5145,7 @@ func EnableMultipathPath(ctx context.Context, r soap.RoundTripper, req *types.En type EnableNetworkResourceManagementBody struct { Req *types.EnableNetworkResourceManagement `xml:"urn:vim25 EnableNetworkResourceManagement,omitempty"` - Res *types.EnableNetworkResourceManagementResponse `xml:"urn:vim25 EnableNetworkResourceManagementResponse,omitempty"` + Res *types.EnableNetworkResourceManagementResponse `xml:"EnableNetworkResourceManagementResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4685,7 +5165,7 @@ func EnableNetworkResourceManagement(ctx context.Context, r soap.RoundTripper, r type EnableRulesetBody struct { Req *types.EnableRuleset `xml:"urn:vim25 EnableRuleset,omitempty"` - Res *types.EnableRulesetResponse `xml:"urn:vim25 EnableRulesetResponse,omitempty"` + Res *types.EnableRulesetResponse `xml:"EnableRulesetResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4705,7 +5185,7 @@ func EnableRuleset(ctx context.Context, r soap.RoundTripper, req *types.EnableRu type EnableSecondaryVM_TaskBody struct { Req *types.EnableSecondaryVM_Task `xml:"urn:vim25 EnableSecondaryVM_Task,omitempty"` - Res *types.EnableSecondaryVM_TaskResponse `xml:"urn:vim25 EnableSecondaryVM_TaskResponse,omitempty"` + Res *types.EnableSecondaryVM_TaskResponse `xml:"EnableSecondaryVM_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4725,7 +5205,7 @@ func EnableSecondaryVM_Task(ctx context.Context, r soap.RoundTripper, req *types type EnableSmartCardAuthenticationBody struct { Req *types.EnableSmartCardAuthentication `xml:"urn:vim25 EnableSmartCardAuthentication,omitempty"` - Res *types.EnableSmartCardAuthenticationResponse `xml:"urn:vim25 EnableSmartCardAuthenticationResponse,omitempty"` + Res *types.EnableSmartCardAuthenticationResponse `xml:"EnableSmartCardAuthenticationResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4745,7 +5225,7 @@ func EnableSmartCardAuthentication(ctx context.Context, r soap.RoundTripper, req type EnterLockdownModeBody struct { Req *types.EnterLockdownMode `xml:"urn:vim25 EnterLockdownMode,omitempty"` - Res *types.EnterLockdownModeResponse `xml:"urn:vim25 EnterLockdownModeResponse,omitempty"` + Res *types.EnterLockdownModeResponse `xml:"EnterLockdownModeResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4765,7 
+5245,7 @@ func EnterLockdownMode(ctx context.Context, r soap.RoundTripper, req *types.Ente type EnterMaintenanceMode_TaskBody struct { Req *types.EnterMaintenanceMode_Task `xml:"urn:vim25 EnterMaintenanceMode_Task,omitempty"` - Res *types.EnterMaintenanceMode_TaskResponse `xml:"urn:vim25 EnterMaintenanceMode_TaskResponse,omitempty"` + Res *types.EnterMaintenanceMode_TaskResponse `xml:"EnterMaintenanceMode_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4785,7 +5265,7 @@ func EnterMaintenanceMode_Task(ctx context.Context, r soap.RoundTripper, req *ty type EstimateDatabaseSizeBody struct { Req *types.EstimateDatabaseSize `xml:"urn:vim25 EstimateDatabaseSize,omitempty"` - Res *types.EstimateDatabaseSizeResponse `xml:"urn:vim25 EstimateDatabaseSizeResponse,omitempty"` + Res *types.EstimateDatabaseSizeResponse `xml:"EstimateDatabaseSizeResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4805,7 +5285,7 @@ func EstimateDatabaseSize(ctx context.Context, r soap.RoundTripper, req *types.E type EstimateStorageForConsolidateSnapshots_TaskBody struct { Req *types.EstimateStorageForConsolidateSnapshots_Task `xml:"urn:vim25 EstimateStorageForConsolidateSnapshots_Task,omitempty"` - Res *types.EstimateStorageForConsolidateSnapshots_TaskResponse `xml:"urn:vim25 EstimateStorageForConsolidateSnapshots_TaskResponse,omitempty"` + Res *types.EstimateStorageForConsolidateSnapshots_TaskResponse `xml:"EstimateStorageForConsolidateSnapshots_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4825,7 +5305,7 @@ func EstimateStorageForConsolidateSnapshots_Task(ctx context.Context, r soap.Rou type EsxAgentHostManagerUpdateConfigBody struct { Req *types.EsxAgentHostManagerUpdateConfig `xml:"urn:vim25 EsxAgentHostManagerUpdateConfig,omitempty"` - Res *types.EsxAgentHostManagerUpdateConfigResponse `xml:"urn:vim25 EsxAgentHostManagerUpdateConfigResponse,omitempty"` + Res *types.EsxAgentHostManagerUpdateConfigResponse `xml:"EsxAgentHostManagerUpdateConfigResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4845,7 +5325,7 @@ func EsxAgentHostManagerUpdateConfig(ctx context.Context, r soap.RoundTripper, r type EvacuateVsanNode_TaskBody struct { Req *types.EvacuateVsanNode_Task `xml:"urn:vim25 EvacuateVsanNode_Task,omitempty"` - Res *types.EvacuateVsanNode_TaskResponse `xml:"urn:vim25 EvacuateVsanNode_TaskResponse,omitempty"` + Res *types.EvacuateVsanNode_TaskResponse `xml:"EvacuateVsanNode_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4865,7 +5345,7 @@ func EvacuateVsanNode_Task(ctx context.Context, r soap.RoundTripper, req *types. 
type EvcManagerBody struct { Req *types.EvcManager `xml:"urn:vim25 EvcManager,omitempty"` - Res *types.EvcManagerResponse `xml:"urn:vim25 EvcManagerResponse,omitempty"` + Res *types.EvcManagerResponse `xml:"EvcManagerResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4885,7 +5365,7 @@ func EvcManager(ctx context.Context, r soap.RoundTripper, req *types.EvcManager) type ExecuteHostProfileBody struct { Req *types.ExecuteHostProfile `xml:"urn:vim25 ExecuteHostProfile,omitempty"` - Res *types.ExecuteHostProfileResponse `xml:"urn:vim25 ExecuteHostProfileResponse,omitempty"` + Res *types.ExecuteHostProfileResponse `xml:"ExecuteHostProfileResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4905,7 +5385,7 @@ func ExecuteHostProfile(ctx context.Context, r soap.RoundTripper, req *types.Exe type ExecuteSimpleCommandBody struct { Req *types.ExecuteSimpleCommand `xml:"urn:vim25 ExecuteSimpleCommand,omitempty"` - Res *types.ExecuteSimpleCommandResponse `xml:"urn:vim25 ExecuteSimpleCommandResponse,omitempty"` + Res *types.ExecuteSimpleCommandResponse `xml:"ExecuteSimpleCommandResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4925,7 +5405,7 @@ func ExecuteSimpleCommand(ctx context.Context, r soap.RoundTripper, req *types.E type ExitLockdownModeBody struct { Req *types.ExitLockdownMode `xml:"urn:vim25 ExitLockdownMode,omitempty"` - Res *types.ExitLockdownModeResponse `xml:"urn:vim25 ExitLockdownModeResponse,omitempty"` + Res *types.ExitLockdownModeResponse `xml:"ExitLockdownModeResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4945,7 +5425,7 @@ func ExitLockdownMode(ctx context.Context, r soap.RoundTripper, req *types.ExitL type ExitMaintenanceMode_TaskBody struct { Req *types.ExitMaintenanceMode_Task `xml:"urn:vim25 ExitMaintenanceMode_Task,omitempty"` - Res *types.ExitMaintenanceMode_TaskResponse `xml:"urn:vim25 ExitMaintenanceMode_TaskResponse,omitempty"` + Res *types.ExitMaintenanceMode_TaskResponse `xml:"ExitMaintenanceMode_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4965,7 +5445,7 @@ func ExitMaintenanceMode_Task(ctx context.Context, r soap.RoundTripper, req *typ type ExpandVmfsDatastoreBody struct { Req *types.ExpandVmfsDatastore `xml:"urn:vim25 ExpandVmfsDatastore,omitempty"` - Res *types.ExpandVmfsDatastoreResponse `xml:"urn:vim25 ExpandVmfsDatastoreResponse,omitempty"` + Res *types.ExpandVmfsDatastoreResponse `xml:"ExpandVmfsDatastoreResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -4985,7 +5465,7 @@ func ExpandVmfsDatastore(ctx context.Context, r soap.RoundTripper, req *types.Ex type ExpandVmfsExtentBody struct { Req *types.ExpandVmfsExtent `xml:"urn:vim25 ExpandVmfsExtent,omitempty"` - Res *types.ExpandVmfsExtentResponse `xml:"urn:vim25 ExpandVmfsExtentResponse,omitempty"` + Res *types.ExpandVmfsExtentResponse `xml:"ExpandVmfsExtentResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5005,7 +5485,7 @@ func ExpandVmfsExtent(ctx context.Context, r soap.RoundTripper, req *types.Expan type ExportAnswerFile_TaskBody struct { Req *types.ExportAnswerFile_Task `xml:"urn:vim25 ExportAnswerFile_Task,omitempty"` - Res *types.ExportAnswerFile_TaskResponse `xml:"urn:vim25 
ExportAnswerFile_TaskResponse,omitempty"` + Res *types.ExportAnswerFile_TaskResponse `xml:"ExportAnswerFile_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5025,7 +5505,7 @@ func ExportAnswerFile_Task(ctx context.Context, r soap.RoundTripper, req *types. type ExportProfileBody struct { Req *types.ExportProfile `xml:"urn:vim25 ExportProfile,omitempty"` - Res *types.ExportProfileResponse `xml:"urn:vim25 ExportProfileResponse,omitempty"` + Res *types.ExportProfileResponse `xml:"ExportProfileResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5045,7 +5525,7 @@ func ExportProfile(ctx context.Context, r soap.RoundTripper, req *types.ExportPr type ExportSnapshotBody struct { Req *types.ExportSnapshot `xml:"urn:vim25 ExportSnapshot,omitempty"` - Res *types.ExportSnapshotResponse `xml:"urn:vim25 ExportSnapshotResponse,omitempty"` + Res *types.ExportSnapshotResponse `xml:"ExportSnapshotResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5065,7 +5545,7 @@ func ExportSnapshot(ctx context.Context, r soap.RoundTripper, req *types.ExportS type ExportVAppBody struct { Req *types.ExportVApp `xml:"urn:vim25 ExportVApp,omitempty"` - Res *types.ExportVAppResponse `xml:"urn:vim25 ExportVAppResponse,omitempty"` + Res *types.ExportVAppResponse `xml:"ExportVAppResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5085,7 +5565,7 @@ func ExportVApp(ctx context.Context, r soap.RoundTripper, req *types.ExportVApp) type ExportVmBody struct { Req *types.ExportVm `xml:"urn:vim25 ExportVm,omitempty"` - Res *types.ExportVmResponse `xml:"urn:vim25 ExportVmResponse,omitempty"` + Res *types.ExportVmResponse `xml:"ExportVmResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5105,7 +5585,7 @@ func ExportVm(ctx context.Context, r soap.RoundTripper, req *types.ExportVm) (*t type ExtendDisk_TaskBody struct { Req *types.ExtendDisk_Task `xml:"urn:vim25 ExtendDisk_Task,omitempty"` - Res *types.ExtendDisk_TaskResponse `xml:"urn:vim25 ExtendDisk_TaskResponse,omitempty"` + Res *types.ExtendDisk_TaskResponse `xml:"ExtendDisk_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5123,9 +5603,29 @@ func ExtendDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.Extend return resBody.Res, nil } +type ExtendHCI_TaskBody struct { + Req *types.ExtendHCI_Task `xml:"urn:vim25 ExtendHCI_Task,omitempty"` + Res *types.ExtendHCI_TaskResponse `xml:"ExtendHCI_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ExtendHCI_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func ExtendHCI_Task(ctx context.Context, r soap.RoundTripper, req *types.ExtendHCI_Task) (*types.ExtendHCI_TaskResponse, error) { + var reqBody, resBody ExtendHCI_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type ExtendVffsBody struct { Req *types.ExtendVffs `xml:"urn:vim25 ExtendVffs,omitempty"` - Res *types.ExtendVffsResponse `xml:"urn:vim25 ExtendVffsResponse,omitempty"` + Res *types.ExtendVffsResponse `xml:"ExtendVffsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ 
-5145,7 +5645,7 @@ func ExtendVffs(ctx context.Context, r soap.RoundTripper, req *types.ExtendVffs) type ExtendVirtualDisk_TaskBody struct { Req *types.ExtendVirtualDisk_Task `xml:"urn:vim25 ExtendVirtualDisk_Task,omitempty"` - Res *types.ExtendVirtualDisk_TaskResponse `xml:"urn:vim25 ExtendVirtualDisk_TaskResponse,omitempty"` + Res *types.ExtendVirtualDisk_TaskResponse `xml:"ExtendVirtualDisk_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5165,7 +5665,7 @@ func ExtendVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *types type ExtendVmfsDatastoreBody struct { Req *types.ExtendVmfsDatastore `xml:"urn:vim25 ExtendVmfsDatastore,omitempty"` - Res *types.ExtendVmfsDatastoreResponse `xml:"urn:vim25 ExtendVmfsDatastoreResponse,omitempty"` + Res *types.ExtendVmfsDatastoreResponse `xml:"ExtendVmfsDatastoreResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5185,7 +5685,7 @@ func ExtendVmfsDatastore(ctx context.Context, r soap.RoundTripper, req *types.Ex type ExtractOvfEnvironmentBody struct { Req *types.ExtractOvfEnvironment `xml:"urn:vim25 ExtractOvfEnvironment,omitempty"` - Res *types.ExtractOvfEnvironmentResponse `xml:"urn:vim25 ExtractOvfEnvironmentResponse,omitempty"` + Res *types.ExtractOvfEnvironmentResponse `xml:"ExtractOvfEnvironmentResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5203,9 +5703,29 @@ func ExtractOvfEnvironment(ctx context.Context, r soap.RoundTripper, req *types. return resBody.Res, nil } +type FetchAuditRecordsBody struct { + Req *types.FetchAuditRecords `xml:"urn:vim25 FetchAuditRecords,omitempty"` + Res *types.FetchAuditRecordsResponse `xml:"FetchAuditRecordsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *FetchAuditRecordsBody) Fault() *soap.Fault { return b.Fault_ } + +func FetchAuditRecords(ctx context.Context, r soap.RoundTripper, req *types.FetchAuditRecords) (*types.FetchAuditRecordsResponse, error) { + var reqBody, resBody FetchAuditRecordsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type FetchDVPortKeysBody struct { Req *types.FetchDVPortKeys `xml:"urn:vim25 FetchDVPortKeys,omitempty"` - Res *types.FetchDVPortKeysResponse `xml:"urn:vim25 FetchDVPortKeysResponse,omitempty"` + Res *types.FetchDVPortKeysResponse `xml:"FetchDVPortKeysResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5225,7 +5745,7 @@ func FetchDVPortKeys(ctx context.Context, r soap.RoundTripper, req *types.FetchD type FetchDVPortsBody struct { Req *types.FetchDVPorts `xml:"urn:vim25 FetchDVPorts,omitempty"` - Res *types.FetchDVPortsResponse `xml:"urn:vim25 FetchDVPortsResponse,omitempty"` + Res *types.FetchDVPortsResponse `xml:"FetchDVPortsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5245,7 +5765,7 @@ func FetchDVPorts(ctx context.Context, r soap.RoundTripper, req *types.FetchDVPo type FetchSystemEventLogBody struct { Req *types.FetchSystemEventLog `xml:"urn:vim25 FetchSystemEventLog,omitempty"` - Res *types.FetchSystemEventLogResponse `xml:"urn:vim25 FetchSystemEventLogResponse,omitempty"` + Res *types.FetchSystemEventLogResponse `xml:"FetchSystemEventLogResponse,omitempty"` Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5265,7 +5785,7 @@ func FetchSystemEventLog(ctx context.Context, r soap.RoundTripper, req *types.Fe type FetchUserPrivilegeOnEntitiesBody struct { Req *types.FetchUserPrivilegeOnEntities `xml:"urn:vim25 FetchUserPrivilegeOnEntities,omitempty"` - Res *types.FetchUserPrivilegeOnEntitiesResponse `xml:"urn:vim25 FetchUserPrivilegeOnEntitiesResponse,omitempty"` + Res *types.FetchUserPrivilegeOnEntitiesResponse `xml:"FetchUserPrivilegeOnEntitiesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5285,7 +5805,7 @@ func FetchUserPrivilegeOnEntities(ctx context.Context, r soap.RoundTripper, req type FindAllByDnsNameBody struct { Req *types.FindAllByDnsName `xml:"urn:vim25 FindAllByDnsName,omitempty"` - Res *types.FindAllByDnsNameResponse `xml:"urn:vim25 FindAllByDnsNameResponse,omitempty"` + Res *types.FindAllByDnsNameResponse `xml:"FindAllByDnsNameResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5305,7 +5825,7 @@ func FindAllByDnsName(ctx context.Context, r soap.RoundTripper, req *types.FindA type FindAllByIpBody struct { Req *types.FindAllByIp `xml:"urn:vim25 FindAllByIp,omitempty"` - Res *types.FindAllByIpResponse `xml:"urn:vim25 FindAllByIpResponse,omitempty"` + Res *types.FindAllByIpResponse `xml:"FindAllByIpResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5325,7 +5845,7 @@ func FindAllByIp(ctx context.Context, r soap.RoundTripper, req *types.FindAllByI type FindAllByUuidBody struct { Req *types.FindAllByUuid `xml:"urn:vim25 FindAllByUuid,omitempty"` - Res *types.FindAllByUuidResponse `xml:"urn:vim25 FindAllByUuidResponse,omitempty"` + Res *types.FindAllByUuidResponse `xml:"FindAllByUuidResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5345,7 +5865,7 @@ func FindAllByUuid(ctx context.Context, r soap.RoundTripper, req *types.FindAllB type FindAssociatedProfileBody struct { Req *types.FindAssociatedProfile `xml:"urn:vim25 FindAssociatedProfile,omitempty"` - Res *types.FindAssociatedProfileResponse `xml:"urn:vim25 FindAssociatedProfileResponse,omitempty"` + Res *types.FindAssociatedProfileResponse `xml:"FindAssociatedProfileResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5365,7 +5885,7 @@ func FindAssociatedProfile(ctx context.Context, r soap.RoundTripper, req *types. 
type FindByDatastorePathBody struct { Req *types.FindByDatastorePath `xml:"urn:vim25 FindByDatastorePath,omitempty"` - Res *types.FindByDatastorePathResponse `xml:"urn:vim25 FindByDatastorePathResponse,omitempty"` + Res *types.FindByDatastorePathResponse `xml:"FindByDatastorePathResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5385,7 +5905,7 @@ func FindByDatastorePath(ctx context.Context, r soap.RoundTripper, req *types.Fi type FindByDnsNameBody struct { Req *types.FindByDnsName `xml:"urn:vim25 FindByDnsName,omitempty"` - Res *types.FindByDnsNameResponse `xml:"urn:vim25 FindByDnsNameResponse,omitempty"` + Res *types.FindByDnsNameResponse `xml:"FindByDnsNameResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5405,7 +5925,7 @@ func FindByDnsName(ctx context.Context, r soap.RoundTripper, req *types.FindByDn type FindByInventoryPathBody struct { Req *types.FindByInventoryPath `xml:"urn:vim25 FindByInventoryPath,omitempty"` - Res *types.FindByInventoryPathResponse `xml:"urn:vim25 FindByInventoryPathResponse,omitempty"` + Res *types.FindByInventoryPathResponse `xml:"FindByInventoryPathResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5425,7 +5945,7 @@ func FindByInventoryPath(ctx context.Context, r soap.RoundTripper, req *types.Fi type FindByIpBody struct { Req *types.FindByIp `xml:"urn:vim25 FindByIp,omitempty"` - Res *types.FindByIpResponse `xml:"urn:vim25 FindByIpResponse,omitempty"` + Res *types.FindByIpResponse `xml:"FindByIpResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5445,7 +5965,7 @@ func FindByIp(ctx context.Context, r soap.RoundTripper, req *types.FindByIp) (*t type FindByUuidBody struct { Req *types.FindByUuid `xml:"urn:vim25 FindByUuid,omitempty"` - Res *types.FindByUuidResponse `xml:"urn:vim25 FindByUuidResponse,omitempty"` + Res *types.FindByUuidResponse `xml:"FindByUuidResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5465,7 +5985,7 @@ func FindByUuid(ctx context.Context, r soap.RoundTripper, req *types.FindByUuid) type FindChildBody struct { Req *types.FindChild `xml:"urn:vim25 FindChild,omitempty"` - Res *types.FindChildResponse `xml:"urn:vim25 FindChildResponse,omitempty"` + Res *types.FindChildResponse `xml:"FindChildResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5485,7 +6005,7 @@ func FindChild(ctx context.Context, r soap.RoundTripper, req *types.FindChild) ( type FindExtensionBody struct { Req *types.FindExtension `xml:"urn:vim25 FindExtension,omitempty"` - Res *types.FindExtensionResponse `xml:"urn:vim25 FindExtensionResponse,omitempty"` + Res *types.FindExtensionResponse `xml:"FindExtensionResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5505,7 +6025,7 @@ func FindExtension(ctx context.Context, r soap.RoundTripper, req *types.FindExte type FindRulesForVmBody struct { Req *types.FindRulesForVm `xml:"urn:vim25 FindRulesForVm,omitempty"` - Res *types.FindRulesForVmResponse `xml:"urn:vim25 FindRulesForVmResponse,omitempty"` + Res *types.FindRulesForVmResponse `xml:"FindRulesForVmResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5525,7 +6045,7 @@ func FindRulesForVm(ctx 
context.Context, r soap.RoundTripper, req *types.FindRul type FormatVffsBody struct { Req *types.FormatVffs `xml:"urn:vim25 FormatVffs,omitempty"` - Res *types.FormatVffsResponse `xml:"urn:vim25 FormatVffsResponse,omitempty"` + Res *types.FormatVffsResponse `xml:"FormatVffsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5545,7 +6065,7 @@ func FormatVffs(ctx context.Context, r soap.RoundTripper, req *types.FormatVffs) type FormatVmfsBody struct { Req *types.FormatVmfs `xml:"urn:vim25 FormatVmfs,omitempty"` - Res *types.FormatVmfsResponse `xml:"urn:vim25 FormatVmfsResponse,omitempty"` + Res *types.FormatVmfsResponse `xml:"FormatVmfsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5565,7 +6085,7 @@ func FormatVmfs(ctx context.Context, r soap.RoundTripper, req *types.FormatVmfs) type GenerateCertificateSigningRequestBody struct { Req *types.GenerateCertificateSigningRequest `xml:"urn:vim25 GenerateCertificateSigningRequest,omitempty"` - Res *types.GenerateCertificateSigningRequestResponse `xml:"urn:vim25 GenerateCertificateSigningRequestResponse,omitempty"` + Res *types.GenerateCertificateSigningRequestResponse `xml:"GenerateCertificateSigningRequestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5585,7 +6105,7 @@ func GenerateCertificateSigningRequest(ctx context.Context, r soap.RoundTripper, type GenerateCertificateSigningRequestByDnBody struct { Req *types.GenerateCertificateSigningRequestByDn `xml:"urn:vim25 GenerateCertificateSigningRequestByDn,omitempty"` - Res *types.GenerateCertificateSigningRequestByDnResponse `xml:"urn:vim25 GenerateCertificateSigningRequestByDnResponse,omitempty"` + Res *types.GenerateCertificateSigningRequestByDnResponse `xml:"GenerateCertificateSigningRequestByDnResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5605,7 +6125,7 @@ func GenerateCertificateSigningRequestByDn(ctx context.Context, r soap.RoundTrip type GenerateClientCsrBody struct { Req *types.GenerateClientCsr `xml:"urn:vim25 GenerateClientCsr,omitempty"` - Res *types.GenerateClientCsrResponse `xml:"urn:vim25 GenerateClientCsrResponse,omitempty"` + Res *types.GenerateClientCsrResponse `xml:"GenerateClientCsrResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5625,7 +6145,7 @@ func GenerateClientCsr(ctx context.Context, r soap.RoundTripper, req *types.Gene type GenerateConfigTaskListBody struct { Req *types.GenerateConfigTaskList `xml:"urn:vim25 GenerateConfigTaskList,omitempty"` - Res *types.GenerateConfigTaskListResponse `xml:"urn:vim25 GenerateConfigTaskListResponse,omitempty"` + Res *types.GenerateConfigTaskListResponse `xml:"GenerateConfigTaskListResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5645,7 +6165,7 @@ func GenerateConfigTaskList(ctx context.Context, r soap.RoundTripper, req *types type GenerateHostConfigTaskSpec_TaskBody struct { Req *types.GenerateHostConfigTaskSpec_Task `xml:"urn:vim25 GenerateHostConfigTaskSpec_Task,omitempty"` - Res *types.GenerateHostConfigTaskSpec_TaskResponse `xml:"urn:vim25 GenerateHostConfigTaskSpec_TaskResponse,omitempty"` + Res *types.GenerateHostConfigTaskSpec_TaskResponse `xml:"GenerateHostConfigTaskSpec_TaskResponse,omitempty"` Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5665,7 +6185,7 @@ func GenerateHostConfigTaskSpec_Task(ctx context.Context, r soap.RoundTripper, r type GenerateHostProfileTaskList_TaskBody struct { Req *types.GenerateHostProfileTaskList_Task `xml:"urn:vim25 GenerateHostProfileTaskList_Task,omitempty"` - Res *types.GenerateHostProfileTaskList_TaskResponse `xml:"urn:vim25 GenerateHostProfileTaskList_TaskResponse,omitempty"` + Res *types.GenerateHostProfileTaskList_TaskResponse `xml:"GenerateHostProfileTaskList_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5685,7 +6205,7 @@ func GenerateHostProfileTaskList_Task(ctx context.Context, r soap.RoundTripper, type GenerateKeyBody struct { Req *types.GenerateKey `xml:"urn:vim25 GenerateKey,omitempty"` - Res *types.GenerateKeyResponse `xml:"urn:vim25 GenerateKeyResponse,omitempty"` + Res *types.GenerateKeyResponse `xml:"GenerateKeyResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5705,7 +6225,7 @@ func GenerateKey(ctx context.Context, r soap.RoundTripper, req *types.GenerateKe type GenerateLogBundles_TaskBody struct { Req *types.GenerateLogBundles_Task `xml:"urn:vim25 GenerateLogBundles_Task,omitempty"` - Res *types.GenerateLogBundles_TaskResponse `xml:"urn:vim25 GenerateLogBundles_TaskResponse,omitempty"` + Res *types.GenerateLogBundles_TaskResponse `xml:"GenerateLogBundles_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5725,7 +6245,7 @@ func GenerateLogBundles_Task(ctx context.Context, r soap.RoundTripper, req *type type GenerateSelfSignedClientCertBody struct { Req *types.GenerateSelfSignedClientCert `xml:"urn:vim25 GenerateSelfSignedClientCert,omitempty"` - Res *types.GenerateSelfSignedClientCertResponse `xml:"urn:vim25 GenerateSelfSignedClientCertResponse,omitempty"` + Res *types.GenerateSelfSignedClientCertResponse `xml:"GenerateSelfSignedClientCertResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5745,7 +6265,7 @@ func GenerateSelfSignedClientCert(ctx context.Context, r soap.RoundTripper, req type GetAlarmBody struct { Req *types.GetAlarm `xml:"urn:vim25 GetAlarm,omitempty"` - Res *types.GetAlarmResponse `xml:"urn:vim25 GetAlarmResponse,omitempty"` + Res *types.GetAlarmResponse `xml:"GetAlarmResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5765,7 +6285,7 @@ func GetAlarm(ctx context.Context, r soap.RoundTripper, req *types.GetAlarm) (*t type GetAlarmStateBody struct { Req *types.GetAlarmState `xml:"urn:vim25 GetAlarmState,omitempty"` - Res *types.GetAlarmStateResponse `xml:"urn:vim25 GetAlarmStateResponse,omitempty"` + Res *types.GetAlarmStateResponse `xml:"GetAlarmStateResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5785,7 +6305,7 @@ func GetAlarmState(ctx context.Context, r soap.RoundTripper, req *types.GetAlarm type GetCustomizationSpecBody struct { Req *types.GetCustomizationSpec `xml:"urn:vim25 GetCustomizationSpec,omitempty"` - Res *types.GetCustomizationSpecResponse `xml:"urn:vim25 GetCustomizationSpecResponse,omitempty"` + Res *types.GetCustomizationSpecResponse `xml:"GetCustomizationSpecResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5803,9 +6323,29 @@ func 
GetCustomizationSpec(ctx context.Context, r soap.RoundTripper, req *types.G return resBody.Res, nil } +type GetDefaultKmsClusterBody struct { + Req *types.GetDefaultKmsCluster `xml:"urn:vim25 GetDefaultKmsCluster,omitempty"` + Res *types.GetDefaultKmsClusterResponse `xml:"GetDefaultKmsClusterResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *GetDefaultKmsClusterBody) Fault() *soap.Fault { return b.Fault_ } + +func GetDefaultKmsCluster(ctx context.Context, r soap.RoundTripper, req *types.GetDefaultKmsCluster) (*types.GetDefaultKmsClusterResponse, error) { + var reqBody, resBody GetDefaultKmsClusterBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type GetPublicKeyBody struct { Req *types.GetPublicKey `xml:"urn:vim25 GetPublicKey,omitempty"` - Res *types.GetPublicKeyResponse `xml:"urn:vim25 GetPublicKeyResponse,omitempty"` + Res *types.GetPublicKeyResponse `xml:"GetPublicKeyResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5825,7 +6365,7 @@ func GetPublicKey(ctx context.Context, r soap.RoundTripper, req *types.GetPublic type GetResourceUsageBody struct { Req *types.GetResourceUsage `xml:"urn:vim25 GetResourceUsage,omitempty"` - Res *types.GetResourceUsageResponse `xml:"urn:vim25 GetResourceUsageResponse,omitempty"` + Res *types.GetResourceUsageResponse `xml:"GetResourceUsageResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5843,9 +6383,49 @@ func GetResourceUsage(ctx context.Context, r soap.RoundTripper, req *types.GetRe return resBody.Res, nil } +type GetSiteInfoBody struct { + Req *types.GetSiteInfo `xml:"urn:vim25 GetSiteInfo,omitempty"` + Res *types.GetSiteInfoResponse `xml:"GetSiteInfoResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *GetSiteInfoBody) Fault() *soap.Fault { return b.Fault_ } + +func GetSiteInfo(ctx context.Context, r soap.RoundTripper, req *types.GetSiteInfo) (*types.GetSiteInfoResponse, error) { + var reqBody, resBody GetSiteInfoBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type GetSystemVMsRestrictedDatastoresBody struct { + Req *types.GetSystemVMsRestrictedDatastores `xml:"urn:vim25 GetSystemVMsRestrictedDatastores,omitempty"` + Res *types.GetSystemVMsRestrictedDatastoresResponse `xml:"GetSystemVMsRestrictedDatastoresResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *GetSystemVMsRestrictedDatastoresBody) Fault() *soap.Fault { return b.Fault_ } + +func GetSystemVMsRestrictedDatastores(ctx context.Context, r soap.RoundTripper, req *types.GetSystemVMsRestrictedDatastores) (*types.GetSystemVMsRestrictedDatastoresResponse, error) { + var reqBody, resBody GetSystemVMsRestrictedDatastoresBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type GetVchaClusterHealthBody struct { Req *types.GetVchaClusterHealth `xml:"urn:vim25 GetVchaClusterHealth,omitempty"` - Res *types.GetVchaClusterHealthResponse `xml:"urn:vim25 GetVchaClusterHealthResponse,omitempty"` + Res *types.GetVchaClusterHealthResponse `xml:"GetVchaClusterHealthResponse,omitempty"` Fault_ 
*soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5865,7 +6445,7 @@ func GetVchaClusterHealth(ctx context.Context, r soap.RoundTripper, req *types.G type GetVsanObjExtAttrsBody struct { Req *types.GetVsanObjExtAttrs `xml:"urn:vim25 GetVsanObjExtAttrs,omitempty"` - Res *types.GetVsanObjExtAttrsResponse `xml:"urn:vim25 GetVsanObjExtAttrsResponse,omitempty"` + Res *types.GetVsanObjExtAttrsResponse `xml:"GetVsanObjExtAttrsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5885,7 +6465,7 @@ func GetVsanObjExtAttrs(ctx context.Context, r soap.RoundTripper, req *types.Get type HasMonitoredEntityBody struct { Req *types.HasMonitoredEntity `xml:"urn:vim25 HasMonitoredEntity,omitempty"` - Res *types.HasMonitoredEntityResponse `xml:"urn:vim25 HasMonitoredEntityResponse,omitempty"` + Res *types.HasMonitoredEntityResponse `xml:"HasMonitoredEntityResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5905,7 +6485,7 @@ func HasMonitoredEntity(ctx context.Context, r soap.RoundTripper, req *types.Has type HasPrivilegeOnEntitiesBody struct { Req *types.HasPrivilegeOnEntities `xml:"urn:vim25 HasPrivilegeOnEntities,omitempty"` - Res *types.HasPrivilegeOnEntitiesResponse `xml:"urn:vim25 HasPrivilegeOnEntitiesResponse,omitempty"` + Res *types.HasPrivilegeOnEntitiesResponse `xml:"HasPrivilegeOnEntitiesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5925,7 +6505,7 @@ func HasPrivilegeOnEntities(ctx context.Context, r soap.RoundTripper, req *types type HasPrivilegeOnEntityBody struct { Req *types.HasPrivilegeOnEntity `xml:"urn:vim25 HasPrivilegeOnEntity,omitempty"` - Res *types.HasPrivilegeOnEntityResponse `xml:"urn:vim25 HasPrivilegeOnEntityResponse,omitempty"` + Res *types.HasPrivilegeOnEntityResponse `xml:"HasPrivilegeOnEntityResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5945,7 +6525,7 @@ func HasPrivilegeOnEntity(ctx context.Context, r soap.RoundTripper, req *types.H type HasProviderBody struct { Req *types.HasProvider `xml:"urn:vim25 HasProvider,omitempty"` - Res *types.HasProviderResponse `xml:"urn:vim25 HasProviderResponse,omitempty"` + Res *types.HasProviderResponse `xml:"HasProviderResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5965,7 +6545,7 @@ func HasProvider(ctx context.Context, r soap.RoundTripper, req *types.HasProvide type HasUserPrivilegeOnEntitiesBody struct { Req *types.HasUserPrivilegeOnEntities `xml:"urn:vim25 HasUserPrivilegeOnEntities,omitempty"` - Res *types.HasUserPrivilegeOnEntitiesResponse `xml:"urn:vim25 HasUserPrivilegeOnEntitiesResponse,omitempty"` + Res *types.HasUserPrivilegeOnEntitiesResponse `xml:"HasUserPrivilegeOnEntitiesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -5985,7 +6565,7 @@ func HasUserPrivilegeOnEntities(ctx context.Context, r soap.RoundTripper, req *t type HostClearVStorageObjectControlFlagsBody struct { Req *types.HostClearVStorageObjectControlFlags `xml:"urn:vim25 HostClearVStorageObjectControlFlags,omitempty"` - Res *types.HostClearVStorageObjectControlFlagsResponse `xml:"urn:vim25 HostClearVStorageObjectControlFlagsResponse,omitempty"` + Res *types.HostClearVStorageObjectControlFlagsResponse `xml:"HostClearVStorageObjectControlFlagsResponse,omitempty"` 
Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6005,7 +6585,7 @@ func HostClearVStorageObjectControlFlags(ctx context.Context, r soap.RoundTrippe type HostCloneVStorageObject_TaskBody struct { Req *types.HostCloneVStorageObject_Task `xml:"urn:vim25 HostCloneVStorageObject_Task,omitempty"` - Res *types.HostCloneVStorageObject_TaskResponse `xml:"urn:vim25 HostCloneVStorageObject_TaskResponse,omitempty"` + Res *types.HostCloneVStorageObject_TaskResponse `xml:"HostCloneVStorageObject_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6025,7 +6605,7 @@ func HostCloneVStorageObject_Task(ctx context.Context, r soap.RoundTripper, req type HostConfigVFlashCacheBody struct { Req *types.HostConfigVFlashCache `xml:"urn:vim25 HostConfigVFlashCache,omitempty"` - Res *types.HostConfigVFlashCacheResponse `xml:"urn:vim25 HostConfigVFlashCacheResponse,omitempty"` + Res *types.HostConfigVFlashCacheResponse `xml:"HostConfigVFlashCacheResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6045,7 +6625,7 @@ func HostConfigVFlashCache(ctx context.Context, r soap.RoundTripper, req *types. type HostConfigureVFlashResourceBody struct { Req *types.HostConfigureVFlashResource `xml:"urn:vim25 HostConfigureVFlashResource,omitempty"` - Res *types.HostConfigureVFlashResourceResponse `xml:"urn:vim25 HostConfigureVFlashResourceResponse,omitempty"` + Res *types.HostConfigureVFlashResourceResponse `xml:"HostConfigureVFlashResourceResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6065,7 +6645,7 @@ func HostConfigureVFlashResource(ctx context.Context, r soap.RoundTripper, req * type HostCreateDisk_TaskBody struct { Req *types.HostCreateDisk_Task `xml:"urn:vim25 HostCreateDisk_Task,omitempty"` - Res *types.HostCreateDisk_TaskResponse `xml:"urn:vim25 HostCreateDisk_TaskResponse,omitempty"` + Res *types.HostCreateDisk_TaskResponse `xml:"HostCreateDisk_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6083,9 +6663,29 @@ func HostCreateDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.Ho return resBody.Res, nil } +type HostDeleteVStorageObjectEx_TaskBody struct { + Req *types.HostDeleteVStorageObjectEx_Task `xml:"urn:vim25 HostDeleteVStorageObjectEx_Task,omitempty"` + Res *types.HostDeleteVStorageObjectEx_TaskResponse `xml:"HostDeleteVStorageObjectEx_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HostDeleteVStorageObjectEx_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func HostDeleteVStorageObjectEx_Task(ctx context.Context, r soap.RoundTripper, req *types.HostDeleteVStorageObjectEx_Task) (*types.HostDeleteVStorageObjectEx_TaskResponse, error) { + var reqBody, resBody HostDeleteVStorageObjectEx_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type HostDeleteVStorageObject_TaskBody struct { Req *types.HostDeleteVStorageObject_Task `xml:"urn:vim25 HostDeleteVStorageObject_Task,omitempty"` - Res *types.HostDeleteVStorageObject_TaskResponse `xml:"urn:vim25 HostDeleteVStorageObject_TaskResponse,omitempty"` + Res *types.HostDeleteVStorageObject_TaskResponse `xml:"HostDeleteVStorageObject_TaskResponse,omitempty"` Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6105,7 +6705,7 @@ func HostDeleteVStorageObject_Task(ctx context.Context, r soap.RoundTripper, req type HostExtendDisk_TaskBody struct { Req *types.HostExtendDisk_Task `xml:"urn:vim25 HostExtendDisk_Task,omitempty"` - Res *types.HostExtendDisk_TaskResponse `xml:"urn:vim25 HostExtendDisk_TaskResponse,omitempty"` + Res *types.HostExtendDisk_TaskResponse `xml:"HostExtendDisk_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6125,7 +6725,7 @@ func HostExtendDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.Ho type HostGetVFlashModuleDefaultConfigBody struct { Req *types.HostGetVFlashModuleDefaultConfig `xml:"urn:vim25 HostGetVFlashModuleDefaultConfig,omitempty"` - Res *types.HostGetVFlashModuleDefaultConfigResponse `xml:"urn:vim25 HostGetVFlashModuleDefaultConfigResponse,omitempty"` + Res *types.HostGetVFlashModuleDefaultConfigResponse `xml:"HostGetVFlashModuleDefaultConfigResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6145,7 +6745,7 @@ func HostGetVFlashModuleDefaultConfig(ctx context.Context, r soap.RoundTripper, type HostImageConfigGetAcceptanceBody struct { Req *types.HostImageConfigGetAcceptance `xml:"urn:vim25 HostImageConfigGetAcceptance,omitempty"` - Res *types.HostImageConfigGetAcceptanceResponse `xml:"urn:vim25 HostImageConfigGetAcceptanceResponse,omitempty"` + Res *types.HostImageConfigGetAcceptanceResponse `xml:"HostImageConfigGetAcceptanceResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6165,7 +6765,7 @@ func HostImageConfigGetAcceptance(ctx context.Context, r soap.RoundTripper, req type HostImageConfigGetProfileBody struct { Req *types.HostImageConfigGetProfile `xml:"urn:vim25 HostImageConfigGetProfile,omitempty"` - Res *types.HostImageConfigGetProfileResponse `xml:"urn:vim25 HostImageConfigGetProfileResponse,omitempty"` + Res *types.HostImageConfigGetProfileResponse `xml:"HostImageConfigGetProfileResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6185,7 +6785,7 @@ func HostImageConfigGetProfile(ctx context.Context, r soap.RoundTripper, req *ty type HostInflateDisk_TaskBody struct { Req *types.HostInflateDisk_Task `xml:"urn:vim25 HostInflateDisk_Task,omitempty"` - Res *types.HostInflateDisk_TaskResponse `xml:"urn:vim25 HostInflateDisk_TaskResponse,omitempty"` + Res *types.HostInflateDisk_TaskResponse `xml:"HostInflateDisk_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6205,7 +6805,7 @@ func HostInflateDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.H type HostListVStorageObjectBody struct { Req *types.HostListVStorageObject `xml:"urn:vim25 HostListVStorageObject,omitempty"` - Res *types.HostListVStorageObjectResponse `xml:"urn:vim25 HostListVStorageObjectResponse,omitempty"` + Res *types.HostListVStorageObjectResponse `xml:"HostListVStorageObjectResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6225,7 +6825,7 @@ func HostListVStorageObject(ctx context.Context, r soap.RoundTripper, req *types type HostProfileResetValidationStateBody struct { Req *types.HostProfileResetValidationState `xml:"urn:vim25 HostProfileResetValidationState,omitempty"` - Res *types.HostProfileResetValidationStateResponse 
`xml:"urn:vim25 HostProfileResetValidationStateResponse,omitempty"` + Res *types.HostProfileResetValidationStateResponse `xml:"HostProfileResetValidationStateResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6245,7 +6845,7 @@ func HostProfileResetValidationState(ctx context.Context, r soap.RoundTripper, r type HostReconcileDatastoreInventory_TaskBody struct { Req *types.HostReconcileDatastoreInventory_Task `xml:"urn:vim25 HostReconcileDatastoreInventory_Task,omitempty"` - Res *types.HostReconcileDatastoreInventory_TaskResponse `xml:"urn:vim25 HostReconcileDatastoreInventory_TaskResponse,omitempty"` + Res *types.HostReconcileDatastoreInventory_TaskResponse `xml:"HostReconcileDatastoreInventory_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6265,7 +6865,7 @@ func HostReconcileDatastoreInventory_Task(ctx context.Context, r soap.RoundTripp type HostRegisterDiskBody struct { Req *types.HostRegisterDisk `xml:"urn:vim25 HostRegisterDisk,omitempty"` - Res *types.HostRegisterDiskResponse `xml:"urn:vim25 HostRegisterDiskResponse,omitempty"` + Res *types.HostRegisterDiskResponse `xml:"HostRegisterDiskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6285,7 +6885,7 @@ func HostRegisterDisk(ctx context.Context, r soap.RoundTripper, req *types.HostR type HostRelocateVStorageObject_TaskBody struct { Req *types.HostRelocateVStorageObject_Task `xml:"urn:vim25 HostRelocateVStorageObject_Task,omitempty"` - Res *types.HostRelocateVStorageObject_TaskResponse `xml:"urn:vim25 HostRelocateVStorageObject_TaskResponse,omitempty"` + Res *types.HostRelocateVStorageObject_TaskResponse `xml:"HostRelocateVStorageObject_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6305,7 +6905,7 @@ func HostRelocateVStorageObject_Task(ctx context.Context, r soap.RoundTripper, r type HostRemoveVFlashResourceBody struct { Req *types.HostRemoveVFlashResource `xml:"urn:vim25 HostRemoveVFlashResource,omitempty"` - Res *types.HostRemoveVFlashResourceResponse `xml:"urn:vim25 HostRemoveVFlashResourceResponse,omitempty"` + Res *types.HostRemoveVFlashResourceResponse `xml:"HostRemoveVFlashResourceResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6325,7 +6925,7 @@ func HostRemoveVFlashResource(ctx context.Context, r soap.RoundTripper, req *typ type HostRenameVStorageObjectBody struct { Req *types.HostRenameVStorageObject `xml:"urn:vim25 HostRenameVStorageObject,omitempty"` - Res *types.HostRenameVStorageObjectResponse `xml:"urn:vim25 HostRenameVStorageObjectResponse,omitempty"` + Res *types.HostRenameVStorageObjectResponse `xml:"HostRenameVStorageObjectResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6345,7 +6945,7 @@ func HostRenameVStorageObject(ctx context.Context, r soap.RoundTripper, req *typ type HostRetrieveVStorageInfrastructureObjectPolicyBody struct { Req *types.HostRetrieveVStorageInfrastructureObjectPolicy `xml:"urn:vim25 HostRetrieveVStorageInfrastructureObjectPolicy,omitempty"` - Res *types.HostRetrieveVStorageInfrastructureObjectPolicyResponse `xml:"urn:vim25 HostRetrieveVStorageInfrastructureObjectPolicyResponse,omitempty"` + Res *types.HostRetrieveVStorageInfrastructureObjectPolicyResponse 
`xml:"HostRetrieveVStorageInfrastructureObjectPolicyResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6365,7 +6965,7 @@ func HostRetrieveVStorageInfrastructureObjectPolicy(ctx context.Context, r soap. type HostRetrieveVStorageObjectBody struct { Req *types.HostRetrieveVStorageObject `xml:"urn:vim25 HostRetrieveVStorageObject,omitempty"` - Res *types.HostRetrieveVStorageObjectResponse `xml:"urn:vim25 HostRetrieveVStorageObjectResponse,omitempty"` + Res *types.HostRetrieveVStorageObjectResponse `xml:"HostRetrieveVStorageObjectResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6383,9 +6983,49 @@ func HostRetrieveVStorageObject(ctx context.Context, r soap.RoundTripper, req *t return resBody.Res, nil } +type HostRetrieveVStorageObjectMetadataBody struct { + Req *types.HostRetrieveVStorageObjectMetadata `xml:"urn:vim25 HostRetrieveVStorageObjectMetadata,omitempty"` + Res *types.HostRetrieveVStorageObjectMetadataResponse `xml:"HostRetrieveVStorageObjectMetadataResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HostRetrieveVStorageObjectMetadataBody) Fault() *soap.Fault { return b.Fault_ } + +func HostRetrieveVStorageObjectMetadata(ctx context.Context, r soap.RoundTripper, req *types.HostRetrieveVStorageObjectMetadata) (*types.HostRetrieveVStorageObjectMetadataResponse, error) { + var reqBody, resBody HostRetrieveVStorageObjectMetadataBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type HostRetrieveVStorageObjectMetadataValueBody struct { + Req *types.HostRetrieveVStorageObjectMetadataValue `xml:"urn:vim25 HostRetrieveVStorageObjectMetadataValue,omitempty"` + Res *types.HostRetrieveVStorageObjectMetadataValueResponse `xml:"HostRetrieveVStorageObjectMetadataValueResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HostRetrieveVStorageObjectMetadataValueBody) Fault() *soap.Fault { return b.Fault_ } + +func HostRetrieveVStorageObjectMetadataValue(ctx context.Context, r soap.RoundTripper, req *types.HostRetrieveVStorageObjectMetadataValue) (*types.HostRetrieveVStorageObjectMetadataValueResponse, error) { + var reqBody, resBody HostRetrieveVStorageObjectMetadataValueBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type HostRetrieveVStorageObjectStateBody struct { Req *types.HostRetrieveVStorageObjectState `xml:"urn:vim25 HostRetrieveVStorageObjectState,omitempty"` - Res *types.HostRetrieveVStorageObjectStateResponse `xml:"urn:vim25 HostRetrieveVStorageObjectStateResponse,omitempty"` + Res *types.HostRetrieveVStorageObjectStateResponse `xml:"HostRetrieveVStorageObjectStateResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6405,7 +7045,7 @@ func HostRetrieveVStorageObjectState(ctx context.Context, r soap.RoundTripper, r type HostScheduleReconcileDatastoreInventoryBody struct { Req *types.HostScheduleReconcileDatastoreInventory `xml:"urn:vim25 HostScheduleReconcileDatastoreInventory,omitempty"` - Res *types.HostScheduleReconcileDatastoreInventoryResponse `xml:"urn:vim25 HostScheduleReconcileDatastoreInventoryResponse,omitempty"` + Res *types.HostScheduleReconcileDatastoreInventoryResponse 
`xml:"HostScheduleReconcileDatastoreInventoryResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6425,7 +7065,7 @@ func HostScheduleReconcileDatastoreInventory(ctx context.Context, r soap.RoundTr type HostSetVStorageObjectControlFlagsBody struct { Req *types.HostSetVStorageObjectControlFlags `xml:"urn:vim25 HostSetVStorageObjectControlFlags,omitempty"` - Res *types.HostSetVStorageObjectControlFlagsResponse `xml:"urn:vim25 HostSetVStorageObjectControlFlagsResponse,omitempty"` + Res *types.HostSetVStorageObjectControlFlagsResponse `xml:"HostSetVStorageObjectControlFlagsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6445,7 +7085,7 @@ func HostSetVStorageObjectControlFlags(ctx context.Context, r soap.RoundTripper, type HostSpecGetUpdatedHostsBody struct { Req *types.HostSpecGetUpdatedHosts `xml:"urn:vim25 HostSpecGetUpdatedHosts,omitempty"` - Res *types.HostSpecGetUpdatedHostsResponse `xml:"urn:vim25 HostSpecGetUpdatedHostsResponse,omitempty"` + Res *types.HostSpecGetUpdatedHostsResponse `xml:"HostSpecGetUpdatedHostsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6463,9 +7103,49 @@ func HostSpecGetUpdatedHosts(ctx context.Context, r soap.RoundTripper, req *type return resBody.Res, nil } +type HostUpdateVStorageObjectMetadataEx_TaskBody struct { + Req *types.HostUpdateVStorageObjectMetadataEx_Task `xml:"urn:vim25 HostUpdateVStorageObjectMetadataEx_Task,omitempty"` + Res *types.HostUpdateVStorageObjectMetadataEx_TaskResponse `xml:"HostUpdateVStorageObjectMetadataEx_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HostUpdateVStorageObjectMetadataEx_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func HostUpdateVStorageObjectMetadataEx_Task(ctx context.Context, r soap.RoundTripper, req *types.HostUpdateVStorageObjectMetadataEx_Task) (*types.HostUpdateVStorageObjectMetadataEx_TaskResponse, error) { + var reqBody, resBody HostUpdateVStorageObjectMetadataEx_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type HostUpdateVStorageObjectMetadata_TaskBody struct { + Req *types.HostUpdateVStorageObjectMetadata_Task `xml:"urn:vim25 HostUpdateVStorageObjectMetadata_Task,omitempty"` + Res *types.HostUpdateVStorageObjectMetadata_TaskResponse `xml:"HostUpdateVStorageObjectMetadata_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HostUpdateVStorageObjectMetadata_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func HostUpdateVStorageObjectMetadata_Task(ctx context.Context, r soap.RoundTripper, req *types.HostUpdateVStorageObjectMetadata_Task) (*types.HostUpdateVStorageObjectMetadata_TaskResponse, error) { + var reqBody, resBody HostUpdateVStorageObjectMetadata_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type HostVStorageObjectCreateDiskFromSnapshot_TaskBody struct { Req *types.HostVStorageObjectCreateDiskFromSnapshot_Task `xml:"urn:vim25 HostVStorageObjectCreateDiskFromSnapshot_Task,omitempty"` - Res *types.HostVStorageObjectCreateDiskFromSnapshot_TaskResponse `xml:"urn:vim25 HostVStorageObjectCreateDiskFromSnapshot_TaskResponse,omitempty"` + Res 
*types.HostVStorageObjectCreateDiskFromSnapshot_TaskResponse `xml:"HostVStorageObjectCreateDiskFromSnapshot_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6485,7 +7165,7 @@ func HostVStorageObjectCreateDiskFromSnapshot_Task(ctx context.Context, r soap.R type HostVStorageObjectCreateSnapshot_TaskBody struct { Req *types.HostVStorageObjectCreateSnapshot_Task `xml:"urn:vim25 HostVStorageObjectCreateSnapshot_Task,omitempty"` - Res *types.HostVStorageObjectCreateSnapshot_TaskResponse `xml:"urn:vim25 HostVStorageObjectCreateSnapshot_TaskResponse,omitempty"` + Res *types.HostVStorageObjectCreateSnapshot_TaskResponse `xml:"HostVStorageObjectCreateSnapshot_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6505,7 +7185,7 @@ func HostVStorageObjectCreateSnapshot_Task(ctx context.Context, r soap.RoundTrip type HostVStorageObjectDeleteSnapshot_TaskBody struct { Req *types.HostVStorageObjectDeleteSnapshot_Task `xml:"urn:vim25 HostVStorageObjectDeleteSnapshot_Task,omitempty"` - Res *types.HostVStorageObjectDeleteSnapshot_TaskResponse `xml:"urn:vim25 HostVStorageObjectDeleteSnapshot_TaskResponse,omitempty"` + Res *types.HostVStorageObjectDeleteSnapshot_TaskResponse `xml:"HostVStorageObjectDeleteSnapshot_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6525,7 +7205,7 @@ func HostVStorageObjectDeleteSnapshot_Task(ctx context.Context, r soap.RoundTrip type HostVStorageObjectRetrieveSnapshotInfoBody struct { Req *types.HostVStorageObjectRetrieveSnapshotInfo `xml:"urn:vim25 HostVStorageObjectRetrieveSnapshotInfo,omitempty"` - Res *types.HostVStorageObjectRetrieveSnapshotInfoResponse `xml:"urn:vim25 HostVStorageObjectRetrieveSnapshotInfoResponse,omitempty"` + Res *types.HostVStorageObjectRetrieveSnapshotInfoResponse `xml:"HostVStorageObjectRetrieveSnapshotInfoResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6545,7 +7225,7 @@ func HostVStorageObjectRetrieveSnapshotInfo(ctx context.Context, r soap.RoundTri type HostVStorageObjectRevert_TaskBody struct { Req *types.HostVStorageObjectRevert_Task `xml:"urn:vim25 HostVStorageObjectRevert_Task,omitempty"` - Res *types.HostVStorageObjectRevert_TaskResponse `xml:"urn:vim25 HostVStorageObjectRevert_TaskResponse,omitempty"` + Res *types.HostVStorageObjectRevert_TaskResponse `xml:"HostVStorageObjectRevert_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6565,7 +7245,7 @@ func HostVStorageObjectRevert_Task(ctx context.Context, r soap.RoundTripper, req type HttpNfcLeaseAbortBody struct { Req *types.HttpNfcLeaseAbort `xml:"urn:vim25 HttpNfcLeaseAbort,omitempty"` - Res *types.HttpNfcLeaseAbortResponse `xml:"urn:vim25 HttpNfcLeaseAbortResponse,omitempty"` + Res *types.HttpNfcLeaseAbortResponse `xml:"HttpNfcLeaseAbortResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6585,7 +7265,7 @@ func HttpNfcLeaseAbort(ctx context.Context, r soap.RoundTripper, req *types.Http type HttpNfcLeaseCompleteBody struct { Req *types.HttpNfcLeaseComplete `xml:"urn:vim25 HttpNfcLeaseComplete,omitempty"` - Res *types.HttpNfcLeaseCompleteResponse `xml:"urn:vim25 HttpNfcLeaseCompleteResponse,omitempty"` + Res *types.HttpNfcLeaseCompleteResponse `xml:"HttpNfcLeaseCompleteResponse,omitempty"` Fault_ *soap.Fault 
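// Several of the bindings touched above are the *_Task variants (for example the
// HostUpdateVStorageObjectMetadata_Task pair added in this hunk series). A sketch of the
// usual govmomi idiom for those, assuming the imports from the sketch above plus
// github.com/vmware/govmomi/object: the generated call returns the Task managed object
// reference in Returnval, which can be wrapped and waited on. The wrapper name and the
// request fields beyond This are again illustrative assumptions.
func updateDiskMetadata(ctx context.Context, c *vim25.Client, mgr types.ManagedObjectReference) error {
	req := types.HostUpdateVStorageObjectMetadata_Task{
		This: mgr, // host vStorageObjectManager (assumed)
		// Disk id and metadata key/value parameters omitted; see vim25/types for the field names.
	}
	res, err := methods.HostUpdateVStorageObjectMetadata_Task(ctx, c, &req)
	if err != nil {
		return err
	}
	// Returnval is the Task reference; object.NewTask provides the helper used to wait on it.
	return object.NewTask(c, res.Returnval).Wait(ctx)
}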
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6605,7 +7285,7 @@ func HttpNfcLeaseComplete(ctx context.Context, r soap.RoundTripper, req *types.H type HttpNfcLeaseGetManifestBody struct { Req *types.HttpNfcLeaseGetManifest `xml:"urn:vim25 HttpNfcLeaseGetManifest,omitempty"` - Res *types.HttpNfcLeaseGetManifestResponse `xml:"urn:vim25 HttpNfcLeaseGetManifestResponse,omitempty"` + Res *types.HttpNfcLeaseGetManifestResponse `xml:"HttpNfcLeaseGetManifestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6623,9 +7303,29 @@ func HttpNfcLeaseGetManifest(ctx context.Context, r soap.RoundTripper, req *type return resBody.Res, nil } +type HttpNfcLeaseProbeUrlsBody struct { + Req *types.HttpNfcLeaseProbeUrls `xml:"urn:vim25 HttpNfcLeaseProbeUrls,omitempty"` + Res *types.HttpNfcLeaseProbeUrlsResponse `xml:"HttpNfcLeaseProbeUrlsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HttpNfcLeaseProbeUrlsBody) Fault() *soap.Fault { return b.Fault_ } + +func HttpNfcLeaseProbeUrls(ctx context.Context, r soap.RoundTripper, req *types.HttpNfcLeaseProbeUrls) (*types.HttpNfcLeaseProbeUrlsResponse, error) { + var reqBody, resBody HttpNfcLeaseProbeUrlsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type HttpNfcLeaseProgressBody struct { Req *types.HttpNfcLeaseProgress `xml:"urn:vim25 HttpNfcLeaseProgress,omitempty"` - Res *types.HttpNfcLeaseProgressResponse `xml:"urn:vim25 HttpNfcLeaseProgressResponse,omitempty"` + Res *types.HttpNfcLeaseProgressResponse `xml:"HttpNfcLeaseProgressResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6645,7 +7345,7 @@ func HttpNfcLeaseProgress(ctx context.Context, r soap.RoundTripper, req *types.H type HttpNfcLeasePullFromUrls_TaskBody struct { Req *types.HttpNfcLeasePullFromUrls_Task `xml:"urn:vim25 HttpNfcLeasePullFromUrls_Task,omitempty"` - Res *types.HttpNfcLeasePullFromUrls_TaskResponse `xml:"urn:vim25 HttpNfcLeasePullFromUrls_TaskResponse,omitempty"` + Res *types.HttpNfcLeasePullFromUrls_TaskResponse `xml:"HttpNfcLeasePullFromUrls_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6665,7 +7365,7 @@ func HttpNfcLeasePullFromUrls_Task(ctx context.Context, r soap.RoundTripper, req type HttpNfcLeaseSetManifestChecksumTypeBody struct { Req *types.HttpNfcLeaseSetManifestChecksumType `xml:"urn:vim25 HttpNfcLeaseSetManifestChecksumType,omitempty"` - Res *types.HttpNfcLeaseSetManifestChecksumTypeResponse `xml:"urn:vim25 HttpNfcLeaseSetManifestChecksumTypeResponse,omitempty"` + Res *types.HttpNfcLeaseSetManifestChecksumTypeResponse `xml:"HttpNfcLeaseSetManifestChecksumTypeResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6685,7 +7385,7 @@ func HttpNfcLeaseSetManifestChecksumType(ctx context.Context, r soap.RoundTrippe type ImpersonateUserBody struct { Req *types.ImpersonateUser `xml:"urn:vim25 ImpersonateUser,omitempty"` - Res *types.ImpersonateUserResponse `xml:"urn:vim25 ImpersonateUserResponse,omitempty"` + Res *types.ImpersonateUserResponse `xml:"ImpersonateUserResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6705,7 +7405,7 @@ func ImpersonateUser(ctx context.Context, r soap.RoundTripper, 
req *types.Impers type ImportCertificateForCAM_TaskBody struct { Req *types.ImportCertificateForCAM_Task `xml:"urn:vim25 ImportCertificateForCAM_Task,omitempty"` - Res *types.ImportCertificateForCAM_TaskResponse `xml:"urn:vim25 ImportCertificateForCAM_TaskResponse,omitempty"` + Res *types.ImportCertificateForCAM_TaskResponse `xml:"ImportCertificateForCAM_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6725,7 +7425,7 @@ func ImportCertificateForCAM_Task(ctx context.Context, r soap.RoundTripper, req type ImportUnmanagedSnapshotBody struct { Req *types.ImportUnmanagedSnapshot `xml:"urn:vim25 ImportUnmanagedSnapshot,omitempty"` - Res *types.ImportUnmanagedSnapshotResponse `xml:"urn:vim25 ImportUnmanagedSnapshotResponse,omitempty"` + Res *types.ImportUnmanagedSnapshotResponse `xml:"ImportUnmanagedSnapshotResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6745,7 +7445,7 @@ func ImportUnmanagedSnapshot(ctx context.Context, r soap.RoundTripper, req *type type ImportVAppBody struct { Req *types.ImportVApp `xml:"urn:vim25 ImportVApp,omitempty"` - Res *types.ImportVAppResponse `xml:"urn:vim25 ImportVAppResponse,omitempty"` + Res *types.ImportVAppResponse `xml:"ImportVAppResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6765,7 +7465,7 @@ func ImportVApp(ctx context.Context, r soap.RoundTripper, req *types.ImportVApp) type InflateDisk_TaskBody struct { Req *types.InflateDisk_Task `xml:"urn:vim25 InflateDisk_Task,omitempty"` - Res *types.InflateDisk_TaskResponse `xml:"urn:vim25 InflateDisk_TaskResponse,omitempty"` + Res *types.InflateDisk_TaskResponse `xml:"InflateDisk_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6785,7 +7485,7 @@ func InflateDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.Infla type InflateVirtualDisk_TaskBody struct { Req *types.InflateVirtualDisk_Task `xml:"urn:vim25 InflateVirtualDisk_Task,omitempty"` - Res *types.InflateVirtualDisk_TaskResponse `xml:"urn:vim25 InflateVirtualDisk_TaskResponse,omitempty"` + Res *types.InflateVirtualDisk_TaskResponse `xml:"InflateVirtualDisk_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6805,7 +7505,7 @@ func InflateVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *type type InitializeDisks_TaskBody struct { Req *types.InitializeDisks_Task `xml:"urn:vim25 InitializeDisks_Task,omitempty"` - Res *types.InitializeDisks_TaskResponse `xml:"urn:vim25 InitializeDisks_TaskResponse,omitempty"` + Res *types.InitializeDisks_TaskResponse `xml:"InitializeDisks_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6825,7 +7525,7 @@ func InitializeDisks_Task(ctx context.Context, r soap.RoundTripper, req *types.I type InitiateFileTransferFromGuestBody struct { Req *types.InitiateFileTransferFromGuest `xml:"urn:vim25 InitiateFileTransferFromGuest,omitempty"` - Res *types.InitiateFileTransferFromGuestResponse `xml:"urn:vim25 InitiateFileTransferFromGuestResponse,omitempty"` + Res *types.InitiateFileTransferFromGuestResponse `xml:"InitiateFileTransferFromGuestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6845,7 +7545,7 @@ func InitiateFileTransferFromGuest(ctx context.Context, r 
soap.RoundTripper, req type InitiateFileTransferToGuestBody struct { Req *types.InitiateFileTransferToGuest `xml:"urn:vim25 InitiateFileTransferToGuest,omitempty"` - Res *types.InitiateFileTransferToGuestResponse `xml:"urn:vim25 InitiateFileTransferToGuestResponse,omitempty"` + Res *types.InitiateFileTransferToGuestResponse `xml:"InitiateFileTransferToGuestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6865,7 +7565,7 @@ func InitiateFileTransferToGuest(ctx context.Context, r soap.RoundTripper, req * type InstallHostPatchV2_TaskBody struct { Req *types.InstallHostPatchV2_Task `xml:"urn:vim25 InstallHostPatchV2_Task,omitempty"` - Res *types.InstallHostPatchV2_TaskResponse `xml:"urn:vim25 InstallHostPatchV2_TaskResponse,omitempty"` + Res *types.InstallHostPatchV2_TaskResponse `xml:"InstallHostPatchV2_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6885,7 +7585,7 @@ func InstallHostPatchV2_Task(ctx context.Context, r soap.RoundTripper, req *type type InstallHostPatch_TaskBody struct { Req *types.InstallHostPatch_Task `xml:"urn:vim25 InstallHostPatch_Task,omitempty"` - Res *types.InstallHostPatch_TaskResponse `xml:"urn:vim25 InstallHostPatch_TaskResponse,omitempty"` + Res *types.InstallHostPatch_TaskResponse `xml:"InstallHostPatch_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6905,7 +7605,7 @@ func InstallHostPatch_Task(ctx context.Context, r soap.RoundTripper, req *types. type InstallIoFilter_TaskBody struct { Req *types.InstallIoFilter_Task `xml:"urn:vim25 InstallIoFilter_Task,omitempty"` - Res *types.InstallIoFilter_TaskResponse `xml:"urn:vim25 InstallIoFilter_TaskResponse,omitempty"` + Res *types.InstallIoFilter_TaskResponse `xml:"InstallIoFilter_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6925,7 +7625,7 @@ func InstallIoFilter_Task(ctx context.Context, r soap.RoundTripper, req *types.I type InstallServerCertificateBody struct { Req *types.InstallServerCertificate `xml:"urn:vim25 InstallServerCertificate,omitempty"` - Res *types.InstallServerCertificateResponse `xml:"urn:vim25 InstallServerCertificateResponse,omitempty"` + Res *types.InstallServerCertificateResponse `xml:"InstallServerCertificateResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6945,7 +7645,7 @@ func InstallServerCertificate(ctx context.Context, r soap.RoundTripper, req *typ type InstallSmartCardTrustAnchorBody struct { Req *types.InstallSmartCardTrustAnchor `xml:"urn:vim25 InstallSmartCardTrustAnchor,omitempty"` - Res *types.InstallSmartCardTrustAnchorResponse `xml:"urn:vim25 InstallSmartCardTrustAnchorResponse,omitempty"` + Res *types.InstallSmartCardTrustAnchorResponse `xml:"InstallSmartCardTrustAnchorResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6965,7 +7665,7 @@ func InstallSmartCardTrustAnchor(ctx context.Context, r soap.RoundTripper, req * type InstantClone_TaskBody struct { Req *types.InstantClone_Task `xml:"urn:vim25 InstantClone_Task,omitempty"` - Res *types.InstantClone_TaskResponse `xml:"urn:vim25 InstantClone_TaskResponse,omitempty"` + Res *types.InstantClone_TaskResponse `xml:"InstantClone_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -6983,9 
+7683,29 @@ func InstantClone_Task(ctx context.Context, r soap.RoundTripper, req *types.Inst return resBody.Res, nil } +type IsKmsClusterActiveBody struct { + Req *types.IsKmsClusterActive `xml:"urn:vim25 IsKmsClusterActive,omitempty"` + Res *types.IsKmsClusterActiveResponse `xml:"IsKmsClusterActiveResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *IsKmsClusterActiveBody) Fault() *soap.Fault { return b.Fault_ } + +func IsKmsClusterActive(ctx context.Context, r soap.RoundTripper, req *types.IsKmsClusterActive) (*types.IsKmsClusterActiveResponse, error) { + var reqBody, resBody IsKmsClusterActiveBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type IsSharedGraphicsActiveBody struct { Req *types.IsSharedGraphicsActive `xml:"urn:vim25 IsSharedGraphicsActive,omitempty"` - Res *types.IsSharedGraphicsActiveResponse `xml:"urn:vim25 IsSharedGraphicsActiveResponse,omitempty"` + Res *types.IsSharedGraphicsActiveResponse `xml:"IsSharedGraphicsActiveResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7005,7 +7725,7 @@ func IsSharedGraphicsActive(ctx context.Context, r soap.RoundTripper, req *types type JoinDomainWithCAM_TaskBody struct { Req *types.JoinDomainWithCAM_Task `xml:"urn:vim25 JoinDomainWithCAM_Task,omitempty"` - Res *types.JoinDomainWithCAM_TaskResponse `xml:"urn:vim25 JoinDomainWithCAM_TaskResponse,omitempty"` + Res *types.JoinDomainWithCAM_TaskResponse `xml:"JoinDomainWithCAM_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7025,7 +7745,7 @@ func JoinDomainWithCAM_Task(ctx context.Context, r soap.RoundTripper, req *types type JoinDomain_TaskBody struct { Req *types.JoinDomain_Task `xml:"urn:vim25 JoinDomain_Task,omitempty"` - Res *types.JoinDomain_TaskResponse `xml:"urn:vim25 JoinDomain_TaskResponse,omitempty"` + Res *types.JoinDomain_TaskResponse `xml:"JoinDomain_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7045,7 +7765,7 @@ func JoinDomain_Task(ctx context.Context, r soap.RoundTripper, req *types.JoinDo type LeaveCurrentDomain_TaskBody struct { Req *types.LeaveCurrentDomain_Task `xml:"urn:vim25 LeaveCurrentDomain_Task,omitempty"` - Res *types.LeaveCurrentDomain_TaskResponse `xml:"urn:vim25 LeaveCurrentDomain_TaskResponse,omitempty"` + Res *types.LeaveCurrentDomain_TaskResponse `xml:"LeaveCurrentDomain_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7065,7 +7785,7 @@ func LeaveCurrentDomain_Task(ctx context.Context, r soap.RoundTripper, req *type type ListCACertificateRevocationListsBody struct { Req *types.ListCACertificateRevocationLists `xml:"urn:vim25 ListCACertificateRevocationLists,omitempty"` - Res *types.ListCACertificateRevocationListsResponse `xml:"urn:vim25 ListCACertificateRevocationListsResponse,omitempty"` + Res *types.ListCACertificateRevocationListsResponse `xml:"ListCACertificateRevocationListsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7085,7 +7805,7 @@ func ListCACertificateRevocationLists(ctx context.Context, r soap.RoundTripper, type ListCACertificatesBody struct { Req *types.ListCACertificates `xml:"urn:vim25 ListCACertificates,omitempty"` - Res 
*types.ListCACertificatesResponse `xml:"urn:vim25 ListCACertificatesResponse,omitempty"` + Res *types.ListCACertificatesResponse `xml:"ListCACertificatesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7105,7 +7825,7 @@ func ListCACertificates(ctx context.Context, r soap.RoundTripper, req *types.Lis type ListFilesInGuestBody struct { Req *types.ListFilesInGuest `xml:"urn:vim25 ListFilesInGuest,omitempty"` - Res *types.ListFilesInGuestResponse `xml:"urn:vim25 ListFilesInGuestResponse,omitempty"` + Res *types.ListFilesInGuestResponse `xml:"ListFilesInGuestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7125,7 +7845,7 @@ func ListFilesInGuest(ctx context.Context, r soap.RoundTripper, req *types.ListF type ListGuestAliasesBody struct { Req *types.ListGuestAliases `xml:"urn:vim25 ListGuestAliases,omitempty"` - Res *types.ListGuestAliasesResponse `xml:"urn:vim25 ListGuestAliasesResponse,omitempty"` + Res *types.ListGuestAliasesResponse `xml:"ListGuestAliasesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7145,7 +7865,7 @@ func ListGuestAliases(ctx context.Context, r soap.RoundTripper, req *types.ListG type ListGuestMappedAliasesBody struct { Req *types.ListGuestMappedAliases `xml:"urn:vim25 ListGuestMappedAliases,omitempty"` - Res *types.ListGuestMappedAliasesResponse `xml:"urn:vim25 ListGuestMappedAliasesResponse,omitempty"` + Res *types.ListGuestMappedAliasesResponse `xml:"ListGuestMappedAliasesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7165,7 +7885,7 @@ func ListGuestMappedAliases(ctx context.Context, r soap.RoundTripper, req *types type ListKeysBody struct { Req *types.ListKeys `xml:"urn:vim25 ListKeys,omitempty"` - Res *types.ListKeysResponse `xml:"urn:vim25 ListKeysResponse,omitempty"` + Res *types.ListKeysResponse `xml:"ListKeysResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7185,7 +7905,7 @@ func ListKeys(ctx context.Context, r soap.RoundTripper, req *types.ListKeys) (*t type ListKmipServersBody struct { Req *types.ListKmipServers `xml:"urn:vim25 ListKmipServers,omitempty"` - Res *types.ListKmipServersResponse `xml:"urn:vim25 ListKmipServersResponse,omitempty"` + Res *types.ListKmipServersResponse `xml:"ListKmipServersResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7203,9 +7923,29 @@ func ListKmipServers(ctx context.Context, r soap.RoundTripper, req *types.ListKm return resBody.Res, nil } +type ListKmsClustersBody struct { + Req *types.ListKmsClusters `xml:"urn:vim25 ListKmsClusters,omitempty"` + Res *types.ListKmsClustersResponse `xml:"ListKmsClustersResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ListKmsClustersBody) Fault() *soap.Fault { return b.Fault_ } + +func ListKmsClusters(ctx context.Context, r soap.RoundTripper, req *types.ListKmsClusters) (*types.ListKmsClustersResponse, error) { + var reqBody, resBody ListKmsClustersBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type ListProcessesInGuestBody struct { Req *types.ListProcessesInGuest `xml:"urn:vim25 ListProcessesInGuest,omitempty"` - Res *types.ListProcessesInGuestResponse 
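// The hunks just above also add the key-provider bindings IsKmsClusterActive and
// ListKmsClusters. A minimal sketch of the latter, assuming an authenticated *vim25.Client
// and the imports from the first sketch: the call targets the CryptoManagerKmip singleton
// referenced by ServiceContent.CryptoManager. The wrapper name listKeyProviders is
// illustrative only.
func listKeyProviders(ctx context.Context, c *vim25.Client) error {
	req := types.ListKmsClusters{
		This: *c.ServiceContent.CryptoManager,
		// The optional cluster filter parameter is left unset here.
	}
	res, err := methods.ListKmsClusters(ctx, c, &req)
	if err != nil {
		return err
	}
	_ = res.Returnval // the configured key provider clusters, per the generated response type
	return nil
}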
`xml:"urn:vim25 ListProcessesInGuestResponse,omitempty"` + Res *types.ListProcessesInGuestResponse `xml:"ListProcessesInGuestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7225,7 +7965,7 @@ func ListProcessesInGuest(ctx context.Context, r soap.RoundTripper, req *types.L type ListRegistryKeysInGuestBody struct { Req *types.ListRegistryKeysInGuest `xml:"urn:vim25 ListRegistryKeysInGuest,omitempty"` - Res *types.ListRegistryKeysInGuestResponse `xml:"urn:vim25 ListRegistryKeysInGuestResponse,omitempty"` + Res *types.ListRegistryKeysInGuestResponse `xml:"ListRegistryKeysInGuestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7245,7 +7985,7 @@ func ListRegistryKeysInGuest(ctx context.Context, r soap.RoundTripper, req *type type ListRegistryValuesInGuestBody struct { Req *types.ListRegistryValuesInGuest `xml:"urn:vim25 ListRegistryValuesInGuest,omitempty"` - Res *types.ListRegistryValuesInGuestResponse `xml:"urn:vim25 ListRegistryValuesInGuestResponse,omitempty"` + Res *types.ListRegistryValuesInGuestResponse `xml:"ListRegistryValuesInGuestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7265,7 +8005,7 @@ func ListRegistryValuesInGuest(ctx context.Context, r soap.RoundTripper, req *ty type ListSmartCardTrustAnchorsBody struct { Req *types.ListSmartCardTrustAnchors `xml:"urn:vim25 ListSmartCardTrustAnchors,omitempty"` - Res *types.ListSmartCardTrustAnchorsResponse `xml:"urn:vim25 ListSmartCardTrustAnchorsResponse,omitempty"` + Res *types.ListSmartCardTrustAnchorsResponse `xml:"ListSmartCardTrustAnchorsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7285,7 +8025,7 @@ func ListSmartCardTrustAnchors(ctx context.Context, r soap.RoundTripper, req *ty type ListTagsAttachedToVStorageObjectBody struct { Req *types.ListTagsAttachedToVStorageObject `xml:"urn:vim25 ListTagsAttachedToVStorageObject,omitempty"` - Res *types.ListTagsAttachedToVStorageObjectResponse `xml:"urn:vim25 ListTagsAttachedToVStorageObjectResponse,omitempty"` + Res *types.ListTagsAttachedToVStorageObjectResponse `xml:"ListTagsAttachedToVStorageObjectResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7305,7 +8045,7 @@ func ListTagsAttachedToVStorageObject(ctx context.Context, r soap.RoundTripper, type ListVStorageObjectBody struct { Req *types.ListVStorageObject `xml:"urn:vim25 ListVStorageObject,omitempty"` - Res *types.ListVStorageObjectResponse `xml:"urn:vim25 ListVStorageObjectResponse,omitempty"` + Res *types.ListVStorageObjectResponse `xml:"ListVStorageObjectResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7325,7 +8065,7 @@ func ListVStorageObject(ctx context.Context, r soap.RoundTripper, req *types.Lis type ListVStorageObjectsAttachedToTagBody struct { Req *types.ListVStorageObjectsAttachedToTag `xml:"urn:vim25 ListVStorageObjectsAttachedToTag,omitempty"` - Res *types.ListVStorageObjectsAttachedToTagResponse `xml:"urn:vim25 ListVStorageObjectsAttachedToTagResponse,omitempty"` + Res *types.ListVStorageObjectsAttachedToTagResponse `xml:"ListVStorageObjectsAttachedToTagResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7345,7 +8085,7 @@ func ListVStorageObjectsAttachedToTag(ctx context.Context, r 
soap.RoundTripper, type LogUserEventBody struct { Req *types.LogUserEvent `xml:"urn:vim25 LogUserEvent,omitempty"` - Res *types.LogUserEventResponse `xml:"urn:vim25 LogUserEventResponse,omitempty"` + Res *types.LogUserEventResponse `xml:"LogUserEventResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7365,7 +8105,7 @@ func LogUserEvent(ctx context.Context, r soap.RoundTripper, req *types.LogUserEv type LoginBody struct { Req *types.Login `xml:"urn:vim25 Login,omitempty"` - Res *types.LoginResponse `xml:"urn:vim25 LoginResponse,omitempty"` + Res *types.LoginResponse `xml:"LoginResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7385,7 +8125,7 @@ func Login(ctx context.Context, r soap.RoundTripper, req *types.Login) (*types.L type LoginBySSPIBody struct { Req *types.LoginBySSPI `xml:"urn:vim25 LoginBySSPI,omitempty"` - Res *types.LoginBySSPIResponse `xml:"urn:vim25 LoginBySSPIResponse,omitempty"` + Res *types.LoginBySSPIResponse `xml:"LoginBySSPIResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7405,7 +8145,7 @@ func LoginBySSPI(ctx context.Context, r soap.RoundTripper, req *types.LoginBySSP type LoginByTokenBody struct { Req *types.LoginByToken `xml:"urn:vim25 LoginByToken,omitempty"` - Res *types.LoginByTokenResponse `xml:"urn:vim25 LoginByTokenResponse,omitempty"` + Res *types.LoginByTokenResponse `xml:"LoginByTokenResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7425,7 +8165,7 @@ func LoginByToken(ctx context.Context, r soap.RoundTripper, req *types.LoginByTo type LoginExtensionByCertificateBody struct { Req *types.LoginExtensionByCertificate `xml:"urn:vim25 LoginExtensionByCertificate,omitempty"` - Res *types.LoginExtensionByCertificateResponse `xml:"urn:vim25 LoginExtensionByCertificateResponse,omitempty"` + Res *types.LoginExtensionByCertificateResponse `xml:"LoginExtensionByCertificateResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7445,7 +8185,7 @@ func LoginExtensionByCertificate(ctx context.Context, r soap.RoundTripper, req * type LoginExtensionBySubjectNameBody struct { Req *types.LoginExtensionBySubjectName `xml:"urn:vim25 LoginExtensionBySubjectName,omitempty"` - Res *types.LoginExtensionBySubjectNameResponse `xml:"urn:vim25 LoginExtensionBySubjectNameResponse,omitempty"` + Res *types.LoginExtensionBySubjectNameResponse `xml:"LoginExtensionBySubjectNameResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7465,7 +8205,7 @@ func LoginExtensionBySubjectName(ctx context.Context, r soap.RoundTripper, req * type LogoutBody struct { Req *types.Logout `xml:"urn:vim25 Logout,omitempty"` - Res *types.LogoutResponse `xml:"urn:vim25 LogoutResponse,omitempty"` + Res *types.LogoutResponse `xml:"LogoutResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7485,7 +8225,7 @@ func Logout(ctx context.Context, r soap.RoundTripper, req *types.Logout) (*types type LookupDvPortGroupBody struct { Req *types.LookupDvPortGroup `xml:"urn:vim25 LookupDvPortGroup,omitempty"` - Res *types.LookupDvPortGroupResponse `xml:"urn:vim25 LookupDvPortGroupResponse,omitempty"` + Res *types.LookupDvPortGroupResponse `xml:"LookupDvPortGroupResponse,omitempty"` Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7505,7 +8245,7 @@ func LookupDvPortGroup(ctx context.Context, r soap.RoundTripper, req *types.Look type LookupVmOverheadMemoryBody struct { Req *types.LookupVmOverheadMemory `xml:"urn:vim25 LookupVmOverheadMemory,omitempty"` - Res *types.LookupVmOverheadMemoryResponse `xml:"urn:vim25 LookupVmOverheadMemoryResponse,omitempty"` + Res *types.LookupVmOverheadMemoryResponse `xml:"LookupVmOverheadMemoryResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7525,7 +8265,7 @@ func LookupVmOverheadMemory(ctx context.Context, r soap.RoundTripper, req *types type MakeDirectoryBody struct { Req *types.MakeDirectory `xml:"urn:vim25 MakeDirectory,omitempty"` - Res *types.MakeDirectoryResponse `xml:"urn:vim25 MakeDirectoryResponse,omitempty"` + Res *types.MakeDirectoryResponse `xml:"MakeDirectoryResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7545,7 +8285,7 @@ func MakeDirectory(ctx context.Context, r soap.RoundTripper, req *types.MakeDire type MakeDirectoryInGuestBody struct { Req *types.MakeDirectoryInGuest `xml:"urn:vim25 MakeDirectoryInGuest,omitempty"` - Res *types.MakeDirectoryInGuestResponse `xml:"urn:vim25 MakeDirectoryInGuestResponse,omitempty"` + Res *types.MakeDirectoryInGuestResponse `xml:"MakeDirectoryInGuestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7565,7 +8305,7 @@ func MakeDirectoryInGuest(ctx context.Context, r soap.RoundTripper, req *types.M type MakePrimaryVM_TaskBody struct { Req *types.MakePrimaryVM_Task `xml:"urn:vim25 MakePrimaryVM_Task,omitempty"` - Res *types.MakePrimaryVM_TaskResponse `xml:"urn:vim25 MakePrimaryVM_TaskResponse,omitempty"` + Res *types.MakePrimaryVM_TaskResponse `xml:"MakePrimaryVM_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7585,7 +8325,7 @@ func MakePrimaryVM_Task(ctx context.Context, r soap.RoundTripper, req *types.Mak type MarkAsLocal_TaskBody struct { Req *types.MarkAsLocal_Task `xml:"urn:vim25 MarkAsLocal_Task,omitempty"` - Res *types.MarkAsLocal_TaskResponse `xml:"urn:vim25 MarkAsLocal_TaskResponse,omitempty"` + Res *types.MarkAsLocal_TaskResponse `xml:"MarkAsLocal_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7605,7 +8345,7 @@ func MarkAsLocal_Task(ctx context.Context, r soap.RoundTripper, req *types.MarkA type MarkAsNonLocal_TaskBody struct { Req *types.MarkAsNonLocal_Task `xml:"urn:vim25 MarkAsNonLocal_Task,omitempty"` - Res *types.MarkAsNonLocal_TaskResponse `xml:"urn:vim25 MarkAsNonLocal_TaskResponse,omitempty"` + Res *types.MarkAsNonLocal_TaskResponse `xml:"MarkAsNonLocal_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7625,7 +8365,7 @@ func MarkAsNonLocal_Task(ctx context.Context, r soap.RoundTripper, req *types.Ma type MarkAsNonSsd_TaskBody struct { Req *types.MarkAsNonSsd_Task `xml:"urn:vim25 MarkAsNonSsd_Task,omitempty"` - Res *types.MarkAsNonSsd_TaskResponse `xml:"urn:vim25 MarkAsNonSsd_TaskResponse,omitempty"` + Res *types.MarkAsNonSsd_TaskResponse `xml:"MarkAsNonSsd_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7645,7 +8385,7 @@ func MarkAsNonSsd_Task(ctx context.Context, r soap.RoundTripper, req *types.Mark 
type MarkAsSsd_TaskBody struct { Req *types.MarkAsSsd_Task `xml:"urn:vim25 MarkAsSsd_Task,omitempty"` - Res *types.MarkAsSsd_TaskResponse `xml:"urn:vim25 MarkAsSsd_TaskResponse,omitempty"` + Res *types.MarkAsSsd_TaskResponse `xml:"MarkAsSsd_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7665,7 +8405,7 @@ func MarkAsSsd_Task(ctx context.Context, r soap.RoundTripper, req *types.MarkAsS type MarkAsTemplateBody struct { Req *types.MarkAsTemplate `xml:"urn:vim25 MarkAsTemplate,omitempty"` - Res *types.MarkAsTemplateResponse `xml:"urn:vim25 MarkAsTemplateResponse,omitempty"` + Res *types.MarkAsTemplateResponse `xml:"MarkAsTemplateResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7685,7 +8425,7 @@ func MarkAsTemplate(ctx context.Context, r soap.RoundTripper, req *types.MarkAsT type MarkAsVirtualMachineBody struct { Req *types.MarkAsVirtualMachine `xml:"urn:vim25 MarkAsVirtualMachine,omitempty"` - Res *types.MarkAsVirtualMachineResponse `xml:"urn:vim25 MarkAsVirtualMachineResponse,omitempty"` + Res *types.MarkAsVirtualMachineResponse `xml:"MarkAsVirtualMachineResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7705,7 +8445,7 @@ func MarkAsVirtualMachine(ctx context.Context, r soap.RoundTripper, req *types.M type MarkDefaultBody struct { Req *types.MarkDefault `xml:"urn:vim25 MarkDefault,omitempty"` - Res *types.MarkDefaultResponse `xml:"urn:vim25 MarkDefaultResponse,omitempty"` + Res *types.MarkDefaultResponse `xml:"MarkDefaultResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7725,7 +8465,7 @@ func MarkDefault(ctx context.Context, r soap.RoundTripper, req *types.MarkDefaul type MarkForRemovalBody struct { Req *types.MarkForRemoval `xml:"urn:vim25 MarkForRemoval,omitempty"` - Res *types.MarkForRemovalResponse `xml:"urn:vim25 MarkForRemovalResponse,omitempty"` + Res *types.MarkForRemovalResponse `xml:"MarkForRemovalResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7743,9 +8483,69 @@ func MarkForRemoval(ctx context.Context, r soap.RoundTripper, req *types.MarkFor return resBody.Res, nil } +type MarkPerenniallyReservedBody struct { + Req *types.MarkPerenniallyReserved `xml:"urn:vim25 MarkPerenniallyReserved,omitempty"` + Res *types.MarkPerenniallyReservedResponse `xml:"MarkPerenniallyReservedResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *MarkPerenniallyReservedBody) Fault() *soap.Fault { return b.Fault_ } + +func MarkPerenniallyReserved(ctx context.Context, r soap.RoundTripper, req *types.MarkPerenniallyReserved) (*types.MarkPerenniallyReservedResponse, error) { + var reqBody, resBody MarkPerenniallyReservedBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type MarkPerenniallyReservedEx_TaskBody struct { + Req *types.MarkPerenniallyReservedEx_Task `xml:"urn:vim25 MarkPerenniallyReservedEx_Task,omitempty"` + Res *types.MarkPerenniallyReservedEx_TaskResponse `xml:"MarkPerenniallyReservedEx_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *MarkPerenniallyReservedEx_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func 
MarkPerenniallyReservedEx_Task(ctx context.Context, r soap.RoundTripper, req *types.MarkPerenniallyReservedEx_Task) (*types.MarkPerenniallyReservedEx_TaskResponse, error) { + var reqBody, resBody MarkPerenniallyReservedEx_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type MarkServiceProviderEntitiesBody struct { + Req *types.MarkServiceProviderEntities `xml:"urn:vim25 MarkServiceProviderEntities,omitempty"` + Res *types.MarkServiceProviderEntitiesResponse `xml:"MarkServiceProviderEntitiesResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *MarkServiceProviderEntitiesBody) Fault() *soap.Fault { return b.Fault_ } + +func MarkServiceProviderEntities(ctx context.Context, r soap.RoundTripper, req *types.MarkServiceProviderEntities) (*types.MarkServiceProviderEntitiesResponse, error) { + var reqBody, resBody MarkServiceProviderEntitiesBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type MergeDvs_TaskBody struct { Req *types.MergeDvs_Task `xml:"urn:vim25 MergeDvs_Task,omitempty"` - Res *types.MergeDvs_TaskResponse `xml:"urn:vim25 MergeDvs_TaskResponse,omitempty"` + Res *types.MergeDvs_TaskResponse `xml:"MergeDvs_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7765,7 +8565,7 @@ func MergeDvs_Task(ctx context.Context, r soap.RoundTripper, req *types.MergeDvs type MergePermissionsBody struct { Req *types.MergePermissions `xml:"urn:vim25 MergePermissions,omitempty"` - Res *types.MergePermissionsResponse `xml:"urn:vim25 MergePermissionsResponse,omitempty"` + Res *types.MergePermissionsResponse `xml:"MergePermissionsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7785,7 +8585,7 @@ func MergePermissions(ctx context.Context, r soap.RoundTripper, req *types.Merge type MigrateVM_TaskBody struct { Req *types.MigrateVM_Task `xml:"urn:vim25 MigrateVM_Task,omitempty"` - Res *types.MigrateVM_TaskResponse `xml:"urn:vim25 MigrateVM_TaskResponse,omitempty"` + Res *types.MigrateVM_TaskResponse `xml:"MigrateVM_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7805,7 +8605,7 @@ func MigrateVM_Task(ctx context.Context, r soap.RoundTripper, req *types.Migrate type ModifyListViewBody struct { Req *types.ModifyListView `xml:"urn:vim25 ModifyListView,omitempty"` - Res *types.ModifyListViewResponse `xml:"urn:vim25 ModifyListViewResponse,omitempty"` + Res *types.ModifyListViewResponse `xml:"ModifyListViewResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7825,7 +8625,7 @@ func ModifyListView(ctx context.Context, r soap.RoundTripper, req *types.ModifyL type MountToolsInstallerBody struct { Req *types.MountToolsInstaller `xml:"urn:vim25 MountToolsInstaller,omitempty"` - Res *types.MountToolsInstallerResponse `xml:"urn:vim25 MountToolsInstallerResponse,omitempty"` + Res *types.MountToolsInstallerResponse `xml:"MountToolsInstallerResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7845,7 +8645,7 @@ func MountToolsInstaller(ctx context.Context, r soap.RoundTripper, req *types.Mo type MountVffsVolumeBody struct { Req *types.MountVffsVolume 
`xml:"urn:vim25 MountVffsVolume,omitempty"` - Res *types.MountVffsVolumeResponse `xml:"urn:vim25 MountVffsVolumeResponse,omitempty"` + Res *types.MountVffsVolumeResponse `xml:"MountVffsVolumeResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7865,7 +8665,7 @@ func MountVffsVolume(ctx context.Context, r soap.RoundTripper, req *types.MountV type MountVmfsVolumeBody struct { Req *types.MountVmfsVolume `xml:"urn:vim25 MountVmfsVolume,omitempty"` - Res *types.MountVmfsVolumeResponse `xml:"urn:vim25 MountVmfsVolumeResponse,omitempty"` + Res *types.MountVmfsVolumeResponse `xml:"MountVmfsVolumeResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7885,7 +8685,7 @@ func MountVmfsVolume(ctx context.Context, r soap.RoundTripper, req *types.MountV type MountVmfsVolumeEx_TaskBody struct { Req *types.MountVmfsVolumeEx_Task `xml:"urn:vim25 MountVmfsVolumeEx_Task,omitempty"` - Res *types.MountVmfsVolumeEx_TaskResponse `xml:"urn:vim25 MountVmfsVolumeEx_TaskResponse,omitempty"` + Res *types.MountVmfsVolumeEx_TaskResponse `xml:"MountVmfsVolumeEx_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7905,7 +8705,7 @@ func MountVmfsVolumeEx_Task(ctx context.Context, r soap.RoundTripper, req *types type MoveDVPort_TaskBody struct { Req *types.MoveDVPort_Task `xml:"urn:vim25 MoveDVPort_Task,omitempty"` - Res *types.MoveDVPort_TaskResponse `xml:"urn:vim25 MoveDVPort_TaskResponse,omitempty"` + Res *types.MoveDVPort_TaskResponse `xml:"MoveDVPort_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7925,7 +8725,7 @@ func MoveDVPort_Task(ctx context.Context, r soap.RoundTripper, req *types.MoveDV type MoveDatastoreFile_TaskBody struct { Req *types.MoveDatastoreFile_Task `xml:"urn:vim25 MoveDatastoreFile_Task,omitempty"` - Res *types.MoveDatastoreFile_TaskResponse `xml:"urn:vim25 MoveDatastoreFile_TaskResponse,omitempty"` + Res *types.MoveDatastoreFile_TaskResponse `xml:"MoveDatastoreFile_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7945,7 +8745,7 @@ func MoveDatastoreFile_Task(ctx context.Context, r soap.RoundTripper, req *types type MoveDirectoryInGuestBody struct { Req *types.MoveDirectoryInGuest `xml:"urn:vim25 MoveDirectoryInGuest,omitempty"` - Res *types.MoveDirectoryInGuestResponse `xml:"urn:vim25 MoveDirectoryInGuestResponse,omitempty"` + Res *types.MoveDirectoryInGuestResponse `xml:"MoveDirectoryInGuestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7965,7 +8765,7 @@ func MoveDirectoryInGuest(ctx context.Context, r soap.RoundTripper, req *types.M type MoveFileInGuestBody struct { Req *types.MoveFileInGuest `xml:"urn:vim25 MoveFileInGuest,omitempty"` - Res *types.MoveFileInGuestResponse `xml:"urn:vim25 MoveFileInGuestResponse,omitempty"` + Res *types.MoveFileInGuestResponse `xml:"MoveFileInGuestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -7985,7 +8785,7 @@ func MoveFileInGuest(ctx context.Context, r soap.RoundTripper, req *types.MoveFi type MoveHostInto_TaskBody struct { Req *types.MoveHostInto_Task `xml:"urn:vim25 MoveHostInto_Task,omitempty"` - Res *types.MoveHostInto_TaskResponse `xml:"urn:vim25 MoveHostInto_TaskResponse,omitempty"` + Res 
*types.MoveHostInto_TaskResponse `xml:"MoveHostInto_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8005,7 +8805,7 @@ func MoveHostInto_Task(ctx context.Context, r soap.RoundTripper, req *types.Move type MoveIntoFolder_TaskBody struct { Req *types.MoveIntoFolder_Task `xml:"urn:vim25 MoveIntoFolder_Task,omitempty"` - Res *types.MoveIntoFolder_TaskResponse `xml:"urn:vim25 MoveIntoFolder_TaskResponse,omitempty"` + Res *types.MoveIntoFolder_TaskResponse `xml:"MoveIntoFolder_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8025,7 +8825,7 @@ func MoveIntoFolder_Task(ctx context.Context, r soap.RoundTripper, req *types.Mo type MoveIntoResourcePoolBody struct { Req *types.MoveIntoResourcePool `xml:"urn:vim25 MoveIntoResourcePool,omitempty"` - Res *types.MoveIntoResourcePoolResponse `xml:"urn:vim25 MoveIntoResourcePoolResponse,omitempty"` + Res *types.MoveIntoResourcePoolResponse `xml:"MoveIntoResourcePoolResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8045,7 +8845,7 @@ func MoveIntoResourcePool(ctx context.Context, r soap.RoundTripper, req *types.M type MoveInto_TaskBody struct { Req *types.MoveInto_Task `xml:"urn:vim25 MoveInto_Task,omitempty"` - Res *types.MoveInto_TaskResponse `xml:"urn:vim25 MoveInto_TaskResponse,omitempty"` + Res *types.MoveInto_TaskResponse `xml:"MoveInto_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8065,7 +8865,7 @@ func MoveInto_Task(ctx context.Context, r soap.RoundTripper, req *types.MoveInto type MoveVirtualDisk_TaskBody struct { Req *types.MoveVirtualDisk_Task `xml:"urn:vim25 MoveVirtualDisk_Task,omitempty"` - Res *types.MoveVirtualDisk_TaskResponse `xml:"urn:vim25 MoveVirtualDisk_TaskResponse,omitempty"` + Res *types.MoveVirtualDisk_TaskResponse `xml:"MoveVirtualDisk_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8085,7 +8885,7 @@ func MoveVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.M type OpenInventoryViewFolderBody struct { Req *types.OpenInventoryViewFolder `xml:"urn:vim25 OpenInventoryViewFolder,omitempty"` - Res *types.OpenInventoryViewFolderResponse `xml:"urn:vim25 OpenInventoryViewFolderResponse,omitempty"` + Res *types.OpenInventoryViewFolderResponse `xml:"OpenInventoryViewFolderResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8105,7 +8905,7 @@ func OpenInventoryViewFolder(ctx context.Context, r soap.RoundTripper, req *type type OverwriteCustomizationSpecBody struct { Req *types.OverwriteCustomizationSpec `xml:"urn:vim25 OverwriteCustomizationSpec,omitempty"` - Res *types.OverwriteCustomizationSpecResponse `xml:"urn:vim25 OverwriteCustomizationSpecResponse,omitempty"` + Res *types.OverwriteCustomizationSpecResponse `xml:"OverwriteCustomizationSpecResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8125,7 +8925,7 @@ func OverwriteCustomizationSpec(ctx context.Context, r soap.RoundTripper, req *t type ParseDescriptorBody struct { Req *types.ParseDescriptor `xml:"urn:vim25 ParseDescriptor,omitempty"` - Res *types.ParseDescriptorResponse `xml:"urn:vim25 ParseDescriptorResponse,omitempty"` + Res *types.ParseDescriptorResponse `xml:"ParseDescriptorResponse,omitempty"` Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8145,7 +8945,7 @@ func ParseDescriptor(ctx context.Context, r soap.RoundTripper, req *types.ParseD type PerformDvsProductSpecOperation_TaskBody struct { Req *types.PerformDvsProductSpecOperation_Task `xml:"urn:vim25 PerformDvsProductSpecOperation_Task,omitempty"` - Res *types.PerformDvsProductSpecOperation_TaskResponse `xml:"urn:vim25 PerformDvsProductSpecOperation_TaskResponse,omitempty"` + Res *types.PerformDvsProductSpecOperation_TaskResponse `xml:"PerformDvsProductSpecOperation_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8165,7 +8965,7 @@ func PerformDvsProductSpecOperation_Task(ctx context.Context, r soap.RoundTrippe type PerformVsanUpgradePreflightCheckBody struct { Req *types.PerformVsanUpgradePreflightCheck `xml:"urn:vim25 PerformVsanUpgradePreflightCheck,omitempty"` - Res *types.PerformVsanUpgradePreflightCheckResponse `xml:"urn:vim25 PerformVsanUpgradePreflightCheckResponse,omitempty"` + Res *types.PerformVsanUpgradePreflightCheckResponse `xml:"PerformVsanUpgradePreflightCheckResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8185,7 +8985,7 @@ func PerformVsanUpgradePreflightCheck(ctx context.Context, r soap.RoundTripper, type PerformVsanUpgrade_TaskBody struct { Req *types.PerformVsanUpgrade_Task `xml:"urn:vim25 PerformVsanUpgrade_Task,omitempty"` - Res *types.PerformVsanUpgrade_TaskResponse `xml:"urn:vim25 PerformVsanUpgrade_TaskResponse,omitempty"` + Res *types.PerformVsanUpgrade_TaskResponse `xml:"PerformVsanUpgrade_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8205,7 +9005,7 @@ func PerformVsanUpgrade_Task(ctx context.Context, r soap.RoundTripper, req *type type PlaceVmBody struct { Req *types.PlaceVm `xml:"urn:vim25 PlaceVm,omitempty"` - Res *types.PlaceVmResponse `xml:"urn:vim25 PlaceVmResponse,omitempty"` + Res *types.PlaceVmResponse `xml:"PlaceVmResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8225,7 +9025,7 @@ func PlaceVm(ctx context.Context, r soap.RoundTripper, req *types.PlaceVm) (*typ type PostEventBody struct { Req *types.PostEvent `xml:"urn:vim25 PostEvent,omitempty"` - Res *types.PostEventResponse `xml:"urn:vim25 PostEventResponse,omitempty"` + Res *types.PostEventResponse `xml:"PostEventResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8245,7 +9045,7 @@ func PostEvent(ctx context.Context, r soap.RoundTripper, req *types.PostEvent) ( type PostHealthUpdatesBody struct { Req *types.PostHealthUpdates `xml:"urn:vim25 PostHealthUpdates,omitempty"` - Res *types.PostHealthUpdatesResponse `xml:"urn:vim25 PostHealthUpdatesResponse,omitempty"` + Res *types.PostHealthUpdatesResponse `xml:"PostHealthUpdatesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8265,7 +9065,7 @@ func PostHealthUpdates(ctx context.Context, r soap.RoundTripper, req *types.Post type PowerDownHostToStandBy_TaskBody struct { Req *types.PowerDownHostToStandBy_Task `xml:"urn:vim25 PowerDownHostToStandBy_Task,omitempty"` - Res *types.PowerDownHostToStandBy_TaskResponse `xml:"urn:vim25 PowerDownHostToStandBy_TaskResponse,omitempty"` + Res *types.PowerDownHostToStandBy_TaskResponse `xml:"PowerDownHostToStandBy_TaskResponse,omitempty"` Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8285,7 +9085,7 @@ func PowerDownHostToStandBy_Task(ctx context.Context, r soap.RoundTripper, req * type PowerOffVApp_TaskBody struct { Req *types.PowerOffVApp_Task `xml:"urn:vim25 PowerOffVApp_Task,omitempty"` - Res *types.PowerOffVApp_TaskResponse `xml:"urn:vim25 PowerOffVApp_TaskResponse,omitempty"` + Res *types.PowerOffVApp_TaskResponse `xml:"PowerOffVApp_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8305,7 +9105,7 @@ func PowerOffVApp_Task(ctx context.Context, r soap.RoundTripper, req *types.Powe type PowerOffVM_TaskBody struct { Req *types.PowerOffVM_Task `xml:"urn:vim25 PowerOffVM_Task,omitempty"` - Res *types.PowerOffVM_TaskResponse `xml:"urn:vim25 PowerOffVM_TaskResponse,omitempty"` + Res *types.PowerOffVM_TaskResponse `xml:"PowerOffVM_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8325,7 +9125,7 @@ func PowerOffVM_Task(ctx context.Context, r soap.RoundTripper, req *types.PowerO type PowerOnMultiVM_TaskBody struct { Req *types.PowerOnMultiVM_Task `xml:"urn:vim25 PowerOnMultiVM_Task,omitempty"` - Res *types.PowerOnMultiVM_TaskResponse `xml:"urn:vim25 PowerOnMultiVM_TaskResponse,omitempty"` + Res *types.PowerOnMultiVM_TaskResponse `xml:"PowerOnMultiVM_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8345,7 +9145,7 @@ func PowerOnMultiVM_Task(ctx context.Context, r soap.RoundTripper, req *types.Po type PowerOnVApp_TaskBody struct { Req *types.PowerOnVApp_Task `xml:"urn:vim25 PowerOnVApp_Task,omitempty"` - Res *types.PowerOnVApp_TaskResponse `xml:"urn:vim25 PowerOnVApp_TaskResponse,omitempty"` + Res *types.PowerOnVApp_TaskResponse `xml:"PowerOnVApp_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8365,7 +9165,7 @@ func PowerOnVApp_Task(ctx context.Context, r soap.RoundTripper, req *types.Power type PowerOnVM_TaskBody struct { Req *types.PowerOnVM_Task `xml:"urn:vim25 PowerOnVM_Task,omitempty"` - Res *types.PowerOnVM_TaskResponse `xml:"urn:vim25 PowerOnVM_TaskResponse,omitempty"` + Res *types.PowerOnVM_TaskResponse `xml:"PowerOnVM_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8385,7 +9185,7 @@ func PowerOnVM_Task(ctx context.Context, r soap.RoundTripper, req *types.PowerOn type PowerUpHostFromStandBy_TaskBody struct { Req *types.PowerUpHostFromStandBy_Task `xml:"urn:vim25 PowerUpHostFromStandBy_Task,omitempty"` - Res *types.PowerUpHostFromStandBy_TaskResponse `xml:"urn:vim25 PowerUpHostFromStandBy_TaskResponse,omitempty"` + Res *types.PowerUpHostFromStandBy_TaskResponse `xml:"PowerUpHostFromStandBy_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8405,7 +9205,7 @@ func PowerUpHostFromStandBy_Task(ctx context.Context, r soap.RoundTripper, req * type PrepareCryptoBody struct { Req *types.PrepareCrypto `xml:"urn:vim25 PrepareCrypto,omitempty"` - Res *types.PrepareCryptoResponse `xml:"urn:vim25 PrepareCryptoResponse,omitempty"` + Res *types.PrepareCryptoResponse `xml:"PrepareCryptoResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8425,7 +9225,7 @@ func PrepareCrypto(ctx context.Context, r soap.RoundTripper, req *types.PrepareC type PromoteDisks_TaskBody 
struct { Req *types.PromoteDisks_Task `xml:"urn:vim25 PromoteDisks_Task,omitempty"` - Res *types.PromoteDisks_TaskResponse `xml:"urn:vim25 PromoteDisks_TaskResponse,omitempty"` + Res *types.PromoteDisks_TaskResponse `xml:"PromoteDisks_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8445,7 +9245,7 @@ func PromoteDisks_Task(ctx context.Context, r soap.RoundTripper, req *types.Prom type PutUsbScanCodesBody struct { Req *types.PutUsbScanCodes `xml:"urn:vim25 PutUsbScanCodes,omitempty"` - Res *types.PutUsbScanCodesResponse `xml:"urn:vim25 PutUsbScanCodesResponse,omitempty"` + Res *types.PutUsbScanCodesResponse `xml:"PutUsbScanCodesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8465,7 +9265,7 @@ func PutUsbScanCodes(ctx context.Context, r soap.RoundTripper, req *types.PutUsb type QueryAnswerFileStatusBody struct { Req *types.QueryAnswerFileStatus `xml:"urn:vim25 QueryAnswerFileStatus,omitempty"` - Res *types.QueryAnswerFileStatusResponse `xml:"urn:vim25 QueryAnswerFileStatusResponse,omitempty"` + Res *types.QueryAnswerFileStatusResponse `xml:"QueryAnswerFileStatusResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8485,7 +9285,7 @@ func QueryAnswerFileStatus(ctx context.Context, r soap.RoundTripper, req *types. type QueryAssignedLicensesBody struct { Req *types.QueryAssignedLicenses `xml:"urn:vim25 QueryAssignedLicenses,omitempty"` - Res *types.QueryAssignedLicensesResponse `xml:"urn:vim25 QueryAssignedLicensesResponse,omitempty"` + Res *types.QueryAssignedLicensesResponse `xml:"QueryAssignedLicensesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8505,7 +9305,7 @@ func QueryAssignedLicenses(ctx context.Context, r soap.RoundTripper, req *types. type QueryAvailableDisksForVmfsBody struct { Req *types.QueryAvailableDisksForVmfs `xml:"urn:vim25 QueryAvailableDisksForVmfs,omitempty"` - Res *types.QueryAvailableDisksForVmfsResponse `xml:"urn:vim25 QueryAvailableDisksForVmfsResponse,omitempty"` + Res *types.QueryAvailableDisksForVmfsResponse `xml:"QueryAvailableDisksForVmfsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8525,7 +9325,7 @@ func QueryAvailableDisksForVmfs(ctx context.Context, r soap.RoundTripper, req *t type QueryAvailableDvsSpecBody struct { Req *types.QueryAvailableDvsSpec `xml:"urn:vim25 QueryAvailableDvsSpec,omitempty"` - Res *types.QueryAvailableDvsSpecResponse `xml:"urn:vim25 QueryAvailableDvsSpecResponse,omitempty"` + Res *types.QueryAvailableDvsSpecResponse `xml:"QueryAvailableDvsSpecResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8545,7 +9345,7 @@ func QueryAvailableDvsSpec(ctx context.Context, r soap.RoundTripper, req *types. 
type QueryAvailablePartitionBody struct { Req *types.QueryAvailablePartition `xml:"urn:vim25 QueryAvailablePartition,omitempty"` - Res *types.QueryAvailablePartitionResponse `xml:"urn:vim25 QueryAvailablePartitionResponse,omitempty"` + Res *types.QueryAvailablePartitionResponse `xml:"QueryAvailablePartitionResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8565,7 +9365,7 @@ func QueryAvailablePartition(ctx context.Context, r soap.RoundTripper, req *type type QueryAvailablePerfMetricBody struct { Req *types.QueryAvailablePerfMetric `xml:"urn:vim25 QueryAvailablePerfMetric,omitempty"` - Res *types.QueryAvailablePerfMetricResponse `xml:"urn:vim25 QueryAvailablePerfMetricResponse,omitempty"` + Res *types.QueryAvailablePerfMetricResponse `xml:"QueryAvailablePerfMetricResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8585,7 +9385,7 @@ func QueryAvailablePerfMetric(ctx context.Context, r soap.RoundTripper, req *typ type QueryAvailableSsdsBody struct { Req *types.QueryAvailableSsds `xml:"urn:vim25 QueryAvailableSsds,omitempty"` - Res *types.QueryAvailableSsdsResponse `xml:"urn:vim25 QueryAvailableSsdsResponse,omitempty"` + Res *types.QueryAvailableSsdsResponse `xml:"QueryAvailableSsdsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8605,7 +9405,7 @@ func QueryAvailableSsds(ctx context.Context, r soap.RoundTripper, req *types.Que type QueryAvailableTimeZonesBody struct { Req *types.QueryAvailableTimeZones `xml:"urn:vim25 QueryAvailableTimeZones,omitempty"` - Res *types.QueryAvailableTimeZonesResponse `xml:"urn:vim25 QueryAvailableTimeZonesResponse,omitempty"` + Res *types.QueryAvailableTimeZonesResponse `xml:"QueryAvailableTimeZonesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8625,7 +9425,7 @@ func QueryAvailableTimeZones(ctx context.Context, r soap.RoundTripper, req *type type QueryBootDevicesBody struct { Req *types.QueryBootDevices `xml:"urn:vim25 QueryBootDevices,omitempty"` - Res *types.QueryBootDevicesResponse `xml:"urn:vim25 QueryBootDevicesResponse,omitempty"` + Res *types.QueryBootDevicesResponse `xml:"QueryBootDevicesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8645,7 +9445,7 @@ func QueryBootDevices(ctx context.Context, r soap.RoundTripper, req *types.Query type QueryBoundVnicsBody struct { Req *types.QueryBoundVnics `xml:"urn:vim25 QueryBoundVnics,omitempty"` - Res *types.QueryBoundVnicsResponse `xml:"urn:vim25 QueryBoundVnicsResponse,omitempty"` + Res *types.QueryBoundVnicsResponse `xml:"QueryBoundVnicsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8665,7 +9465,7 @@ func QueryBoundVnics(ctx context.Context, r soap.RoundTripper, req *types.QueryB type QueryCandidateNicsBody struct { Req *types.QueryCandidateNics `xml:"urn:vim25 QueryCandidateNics,omitempty"` - Res *types.QueryCandidateNicsResponse `xml:"urn:vim25 QueryCandidateNicsResponse,omitempty"` + Res *types.QueryCandidateNicsResponse `xml:"QueryCandidateNicsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8685,7 +9485,7 @@ func QueryCandidateNics(ctx context.Context, r soap.RoundTripper, req *types.Que type QueryChangedDiskAreasBody struct { Req *types.QueryChangedDiskAreas `xml:"urn:vim25 
QueryChangedDiskAreas,omitempty"` - Res *types.QueryChangedDiskAreasResponse `xml:"urn:vim25 QueryChangedDiskAreasResponse,omitempty"` + Res *types.QueryChangedDiskAreasResponse `xml:"QueryChangedDiskAreasResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8705,7 +9505,7 @@ func QueryChangedDiskAreas(ctx context.Context, r soap.RoundTripper, req *types. type QueryCmmdsBody struct { Req *types.QueryCmmds `xml:"urn:vim25 QueryCmmds,omitempty"` - Res *types.QueryCmmdsResponse `xml:"urn:vim25 QueryCmmdsResponse,omitempty"` + Res *types.QueryCmmdsResponse `xml:"QueryCmmdsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8725,7 +9525,7 @@ func QueryCmmds(ctx context.Context, r soap.RoundTripper, req *types.QueryCmmds) type QueryCompatibleHostForExistingDvsBody struct { Req *types.QueryCompatibleHostForExistingDvs `xml:"urn:vim25 QueryCompatibleHostForExistingDvs,omitempty"` - Res *types.QueryCompatibleHostForExistingDvsResponse `xml:"urn:vim25 QueryCompatibleHostForExistingDvsResponse,omitempty"` + Res *types.QueryCompatibleHostForExistingDvsResponse `xml:"QueryCompatibleHostForExistingDvsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8745,7 +9545,7 @@ func QueryCompatibleHostForExistingDvs(ctx context.Context, r soap.RoundTripper, type QueryCompatibleHostForNewDvsBody struct { Req *types.QueryCompatibleHostForNewDvs `xml:"urn:vim25 QueryCompatibleHostForNewDvs,omitempty"` - Res *types.QueryCompatibleHostForNewDvsResponse `xml:"urn:vim25 QueryCompatibleHostForNewDvsResponse,omitempty"` + Res *types.QueryCompatibleHostForNewDvsResponse `xml:"QueryCompatibleHostForNewDvsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8765,7 +9565,7 @@ func QueryCompatibleHostForNewDvs(ctx context.Context, r soap.RoundTripper, req type QueryComplianceStatusBody struct { Req *types.QueryComplianceStatus `xml:"urn:vim25 QueryComplianceStatus,omitempty"` - Res *types.QueryComplianceStatusResponse `xml:"urn:vim25 QueryComplianceStatusResponse,omitempty"` + Res *types.QueryComplianceStatusResponse `xml:"QueryComplianceStatusResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8785,7 +9585,7 @@ func QueryComplianceStatus(ctx context.Context, r soap.RoundTripper, req *types. 
type QueryConfigOptionBody struct { Req *types.QueryConfigOption `xml:"urn:vim25 QueryConfigOption,omitempty"` - Res *types.QueryConfigOptionResponse `xml:"urn:vim25 QueryConfigOptionResponse,omitempty"` + Res *types.QueryConfigOptionResponse `xml:"QueryConfigOptionResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8805,7 +9605,7 @@ func QueryConfigOption(ctx context.Context, r soap.RoundTripper, req *types.Quer type QueryConfigOptionDescriptorBody struct { Req *types.QueryConfigOptionDescriptor `xml:"urn:vim25 QueryConfigOptionDescriptor,omitempty"` - Res *types.QueryConfigOptionDescriptorResponse `xml:"urn:vim25 QueryConfigOptionDescriptorResponse,omitempty"` + Res *types.QueryConfigOptionDescriptorResponse `xml:"QueryConfigOptionDescriptorResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8825,7 +9625,7 @@ func QueryConfigOptionDescriptor(ctx context.Context, r soap.RoundTripper, req * type QueryConfigOptionExBody struct { Req *types.QueryConfigOptionEx `xml:"urn:vim25 QueryConfigOptionEx,omitempty"` - Res *types.QueryConfigOptionExResponse `xml:"urn:vim25 QueryConfigOptionExResponse,omitempty"` + Res *types.QueryConfigOptionExResponse `xml:"QueryConfigOptionExResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8845,7 +9645,7 @@ func QueryConfigOptionEx(ctx context.Context, r soap.RoundTripper, req *types.Qu type QueryConfigTargetBody struct { Req *types.QueryConfigTarget `xml:"urn:vim25 QueryConfigTarget,omitempty"` - Res *types.QueryConfigTargetResponse `xml:"urn:vim25 QueryConfigTargetResponse,omitempty"` + Res *types.QueryConfigTargetResponse `xml:"QueryConfigTargetResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8865,7 +9665,7 @@ func QueryConfigTarget(ctx context.Context, r soap.RoundTripper, req *types.Quer type QueryConfiguredModuleOptionStringBody struct { Req *types.QueryConfiguredModuleOptionString `xml:"urn:vim25 QueryConfiguredModuleOptionString,omitempty"` - Res *types.QueryConfiguredModuleOptionStringResponse `xml:"urn:vim25 QueryConfiguredModuleOptionStringResponse,omitempty"` + Res *types.QueryConfiguredModuleOptionStringResponse `xml:"QueryConfiguredModuleOptionStringResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8885,7 +9685,7 @@ func QueryConfiguredModuleOptionString(ctx context.Context, r soap.RoundTripper, type QueryConnectionInfoBody struct { Req *types.QueryConnectionInfo `xml:"urn:vim25 QueryConnectionInfo,omitempty"` - Res *types.QueryConnectionInfoResponse `xml:"urn:vim25 QueryConnectionInfoResponse,omitempty"` + Res *types.QueryConnectionInfoResponse `xml:"QueryConnectionInfoResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8905,7 +9705,7 @@ func QueryConnectionInfo(ctx context.Context, r soap.RoundTripper, req *types.Qu type QueryConnectionInfoViaSpecBody struct { Req *types.QueryConnectionInfoViaSpec `xml:"urn:vim25 QueryConnectionInfoViaSpec,omitempty"` - Res *types.QueryConnectionInfoViaSpecResponse `xml:"urn:vim25 QueryConnectionInfoViaSpecResponse,omitempty"` + Res *types.QueryConnectionInfoViaSpecResponse `xml:"QueryConnectionInfoViaSpecResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8923,9 +9723,49 @@ func 
QueryConnectionInfoViaSpec(ctx context.Context, r soap.RoundTripper, req *t return resBody.Res, nil } +type QueryConnectionsBody struct { + Req *types.QueryConnections `xml:"urn:vim25 QueryConnections,omitempty"` + Res *types.QueryConnectionsResponse `xml:"QueryConnectionsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryConnectionsBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryConnections(ctx context.Context, r soap.RoundTripper, req *types.QueryConnections) (*types.QueryConnectionsResponse, error) { + var reqBody, resBody QueryConnectionsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type QueryCryptoKeyStatusBody struct { + Req *types.QueryCryptoKeyStatus `xml:"urn:vim25 QueryCryptoKeyStatus,omitempty"` + Res *types.QueryCryptoKeyStatusResponse `xml:"QueryCryptoKeyStatusResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryCryptoKeyStatusBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryCryptoKeyStatus(ctx context.Context, r soap.RoundTripper, req *types.QueryCryptoKeyStatus) (*types.QueryCryptoKeyStatusResponse, error) { + var reqBody, resBody QueryCryptoKeyStatusBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type QueryDatastorePerformanceSummaryBody struct { Req *types.QueryDatastorePerformanceSummary `xml:"urn:vim25 QueryDatastorePerformanceSummary,omitempty"` - Res *types.QueryDatastorePerformanceSummaryResponse `xml:"urn:vim25 QueryDatastorePerformanceSummaryResponse,omitempty"` + Res *types.QueryDatastorePerformanceSummaryResponse `xml:"QueryDatastorePerformanceSummaryResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8945,7 +9785,7 @@ func QueryDatastorePerformanceSummary(ctx context.Context, r soap.RoundTripper, type QueryDateTimeBody struct { Req *types.QueryDateTime `xml:"urn:vim25 QueryDateTime,omitempty"` - Res *types.QueryDateTimeResponse `xml:"urn:vim25 QueryDateTimeResponse,omitempty"` + Res *types.QueryDateTimeResponse `xml:"QueryDateTimeResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8965,7 +9805,7 @@ func QueryDateTime(ctx context.Context, r soap.RoundTripper, req *types.QueryDat type QueryDescriptionsBody struct { Req *types.QueryDescriptions `xml:"urn:vim25 QueryDescriptions,omitempty"` - Res *types.QueryDescriptionsResponse `xml:"urn:vim25 QueryDescriptionsResponse,omitempty"` + Res *types.QueryDescriptionsResponse `xml:"QueryDescriptionsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -8985,7 +9825,7 @@ func QueryDescriptions(ctx context.Context, r soap.RoundTripper, req *types.Quer type QueryDisksForVsanBody struct { Req *types.QueryDisksForVsan `xml:"urn:vim25 QueryDisksForVsan,omitempty"` - Res *types.QueryDisksForVsanResponse `xml:"urn:vim25 QueryDisksForVsanResponse,omitempty"` + Res *types.QueryDisksForVsanResponse `xml:"QueryDisksForVsanResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9005,7 +9845,7 @@ func QueryDisksForVsan(ctx context.Context, r soap.RoundTripper, req *types.Quer type QueryDisksUsingFilterBody struct { Req 
*types.QueryDisksUsingFilter `xml:"urn:vim25 QueryDisksUsingFilter,omitempty"` - Res *types.QueryDisksUsingFilterResponse `xml:"urn:vim25 QueryDisksUsingFilterResponse,omitempty"` + Res *types.QueryDisksUsingFilterResponse `xml:"QueryDisksUsingFilterResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9025,7 +9865,7 @@ func QueryDisksUsingFilter(ctx context.Context, r soap.RoundTripper, req *types. type QueryDvsByUuidBody struct { Req *types.QueryDvsByUuid `xml:"urn:vim25 QueryDvsByUuid,omitempty"` - Res *types.QueryDvsByUuidResponse `xml:"urn:vim25 QueryDvsByUuidResponse,omitempty"` + Res *types.QueryDvsByUuidResponse `xml:"QueryDvsByUuidResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9045,7 +9885,7 @@ func QueryDvsByUuid(ctx context.Context, r soap.RoundTripper, req *types.QueryDv type QueryDvsCheckCompatibilityBody struct { Req *types.QueryDvsCheckCompatibility `xml:"urn:vim25 QueryDvsCheckCompatibility,omitempty"` - Res *types.QueryDvsCheckCompatibilityResponse `xml:"urn:vim25 QueryDvsCheckCompatibilityResponse,omitempty"` + Res *types.QueryDvsCheckCompatibilityResponse `xml:"QueryDvsCheckCompatibilityResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9065,7 +9905,7 @@ func QueryDvsCheckCompatibility(ctx context.Context, r soap.RoundTripper, req *t type QueryDvsCompatibleHostSpecBody struct { Req *types.QueryDvsCompatibleHostSpec `xml:"urn:vim25 QueryDvsCompatibleHostSpec,omitempty"` - Res *types.QueryDvsCompatibleHostSpecResponse `xml:"urn:vim25 QueryDvsCompatibleHostSpecResponse,omitempty"` + Res *types.QueryDvsCompatibleHostSpecResponse `xml:"QueryDvsCompatibleHostSpecResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9085,7 +9925,7 @@ func QueryDvsCompatibleHostSpec(ctx context.Context, r soap.RoundTripper, req *t type QueryDvsConfigTargetBody struct { Req *types.QueryDvsConfigTarget `xml:"urn:vim25 QueryDvsConfigTarget,omitempty"` - Res *types.QueryDvsConfigTargetResponse `xml:"urn:vim25 QueryDvsConfigTargetResponse,omitempty"` + Res *types.QueryDvsConfigTargetResponse `xml:"QueryDvsConfigTargetResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9105,7 +9945,7 @@ func QueryDvsConfigTarget(ctx context.Context, r soap.RoundTripper, req *types.Q type QueryDvsFeatureCapabilityBody struct { Req *types.QueryDvsFeatureCapability `xml:"urn:vim25 QueryDvsFeatureCapability,omitempty"` - Res *types.QueryDvsFeatureCapabilityResponse `xml:"urn:vim25 QueryDvsFeatureCapabilityResponse,omitempty"` + Res *types.QueryDvsFeatureCapabilityResponse `xml:"QueryDvsFeatureCapabilityResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9125,7 +9965,7 @@ func QueryDvsFeatureCapability(ctx context.Context, r soap.RoundTripper, req *ty type QueryEventsBody struct { Req *types.QueryEvents `xml:"urn:vim25 QueryEvents,omitempty"` - Res *types.QueryEventsResponse `xml:"urn:vim25 QueryEventsResponse,omitempty"` + Res *types.QueryEventsResponse `xml:"QueryEventsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9145,7 +9985,7 @@ func QueryEvents(ctx context.Context, r soap.RoundTripper, req *types.QueryEvent type QueryExpressionMetadataBody struct { Req *types.QueryExpressionMetadata 
`xml:"urn:vim25 QueryExpressionMetadata,omitempty"` - Res *types.QueryExpressionMetadataResponse `xml:"urn:vim25 QueryExpressionMetadataResponse,omitempty"` + Res *types.QueryExpressionMetadataResponse `xml:"QueryExpressionMetadataResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9165,7 +10005,7 @@ func QueryExpressionMetadata(ctx context.Context, r soap.RoundTripper, req *type type QueryExtensionIpAllocationUsageBody struct { Req *types.QueryExtensionIpAllocationUsage `xml:"urn:vim25 QueryExtensionIpAllocationUsage,omitempty"` - Res *types.QueryExtensionIpAllocationUsageResponse `xml:"urn:vim25 QueryExtensionIpAllocationUsageResponse,omitempty"` + Res *types.QueryExtensionIpAllocationUsageResponse `xml:"QueryExtensionIpAllocationUsageResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9185,7 +10025,7 @@ func QueryExtensionIpAllocationUsage(ctx context.Context, r soap.RoundTripper, r type QueryFaultToleranceCompatibilityBody struct { Req *types.QueryFaultToleranceCompatibility `xml:"urn:vim25 QueryFaultToleranceCompatibility,omitempty"` - Res *types.QueryFaultToleranceCompatibilityResponse `xml:"urn:vim25 QueryFaultToleranceCompatibilityResponse,omitempty"` + Res *types.QueryFaultToleranceCompatibilityResponse `xml:"QueryFaultToleranceCompatibilityResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9205,7 +10045,7 @@ func QueryFaultToleranceCompatibility(ctx context.Context, r soap.RoundTripper, type QueryFaultToleranceCompatibilityExBody struct { Req *types.QueryFaultToleranceCompatibilityEx `xml:"urn:vim25 QueryFaultToleranceCompatibilityEx,omitempty"` - Res *types.QueryFaultToleranceCompatibilityExResponse `xml:"urn:vim25 QueryFaultToleranceCompatibilityExResponse,omitempty"` + Res *types.QueryFaultToleranceCompatibilityExResponse `xml:"QueryFaultToleranceCompatibilityExResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9225,7 +10065,7 @@ func QueryFaultToleranceCompatibilityEx(ctx context.Context, r soap.RoundTripper type QueryFilterEntitiesBody struct { Req *types.QueryFilterEntities `xml:"urn:vim25 QueryFilterEntities,omitempty"` - Res *types.QueryFilterEntitiesResponse `xml:"urn:vim25 QueryFilterEntitiesResponse,omitempty"` + Res *types.QueryFilterEntitiesResponse `xml:"QueryFilterEntitiesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9245,7 +10085,7 @@ func QueryFilterEntities(ctx context.Context, r soap.RoundTripper, req *types.Qu type QueryFilterInfoIdsBody struct { Req *types.QueryFilterInfoIds `xml:"urn:vim25 QueryFilterInfoIds,omitempty"` - Res *types.QueryFilterInfoIdsResponse `xml:"urn:vim25 QueryFilterInfoIdsResponse,omitempty"` + Res *types.QueryFilterInfoIdsResponse `xml:"QueryFilterInfoIdsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9265,7 +10105,7 @@ func QueryFilterInfoIds(ctx context.Context, r soap.RoundTripper, req *types.Que type QueryFilterListBody struct { Req *types.QueryFilterList `xml:"urn:vim25 QueryFilterList,omitempty"` - Res *types.QueryFilterListResponse `xml:"urn:vim25 QueryFilterListResponse,omitempty"` + Res *types.QueryFilterListResponse `xml:"QueryFilterListResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9285,7 +10125,7 
@@ func QueryFilterList(ctx context.Context, r soap.RoundTripper, req *types.QueryF type QueryFilterNameBody struct { Req *types.QueryFilterName `xml:"urn:vim25 QueryFilterName,omitempty"` - Res *types.QueryFilterNameResponse `xml:"urn:vim25 QueryFilterNameResponse,omitempty"` + Res *types.QueryFilterNameResponse `xml:"QueryFilterNameResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9305,7 +10145,7 @@ func QueryFilterName(ctx context.Context, r soap.RoundTripper, req *types.QueryF type QueryFirmwareConfigUploadURLBody struct { Req *types.QueryFirmwareConfigUploadURL `xml:"urn:vim25 QueryFirmwareConfigUploadURL,omitempty"` - Res *types.QueryFirmwareConfigUploadURLResponse `xml:"urn:vim25 QueryFirmwareConfigUploadURLResponse,omitempty"` + Res *types.QueryFirmwareConfigUploadURLResponse `xml:"QueryFirmwareConfigUploadURLResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9325,7 +10165,7 @@ func QueryFirmwareConfigUploadURL(ctx context.Context, r soap.RoundTripper, req type QueryHealthUpdateInfosBody struct { Req *types.QueryHealthUpdateInfos `xml:"urn:vim25 QueryHealthUpdateInfos,omitempty"` - Res *types.QueryHealthUpdateInfosResponse `xml:"urn:vim25 QueryHealthUpdateInfosResponse,omitempty"` + Res *types.QueryHealthUpdateInfosResponse `xml:"QueryHealthUpdateInfosResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9345,7 +10185,7 @@ func QueryHealthUpdateInfos(ctx context.Context, r soap.RoundTripper, req *types type QueryHealthUpdatesBody struct { Req *types.QueryHealthUpdates `xml:"urn:vim25 QueryHealthUpdates,omitempty"` - Res *types.QueryHealthUpdatesResponse `xml:"urn:vim25 QueryHealthUpdatesResponse,omitempty"` + Res *types.QueryHealthUpdatesResponse `xml:"QueryHealthUpdatesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9365,7 +10205,7 @@ func QueryHealthUpdates(ctx context.Context, r soap.RoundTripper, req *types.Que type QueryHostConnectionInfoBody struct { Req *types.QueryHostConnectionInfo `xml:"urn:vim25 QueryHostConnectionInfo,omitempty"` - Res *types.QueryHostConnectionInfoResponse `xml:"urn:vim25 QueryHostConnectionInfoResponse,omitempty"` + Res *types.QueryHostConnectionInfoResponse `xml:"QueryHostConnectionInfoResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9385,7 +10225,7 @@ func QueryHostConnectionInfo(ctx context.Context, r soap.RoundTripper, req *type type QueryHostPatch_TaskBody struct { Req *types.QueryHostPatch_Task `xml:"urn:vim25 QueryHostPatch_Task,omitempty"` - Res *types.QueryHostPatch_TaskResponse `xml:"urn:vim25 QueryHostPatch_TaskResponse,omitempty"` + Res *types.QueryHostPatch_TaskResponse `xml:"QueryHostPatch_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9405,7 +10245,7 @@ func QueryHostPatch_Task(ctx context.Context, r soap.RoundTripper, req *types.Qu type QueryHostProfileMetadataBody struct { Req *types.QueryHostProfileMetadata `xml:"urn:vim25 QueryHostProfileMetadata,omitempty"` - Res *types.QueryHostProfileMetadataResponse `xml:"urn:vim25 QueryHostProfileMetadataResponse,omitempty"` + Res *types.QueryHostProfileMetadataResponse `xml:"QueryHostProfileMetadataResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9425,7 +10265,7 
@@ func QueryHostProfileMetadata(ctx context.Context, r soap.RoundTripper, req *typ type QueryHostStatusBody struct { Req *types.QueryHostStatus `xml:"urn:vim25 QueryHostStatus,omitempty"` - Res *types.QueryHostStatusResponse `xml:"urn:vim25 QueryHostStatusResponse,omitempty"` + Res *types.QueryHostStatusResponse `xml:"QueryHostStatusResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9443,9 +10283,29 @@ func QueryHostStatus(ctx context.Context, r soap.RoundTripper, req *types.QueryH return resBody.Res, nil } +type QueryHostsWithAttachedLunBody struct { + Req *types.QueryHostsWithAttachedLun `xml:"urn:vim25 QueryHostsWithAttachedLun,omitempty"` + Res *types.QueryHostsWithAttachedLunResponse `xml:"QueryHostsWithAttachedLunResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryHostsWithAttachedLunBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryHostsWithAttachedLun(ctx context.Context, r soap.RoundTripper, req *types.QueryHostsWithAttachedLun) (*types.QueryHostsWithAttachedLunResponse, error) { + var reqBody, resBody QueryHostsWithAttachedLunBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type QueryIORMConfigOptionBody struct { Req *types.QueryIORMConfigOption `xml:"urn:vim25 QueryIORMConfigOption,omitempty"` - Res *types.QueryIORMConfigOptionResponse `xml:"urn:vim25 QueryIORMConfigOptionResponse,omitempty"` + Res *types.QueryIORMConfigOptionResponse `xml:"QueryIORMConfigOptionResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9465,7 +10325,7 @@ func QueryIORMConfigOption(ctx context.Context, r soap.RoundTripper, req *types. 
type QueryIPAllocationsBody struct { Req *types.QueryIPAllocations `xml:"urn:vim25 QueryIPAllocations,omitempty"` - Res *types.QueryIPAllocationsResponse `xml:"urn:vim25 QueryIPAllocationsResponse,omitempty"` + Res *types.QueryIPAllocationsResponse `xml:"QueryIPAllocationsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9485,7 +10345,7 @@ func QueryIPAllocations(ctx context.Context, r soap.RoundTripper, req *types.Que type QueryIoFilterInfoBody struct { Req *types.QueryIoFilterInfo `xml:"urn:vim25 QueryIoFilterInfo,omitempty"` - Res *types.QueryIoFilterInfoResponse `xml:"urn:vim25 QueryIoFilterInfoResponse,omitempty"` + Res *types.QueryIoFilterInfoResponse `xml:"QueryIoFilterInfoResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9505,7 +10365,7 @@ func QueryIoFilterInfo(ctx context.Context, r soap.RoundTripper, req *types.Quer type QueryIoFilterIssuesBody struct { Req *types.QueryIoFilterIssues `xml:"urn:vim25 QueryIoFilterIssues,omitempty"` - Res *types.QueryIoFilterIssuesResponse `xml:"urn:vim25 QueryIoFilterIssuesResponse,omitempty"` + Res *types.QueryIoFilterIssuesResponse `xml:"QueryIoFilterIssuesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9525,7 +10385,7 @@ func QueryIoFilterIssues(ctx context.Context, r soap.RoundTripper, req *types.Qu type QueryIpPoolsBody struct { Req *types.QueryIpPools `xml:"urn:vim25 QueryIpPools,omitempty"` - Res *types.QueryIpPoolsResponse `xml:"urn:vim25 QueryIpPoolsResponse,omitempty"` + Res *types.QueryIpPoolsResponse `xml:"QueryIpPoolsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9545,7 +10405,7 @@ func QueryIpPools(ctx context.Context, r soap.RoundTripper, req *types.QueryIpPo type QueryLicenseSourceAvailabilityBody struct { Req *types.QueryLicenseSourceAvailability `xml:"urn:vim25 QueryLicenseSourceAvailability,omitempty"` - Res *types.QueryLicenseSourceAvailabilityResponse `xml:"urn:vim25 QueryLicenseSourceAvailabilityResponse,omitempty"` + Res *types.QueryLicenseSourceAvailabilityResponse `xml:"QueryLicenseSourceAvailabilityResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9565,7 +10425,7 @@ func QueryLicenseSourceAvailability(ctx context.Context, r soap.RoundTripper, re type QueryLicenseUsageBody struct { Req *types.QueryLicenseUsage `xml:"urn:vim25 QueryLicenseUsage,omitempty"` - Res *types.QueryLicenseUsageResponse `xml:"urn:vim25 QueryLicenseUsageResponse,omitempty"` + Res *types.QueryLicenseUsageResponse `xml:"QueryLicenseUsageResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9585,7 +10445,7 @@ func QueryLicenseUsage(ctx context.Context, r soap.RoundTripper, req *types.Quer type QueryLockdownExceptionsBody struct { Req *types.QueryLockdownExceptions `xml:"urn:vim25 QueryLockdownExceptions,omitempty"` - Res *types.QueryLockdownExceptionsResponse `xml:"urn:vim25 QueryLockdownExceptionsResponse,omitempty"` + Res *types.QueryLockdownExceptionsResponse `xml:"QueryLockdownExceptionsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9605,7 +10465,7 @@ func QueryLockdownExceptions(ctx context.Context, r soap.RoundTripper, req *type type QueryManagedByBody struct { Req *types.QueryManagedBy `xml:"urn:vim25 
QueryManagedBy,omitempty"` - Res *types.QueryManagedByResponse `xml:"urn:vim25 QueryManagedByResponse,omitempty"` + Res *types.QueryManagedByResponse `xml:"QueryManagedByResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9625,7 +10485,7 @@ func QueryManagedBy(ctx context.Context, r soap.RoundTripper, req *types.QueryMa type QueryMemoryOverheadBody struct { Req *types.QueryMemoryOverhead `xml:"urn:vim25 QueryMemoryOverhead,omitempty"` - Res *types.QueryMemoryOverheadResponse `xml:"urn:vim25 QueryMemoryOverheadResponse,omitempty"` + Res *types.QueryMemoryOverheadResponse `xml:"QueryMemoryOverheadResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9645,7 +10505,7 @@ func QueryMemoryOverhead(ctx context.Context, r soap.RoundTripper, req *types.Qu type QueryMemoryOverheadExBody struct { Req *types.QueryMemoryOverheadEx `xml:"urn:vim25 QueryMemoryOverheadEx,omitempty"` - Res *types.QueryMemoryOverheadExResponse `xml:"urn:vim25 QueryMemoryOverheadExResponse,omitempty"` + Res *types.QueryMemoryOverheadExResponse `xml:"QueryMemoryOverheadExResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9665,7 +10525,7 @@ func QueryMemoryOverheadEx(ctx context.Context, r soap.RoundTripper, req *types. type QueryMigrationDependenciesBody struct { Req *types.QueryMigrationDependencies `xml:"urn:vim25 QueryMigrationDependencies,omitempty"` - Res *types.QueryMigrationDependenciesResponse `xml:"urn:vim25 QueryMigrationDependenciesResponse,omitempty"` + Res *types.QueryMigrationDependenciesResponse `xml:"QueryMigrationDependenciesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9685,7 +10545,7 @@ func QueryMigrationDependencies(ctx context.Context, r soap.RoundTripper, req *t type QueryModulesBody struct { Req *types.QueryModules `xml:"urn:vim25 QueryModules,omitempty"` - Res *types.QueryModulesResponse `xml:"urn:vim25 QueryModulesResponse,omitempty"` + Res *types.QueryModulesResponse `xml:"QueryModulesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9705,7 +10565,7 @@ func QueryModules(ctx context.Context, r soap.RoundTripper, req *types.QueryModu type QueryMonitoredEntitiesBody struct { Req *types.QueryMonitoredEntities `xml:"urn:vim25 QueryMonitoredEntities,omitempty"` - Res *types.QueryMonitoredEntitiesResponse `xml:"urn:vim25 QueryMonitoredEntitiesResponse,omitempty"` + Res *types.QueryMonitoredEntitiesResponse `xml:"QueryMonitoredEntitiesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9725,7 +10585,7 @@ func QueryMonitoredEntities(ctx context.Context, r soap.RoundTripper, req *types type QueryNFSUserBody struct { Req *types.QueryNFSUser `xml:"urn:vim25 QueryNFSUser,omitempty"` - Res *types.QueryNFSUserResponse `xml:"urn:vim25 QueryNFSUserResponse,omitempty"` + Res *types.QueryNFSUserResponse `xml:"QueryNFSUserResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9745,7 +10605,7 @@ func QueryNFSUser(ctx context.Context, r soap.RoundTripper, req *types.QueryNFSU type QueryNetConfigBody struct { Req *types.QueryNetConfig `xml:"urn:vim25 QueryNetConfig,omitempty"` - Res *types.QueryNetConfigResponse `xml:"urn:vim25 QueryNetConfigResponse,omitempty"` + Res *types.QueryNetConfigResponse 
`xml:"QueryNetConfigResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9765,7 +10625,7 @@ func QueryNetConfig(ctx context.Context, r soap.RoundTripper, req *types.QueryNe type QueryNetworkHintBody struct { Req *types.QueryNetworkHint `xml:"urn:vim25 QueryNetworkHint,omitempty"` - Res *types.QueryNetworkHintResponse `xml:"urn:vim25 QueryNetworkHintResponse,omitempty"` + Res *types.QueryNetworkHintResponse `xml:"QueryNetworkHintResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9785,7 +10645,7 @@ func QueryNetworkHint(ctx context.Context, r soap.RoundTripper, req *types.Query type QueryObjectsOnPhysicalVsanDiskBody struct { Req *types.QueryObjectsOnPhysicalVsanDisk `xml:"urn:vim25 QueryObjectsOnPhysicalVsanDisk,omitempty"` - Res *types.QueryObjectsOnPhysicalVsanDiskResponse `xml:"urn:vim25 QueryObjectsOnPhysicalVsanDiskResponse,omitempty"` + Res *types.QueryObjectsOnPhysicalVsanDiskResponse `xml:"QueryObjectsOnPhysicalVsanDiskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9805,7 +10665,7 @@ func QueryObjectsOnPhysicalVsanDisk(ctx context.Context, r soap.RoundTripper, re type QueryOptionsBody struct { Req *types.QueryOptions `xml:"urn:vim25 QueryOptions,omitempty"` - Res *types.QueryOptionsResponse `xml:"urn:vim25 QueryOptionsResponse,omitempty"` + Res *types.QueryOptionsResponse `xml:"QueryOptionsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9825,7 +10685,7 @@ func QueryOptions(ctx context.Context, r soap.RoundTripper, req *types.QueryOpti type QueryPartitionCreateDescBody struct { Req *types.QueryPartitionCreateDesc `xml:"urn:vim25 QueryPartitionCreateDesc,omitempty"` - Res *types.QueryPartitionCreateDescResponse `xml:"urn:vim25 QueryPartitionCreateDescResponse,omitempty"` + Res *types.QueryPartitionCreateDescResponse `xml:"QueryPartitionCreateDescResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9845,7 +10705,7 @@ func QueryPartitionCreateDesc(ctx context.Context, r soap.RoundTripper, req *typ type QueryPartitionCreateOptionsBody struct { Req *types.QueryPartitionCreateOptions `xml:"urn:vim25 QueryPartitionCreateOptions,omitempty"` - Res *types.QueryPartitionCreateOptionsResponse `xml:"urn:vim25 QueryPartitionCreateOptionsResponse,omitempty"` + Res *types.QueryPartitionCreateOptionsResponse `xml:"QueryPartitionCreateOptionsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9865,7 +10725,7 @@ func QueryPartitionCreateOptions(ctx context.Context, r soap.RoundTripper, req * type QueryPathSelectionPolicyOptionsBody struct { Req *types.QueryPathSelectionPolicyOptions `xml:"urn:vim25 QueryPathSelectionPolicyOptions,omitempty"` - Res *types.QueryPathSelectionPolicyOptionsResponse `xml:"urn:vim25 QueryPathSelectionPolicyOptionsResponse,omitempty"` + Res *types.QueryPathSelectionPolicyOptionsResponse `xml:"QueryPathSelectionPolicyOptionsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9885,7 +10745,7 @@ func QueryPathSelectionPolicyOptions(ctx context.Context, r soap.RoundTripper, r type QueryPerfBody struct { Req *types.QueryPerf `xml:"urn:vim25 QueryPerf,omitempty"` - Res *types.QueryPerfResponse `xml:"urn:vim25 QueryPerfResponse,omitempty"` + Res 
*types.QueryPerfResponse `xml:"QueryPerfResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9905,7 +10765,7 @@ func QueryPerf(ctx context.Context, r soap.RoundTripper, req *types.QueryPerf) ( type QueryPerfCompositeBody struct { Req *types.QueryPerfComposite `xml:"urn:vim25 QueryPerfComposite,omitempty"` - Res *types.QueryPerfCompositeResponse `xml:"urn:vim25 QueryPerfCompositeResponse,omitempty"` + Res *types.QueryPerfCompositeResponse `xml:"QueryPerfCompositeResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9925,7 +10785,7 @@ func QueryPerfComposite(ctx context.Context, r soap.RoundTripper, req *types.Que type QueryPerfCounterBody struct { Req *types.QueryPerfCounter `xml:"urn:vim25 QueryPerfCounter,omitempty"` - Res *types.QueryPerfCounterResponse `xml:"urn:vim25 QueryPerfCounterResponse,omitempty"` + Res *types.QueryPerfCounterResponse `xml:"QueryPerfCounterResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9945,7 +10805,7 @@ func QueryPerfCounter(ctx context.Context, r soap.RoundTripper, req *types.Query type QueryPerfCounterByLevelBody struct { Req *types.QueryPerfCounterByLevel `xml:"urn:vim25 QueryPerfCounterByLevel,omitempty"` - Res *types.QueryPerfCounterByLevelResponse `xml:"urn:vim25 QueryPerfCounterByLevelResponse,omitempty"` + Res *types.QueryPerfCounterByLevelResponse `xml:"QueryPerfCounterByLevelResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9965,7 +10825,7 @@ func QueryPerfCounterByLevel(ctx context.Context, r soap.RoundTripper, req *type type QueryPerfProviderSummaryBody struct { Req *types.QueryPerfProviderSummary `xml:"urn:vim25 QueryPerfProviderSummary,omitempty"` - Res *types.QueryPerfProviderSummaryResponse `xml:"urn:vim25 QueryPerfProviderSummaryResponse,omitempty"` + Res *types.QueryPerfProviderSummaryResponse `xml:"QueryPerfProviderSummaryResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -9985,7 +10845,7 @@ func QueryPerfProviderSummary(ctx context.Context, r soap.RoundTripper, req *typ type QueryPhysicalVsanDisksBody struct { Req *types.QueryPhysicalVsanDisks `xml:"urn:vim25 QueryPhysicalVsanDisks,omitempty"` - Res *types.QueryPhysicalVsanDisksResponse `xml:"urn:vim25 QueryPhysicalVsanDisksResponse,omitempty"` + Res *types.QueryPhysicalVsanDisksResponse `xml:"QueryPhysicalVsanDisksResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10005,7 +10865,7 @@ func QueryPhysicalVsanDisks(ctx context.Context, r soap.RoundTripper, req *types type QueryPnicStatusBody struct { Req *types.QueryPnicStatus `xml:"urn:vim25 QueryPnicStatus,omitempty"` - Res *types.QueryPnicStatusResponse `xml:"urn:vim25 QueryPnicStatusResponse,omitempty"` + Res *types.QueryPnicStatusResponse `xml:"QueryPnicStatusResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10025,7 +10885,7 @@ func QueryPnicStatus(ctx context.Context, r soap.RoundTripper, req *types.QueryP type QueryPolicyMetadataBody struct { Req *types.QueryPolicyMetadata `xml:"urn:vim25 QueryPolicyMetadata,omitempty"` - Res *types.QueryPolicyMetadataResponse `xml:"urn:vim25 QueryPolicyMetadataResponse,omitempty"` + Res *types.QueryPolicyMetadataResponse `xml:"QueryPolicyMetadataResponse,omitempty"` Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10043,9 +10903,29 @@ func QueryPolicyMetadata(ctx context.Context, r soap.RoundTripper, req *types.Qu return resBody.Res, nil } +type QueryProductLockerLocationBody struct { + Req *types.QueryProductLockerLocation `xml:"urn:vim25 QueryProductLockerLocation,omitempty"` + Res *types.QueryProductLockerLocationResponse `xml:"QueryProductLockerLocationResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryProductLockerLocationBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryProductLockerLocation(ctx context.Context, r soap.RoundTripper, req *types.QueryProductLockerLocation) (*types.QueryProductLockerLocationResponse, error) { + var reqBody, resBody QueryProductLockerLocationBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type QueryProfileStructureBody struct { Req *types.QueryProfileStructure `xml:"urn:vim25 QueryProfileStructure,omitempty"` - Res *types.QueryProfileStructureResponse `xml:"urn:vim25 QueryProfileStructureResponse,omitempty"` + Res *types.QueryProfileStructureResponse `xml:"QueryProfileStructureResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10065,7 +10945,7 @@ func QueryProfileStructure(ctx context.Context, r soap.RoundTripper, req *types. type QueryProviderListBody struct { Req *types.QueryProviderList `xml:"urn:vim25 QueryProviderList,omitempty"` - Res *types.QueryProviderListResponse `xml:"urn:vim25 QueryProviderListResponse,omitempty"` + Res *types.QueryProviderListResponse `xml:"QueryProviderListResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10085,7 +10965,7 @@ func QueryProviderList(ctx context.Context, r soap.RoundTripper, req *types.Quer type QueryProviderNameBody struct { Req *types.QueryProviderName `xml:"urn:vim25 QueryProviderName,omitempty"` - Res *types.QueryProviderNameResponse `xml:"urn:vim25 QueryProviderNameResponse,omitempty"` + Res *types.QueryProviderNameResponse `xml:"QueryProviderNameResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10105,7 +10985,7 @@ func QueryProviderName(ctx context.Context, r soap.RoundTripper, req *types.Quer type QueryResourceConfigOptionBody struct { Req *types.QueryResourceConfigOption `xml:"urn:vim25 QueryResourceConfigOption,omitempty"` - Res *types.QueryResourceConfigOptionResponse `xml:"urn:vim25 QueryResourceConfigOptionResponse,omitempty"` + Res *types.QueryResourceConfigOptionResponse `xml:"QueryResourceConfigOptionResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10125,7 +11005,7 @@ func QueryResourceConfigOption(ctx context.Context, r soap.RoundTripper, req *ty type QueryServiceListBody struct { Req *types.QueryServiceList `xml:"urn:vim25 QueryServiceList,omitempty"` - Res *types.QueryServiceListResponse `xml:"urn:vim25 QueryServiceListResponse,omitempty"` + Res *types.QueryServiceListResponse `xml:"QueryServiceListResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10145,7 +11025,7 @@ func QueryServiceList(ctx context.Context, r soap.RoundTripper, req *types.Query type QueryStorageArrayTypePolicyOptionsBody struct { Req *types.QueryStorageArrayTypePolicyOptions 
`xml:"urn:vim25 QueryStorageArrayTypePolicyOptions,omitempty"` - Res *types.QueryStorageArrayTypePolicyOptionsResponse `xml:"urn:vim25 QueryStorageArrayTypePolicyOptionsResponse,omitempty"` + Res *types.QueryStorageArrayTypePolicyOptionsResponse `xml:"QueryStorageArrayTypePolicyOptionsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10165,7 +11045,7 @@ func QueryStorageArrayTypePolicyOptions(ctx context.Context, r soap.RoundTripper type QuerySupportedFeaturesBody struct { Req *types.QuerySupportedFeatures `xml:"urn:vim25 QuerySupportedFeatures,omitempty"` - Res *types.QuerySupportedFeaturesResponse `xml:"urn:vim25 QuerySupportedFeaturesResponse,omitempty"` + Res *types.QuerySupportedFeaturesResponse `xml:"QuerySupportedFeaturesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10185,7 +11065,7 @@ func QuerySupportedFeatures(ctx context.Context, r soap.RoundTripper, req *types type QuerySyncingVsanObjectsBody struct { Req *types.QuerySyncingVsanObjects `xml:"urn:vim25 QuerySyncingVsanObjects,omitempty"` - Res *types.QuerySyncingVsanObjectsResponse `xml:"urn:vim25 QuerySyncingVsanObjectsResponse,omitempty"` + Res *types.QuerySyncingVsanObjectsResponse `xml:"QuerySyncingVsanObjectsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10205,7 +11085,7 @@ func QuerySyncingVsanObjects(ctx context.Context, r soap.RoundTripper, req *type type QuerySystemUsersBody struct { Req *types.QuerySystemUsers `xml:"urn:vim25 QuerySystemUsers,omitempty"` - Res *types.QuerySystemUsersResponse `xml:"urn:vim25 QuerySystemUsersResponse,omitempty"` + Res *types.QuerySystemUsersResponse `xml:"QuerySystemUsersResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10225,7 +11105,7 @@ func QuerySystemUsers(ctx context.Context, r soap.RoundTripper, req *types.Query type QueryTargetCapabilitiesBody struct { Req *types.QueryTargetCapabilities `xml:"urn:vim25 QueryTargetCapabilities,omitempty"` - Res *types.QueryTargetCapabilitiesResponse `xml:"urn:vim25 QueryTargetCapabilitiesResponse,omitempty"` + Res *types.QueryTargetCapabilitiesResponse `xml:"QueryTargetCapabilitiesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10245,7 +11125,7 @@ func QueryTargetCapabilities(ctx context.Context, r soap.RoundTripper, req *type type QueryTpmAttestationReportBody struct { Req *types.QueryTpmAttestationReport `xml:"urn:vim25 QueryTpmAttestationReport,omitempty"` - Res *types.QueryTpmAttestationReportResponse `xml:"urn:vim25 QueryTpmAttestationReportResponse,omitempty"` + Res *types.QueryTpmAttestationReportResponse `xml:"QueryTpmAttestationReportResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10265,7 +11145,7 @@ func QueryTpmAttestationReport(ctx context.Context, r soap.RoundTripper, req *ty type QueryUnmonitoredHostsBody struct { Req *types.QueryUnmonitoredHosts `xml:"urn:vim25 QueryUnmonitoredHosts,omitempty"` - Res *types.QueryUnmonitoredHostsResponse `xml:"urn:vim25 QueryUnmonitoredHostsResponse,omitempty"` + Res *types.QueryUnmonitoredHostsResponse `xml:"QueryUnmonitoredHostsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10285,7 +11165,7 @@ func QueryUnmonitoredHosts(ctx context.Context, r 
soap.RoundTripper, req *types. type QueryUnownedFilesBody struct { Req *types.QueryUnownedFiles `xml:"urn:vim25 QueryUnownedFiles,omitempty"` - Res *types.QueryUnownedFilesResponse `xml:"urn:vim25 QueryUnownedFilesResponse,omitempty"` + Res *types.QueryUnownedFilesResponse `xml:"QueryUnownedFilesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10305,7 +11185,7 @@ func QueryUnownedFiles(ctx context.Context, r soap.RoundTripper, req *types.Quer type QueryUnresolvedVmfsVolumeBody struct { Req *types.QueryUnresolvedVmfsVolume `xml:"urn:vim25 QueryUnresolvedVmfsVolume,omitempty"` - Res *types.QueryUnresolvedVmfsVolumeResponse `xml:"urn:vim25 QueryUnresolvedVmfsVolumeResponse,omitempty"` + Res *types.QueryUnresolvedVmfsVolumeResponse `xml:"QueryUnresolvedVmfsVolumeResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10325,7 +11205,7 @@ func QueryUnresolvedVmfsVolume(ctx context.Context, r soap.RoundTripper, req *ty type QueryUnresolvedVmfsVolumesBody struct { Req *types.QueryUnresolvedVmfsVolumes `xml:"urn:vim25 QueryUnresolvedVmfsVolumes,omitempty"` - Res *types.QueryUnresolvedVmfsVolumesResponse `xml:"urn:vim25 QueryUnresolvedVmfsVolumesResponse,omitempty"` + Res *types.QueryUnresolvedVmfsVolumesResponse `xml:"QueryUnresolvedVmfsVolumesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10345,7 +11225,7 @@ func QueryUnresolvedVmfsVolumes(ctx context.Context, r soap.RoundTripper, req *t type QueryUsedVlanIdInDvsBody struct { Req *types.QueryUsedVlanIdInDvs `xml:"urn:vim25 QueryUsedVlanIdInDvs,omitempty"` - Res *types.QueryUsedVlanIdInDvsResponse `xml:"urn:vim25 QueryUsedVlanIdInDvsResponse,omitempty"` + Res *types.QueryUsedVlanIdInDvsResponse `xml:"QueryUsedVlanIdInDvsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10365,7 +11245,7 @@ func QueryUsedVlanIdInDvs(ctx context.Context, r soap.RoundTripper, req *types.Q type QueryVMotionCompatibilityBody struct { Req *types.QueryVMotionCompatibility `xml:"urn:vim25 QueryVMotionCompatibility,omitempty"` - Res *types.QueryVMotionCompatibilityResponse `xml:"urn:vim25 QueryVMotionCompatibilityResponse,omitempty"` + Res *types.QueryVMotionCompatibilityResponse `xml:"QueryVMotionCompatibilityResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10385,7 +11265,7 @@ func QueryVMotionCompatibility(ctx context.Context, r soap.RoundTripper, req *ty type QueryVMotionCompatibilityEx_TaskBody struct { Req *types.QueryVMotionCompatibilityEx_Task `xml:"urn:vim25 QueryVMotionCompatibilityEx_Task,omitempty"` - Res *types.QueryVMotionCompatibilityEx_TaskResponse `xml:"urn:vim25 QueryVMotionCompatibilityEx_TaskResponse,omitempty"` + Res *types.QueryVMotionCompatibilityEx_TaskResponse `xml:"QueryVMotionCompatibilityEx_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10405,7 +11285,7 @@ func QueryVMotionCompatibilityEx_Task(ctx context.Context, r soap.RoundTripper, type QueryVirtualDiskFragmentationBody struct { Req *types.QueryVirtualDiskFragmentation `xml:"urn:vim25 QueryVirtualDiskFragmentation,omitempty"` - Res *types.QueryVirtualDiskFragmentationResponse `xml:"urn:vim25 QueryVirtualDiskFragmentationResponse,omitempty"` + Res *types.QueryVirtualDiskFragmentationResponse 
`xml:"QueryVirtualDiskFragmentationResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10425,7 +11305,7 @@ func QueryVirtualDiskFragmentation(ctx context.Context, r soap.RoundTripper, req type QueryVirtualDiskGeometryBody struct { Req *types.QueryVirtualDiskGeometry `xml:"urn:vim25 QueryVirtualDiskGeometry,omitempty"` - Res *types.QueryVirtualDiskGeometryResponse `xml:"urn:vim25 QueryVirtualDiskGeometryResponse,omitempty"` + Res *types.QueryVirtualDiskGeometryResponse `xml:"QueryVirtualDiskGeometryResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10445,7 +11325,7 @@ func QueryVirtualDiskGeometry(ctx context.Context, r soap.RoundTripper, req *typ type QueryVirtualDiskUuidBody struct { Req *types.QueryVirtualDiskUuid `xml:"urn:vim25 QueryVirtualDiskUuid,omitempty"` - Res *types.QueryVirtualDiskUuidResponse `xml:"urn:vim25 QueryVirtualDiskUuidResponse,omitempty"` + Res *types.QueryVirtualDiskUuidResponse `xml:"QueryVirtualDiskUuidResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10465,7 +11345,7 @@ func QueryVirtualDiskUuid(ctx context.Context, r soap.RoundTripper, req *types.Q type QueryVmfsConfigOptionBody struct { Req *types.QueryVmfsConfigOption `xml:"urn:vim25 QueryVmfsConfigOption,omitempty"` - Res *types.QueryVmfsConfigOptionResponse `xml:"urn:vim25 QueryVmfsConfigOptionResponse,omitempty"` + Res *types.QueryVmfsConfigOptionResponse `xml:"QueryVmfsConfigOptionResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10485,7 +11365,7 @@ func QueryVmfsConfigOption(ctx context.Context, r soap.RoundTripper, req *types. 
type QueryVmfsDatastoreCreateOptionsBody struct { Req *types.QueryVmfsDatastoreCreateOptions `xml:"urn:vim25 QueryVmfsDatastoreCreateOptions,omitempty"` - Res *types.QueryVmfsDatastoreCreateOptionsResponse `xml:"urn:vim25 QueryVmfsDatastoreCreateOptionsResponse,omitempty"` + Res *types.QueryVmfsDatastoreCreateOptionsResponse `xml:"QueryVmfsDatastoreCreateOptionsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10505,7 +11385,7 @@ func QueryVmfsDatastoreCreateOptions(ctx context.Context, r soap.RoundTripper, r type QueryVmfsDatastoreExpandOptionsBody struct { Req *types.QueryVmfsDatastoreExpandOptions `xml:"urn:vim25 QueryVmfsDatastoreExpandOptions,omitempty"` - Res *types.QueryVmfsDatastoreExpandOptionsResponse `xml:"urn:vim25 QueryVmfsDatastoreExpandOptionsResponse,omitempty"` + Res *types.QueryVmfsDatastoreExpandOptionsResponse `xml:"QueryVmfsDatastoreExpandOptionsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10525,7 +11405,7 @@ func QueryVmfsDatastoreExpandOptions(ctx context.Context, r soap.RoundTripper, r type QueryVmfsDatastoreExtendOptionsBody struct { Req *types.QueryVmfsDatastoreExtendOptions `xml:"urn:vim25 QueryVmfsDatastoreExtendOptions,omitempty"` - Res *types.QueryVmfsDatastoreExtendOptionsResponse `xml:"urn:vim25 QueryVmfsDatastoreExtendOptionsResponse,omitempty"` + Res *types.QueryVmfsDatastoreExtendOptionsResponse `xml:"QueryVmfsDatastoreExtendOptionsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10545,7 +11425,7 @@ func QueryVmfsDatastoreExtendOptions(ctx context.Context, r soap.RoundTripper, r type QueryVnicStatusBody struct { Req *types.QueryVnicStatus `xml:"urn:vim25 QueryVnicStatus,omitempty"` - Res *types.QueryVnicStatusResponse `xml:"urn:vim25 QueryVnicStatusResponse,omitempty"` + Res *types.QueryVnicStatusResponse `xml:"QueryVnicStatusResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10565,7 +11445,7 @@ func QueryVnicStatus(ctx context.Context, r soap.RoundTripper, req *types.QueryV type QueryVsanObjectUuidsByFilterBody struct { Req *types.QueryVsanObjectUuidsByFilter `xml:"urn:vim25 QueryVsanObjectUuidsByFilter,omitempty"` - Res *types.QueryVsanObjectUuidsByFilterResponse `xml:"urn:vim25 QueryVsanObjectUuidsByFilterResponse,omitempty"` + Res *types.QueryVsanObjectUuidsByFilterResponse `xml:"QueryVsanObjectUuidsByFilterResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10585,7 +11465,7 @@ func QueryVsanObjectUuidsByFilter(ctx context.Context, r soap.RoundTripper, req type QueryVsanObjectsBody struct { Req *types.QueryVsanObjects `xml:"urn:vim25 QueryVsanObjects,omitempty"` - Res *types.QueryVsanObjectsResponse `xml:"urn:vim25 QueryVsanObjectsResponse,omitempty"` + Res *types.QueryVsanObjectsResponse `xml:"QueryVsanObjectsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10605,7 +11485,7 @@ func QueryVsanObjects(ctx context.Context, r soap.RoundTripper, req *types.Query type QueryVsanStatisticsBody struct { Req *types.QueryVsanStatistics `xml:"urn:vim25 QueryVsanStatistics,omitempty"` - Res *types.QueryVsanStatisticsResponse `xml:"urn:vim25 QueryVsanStatisticsResponse,omitempty"` + Res *types.QueryVsanStatisticsResponse `xml:"QueryVsanStatisticsResponse,omitempty"` Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10625,7 +11505,7 @@ func QueryVsanStatistics(ctx context.Context, r soap.RoundTripper, req *types.Qu type QueryVsanUpgradeStatusBody struct { Req *types.QueryVsanUpgradeStatus `xml:"urn:vim25 QueryVsanUpgradeStatus,omitempty"` - Res *types.QueryVsanUpgradeStatusResponse `xml:"urn:vim25 QueryVsanUpgradeStatusResponse,omitempty"` + Res *types.QueryVsanUpgradeStatusResponse `xml:"QueryVsanUpgradeStatusResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10645,7 +11525,7 @@ func QueryVsanUpgradeStatus(ctx context.Context, r soap.RoundTripper, req *types type ReadEnvironmentVariableInGuestBody struct { Req *types.ReadEnvironmentVariableInGuest `xml:"urn:vim25 ReadEnvironmentVariableInGuest,omitempty"` - Res *types.ReadEnvironmentVariableInGuestResponse `xml:"urn:vim25 ReadEnvironmentVariableInGuestResponse,omitempty"` + Res *types.ReadEnvironmentVariableInGuestResponse `xml:"ReadEnvironmentVariableInGuestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10665,7 +11545,7 @@ func ReadEnvironmentVariableInGuest(ctx context.Context, r soap.RoundTripper, re type ReadNextEventsBody struct { Req *types.ReadNextEvents `xml:"urn:vim25 ReadNextEvents,omitempty"` - Res *types.ReadNextEventsResponse `xml:"urn:vim25 ReadNextEventsResponse,omitempty"` + Res *types.ReadNextEventsResponse `xml:"ReadNextEventsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10685,7 +11565,7 @@ func ReadNextEvents(ctx context.Context, r soap.RoundTripper, req *types.ReadNex type ReadNextTasksBody struct { Req *types.ReadNextTasks `xml:"urn:vim25 ReadNextTasks,omitempty"` - Res *types.ReadNextTasksResponse `xml:"urn:vim25 ReadNextTasksResponse,omitempty"` + Res *types.ReadNextTasksResponse `xml:"ReadNextTasksResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10705,7 +11585,7 @@ func ReadNextTasks(ctx context.Context, r soap.RoundTripper, req *types.ReadNext type ReadPreviousEventsBody struct { Req *types.ReadPreviousEvents `xml:"urn:vim25 ReadPreviousEvents,omitempty"` - Res *types.ReadPreviousEventsResponse `xml:"urn:vim25 ReadPreviousEventsResponse,omitempty"` + Res *types.ReadPreviousEventsResponse `xml:"ReadPreviousEventsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10725,7 +11605,7 @@ func ReadPreviousEvents(ctx context.Context, r soap.RoundTripper, req *types.Rea type ReadPreviousTasksBody struct { Req *types.ReadPreviousTasks `xml:"urn:vim25 ReadPreviousTasks,omitempty"` - Res *types.ReadPreviousTasksResponse `xml:"urn:vim25 ReadPreviousTasksResponse,omitempty"` + Res *types.ReadPreviousTasksResponse `xml:"ReadPreviousTasksResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10745,7 +11625,7 @@ func ReadPreviousTasks(ctx context.Context, r soap.RoundTripper, req *types.Read type RebootGuestBody struct { Req *types.RebootGuest `xml:"urn:vim25 RebootGuest,omitempty"` - Res *types.RebootGuestResponse `xml:"urn:vim25 RebootGuestResponse,omitempty"` + Res *types.RebootGuestResponse `xml:"RebootGuestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10765,7 +11645,7 @@ func RebootGuest(ctx context.Context, r soap.RoundTripper, req 
*types.RebootGues type RebootHost_TaskBody struct { Req *types.RebootHost_Task `xml:"urn:vim25 RebootHost_Task,omitempty"` - Res *types.RebootHost_TaskResponse `xml:"urn:vim25 RebootHost_TaskResponse,omitempty"` + Res *types.RebootHost_TaskResponse `xml:"RebootHost_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10785,7 +11665,7 @@ func RebootHost_Task(ctx context.Context, r soap.RoundTripper, req *types.Reboot type RecommendDatastoresBody struct { Req *types.RecommendDatastores `xml:"urn:vim25 RecommendDatastores,omitempty"` - Res *types.RecommendDatastoresResponse `xml:"urn:vim25 RecommendDatastoresResponse,omitempty"` + Res *types.RecommendDatastoresResponse `xml:"RecommendDatastoresResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10805,7 +11685,7 @@ func RecommendDatastores(ctx context.Context, r soap.RoundTripper, req *types.Re type RecommendHostsForVmBody struct { Req *types.RecommendHostsForVm `xml:"urn:vim25 RecommendHostsForVm,omitempty"` - Res *types.RecommendHostsForVmResponse `xml:"urn:vim25 RecommendHostsForVmResponse,omitempty"` + Res *types.RecommendHostsForVmResponse `xml:"RecommendHostsForVmResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10825,7 +11705,7 @@ func RecommendHostsForVm(ctx context.Context, r soap.RoundTripper, req *types.Re type RecommissionVsanNode_TaskBody struct { Req *types.RecommissionVsanNode_Task `xml:"urn:vim25 RecommissionVsanNode_Task,omitempty"` - Res *types.RecommissionVsanNode_TaskResponse `xml:"urn:vim25 RecommissionVsanNode_TaskResponse,omitempty"` + Res *types.RecommissionVsanNode_TaskResponse `xml:"RecommissionVsanNode_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10845,7 +11725,7 @@ func RecommissionVsanNode_Task(ctx context.Context, r soap.RoundTripper, req *ty type ReconcileDatastoreInventory_TaskBody struct { Req *types.ReconcileDatastoreInventory_Task `xml:"urn:vim25 ReconcileDatastoreInventory_Task,omitempty"` - Res *types.ReconcileDatastoreInventory_TaskResponse `xml:"urn:vim25 ReconcileDatastoreInventory_TaskResponse,omitempty"` + Res *types.ReconcileDatastoreInventory_TaskResponse `xml:"ReconcileDatastoreInventory_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10865,7 +11745,7 @@ func ReconcileDatastoreInventory_Task(ctx context.Context, r soap.RoundTripper, type ReconfigVM_TaskBody struct { Req *types.ReconfigVM_Task `xml:"urn:vim25 ReconfigVM_Task,omitempty"` - Res *types.ReconfigVM_TaskResponse `xml:"urn:vim25 ReconfigVM_TaskResponse,omitempty"` + Res *types.ReconfigVM_TaskResponse `xml:"ReconfigVM_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10885,7 +11765,7 @@ func ReconfigVM_Task(ctx context.Context, r soap.RoundTripper, req *types.Reconf type ReconfigurationSatisfiableBody struct { Req *types.ReconfigurationSatisfiable `xml:"urn:vim25 ReconfigurationSatisfiable,omitempty"` - Res *types.ReconfigurationSatisfiableResponse `xml:"urn:vim25 ReconfigurationSatisfiableResponse,omitempty"` + Res *types.ReconfigurationSatisfiableResponse `xml:"ReconfigurationSatisfiableResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10905,7 +11785,7 @@ func ReconfigurationSatisfiable(ctx 
context.Context, r soap.RoundTripper, req *t type ReconfigureAlarmBody struct { Req *types.ReconfigureAlarm `xml:"urn:vim25 ReconfigureAlarm,omitempty"` - Res *types.ReconfigureAlarmResponse `xml:"urn:vim25 ReconfigureAlarmResponse,omitempty"` + Res *types.ReconfigureAlarmResponse `xml:"ReconfigureAlarmResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10925,7 +11805,7 @@ func ReconfigureAlarm(ctx context.Context, r soap.RoundTripper, req *types.Recon type ReconfigureAutostartBody struct { Req *types.ReconfigureAutostart `xml:"urn:vim25 ReconfigureAutostart,omitempty"` - Res *types.ReconfigureAutostartResponse `xml:"urn:vim25 ReconfigureAutostartResponse,omitempty"` + Res *types.ReconfigureAutostartResponse `xml:"ReconfigureAutostartResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10945,7 +11825,7 @@ func ReconfigureAutostart(ctx context.Context, r soap.RoundTripper, req *types.R type ReconfigureCluster_TaskBody struct { Req *types.ReconfigureCluster_Task `xml:"urn:vim25 ReconfigureCluster_Task,omitempty"` - Res *types.ReconfigureCluster_TaskResponse `xml:"urn:vim25 ReconfigureCluster_TaskResponse,omitempty"` + Res *types.ReconfigureCluster_TaskResponse `xml:"ReconfigureCluster_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10965,7 +11845,7 @@ func ReconfigureCluster_Task(ctx context.Context, r soap.RoundTripper, req *type type ReconfigureComputeResource_TaskBody struct { Req *types.ReconfigureComputeResource_Task `xml:"urn:vim25 ReconfigureComputeResource_Task,omitempty"` - Res *types.ReconfigureComputeResource_TaskResponse `xml:"urn:vim25 ReconfigureComputeResource_TaskResponse,omitempty"` + Res *types.ReconfigureComputeResource_TaskResponse `xml:"ReconfigureComputeResource_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -10985,7 +11865,7 @@ func ReconfigureComputeResource_Task(ctx context.Context, r soap.RoundTripper, r type ReconfigureDVPort_TaskBody struct { Req *types.ReconfigureDVPort_Task `xml:"urn:vim25 ReconfigureDVPort_Task,omitempty"` - Res *types.ReconfigureDVPort_TaskResponse `xml:"urn:vim25 ReconfigureDVPort_TaskResponse,omitempty"` + Res *types.ReconfigureDVPort_TaskResponse `xml:"ReconfigureDVPort_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11005,7 +11885,7 @@ func ReconfigureDVPort_Task(ctx context.Context, r soap.RoundTripper, req *types type ReconfigureDVPortgroup_TaskBody struct { Req *types.ReconfigureDVPortgroup_Task `xml:"urn:vim25 ReconfigureDVPortgroup_Task,omitempty"` - Res *types.ReconfigureDVPortgroup_TaskResponse `xml:"urn:vim25 ReconfigureDVPortgroup_TaskResponse,omitempty"` + Res *types.ReconfigureDVPortgroup_TaskResponse `xml:"ReconfigureDVPortgroup_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11025,7 +11905,7 @@ func ReconfigureDVPortgroup_Task(ctx context.Context, r soap.RoundTripper, req * type ReconfigureDatacenter_TaskBody struct { Req *types.ReconfigureDatacenter_Task `xml:"urn:vim25 ReconfigureDatacenter_Task,omitempty"` - Res *types.ReconfigureDatacenter_TaskResponse `xml:"urn:vim25 ReconfigureDatacenter_TaskResponse,omitempty"` + Res *types.ReconfigureDatacenter_TaskResponse `xml:"ReconfigureDatacenter_TaskResponse,omitempty"` Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11045,7 +11925,7 @@ func ReconfigureDatacenter_Task(ctx context.Context, r soap.RoundTripper, req *t type ReconfigureDomObjectBody struct { Req *types.ReconfigureDomObject `xml:"urn:vim25 ReconfigureDomObject,omitempty"` - Res *types.ReconfigureDomObjectResponse `xml:"urn:vim25 ReconfigureDomObjectResponse,omitempty"` + Res *types.ReconfigureDomObjectResponse `xml:"ReconfigureDomObjectResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11065,7 +11945,7 @@ func ReconfigureDomObject(ctx context.Context, r soap.RoundTripper, req *types.R type ReconfigureDvs_TaskBody struct { Req *types.ReconfigureDvs_Task `xml:"urn:vim25 ReconfigureDvs_Task,omitempty"` - Res *types.ReconfigureDvs_TaskResponse `xml:"urn:vim25 ReconfigureDvs_TaskResponse,omitempty"` + Res *types.ReconfigureDvs_TaskResponse `xml:"ReconfigureDvs_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11085,7 +11965,7 @@ func ReconfigureDvs_Task(ctx context.Context, r soap.RoundTripper, req *types.Re type ReconfigureHostForDAS_TaskBody struct { Req *types.ReconfigureHostForDAS_Task `xml:"urn:vim25 ReconfigureHostForDAS_Task,omitempty"` - Res *types.ReconfigureHostForDAS_TaskResponse `xml:"urn:vim25 ReconfigureHostForDAS_TaskResponse,omitempty"` + Res *types.ReconfigureHostForDAS_TaskResponse `xml:"ReconfigureHostForDAS_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11105,7 +11985,7 @@ func ReconfigureHostForDAS_Task(ctx context.Context, r soap.RoundTripper, req *t type ReconfigureScheduledTaskBody struct { Req *types.ReconfigureScheduledTask `xml:"urn:vim25 ReconfigureScheduledTask,omitempty"` - Res *types.ReconfigureScheduledTaskResponse `xml:"urn:vim25 ReconfigureScheduledTaskResponse,omitempty"` + Res *types.ReconfigureScheduledTaskResponse `xml:"ReconfigureScheduledTaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11125,7 +12005,7 @@ func ReconfigureScheduledTask(ctx context.Context, r soap.RoundTripper, req *typ type ReconfigureServiceConsoleReservationBody struct { Req *types.ReconfigureServiceConsoleReservation `xml:"urn:vim25 ReconfigureServiceConsoleReservation,omitempty"` - Res *types.ReconfigureServiceConsoleReservationResponse `xml:"urn:vim25 ReconfigureServiceConsoleReservationResponse,omitempty"` + Res *types.ReconfigureServiceConsoleReservationResponse `xml:"ReconfigureServiceConsoleReservationResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11145,7 +12025,7 @@ func ReconfigureServiceConsoleReservation(ctx context.Context, r soap.RoundTripp type ReconfigureSnmpAgentBody struct { Req *types.ReconfigureSnmpAgent `xml:"urn:vim25 ReconfigureSnmpAgent,omitempty"` - Res *types.ReconfigureSnmpAgentResponse `xml:"urn:vim25 ReconfigureSnmpAgentResponse,omitempty"` + Res *types.ReconfigureSnmpAgentResponse `xml:"ReconfigureSnmpAgentResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11165,7 +12045,7 @@ func ReconfigureSnmpAgent(ctx context.Context, r soap.RoundTripper, req *types.R type ReconfigureVirtualMachineReservationBody struct { Req *types.ReconfigureVirtualMachineReservation `xml:"urn:vim25 ReconfigureVirtualMachineReservation,omitempty"` - Res 
*types.ReconfigureVirtualMachineReservationResponse `xml:"urn:vim25 ReconfigureVirtualMachineReservationResponse,omitempty"` + Res *types.ReconfigureVirtualMachineReservationResponse `xml:"ReconfigureVirtualMachineReservationResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11185,7 +12065,7 @@ func ReconfigureVirtualMachineReservation(ctx context.Context, r soap.RoundTripp type ReconnectHost_TaskBody struct { Req *types.ReconnectHost_Task `xml:"urn:vim25 ReconnectHost_Task,omitempty"` - Res *types.ReconnectHost_TaskResponse `xml:"urn:vim25 ReconnectHost_TaskResponse,omitempty"` + Res *types.ReconnectHost_TaskResponse `xml:"ReconnectHost_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11205,7 +12085,7 @@ func ReconnectHost_Task(ctx context.Context, r soap.RoundTripper, req *types.Rec type RectifyDvsHost_TaskBody struct { Req *types.RectifyDvsHost_Task `xml:"urn:vim25 RectifyDvsHost_Task,omitempty"` - Res *types.RectifyDvsHost_TaskResponse `xml:"urn:vim25 RectifyDvsHost_TaskResponse,omitempty"` + Res *types.RectifyDvsHost_TaskResponse `xml:"RectifyDvsHost_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11225,7 +12105,7 @@ func RectifyDvsHost_Task(ctx context.Context, r soap.RoundTripper, req *types.Re type RectifyDvsOnHost_TaskBody struct { Req *types.RectifyDvsOnHost_Task `xml:"urn:vim25 RectifyDvsOnHost_Task,omitempty"` - Res *types.RectifyDvsOnHost_TaskResponse `xml:"urn:vim25 RectifyDvsOnHost_TaskResponse,omitempty"` + Res *types.RectifyDvsOnHost_TaskResponse `xml:"RectifyDvsOnHost_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11245,7 +12125,7 @@ func RectifyDvsOnHost_Task(ctx context.Context, r soap.RoundTripper, req *types. 
type RefreshBody struct { Req *types.Refresh `xml:"urn:vim25 Refresh,omitempty"` - Res *types.RefreshResponse `xml:"urn:vim25 RefreshResponse,omitempty"` + Res *types.RefreshResponse `xml:"RefreshResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11265,7 +12145,7 @@ func Refresh(ctx context.Context, r soap.RoundTripper, req *types.Refresh) (*typ type RefreshDVPortStateBody struct { Req *types.RefreshDVPortState `xml:"urn:vim25 RefreshDVPortState,omitempty"` - Res *types.RefreshDVPortStateResponse `xml:"urn:vim25 RefreshDVPortStateResponse,omitempty"` + Res *types.RefreshDVPortStateResponse `xml:"RefreshDVPortStateResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11285,7 +12165,7 @@ func RefreshDVPortState(ctx context.Context, r soap.RoundTripper, req *types.Ref type RefreshDatastoreBody struct { Req *types.RefreshDatastore `xml:"urn:vim25 RefreshDatastore,omitempty"` - Res *types.RefreshDatastoreResponse `xml:"urn:vim25 RefreshDatastoreResponse,omitempty"` + Res *types.RefreshDatastoreResponse `xml:"RefreshDatastoreResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11305,7 +12185,7 @@ func RefreshDatastore(ctx context.Context, r soap.RoundTripper, req *types.Refre type RefreshDatastoreStorageInfoBody struct { Req *types.RefreshDatastoreStorageInfo `xml:"urn:vim25 RefreshDatastoreStorageInfo,omitempty"` - Res *types.RefreshDatastoreStorageInfoResponse `xml:"urn:vim25 RefreshDatastoreStorageInfoResponse,omitempty"` + Res *types.RefreshDatastoreStorageInfoResponse `xml:"RefreshDatastoreStorageInfoResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11325,7 +12205,7 @@ func RefreshDatastoreStorageInfo(ctx context.Context, r soap.RoundTripper, req * type RefreshDateTimeSystemBody struct { Req *types.RefreshDateTimeSystem `xml:"urn:vim25 RefreshDateTimeSystem,omitempty"` - Res *types.RefreshDateTimeSystemResponse `xml:"urn:vim25 RefreshDateTimeSystemResponse,omitempty"` + Res *types.RefreshDateTimeSystemResponse `xml:"RefreshDateTimeSystemResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11345,7 +12225,7 @@ func RefreshDateTimeSystem(ctx context.Context, r soap.RoundTripper, req *types. 
type RefreshFirewallBody struct { Req *types.RefreshFirewall `xml:"urn:vim25 RefreshFirewall,omitempty"` - Res *types.RefreshFirewallResponse `xml:"urn:vim25 RefreshFirewallResponse,omitempty"` + Res *types.RefreshFirewallResponse `xml:"RefreshFirewallResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11365,7 +12245,7 @@ func RefreshFirewall(ctx context.Context, r soap.RoundTripper, req *types.Refres type RefreshGraphicsManagerBody struct { Req *types.RefreshGraphicsManager `xml:"urn:vim25 RefreshGraphicsManager,omitempty"` - Res *types.RefreshGraphicsManagerResponse `xml:"urn:vim25 RefreshGraphicsManagerResponse,omitempty"` + Res *types.RefreshGraphicsManagerResponse `xml:"RefreshGraphicsManagerResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11385,7 +12265,7 @@ func RefreshGraphicsManager(ctx context.Context, r soap.RoundTripper, req *types type RefreshHealthStatusSystemBody struct { Req *types.RefreshHealthStatusSystem `xml:"urn:vim25 RefreshHealthStatusSystem,omitempty"` - Res *types.RefreshHealthStatusSystemResponse `xml:"urn:vim25 RefreshHealthStatusSystemResponse,omitempty"` + Res *types.RefreshHealthStatusSystemResponse `xml:"RefreshHealthStatusSystemResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11405,7 +12285,7 @@ func RefreshHealthStatusSystem(ctx context.Context, r soap.RoundTripper, req *ty type RefreshNetworkSystemBody struct { Req *types.RefreshNetworkSystem `xml:"urn:vim25 RefreshNetworkSystem,omitempty"` - Res *types.RefreshNetworkSystemResponse `xml:"urn:vim25 RefreshNetworkSystemResponse,omitempty"` + Res *types.RefreshNetworkSystemResponse `xml:"RefreshNetworkSystemResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11425,7 +12305,7 @@ func RefreshNetworkSystem(ctx context.Context, r soap.RoundTripper, req *types.R type RefreshRecommendationBody struct { Req *types.RefreshRecommendation `xml:"urn:vim25 RefreshRecommendation,omitempty"` - Res *types.RefreshRecommendationResponse `xml:"urn:vim25 RefreshRecommendationResponse,omitempty"` + Res *types.RefreshRecommendationResponse `xml:"RefreshRecommendationResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11445,7 +12325,7 @@ func RefreshRecommendation(ctx context.Context, r soap.RoundTripper, req *types. 
type RefreshRuntimeBody struct { Req *types.RefreshRuntime `xml:"urn:vim25 RefreshRuntime,omitempty"` - Res *types.RefreshRuntimeResponse `xml:"urn:vim25 RefreshRuntimeResponse,omitempty"` + Res *types.RefreshRuntimeResponse `xml:"RefreshRuntimeResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11465,7 +12345,7 @@ func RefreshRuntime(ctx context.Context, r soap.RoundTripper, req *types.Refresh type RefreshServicesBody struct { Req *types.RefreshServices `xml:"urn:vim25 RefreshServices,omitempty"` - Res *types.RefreshServicesResponse `xml:"urn:vim25 RefreshServicesResponse,omitempty"` + Res *types.RefreshServicesResponse `xml:"RefreshServicesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11485,7 +12365,7 @@ func RefreshServices(ctx context.Context, r soap.RoundTripper, req *types.Refres type RefreshStorageDrsRecommendationBody struct { Req *types.RefreshStorageDrsRecommendation `xml:"urn:vim25 RefreshStorageDrsRecommendation,omitempty"` - Res *types.RefreshStorageDrsRecommendationResponse `xml:"urn:vim25 RefreshStorageDrsRecommendationResponse,omitempty"` + Res *types.RefreshStorageDrsRecommendationResponse `xml:"RefreshStorageDrsRecommendationResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11505,7 +12385,7 @@ func RefreshStorageDrsRecommendation(ctx context.Context, r soap.RoundTripper, r type RefreshStorageDrsRecommendationsForPod_TaskBody struct { Req *types.RefreshStorageDrsRecommendationsForPod_Task `xml:"urn:vim25 RefreshStorageDrsRecommendationsForPod_Task,omitempty"` - Res *types.RefreshStorageDrsRecommendationsForPod_TaskResponse `xml:"urn:vim25 RefreshStorageDrsRecommendationsForPod_TaskResponse,omitempty"` + Res *types.RefreshStorageDrsRecommendationsForPod_TaskResponse `xml:"RefreshStorageDrsRecommendationsForPod_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11525,7 +12405,7 @@ func RefreshStorageDrsRecommendationsForPod_Task(ctx context.Context, r soap.Rou type RefreshStorageInfoBody struct { Req *types.RefreshStorageInfo `xml:"urn:vim25 RefreshStorageInfo,omitempty"` - Res *types.RefreshStorageInfoResponse `xml:"urn:vim25 RefreshStorageInfoResponse,omitempty"` + Res *types.RefreshStorageInfoResponse `xml:"RefreshStorageInfoResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11545,7 +12425,7 @@ func RefreshStorageInfo(ctx context.Context, r soap.RoundTripper, req *types.Ref type RefreshStorageSystemBody struct { Req *types.RefreshStorageSystem `xml:"urn:vim25 RefreshStorageSystem,omitempty"` - Res *types.RefreshStorageSystemResponse `xml:"urn:vim25 RefreshStorageSystemResponse,omitempty"` + Res *types.RefreshStorageSystemResponse `xml:"RefreshStorageSystemResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11565,7 +12445,7 @@ func RefreshStorageSystem(ctx context.Context, r soap.RoundTripper, req *types.R type RegisterChildVM_TaskBody struct { Req *types.RegisterChildVM_Task `xml:"urn:vim25 RegisterChildVM_Task,omitempty"` - Res *types.RegisterChildVM_TaskResponse `xml:"urn:vim25 RegisterChildVM_TaskResponse,omitempty"` + Res *types.RegisterChildVM_TaskResponse `xml:"RegisterChildVM_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11585,7 
+12465,7 @@ func RegisterChildVM_Task(ctx context.Context, r soap.RoundTripper, req *types.R type RegisterDiskBody struct { Req *types.RegisterDisk `xml:"urn:vim25 RegisterDisk,omitempty"` - Res *types.RegisterDiskResponse `xml:"urn:vim25 RegisterDiskResponse,omitempty"` + Res *types.RegisterDiskResponse `xml:"RegisterDiskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11605,7 +12485,7 @@ func RegisterDisk(ctx context.Context, r soap.RoundTripper, req *types.RegisterD type RegisterExtensionBody struct { Req *types.RegisterExtension `xml:"urn:vim25 RegisterExtension,omitempty"` - Res *types.RegisterExtensionResponse `xml:"urn:vim25 RegisterExtensionResponse,omitempty"` + Res *types.RegisterExtensionResponse `xml:"RegisterExtensionResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11625,7 +12505,7 @@ func RegisterExtension(ctx context.Context, r soap.RoundTripper, req *types.Regi type RegisterHealthUpdateProviderBody struct { Req *types.RegisterHealthUpdateProvider `xml:"urn:vim25 RegisterHealthUpdateProvider,omitempty"` - Res *types.RegisterHealthUpdateProviderResponse `xml:"urn:vim25 RegisterHealthUpdateProviderResponse,omitempty"` + Res *types.RegisterHealthUpdateProviderResponse `xml:"RegisterHealthUpdateProviderResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11645,7 +12525,7 @@ func RegisterHealthUpdateProvider(ctx context.Context, r soap.RoundTripper, req type RegisterKmipServerBody struct { Req *types.RegisterKmipServer `xml:"urn:vim25 RegisterKmipServer,omitempty"` - Res *types.RegisterKmipServerResponse `xml:"urn:vim25 RegisterKmipServerResponse,omitempty"` + Res *types.RegisterKmipServerResponse `xml:"RegisterKmipServerResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11663,9 +12543,29 @@ func RegisterKmipServer(ctx context.Context, r soap.RoundTripper, req *types.Reg return resBody.Res, nil } +type RegisterKmsClusterBody struct { + Req *types.RegisterKmsCluster `xml:"urn:vim25 RegisterKmsCluster,omitempty"` + Res *types.RegisterKmsClusterResponse `xml:"RegisterKmsClusterResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RegisterKmsClusterBody) Fault() *soap.Fault { return b.Fault_ } + +func RegisterKmsCluster(ctx context.Context, r soap.RoundTripper, req *types.RegisterKmsCluster) (*types.RegisterKmsClusterResponse, error) { + var reqBody, resBody RegisterKmsClusterBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type RegisterVM_TaskBody struct { Req *types.RegisterVM_Task `xml:"urn:vim25 RegisterVM_Task,omitempty"` - Res *types.RegisterVM_TaskResponse `xml:"urn:vim25 RegisterVM_TaskResponse,omitempty"` + Res *types.RegisterVM_TaskResponse `xml:"RegisterVM_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11685,7 +12585,7 @@ func RegisterVM_Task(ctx context.Context, r soap.RoundTripper, req *types.Regist type ReleaseCredentialsInGuestBody struct { Req *types.ReleaseCredentialsInGuest `xml:"urn:vim25 ReleaseCredentialsInGuest,omitempty"` - Res *types.ReleaseCredentialsInGuestResponse `xml:"urn:vim25 ReleaseCredentialsInGuestResponse,omitempty"` + Res *types.ReleaseCredentialsInGuestResponse 
`xml:"ReleaseCredentialsInGuestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11705,7 +12605,7 @@ func ReleaseCredentialsInGuest(ctx context.Context, r soap.RoundTripper, req *ty type ReleaseIpAllocationBody struct { Req *types.ReleaseIpAllocation `xml:"urn:vim25 ReleaseIpAllocation,omitempty"` - Res *types.ReleaseIpAllocationResponse `xml:"urn:vim25 ReleaseIpAllocationResponse,omitempty"` + Res *types.ReleaseIpAllocationResponse `xml:"ReleaseIpAllocationResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11725,7 +12625,7 @@ func ReleaseIpAllocation(ctx context.Context, r soap.RoundTripper, req *types.Re type ReleaseManagedSnapshotBody struct { Req *types.ReleaseManagedSnapshot `xml:"urn:vim25 ReleaseManagedSnapshot,omitempty"` - Res *types.ReleaseManagedSnapshotResponse `xml:"urn:vim25 ReleaseManagedSnapshotResponse,omitempty"` + Res *types.ReleaseManagedSnapshotResponse `xml:"ReleaseManagedSnapshotResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11745,7 +12645,7 @@ func ReleaseManagedSnapshot(ctx context.Context, r soap.RoundTripper, req *types type ReloadBody struct { Req *types.Reload `xml:"urn:vim25 Reload,omitempty"` - Res *types.ReloadResponse `xml:"urn:vim25 ReloadResponse,omitempty"` + Res *types.ReloadResponse `xml:"ReloadResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11765,7 +12665,7 @@ func Reload(ctx context.Context, r soap.RoundTripper, req *types.Reload) (*types type RelocateVM_TaskBody struct { Req *types.RelocateVM_Task `xml:"urn:vim25 RelocateVM_Task,omitempty"` - Res *types.RelocateVM_TaskResponse `xml:"urn:vim25 RelocateVM_TaskResponse,omitempty"` + Res *types.RelocateVM_TaskResponse `xml:"RelocateVM_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11785,7 +12685,7 @@ func RelocateVM_Task(ctx context.Context, r soap.RoundTripper, req *types.Reloca type RelocateVStorageObject_TaskBody struct { Req *types.RelocateVStorageObject_Task `xml:"urn:vim25 RelocateVStorageObject_Task,omitempty"` - Res *types.RelocateVStorageObject_TaskResponse `xml:"urn:vim25 RelocateVStorageObject_TaskResponse,omitempty"` + Res *types.RelocateVStorageObject_TaskResponse `xml:"RelocateVStorageObject_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11805,7 +12705,7 @@ func RelocateVStorageObject_Task(ctx context.Context, r soap.RoundTripper, req * type RemoveAlarmBody struct { Req *types.RemoveAlarm `xml:"urn:vim25 RemoveAlarm,omitempty"` - Res *types.RemoveAlarmResponse `xml:"urn:vim25 RemoveAlarmResponse,omitempty"` + Res *types.RemoveAlarmResponse `xml:"RemoveAlarmResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11825,7 +12725,7 @@ func RemoveAlarm(ctx context.Context, r soap.RoundTripper, req *types.RemoveAlar type RemoveAllSnapshots_TaskBody struct { Req *types.RemoveAllSnapshots_Task `xml:"urn:vim25 RemoveAllSnapshots_Task,omitempty"` - Res *types.RemoveAllSnapshots_TaskResponse `xml:"urn:vim25 RemoveAllSnapshots_TaskResponse,omitempty"` + Res *types.RemoveAllSnapshots_TaskResponse `xml:"RemoveAllSnapshots_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11845,7 +12745,7 @@ func 
RemoveAllSnapshots_Task(ctx context.Context, r soap.RoundTripper, req *type type RemoveAssignedLicenseBody struct { Req *types.RemoveAssignedLicense `xml:"urn:vim25 RemoveAssignedLicense,omitempty"` - Res *types.RemoveAssignedLicenseResponse `xml:"urn:vim25 RemoveAssignedLicenseResponse,omitempty"` + Res *types.RemoveAssignedLicenseResponse `xml:"RemoveAssignedLicenseResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11865,7 +12765,7 @@ func RemoveAssignedLicense(ctx context.Context, r soap.RoundTripper, req *types. type RemoveAuthorizationRoleBody struct { Req *types.RemoveAuthorizationRole `xml:"urn:vim25 RemoveAuthorizationRole,omitempty"` - Res *types.RemoveAuthorizationRoleResponse `xml:"urn:vim25 RemoveAuthorizationRoleResponse,omitempty"` + Res *types.RemoveAuthorizationRoleResponse `xml:"RemoveAuthorizationRoleResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11885,7 +12785,7 @@ func RemoveAuthorizationRole(ctx context.Context, r soap.RoundTripper, req *type type RemoveCustomFieldDefBody struct { Req *types.RemoveCustomFieldDef `xml:"urn:vim25 RemoveCustomFieldDef,omitempty"` - Res *types.RemoveCustomFieldDefResponse `xml:"urn:vim25 RemoveCustomFieldDefResponse,omitempty"` + Res *types.RemoveCustomFieldDefResponse `xml:"RemoveCustomFieldDefResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11905,7 +12805,7 @@ func RemoveCustomFieldDef(ctx context.Context, r soap.RoundTripper, req *types.R type RemoveDatastoreBody struct { Req *types.RemoveDatastore `xml:"urn:vim25 RemoveDatastore,omitempty"` - Res *types.RemoveDatastoreResponse `xml:"urn:vim25 RemoveDatastoreResponse,omitempty"` + Res *types.RemoveDatastoreResponse `xml:"RemoveDatastoreResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11925,7 +12825,7 @@ func RemoveDatastore(ctx context.Context, r soap.RoundTripper, req *types.Remove type RemoveDatastoreEx_TaskBody struct { Req *types.RemoveDatastoreEx_Task `xml:"urn:vim25 RemoveDatastoreEx_Task,omitempty"` - Res *types.RemoveDatastoreEx_TaskResponse `xml:"urn:vim25 RemoveDatastoreEx_TaskResponse,omitempty"` + Res *types.RemoveDatastoreEx_TaskResponse `xml:"RemoveDatastoreEx_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11945,7 +12845,7 @@ func RemoveDatastoreEx_Task(ctx context.Context, r soap.RoundTripper, req *types type RemoveDiskMapping_TaskBody struct { Req *types.RemoveDiskMapping_Task `xml:"urn:vim25 RemoveDiskMapping_Task,omitempty"` - Res *types.RemoveDiskMapping_TaskResponse `xml:"urn:vim25 RemoveDiskMapping_TaskResponse,omitempty"` + Res *types.RemoveDiskMapping_TaskResponse `xml:"RemoveDiskMapping_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11965,7 +12865,7 @@ func RemoveDiskMapping_Task(ctx context.Context, r soap.RoundTripper, req *types type RemoveDisk_TaskBody struct { Req *types.RemoveDisk_Task `xml:"urn:vim25 RemoveDisk_Task,omitempty"` - Res *types.RemoveDisk_TaskResponse `xml:"urn:vim25 RemoveDisk_TaskResponse,omitempty"` + Res *types.RemoveDisk_TaskResponse `xml:"RemoveDisk_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -11985,7 +12885,7 @@ func RemoveDisk_Task(ctx context.Context, r soap.RoundTripper, req 
*types.Remove type RemoveEntityPermissionBody struct { Req *types.RemoveEntityPermission `xml:"urn:vim25 RemoveEntityPermission,omitempty"` - Res *types.RemoveEntityPermissionResponse `xml:"urn:vim25 RemoveEntityPermissionResponse,omitempty"` + Res *types.RemoveEntityPermissionResponse `xml:"RemoveEntityPermissionResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12005,7 +12905,7 @@ func RemoveEntityPermission(ctx context.Context, r soap.RoundTripper, req *types type RemoveFilterBody struct { Req *types.RemoveFilter `xml:"urn:vim25 RemoveFilter,omitempty"` - Res *types.RemoveFilterResponse `xml:"urn:vim25 RemoveFilterResponse,omitempty"` + Res *types.RemoveFilterResponse `xml:"RemoveFilterResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12025,7 +12925,7 @@ func RemoveFilter(ctx context.Context, r soap.RoundTripper, req *types.RemoveFil type RemoveFilterEntitiesBody struct { Req *types.RemoveFilterEntities `xml:"urn:vim25 RemoveFilterEntities,omitempty"` - Res *types.RemoveFilterEntitiesResponse `xml:"urn:vim25 RemoveFilterEntitiesResponse,omitempty"` + Res *types.RemoveFilterEntitiesResponse `xml:"RemoveFilterEntitiesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12045,7 +12945,7 @@ func RemoveFilterEntities(ctx context.Context, r soap.RoundTripper, req *types.R type RemoveGroupBody struct { Req *types.RemoveGroup `xml:"urn:vim25 RemoveGroup,omitempty"` - Res *types.RemoveGroupResponse `xml:"urn:vim25 RemoveGroupResponse,omitempty"` + Res *types.RemoveGroupResponse `xml:"RemoveGroupResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12065,7 +12965,7 @@ func RemoveGroup(ctx context.Context, r soap.RoundTripper, req *types.RemoveGrou type RemoveGuestAliasBody struct { Req *types.RemoveGuestAlias `xml:"urn:vim25 RemoveGuestAlias,omitempty"` - Res *types.RemoveGuestAliasResponse `xml:"urn:vim25 RemoveGuestAliasResponse,omitempty"` + Res *types.RemoveGuestAliasResponse `xml:"RemoveGuestAliasResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12085,7 +12985,7 @@ func RemoveGuestAlias(ctx context.Context, r soap.RoundTripper, req *types.Remov type RemoveGuestAliasByCertBody struct { Req *types.RemoveGuestAliasByCert `xml:"urn:vim25 RemoveGuestAliasByCert,omitempty"` - Res *types.RemoveGuestAliasByCertResponse `xml:"urn:vim25 RemoveGuestAliasByCertResponse,omitempty"` + Res *types.RemoveGuestAliasByCertResponse `xml:"RemoveGuestAliasByCertResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12105,7 +13005,7 @@ func RemoveGuestAliasByCert(ctx context.Context, r soap.RoundTripper, req *types type RemoveInternetScsiSendTargetsBody struct { Req *types.RemoveInternetScsiSendTargets `xml:"urn:vim25 RemoveInternetScsiSendTargets,omitempty"` - Res *types.RemoveInternetScsiSendTargetsResponse `xml:"urn:vim25 RemoveInternetScsiSendTargetsResponse,omitempty"` + Res *types.RemoveInternetScsiSendTargetsResponse `xml:"RemoveInternetScsiSendTargetsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12125,7 +13025,7 @@ func RemoveInternetScsiSendTargets(ctx context.Context, r soap.RoundTripper, req type RemoveInternetScsiStaticTargetsBody struct { Req *types.RemoveInternetScsiStaticTargets 
`xml:"urn:vim25 RemoveInternetScsiStaticTargets,omitempty"` - Res *types.RemoveInternetScsiStaticTargetsResponse `xml:"urn:vim25 RemoveInternetScsiStaticTargetsResponse,omitempty"` + Res *types.RemoveInternetScsiStaticTargetsResponse `xml:"RemoveInternetScsiStaticTargetsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12145,7 +13045,7 @@ func RemoveInternetScsiStaticTargets(ctx context.Context, r soap.RoundTripper, r type RemoveKeyBody struct { Req *types.RemoveKey `xml:"urn:vim25 RemoveKey,omitempty"` - Res *types.RemoveKeyResponse `xml:"urn:vim25 RemoveKeyResponse,omitempty"` + Res *types.RemoveKeyResponse `xml:"RemoveKeyResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12165,7 +13065,7 @@ func RemoveKey(ctx context.Context, r soap.RoundTripper, req *types.RemoveKey) ( type RemoveKeysBody struct { Req *types.RemoveKeys `xml:"urn:vim25 RemoveKeys,omitempty"` - Res *types.RemoveKeysResponse `xml:"urn:vim25 RemoveKeysResponse,omitempty"` + Res *types.RemoveKeysResponse `xml:"RemoveKeysResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12185,7 +13085,7 @@ func RemoveKeys(ctx context.Context, r soap.RoundTripper, req *types.RemoveKeys) type RemoveKmipServerBody struct { Req *types.RemoveKmipServer `xml:"urn:vim25 RemoveKmipServer,omitempty"` - Res *types.RemoveKmipServerResponse `xml:"urn:vim25 RemoveKmipServerResponse,omitempty"` + Res *types.RemoveKmipServerResponse `xml:"RemoveKmipServerResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12205,7 +13105,7 @@ func RemoveKmipServer(ctx context.Context, r soap.RoundTripper, req *types.Remov type RemoveLicenseBody struct { Req *types.RemoveLicense `xml:"urn:vim25 RemoveLicense,omitempty"` - Res *types.RemoveLicenseResponse `xml:"urn:vim25 RemoveLicenseResponse,omitempty"` + Res *types.RemoveLicenseResponse `xml:"RemoveLicenseResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12225,7 +13125,7 @@ func RemoveLicense(ctx context.Context, r soap.RoundTripper, req *types.RemoveLi type RemoveLicenseLabelBody struct { Req *types.RemoveLicenseLabel `xml:"urn:vim25 RemoveLicenseLabel,omitempty"` - Res *types.RemoveLicenseLabelResponse `xml:"urn:vim25 RemoveLicenseLabelResponse,omitempty"` + Res *types.RemoveLicenseLabelResponse `xml:"RemoveLicenseLabelResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12245,7 +13145,7 @@ func RemoveLicenseLabel(ctx context.Context, r soap.RoundTripper, req *types.Rem type RemoveMonitoredEntitiesBody struct { Req *types.RemoveMonitoredEntities `xml:"urn:vim25 RemoveMonitoredEntities,omitempty"` - Res *types.RemoveMonitoredEntitiesResponse `xml:"urn:vim25 RemoveMonitoredEntitiesResponse,omitempty"` + Res *types.RemoveMonitoredEntitiesResponse `xml:"RemoveMonitoredEntitiesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12265,7 +13165,7 @@ func RemoveMonitoredEntities(ctx context.Context, r soap.RoundTripper, req *type type RemoveNetworkResourcePoolBody struct { Req *types.RemoveNetworkResourcePool `xml:"urn:vim25 RemoveNetworkResourcePool,omitempty"` - Res *types.RemoveNetworkResourcePoolResponse `xml:"urn:vim25 RemoveNetworkResourcePoolResponse,omitempty"` + Res 
*types.RemoveNetworkResourcePoolResponse `xml:"RemoveNetworkResourcePoolResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12283,9 +13183,29 @@ func RemoveNetworkResourcePool(ctx context.Context, r soap.RoundTripper, req *ty return resBody.Res, nil } +type RemoveNvmeOverRdmaAdapterBody struct { + Req *types.RemoveNvmeOverRdmaAdapter `xml:"urn:vim25 RemoveNvmeOverRdmaAdapter,omitempty"` + Res *types.RemoveNvmeOverRdmaAdapterResponse `xml:"RemoveNvmeOverRdmaAdapterResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveNvmeOverRdmaAdapterBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveNvmeOverRdmaAdapter(ctx context.Context, r soap.RoundTripper, req *types.RemoveNvmeOverRdmaAdapter) (*types.RemoveNvmeOverRdmaAdapterResponse, error) { + var reqBody, resBody RemoveNvmeOverRdmaAdapterBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type RemovePerfIntervalBody struct { Req *types.RemovePerfInterval `xml:"urn:vim25 RemovePerfInterval,omitempty"` - Res *types.RemovePerfIntervalResponse `xml:"urn:vim25 RemovePerfIntervalResponse,omitempty"` + Res *types.RemovePerfIntervalResponse `xml:"RemovePerfIntervalResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12305,7 +13225,7 @@ func RemovePerfInterval(ctx context.Context, r soap.RoundTripper, req *types.Rem type RemovePortGroupBody struct { Req *types.RemovePortGroup `xml:"urn:vim25 RemovePortGroup,omitempty"` - Res *types.RemovePortGroupResponse `xml:"urn:vim25 RemovePortGroupResponse,omitempty"` + Res *types.RemovePortGroupResponse `xml:"RemovePortGroupResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12325,7 +13245,7 @@ func RemovePortGroup(ctx context.Context, r soap.RoundTripper, req *types.Remove type RemoveScheduledTaskBody struct { Req *types.RemoveScheduledTask `xml:"urn:vim25 RemoveScheduledTask,omitempty"` - Res *types.RemoveScheduledTaskResponse `xml:"urn:vim25 RemoveScheduledTaskResponse,omitempty"` + Res *types.RemoveScheduledTaskResponse `xml:"RemoveScheduledTaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12345,7 +13265,7 @@ func RemoveScheduledTask(ctx context.Context, r soap.RoundTripper, req *types.Re type RemoveServiceConsoleVirtualNicBody struct { Req *types.RemoveServiceConsoleVirtualNic `xml:"urn:vim25 RemoveServiceConsoleVirtualNic,omitempty"` - Res *types.RemoveServiceConsoleVirtualNicResponse `xml:"urn:vim25 RemoveServiceConsoleVirtualNicResponse,omitempty"` + Res *types.RemoveServiceConsoleVirtualNicResponse `xml:"RemoveServiceConsoleVirtualNicResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12365,7 +13285,7 @@ func RemoveServiceConsoleVirtualNic(ctx context.Context, r soap.RoundTripper, re type RemoveSmartCardTrustAnchorBody struct { Req *types.RemoveSmartCardTrustAnchor `xml:"urn:vim25 RemoveSmartCardTrustAnchor,omitempty"` - Res *types.RemoveSmartCardTrustAnchorResponse `xml:"urn:vim25 RemoveSmartCardTrustAnchorResponse,omitempty"` + Res *types.RemoveSmartCardTrustAnchorResponse `xml:"RemoveSmartCardTrustAnchorResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12385,7 +13305,7 
@@ func RemoveSmartCardTrustAnchor(ctx context.Context, r soap.RoundTripper, req *t type RemoveSmartCardTrustAnchorByFingerprintBody struct { Req *types.RemoveSmartCardTrustAnchorByFingerprint `xml:"urn:vim25 RemoveSmartCardTrustAnchorByFingerprint,omitempty"` - Res *types.RemoveSmartCardTrustAnchorByFingerprintResponse `xml:"urn:vim25 RemoveSmartCardTrustAnchorByFingerprintResponse,omitempty"` + Res *types.RemoveSmartCardTrustAnchorByFingerprintResponse `xml:"RemoveSmartCardTrustAnchorByFingerprintResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12405,14 +13325,34 @@ func RemoveSmartCardTrustAnchorByFingerprint(ctx context.Context, r soap.RoundTr type RemoveSnapshot_TaskBody struct { Req *types.RemoveSnapshot_Task `xml:"urn:vim25 RemoveSnapshot_Task,omitempty"` - Res *types.RemoveSnapshot_TaskResponse `xml:"urn:vim25 RemoveSnapshot_TaskResponse,omitempty"` + Res *types.RemoveSnapshot_TaskResponse `xml:"RemoveSnapshot_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } -func (b *RemoveSnapshot_TaskBody) Fault() *soap.Fault { return b.Fault_ } +func (b *RemoveSnapshot_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func RemoveSnapshot_Task(ctx context.Context, r soap.RoundTripper, req *types.RemoveSnapshot_Task) (*types.RemoveSnapshot_TaskResponse, error) { + var reqBody, resBody RemoveSnapshot_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RemoveSoftwareAdapterBody struct { + Req *types.RemoveSoftwareAdapter `xml:"urn:vim25 RemoveSoftwareAdapter,omitempty"` + Res *types.RemoveSoftwareAdapterResponse `xml:"RemoveSoftwareAdapterResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RemoveSoftwareAdapterBody) Fault() *soap.Fault { return b.Fault_ } -func RemoveSnapshot_Task(ctx context.Context, r soap.RoundTripper, req *types.RemoveSnapshot_Task) (*types.RemoveSnapshot_TaskResponse, error) { - var reqBody, resBody RemoveSnapshot_TaskBody +func RemoveSoftwareAdapter(ctx context.Context, r soap.RoundTripper, req *types.RemoveSoftwareAdapter) (*types.RemoveSoftwareAdapterResponse, error) { + var reqBody, resBody RemoveSoftwareAdapterBody reqBody.Req = req @@ -12425,7 +13365,7 @@ func RemoveSnapshot_Task(ctx context.Context, r soap.RoundTripper, req *types.Re type RemoveUserBody struct { Req *types.RemoveUser `xml:"urn:vim25 RemoveUser,omitempty"` - Res *types.RemoveUserResponse `xml:"urn:vim25 RemoveUserResponse,omitempty"` + Res *types.RemoveUserResponse `xml:"RemoveUserResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12445,7 +13385,7 @@ func RemoveUser(ctx context.Context, r soap.RoundTripper, req *types.RemoveUser) type RemoveVirtualNicBody struct { Req *types.RemoveVirtualNic `xml:"urn:vim25 RemoveVirtualNic,omitempty"` - Res *types.RemoveVirtualNicResponse `xml:"urn:vim25 RemoveVirtualNicResponse,omitempty"` + Res *types.RemoveVirtualNicResponse `xml:"RemoveVirtualNicResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12465,7 +13405,7 @@ func RemoveVirtualNic(ctx context.Context, r soap.RoundTripper, req *types.Remov type RemoveVirtualSwitchBody struct { Req *types.RemoveVirtualSwitch `xml:"urn:vim25 RemoveVirtualSwitch,omitempty"` - Res 
*types.RemoveVirtualSwitchResponse `xml:"urn:vim25 RemoveVirtualSwitchResponse,omitempty"` + Res *types.RemoveVirtualSwitchResponse `xml:"RemoveVirtualSwitchResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12485,7 +13425,7 @@ func RemoveVirtualSwitch(ctx context.Context, r soap.RoundTripper, req *types.Re type RenameCustomFieldDefBody struct { Req *types.RenameCustomFieldDef `xml:"urn:vim25 RenameCustomFieldDef,omitempty"` - Res *types.RenameCustomFieldDefResponse `xml:"urn:vim25 RenameCustomFieldDefResponse,omitempty"` + Res *types.RenameCustomFieldDefResponse `xml:"RenameCustomFieldDefResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12505,7 +13445,7 @@ func RenameCustomFieldDef(ctx context.Context, r soap.RoundTripper, req *types.R type RenameCustomizationSpecBody struct { Req *types.RenameCustomizationSpec `xml:"urn:vim25 RenameCustomizationSpec,omitempty"` - Res *types.RenameCustomizationSpecResponse `xml:"urn:vim25 RenameCustomizationSpecResponse,omitempty"` + Res *types.RenameCustomizationSpecResponse `xml:"RenameCustomizationSpecResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12525,7 +13465,7 @@ func RenameCustomizationSpec(ctx context.Context, r soap.RoundTripper, req *type type RenameDatastoreBody struct { Req *types.RenameDatastore `xml:"urn:vim25 RenameDatastore,omitempty"` - Res *types.RenameDatastoreResponse `xml:"urn:vim25 RenameDatastoreResponse,omitempty"` + Res *types.RenameDatastoreResponse `xml:"RenameDatastoreResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12545,7 +13485,7 @@ func RenameDatastore(ctx context.Context, r soap.RoundTripper, req *types.Rename type RenameSnapshotBody struct { Req *types.RenameSnapshot `xml:"urn:vim25 RenameSnapshot,omitempty"` - Res *types.RenameSnapshotResponse `xml:"urn:vim25 RenameSnapshotResponse,omitempty"` + Res *types.RenameSnapshotResponse `xml:"RenameSnapshotResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12565,7 +13505,7 @@ func RenameSnapshot(ctx context.Context, r soap.RoundTripper, req *types.RenameS type RenameVStorageObjectBody struct { Req *types.RenameVStorageObject `xml:"urn:vim25 RenameVStorageObject,omitempty"` - Res *types.RenameVStorageObjectResponse `xml:"urn:vim25 RenameVStorageObjectResponse,omitempty"` + Res *types.RenameVStorageObjectResponse `xml:"RenameVStorageObjectResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12585,7 +13525,7 @@ func RenameVStorageObject(ctx context.Context, r soap.RoundTripper, req *types.R type Rename_TaskBody struct { Req *types.Rename_Task `xml:"urn:vim25 Rename_Task,omitempty"` - Res *types.Rename_TaskResponse `xml:"urn:vim25 Rename_TaskResponse,omitempty"` + Res *types.Rename_TaskResponse `xml:"Rename_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12605,7 +13545,7 @@ func Rename_Task(ctx context.Context, r soap.RoundTripper, req *types.Rename_Tas type ReplaceCACertificatesAndCRLsBody struct { Req *types.ReplaceCACertificatesAndCRLs `xml:"urn:vim25 ReplaceCACertificatesAndCRLs,omitempty"` - Res *types.ReplaceCACertificatesAndCRLsResponse `xml:"urn:vim25 ReplaceCACertificatesAndCRLsResponse,omitempty"` + Res 
*types.ReplaceCACertificatesAndCRLsResponse `xml:"ReplaceCACertificatesAndCRLsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12625,7 +13565,7 @@ func ReplaceCACertificatesAndCRLs(ctx context.Context, r soap.RoundTripper, req type ReplaceSmartCardTrustAnchorsBody struct { Req *types.ReplaceSmartCardTrustAnchors `xml:"urn:vim25 ReplaceSmartCardTrustAnchors,omitempty"` - Res *types.ReplaceSmartCardTrustAnchorsResponse `xml:"urn:vim25 ReplaceSmartCardTrustAnchorsResponse,omitempty"` + Res *types.ReplaceSmartCardTrustAnchorsResponse `xml:"ReplaceSmartCardTrustAnchorsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12645,7 +13585,7 @@ func ReplaceSmartCardTrustAnchors(ctx context.Context, r soap.RoundTripper, req type RescanAllHbaBody struct { Req *types.RescanAllHba `xml:"urn:vim25 RescanAllHba,omitempty"` - Res *types.RescanAllHbaResponse `xml:"urn:vim25 RescanAllHbaResponse,omitempty"` + Res *types.RescanAllHbaResponse `xml:"RescanAllHbaResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12665,7 +13605,7 @@ func RescanAllHba(ctx context.Context, r soap.RoundTripper, req *types.RescanAll type RescanHbaBody struct { Req *types.RescanHba `xml:"urn:vim25 RescanHba,omitempty"` - Res *types.RescanHbaResponse `xml:"urn:vim25 RescanHbaResponse,omitempty"` + Res *types.RescanHbaResponse `xml:"RescanHbaResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12685,7 +13625,7 @@ func RescanHba(ctx context.Context, r soap.RoundTripper, req *types.RescanHba) ( type RescanVffsBody struct { Req *types.RescanVffs `xml:"urn:vim25 RescanVffs,omitempty"` - Res *types.RescanVffsResponse `xml:"urn:vim25 RescanVffsResponse,omitempty"` + Res *types.RescanVffsResponse `xml:"RescanVffsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12705,7 +13645,7 @@ func RescanVffs(ctx context.Context, r soap.RoundTripper, req *types.RescanVffs) type RescanVmfsBody struct { Req *types.RescanVmfs `xml:"urn:vim25 RescanVmfs,omitempty"` - Res *types.RescanVmfsResponse `xml:"urn:vim25 RescanVmfsResponse,omitempty"` + Res *types.RescanVmfsResponse `xml:"RescanVmfsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12725,7 +13665,7 @@ func RescanVmfs(ctx context.Context, r soap.RoundTripper, req *types.RescanVmfs) type ResetCollectorBody struct { Req *types.ResetCollector `xml:"urn:vim25 ResetCollector,omitempty"` - Res *types.ResetCollectorResponse `xml:"urn:vim25 ResetCollectorResponse,omitempty"` + Res *types.ResetCollectorResponse `xml:"ResetCollectorResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12745,7 +13685,7 @@ func ResetCollector(ctx context.Context, r soap.RoundTripper, req *types.ResetCo type ResetCounterLevelMappingBody struct { Req *types.ResetCounterLevelMapping `xml:"urn:vim25 ResetCounterLevelMapping,omitempty"` - Res *types.ResetCounterLevelMappingResponse `xml:"urn:vim25 ResetCounterLevelMappingResponse,omitempty"` + Res *types.ResetCounterLevelMappingResponse `xml:"ResetCounterLevelMappingResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12765,7 +13705,7 @@ func ResetCounterLevelMapping(ctx context.Context, r soap.RoundTripper, req 
*typ type ResetEntityPermissionsBody struct { Req *types.ResetEntityPermissions `xml:"urn:vim25 ResetEntityPermissions,omitempty"` - Res *types.ResetEntityPermissionsResponse `xml:"urn:vim25 ResetEntityPermissionsResponse,omitempty"` + Res *types.ResetEntityPermissionsResponse `xml:"ResetEntityPermissionsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12785,7 +13725,7 @@ func ResetEntityPermissions(ctx context.Context, r soap.RoundTripper, req *types type ResetFirmwareToFactoryDefaultsBody struct { Req *types.ResetFirmwareToFactoryDefaults `xml:"urn:vim25 ResetFirmwareToFactoryDefaults,omitempty"` - Res *types.ResetFirmwareToFactoryDefaultsResponse `xml:"urn:vim25 ResetFirmwareToFactoryDefaultsResponse,omitempty"` + Res *types.ResetFirmwareToFactoryDefaultsResponse `xml:"ResetFirmwareToFactoryDefaultsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12805,7 +13745,7 @@ func ResetFirmwareToFactoryDefaults(ctx context.Context, r soap.RoundTripper, re type ResetGuestInformationBody struct { Req *types.ResetGuestInformation `xml:"urn:vim25 ResetGuestInformation,omitempty"` - Res *types.ResetGuestInformationResponse `xml:"urn:vim25 ResetGuestInformationResponse,omitempty"` + Res *types.ResetGuestInformationResponse `xml:"ResetGuestInformationResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12825,7 +13765,7 @@ func ResetGuestInformation(ctx context.Context, r soap.RoundTripper, req *types. type ResetListViewBody struct { Req *types.ResetListView `xml:"urn:vim25 ResetListView,omitempty"` - Res *types.ResetListViewResponse `xml:"urn:vim25 ResetListViewResponse,omitempty"` + Res *types.ResetListViewResponse `xml:"ResetListViewResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12845,7 +13785,7 @@ func ResetListView(ctx context.Context, r soap.RoundTripper, req *types.ResetLis type ResetListViewFromViewBody struct { Req *types.ResetListViewFromView `xml:"urn:vim25 ResetListViewFromView,omitempty"` - Res *types.ResetListViewFromViewResponse `xml:"urn:vim25 ResetListViewFromViewResponse,omitempty"` + Res *types.ResetListViewFromViewResponse `xml:"ResetListViewFromViewResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12865,7 +13805,7 @@ func ResetListViewFromView(ctx context.Context, r soap.RoundTripper, req *types. type ResetSystemHealthInfoBody struct { Req *types.ResetSystemHealthInfo `xml:"urn:vim25 ResetSystemHealthInfo,omitempty"` - Res *types.ResetSystemHealthInfoResponse `xml:"urn:vim25 ResetSystemHealthInfoResponse,omitempty"` + Res *types.ResetSystemHealthInfoResponse `xml:"ResetSystemHealthInfoResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12885,7 +13825,7 @@ func ResetSystemHealthInfo(ctx context.Context, r soap.RoundTripper, req *types. 
type ResetVM_TaskBody struct { Req *types.ResetVM_Task `xml:"urn:vim25 ResetVM_Task,omitempty"` - Res *types.ResetVM_TaskResponse `xml:"urn:vim25 ResetVM_TaskResponse,omitempty"` + Res *types.ResetVM_TaskResponse `xml:"ResetVM_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12905,7 +13845,7 @@ func ResetVM_Task(ctx context.Context, r soap.RoundTripper, req *types.ResetVM_T type ResignatureUnresolvedVmfsVolume_TaskBody struct { Req *types.ResignatureUnresolvedVmfsVolume_Task `xml:"urn:vim25 ResignatureUnresolvedVmfsVolume_Task,omitempty"` - Res *types.ResignatureUnresolvedVmfsVolume_TaskResponse `xml:"urn:vim25 ResignatureUnresolvedVmfsVolume_TaskResponse,omitempty"` + Res *types.ResignatureUnresolvedVmfsVolume_TaskResponse `xml:"ResignatureUnresolvedVmfsVolume_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12925,7 +13865,7 @@ func ResignatureUnresolvedVmfsVolume_Task(ctx context.Context, r soap.RoundTripp type ResolveInstallationErrorsOnCluster_TaskBody struct { Req *types.ResolveInstallationErrorsOnCluster_Task `xml:"urn:vim25 ResolveInstallationErrorsOnCluster_Task,omitempty"` - Res *types.ResolveInstallationErrorsOnCluster_TaskResponse `xml:"urn:vim25 ResolveInstallationErrorsOnCluster_TaskResponse,omitempty"` + Res *types.ResolveInstallationErrorsOnCluster_TaskResponse `xml:"ResolveInstallationErrorsOnCluster_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12945,7 +13885,7 @@ func ResolveInstallationErrorsOnCluster_Task(ctx context.Context, r soap.RoundTr type ResolveInstallationErrorsOnHost_TaskBody struct { Req *types.ResolveInstallationErrorsOnHost_Task `xml:"urn:vim25 ResolveInstallationErrorsOnHost_Task,omitempty"` - Res *types.ResolveInstallationErrorsOnHost_TaskResponse `xml:"urn:vim25 ResolveInstallationErrorsOnHost_TaskResponse,omitempty"` + Res *types.ResolveInstallationErrorsOnHost_TaskResponse `xml:"ResolveInstallationErrorsOnHost_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12965,7 +13905,7 @@ func ResolveInstallationErrorsOnHost_Task(ctx context.Context, r soap.RoundTripp type ResolveMultipleUnresolvedVmfsVolumesBody struct { Req *types.ResolveMultipleUnresolvedVmfsVolumes `xml:"urn:vim25 ResolveMultipleUnresolvedVmfsVolumes,omitempty"` - Res *types.ResolveMultipleUnresolvedVmfsVolumesResponse `xml:"urn:vim25 ResolveMultipleUnresolvedVmfsVolumesResponse,omitempty"` + Res *types.ResolveMultipleUnresolvedVmfsVolumesResponse `xml:"ResolveMultipleUnresolvedVmfsVolumesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -12985,7 +13925,7 @@ func ResolveMultipleUnresolvedVmfsVolumes(ctx context.Context, r soap.RoundTripp type ResolveMultipleUnresolvedVmfsVolumesEx_TaskBody struct { Req *types.ResolveMultipleUnresolvedVmfsVolumesEx_Task `xml:"urn:vim25 ResolveMultipleUnresolvedVmfsVolumesEx_Task,omitempty"` - Res *types.ResolveMultipleUnresolvedVmfsVolumesEx_TaskResponse `xml:"urn:vim25 ResolveMultipleUnresolvedVmfsVolumesEx_TaskResponse,omitempty"` + Res *types.ResolveMultipleUnresolvedVmfsVolumesEx_TaskResponse `xml:"ResolveMultipleUnresolvedVmfsVolumesEx_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13005,7 +13945,7 @@ func ResolveMultipleUnresolvedVmfsVolumesEx_Task(ctx 
context.Context, r soap.Rou type RestartServiceBody struct { Req *types.RestartService `xml:"urn:vim25 RestartService,omitempty"` - Res *types.RestartServiceResponse `xml:"urn:vim25 RestartServiceResponse,omitempty"` + Res *types.RestartServiceResponse `xml:"RestartServiceResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13025,7 +13965,7 @@ func RestartService(ctx context.Context, r soap.RoundTripper, req *types.Restart type RestartServiceConsoleVirtualNicBody struct { Req *types.RestartServiceConsoleVirtualNic `xml:"urn:vim25 RestartServiceConsoleVirtualNic,omitempty"` - Res *types.RestartServiceConsoleVirtualNicResponse `xml:"urn:vim25 RestartServiceConsoleVirtualNicResponse,omitempty"` + Res *types.RestartServiceConsoleVirtualNicResponse `xml:"RestartServiceConsoleVirtualNicResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13045,7 +13985,7 @@ func RestartServiceConsoleVirtualNic(ctx context.Context, r soap.RoundTripper, r type RestoreFirmwareConfigurationBody struct { Req *types.RestoreFirmwareConfiguration `xml:"urn:vim25 RestoreFirmwareConfiguration,omitempty"` - Res *types.RestoreFirmwareConfigurationResponse `xml:"urn:vim25 RestoreFirmwareConfigurationResponse,omitempty"` + Res *types.RestoreFirmwareConfigurationResponse `xml:"RestoreFirmwareConfigurationResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13065,7 +14005,7 @@ func RestoreFirmwareConfiguration(ctx context.Context, r soap.RoundTripper, req type RetrieveAllPermissionsBody struct { Req *types.RetrieveAllPermissions `xml:"urn:vim25 RetrieveAllPermissions,omitempty"` - Res *types.RetrieveAllPermissionsResponse `xml:"urn:vim25 RetrieveAllPermissionsResponse,omitempty"` + Res *types.RetrieveAllPermissionsResponse `xml:"RetrieveAllPermissionsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13085,7 +14025,7 @@ func RetrieveAllPermissions(ctx context.Context, r soap.RoundTripper, req *types type RetrieveAnswerFileBody struct { Req *types.RetrieveAnswerFile `xml:"urn:vim25 RetrieveAnswerFile,omitempty"` - Res *types.RetrieveAnswerFileResponse `xml:"urn:vim25 RetrieveAnswerFileResponse,omitempty"` + Res *types.RetrieveAnswerFileResponse `xml:"RetrieveAnswerFileResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13105,7 +14045,7 @@ func RetrieveAnswerFile(ctx context.Context, r soap.RoundTripper, req *types.Ret type RetrieveAnswerFileForProfileBody struct { Req *types.RetrieveAnswerFileForProfile `xml:"urn:vim25 RetrieveAnswerFileForProfile,omitempty"` - Res *types.RetrieveAnswerFileForProfileResponse `xml:"urn:vim25 RetrieveAnswerFileForProfileResponse,omitempty"` + Res *types.RetrieveAnswerFileForProfileResponse `xml:"RetrieveAnswerFileForProfileResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13125,7 +14065,7 @@ func RetrieveAnswerFileForProfile(ctx context.Context, r soap.RoundTripper, req type RetrieveArgumentDescriptionBody struct { Req *types.RetrieveArgumentDescription `xml:"urn:vim25 RetrieveArgumentDescription,omitempty"` - Res *types.RetrieveArgumentDescriptionResponse `xml:"urn:vim25 RetrieveArgumentDescriptionResponse,omitempty"` + Res *types.RetrieveArgumentDescriptionResponse `xml:"RetrieveArgumentDescriptionResponse,omitempty"` Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13145,7 +14085,7 @@ func RetrieveArgumentDescription(ctx context.Context, r soap.RoundTripper, req * type RetrieveClientCertBody struct { Req *types.RetrieveClientCert `xml:"urn:vim25 RetrieveClientCert,omitempty"` - Res *types.RetrieveClientCertResponse `xml:"urn:vim25 RetrieveClientCertResponse,omitempty"` + Res *types.RetrieveClientCertResponse `xml:"RetrieveClientCertResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13165,7 +14105,7 @@ func RetrieveClientCert(ctx context.Context, r soap.RoundTripper, req *types.Ret type RetrieveClientCsrBody struct { Req *types.RetrieveClientCsr `xml:"urn:vim25 RetrieveClientCsr,omitempty"` - Res *types.RetrieveClientCsrResponse `xml:"urn:vim25 RetrieveClientCsrResponse,omitempty"` + Res *types.RetrieveClientCsrResponse `xml:"RetrieveClientCsrResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13185,7 +14125,7 @@ func RetrieveClientCsr(ctx context.Context, r soap.RoundTripper, req *types.Retr type RetrieveDasAdvancedRuntimeInfoBody struct { Req *types.RetrieveDasAdvancedRuntimeInfo `xml:"urn:vim25 RetrieveDasAdvancedRuntimeInfo,omitempty"` - Res *types.RetrieveDasAdvancedRuntimeInfoResponse `xml:"urn:vim25 RetrieveDasAdvancedRuntimeInfoResponse,omitempty"` + Res *types.RetrieveDasAdvancedRuntimeInfoResponse `xml:"RetrieveDasAdvancedRuntimeInfoResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13205,7 +14145,7 @@ func RetrieveDasAdvancedRuntimeInfo(ctx context.Context, r soap.RoundTripper, re type RetrieveDescriptionBody struct { Req *types.RetrieveDescription `xml:"urn:vim25 RetrieveDescription,omitempty"` - Res *types.RetrieveDescriptionResponse `xml:"urn:vim25 RetrieveDescriptionResponse,omitempty"` + Res *types.RetrieveDescriptionResponse `xml:"RetrieveDescriptionResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13225,7 +14165,7 @@ func RetrieveDescription(ctx context.Context, r soap.RoundTripper, req *types.Re type RetrieveDiskPartitionInfoBody struct { Req *types.RetrieveDiskPartitionInfo `xml:"urn:vim25 RetrieveDiskPartitionInfo,omitempty"` - Res *types.RetrieveDiskPartitionInfoResponse `xml:"urn:vim25 RetrieveDiskPartitionInfoResponse,omitempty"` + Res *types.RetrieveDiskPartitionInfoResponse `xml:"RetrieveDiskPartitionInfoResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13243,9 +14183,29 @@ func RetrieveDiskPartitionInfo(ctx context.Context, r soap.RoundTripper, req *ty return resBody.Res, nil } +type RetrieveDynamicPassthroughInfoBody struct { + Req *types.RetrieveDynamicPassthroughInfo `xml:"urn:vim25 RetrieveDynamicPassthroughInfo,omitempty"` + Res *types.RetrieveDynamicPassthroughInfoResponse `xml:"RetrieveDynamicPassthroughInfoResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RetrieveDynamicPassthroughInfoBody) Fault() *soap.Fault { return b.Fault_ } + +func RetrieveDynamicPassthroughInfo(ctx context.Context, r soap.RoundTripper, req *types.RetrieveDynamicPassthroughInfo) (*types.RetrieveDynamicPassthroughInfoResponse, error) { + var reqBody, resBody RetrieveDynamicPassthroughInfoBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + 
return resBody.Res, nil +} + type RetrieveEntityPermissionsBody struct { Req *types.RetrieveEntityPermissions `xml:"urn:vim25 RetrieveEntityPermissions,omitempty"` - Res *types.RetrieveEntityPermissionsResponse `xml:"urn:vim25 RetrieveEntityPermissionsResponse,omitempty"` + Res *types.RetrieveEntityPermissionsResponse `xml:"RetrieveEntityPermissionsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13265,7 +14225,7 @@ func RetrieveEntityPermissions(ctx context.Context, r soap.RoundTripper, req *ty type RetrieveEntityScheduledTaskBody struct { Req *types.RetrieveEntityScheduledTask `xml:"urn:vim25 RetrieveEntityScheduledTask,omitempty"` - Res *types.RetrieveEntityScheduledTaskResponse `xml:"urn:vim25 RetrieveEntityScheduledTaskResponse,omitempty"` + Res *types.RetrieveEntityScheduledTaskResponse `xml:"RetrieveEntityScheduledTaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13283,9 +14243,29 @@ func RetrieveEntityScheduledTask(ctx context.Context, r soap.RoundTripper, req * return resBody.Res, nil } +type RetrieveFreeEpcMemoryBody struct { + Req *types.RetrieveFreeEpcMemory `xml:"urn:vim25 RetrieveFreeEpcMemory,omitempty"` + Res *types.RetrieveFreeEpcMemoryResponse `xml:"RetrieveFreeEpcMemoryResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RetrieveFreeEpcMemoryBody) Fault() *soap.Fault { return b.Fault_ } + +func RetrieveFreeEpcMemory(ctx context.Context, r soap.RoundTripper, req *types.RetrieveFreeEpcMemory) (*types.RetrieveFreeEpcMemoryResponse, error) { + var reqBody, resBody RetrieveFreeEpcMemoryBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type RetrieveHardwareUptimeBody struct { Req *types.RetrieveHardwareUptime `xml:"urn:vim25 RetrieveHardwareUptime,omitempty"` - Res *types.RetrieveHardwareUptimeResponse `xml:"urn:vim25 RetrieveHardwareUptimeResponse,omitempty"` + Res *types.RetrieveHardwareUptimeResponse `xml:"RetrieveHardwareUptimeResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13305,7 +14285,7 @@ func RetrieveHardwareUptime(ctx context.Context, r soap.RoundTripper, req *types type RetrieveHostAccessControlEntriesBody struct { Req *types.RetrieveHostAccessControlEntries `xml:"urn:vim25 RetrieveHostAccessControlEntries,omitempty"` - Res *types.RetrieveHostAccessControlEntriesResponse `xml:"urn:vim25 RetrieveHostAccessControlEntriesResponse,omitempty"` + Res *types.RetrieveHostAccessControlEntriesResponse `xml:"RetrieveHostAccessControlEntriesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13325,7 +14305,7 @@ func RetrieveHostAccessControlEntries(ctx context.Context, r soap.RoundTripper, type RetrieveHostCustomizationsBody struct { Req *types.RetrieveHostCustomizations `xml:"urn:vim25 RetrieveHostCustomizations,omitempty"` - Res *types.RetrieveHostCustomizationsResponse `xml:"urn:vim25 RetrieveHostCustomizationsResponse,omitempty"` + Res *types.RetrieveHostCustomizationsResponse `xml:"RetrieveHostCustomizationsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13345,7 +14325,7 @@ func RetrieveHostCustomizations(ctx context.Context, r soap.RoundTripper, req *t type 
RetrieveHostCustomizationsForProfileBody struct { Req *types.RetrieveHostCustomizationsForProfile `xml:"urn:vim25 RetrieveHostCustomizationsForProfile,omitempty"` - Res *types.RetrieveHostCustomizationsForProfileResponse `xml:"urn:vim25 RetrieveHostCustomizationsForProfileResponse,omitempty"` + Res *types.RetrieveHostCustomizationsForProfileResponse `xml:"RetrieveHostCustomizationsForProfileResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13365,7 +14345,7 @@ func RetrieveHostCustomizationsForProfile(ctx context.Context, r soap.RoundTripp type RetrieveHostSpecificationBody struct { Req *types.RetrieveHostSpecification `xml:"urn:vim25 RetrieveHostSpecification,omitempty"` - Res *types.RetrieveHostSpecificationResponse `xml:"urn:vim25 RetrieveHostSpecificationResponse,omitempty"` + Res *types.RetrieveHostSpecificationResponse `xml:"RetrieveHostSpecificationResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13385,7 +14365,7 @@ func RetrieveHostSpecification(ctx context.Context, r soap.RoundTripper, req *ty type RetrieveKmipServerCertBody struct { Req *types.RetrieveKmipServerCert `xml:"urn:vim25 RetrieveKmipServerCert,omitempty"` - Res *types.RetrieveKmipServerCertResponse `xml:"urn:vim25 RetrieveKmipServerCertResponse,omitempty"` + Res *types.RetrieveKmipServerCertResponse `xml:"RetrieveKmipServerCertResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13405,7 +14385,7 @@ func RetrieveKmipServerCert(ctx context.Context, r soap.RoundTripper, req *types type RetrieveKmipServersStatus_TaskBody struct { Req *types.RetrieveKmipServersStatus_Task `xml:"urn:vim25 RetrieveKmipServersStatus_Task,omitempty"` - Res *types.RetrieveKmipServersStatus_TaskResponse `xml:"urn:vim25 RetrieveKmipServersStatus_TaskResponse,omitempty"` + Res *types.RetrieveKmipServersStatus_TaskResponse `xml:"RetrieveKmipServersStatus_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13425,7 +14405,7 @@ func RetrieveKmipServersStatus_Task(ctx context.Context, r soap.RoundTripper, re type RetrieveObjectScheduledTaskBody struct { Req *types.RetrieveObjectScheduledTask `xml:"urn:vim25 RetrieveObjectScheduledTask,omitempty"` - Res *types.RetrieveObjectScheduledTaskResponse `xml:"urn:vim25 RetrieveObjectScheduledTaskResponse,omitempty"` + Res *types.RetrieveObjectScheduledTaskResponse `xml:"RetrieveObjectScheduledTaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13445,7 +14425,7 @@ func RetrieveObjectScheduledTask(ctx context.Context, r soap.RoundTripper, req * type RetrieveProductComponentsBody struct { Req *types.RetrieveProductComponents `xml:"urn:vim25 RetrieveProductComponents,omitempty"` - Res *types.RetrieveProductComponentsResponse `xml:"urn:vim25 RetrieveProductComponentsResponse,omitempty"` + Res *types.RetrieveProductComponentsResponse `xml:"RetrieveProductComponentsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13465,7 +14445,7 @@ func RetrieveProductComponents(ctx context.Context, r soap.RoundTripper, req *ty type RetrievePropertiesBody struct { Req *types.RetrieveProperties `xml:"urn:vim25 RetrieveProperties,omitempty"` - Res *types.RetrievePropertiesResponse `xml:"urn:vim25 RetrievePropertiesResponse,omitempty"` + Res 
*types.RetrievePropertiesResponse `xml:"RetrievePropertiesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13485,7 +14465,7 @@ func RetrieveProperties(ctx context.Context, r soap.RoundTripper, req *types.Ret type RetrievePropertiesExBody struct { Req *types.RetrievePropertiesEx `xml:"urn:vim25 RetrievePropertiesEx,omitempty"` - Res *types.RetrievePropertiesExResponse `xml:"urn:vim25 RetrievePropertiesExResponse,omitempty"` + Res *types.RetrievePropertiesExResponse `xml:"RetrievePropertiesExResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13505,7 +14485,7 @@ func RetrievePropertiesEx(ctx context.Context, r soap.RoundTripper, req *types.R type RetrieveRolePermissionsBody struct { Req *types.RetrieveRolePermissions `xml:"urn:vim25 RetrieveRolePermissions,omitempty"` - Res *types.RetrieveRolePermissionsResponse `xml:"urn:vim25 RetrieveRolePermissionsResponse,omitempty"` + Res *types.RetrieveRolePermissionsResponse `xml:"RetrieveRolePermissionsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13525,7 +14505,7 @@ func RetrieveRolePermissions(ctx context.Context, r soap.RoundTripper, req *type type RetrieveSelfSignedClientCertBody struct { Req *types.RetrieveSelfSignedClientCert `xml:"urn:vim25 RetrieveSelfSignedClientCert,omitempty"` - Res *types.RetrieveSelfSignedClientCertResponse `xml:"urn:vim25 RetrieveSelfSignedClientCertResponse,omitempty"` + Res *types.RetrieveSelfSignedClientCertResponse `xml:"RetrieveSelfSignedClientCertResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13545,7 +14525,7 @@ func RetrieveSelfSignedClientCert(ctx context.Context, r soap.RoundTripper, req type RetrieveServiceContentBody struct { Req *types.RetrieveServiceContent `xml:"urn:vim25 RetrieveServiceContent,omitempty"` - Res *types.RetrieveServiceContentResponse `xml:"urn:vim25 RetrieveServiceContentResponse,omitempty"` + Res *types.RetrieveServiceContentResponse `xml:"RetrieveServiceContentResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13563,9 +14543,49 @@ func RetrieveServiceContent(ctx context.Context, r soap.RoundTripper, req *types return resBody.Res, nil } +type RetrieveServiceProviderEntitiesBody struct { + Req *types.RetrieveServiceProviderEntities `xml:"urn:vim25 RetrieveServiceProviderEntities,omitempty"` + Res *types.RetrieveServiceProviderEntitiesResponse `xml:"RetrieveServiceProviderEntitiesResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *RetrieveServiceProviderEntitiesBody) Fault() *soap.Fault { return b.Fault_ } + +func RetrieveServiceProviderEntities(ctx context.Context, r soap.RoundTripper, req *types.RetrieveServiceProviderEntities) (*types.RetrieveServiceProviderEntitiesResponse, error) { + var reqBody, resBody RetrieveServiceProviderEntitiesBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type RetrieveSnapshotDetailsBody struct { + Req *types.RetrieveSnapshotDetails `xml:"urn:vim25 RetrieveSnapshotDetails,omitempty"` + Res *types.RetrieveSnapshotDetailsResponse `xml:"RetrieveSnapshotDetailsResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b 
*RetrieveSnapshotDetailsBody) Fault() *soap.Fault { return b.Fault_ } + +func RetrieveSnapshotDetails(ctx context.Context, r soap.RoundTripper, req *types.RetrieveSnapshotDetails) (*types.RetrieveSnapshotDetailsResponse, error) { + var reqBody, resBody RetrieveSnapshotDetailsBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type RetrieveSnapshotInfoBody struct { Req *types.RetrieveSnapshotInfo `xml:"urn:vim25 RetrieveSnapshotInfo,omitempty"` - Res *types.RetrieveSnapshotInfoResponse `xml:"urn:vim25 RetrieveSnapshotInfoResponse,omitempty"` + Res *types.RetrieveSnapshotInfoResponse `xml:"RetrieveSnapshotInfoResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13585,7 +14605,7 @@ func RetrieveSnapshotInfo(ctx context.Context, r soap.RoundTripper, req *types.R type RetrieveUserGroupsBody struct { Req *types.RetrieveUserGroups `xml:"urn:vim25 RetrieveUserGroups,omitempty"` - Res *types.RetrieveUserGroupsResponse `xml:"urn:vim25 RetrieveUserGroupsResponse,omitempty"` + Res *types.RetrieveUserGroupsResponse `xml:"RetrieveUserGroupsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13605,7 +14625,7 @@ func RetrieveUserGroups(ctx context.Context, r soap.RoundTripper, req *types.Ret type RetrieveVStorageInfrastructureObjectPolicyBody struct { Req *types.RetrieveVStorageInfrastructureObjectPolicy `xml:"urn:vim25 RetrieveVStorageInfrastructureObjectPolicy,omitempty"` - Res *types.RetrieveVStorageInfrastructureObjectPolicyResponse `xml:"urn:vim25 RetrieveVStorageInfrastructureObjectPolicyResponse,omitempty"` + Res *types.RetrieveVStorageInfrastructureObjectPolicyResponse `xml:"RetrieveVStorageInfrastructureObjectPolicyResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13625,7 +14645,7 @@ func RetrieveVStorageInfrastructureObjectPolicy(ctx context.Context, r soap.Roun type RetrieveVStorageObjectBody struct { Req *types.RetrieveVStorageObject `xml:"urn:vim25 RetrieveVStorageObject,omitempty"` - Res *types.RetrieveVStorageObjectResponse `xml:"urn:vim25 RetrieveVStorageObjectResponse,omitempty"` + Res *types.RetrieveVStorageObjectResponse `xml:"RetrieveVStorageObjectResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13645,7 +14665,7 @@ func RetrieveVStorageObject(ctx context.Context, r soap.RoundTripper, req *types type RetrieveVStorageObjectAssociationsBody struct { Req *types.RetrieveVStorageObjectAssociations `xml:"urn:vim25 RetrieveVStorageObjectAssociations,omitempty"` - Res *types.RetrieveVStorageObjectAssociationsResponse `xml:"urn:vim25 RetrieveVStorageObjectAssociationsResponse,omitempty"` + Res *types.RetrieveVStorageObjectAssociationsResponse `xml:"RetrieveVStorageObjectAssociationsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13665,7 +14685,7 @@ func RetrieveVStorageObjectAssociations(ctx context.Context, r soap.RoundTripper type RetrieveVStorageObjectStateBody struct { Req *types.RetrieveVStorageObjectState `xml:"urn:vim25 RetrieveVStorageObjectState,omitempty"` - Res *types.RetrieveVStorageObjectStateResponse `xml:"urn:vim25 RetrieveVStorageObjectStateResponse,omitempty"` + Res *types.RetrieveVStorageObjectStateResponse `xml:"RetrieveVStorageObjectStateResponse,omitempty"` Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13685,7 +14705,7 @@ func RetrieveVStorageObjectState(ctx context.Context, r soap.RoundTripper, req * type RevertToCurrentSnapshot_TaskBody struct { Req *types.RevertToCurrentSnapshot_Task `xml:"urn:vim25 RevertToCurrentSnapshot_Task,omitempty"` - Res *types.RevertToCurrentSnapshot_TaskResponse `xml:"urn:vim25 RevertToCurrentSnapshot_TaskResponse,omitempty"` + Res *types.RevertToCurrentSnapshot_TaskResponse `xml:"RevertToCurrentSnapshot_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13705,7 +14725,7 @@ func RevertToCurrentSnapshot_Task(ctx context.Context, r soap.RoundTripper, req type RevertToSnapshot_TaskBody struct { Req *types.RevertToSnapshot_Task `xml:"urn:vim25 RevertToSnapshot_Task,omitempty"` - Res *types.RevertToSnapshot_TaskResponse `xml:"urn:vim25 RevertToSnapshot_TaskResponse,omitempty"` + Res *types.RevertToSnapshot_TaskResponse `xml:"RevertToSnapshot_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13725,7 +14745,7 @@ func RevertToSnapshot_Task(ctx context.Context, r soap.RoundTripper, req *types. type RevertVStorageObject_TaskBody struct { Req *types.RevertVStorageObject_Task `xml:"urn:vim25 RevertVStorageObject_Task,omitempty"` - Res *types.RevertVStorageObject_TaskResponse `xml:"urn:vim25 RevertVStorageObject_TaskResponse,omitempty"` + Res *types.RevertVStorageObject_TaskResponse `xml:"RevertVStorageObject_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13745,7 +14765,7 @@ func RevertVStorageObject_Task(ctx context.Context, r soap.RoundTripper, req *ty type RewindCollectorBody struct { Req *types.RewindCollector `xml:"urn:vim25 RewindCollector,omitempty"` - Res *types.RewindCollectorResponse `xml:"urn:vim25 RewindCollectorResponse,omitempty"` + Res *types.RewindCollectorResponse `xml:"RewindCollectorResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13765,7 +14785,7 @@ func RewindCollector(ctx context.Context, r soap.RoundTripper, req *types.Rewind type RunScheduledTaskBody struct { Req *types.RunScheduledTask `xml:"urn:vim25 RunScheduledTask,omitempty"` - Res *types.RunScheduledTaskResponse `xml:"urn:vim25 RunScheduledTaskResponse,omitempty"` + Res *types.RunScheduledTaskResponse `xml:"RunScheduledTaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13785,7 +14805,7 @@ func RunScheduledTask(ctx context.Context, r soap.RoundTripper, req *types.RunSc type RunVsanPhysicalDiskDiagnosticsBody struct { Req *types.RunVsanPhysicalDiskDiagnostics `xml:"urn:vim25 RunVsanPhysicalDiskDiagnostics,omitempty"` - Res *types.RunVsanPhysicalDiskDiagnosticsResponse `xml:"urn:vim25 RunVsanPhysicalDiskDiagnosticsResponse,omitempty"` + Res *types.RunVsanPhysicalDiskDiagnosticsResponse `xml:"RunVsanPhysicalDiskDiagnosticsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13805,7 +14825,7 @@ func RunVsanPhysicalDiskDiagnostics(ctx context.Context, r soap.RoundTripper, re type ScanHostPatchV2_TaskBody struct { Req *types.ScanHostPatchV2_Task `xml:"urn:vim25 ScanHostPatchV2_Task,omitempty"` - Res *types.ScanHostPatchV2_TaskResponse `xml:"urn:vim25 ScanHostPatchV2_TaskResponse,omitempty"` + Res *types.ScanHostPatchV2_TaskResponse 
`xml:"ScanHostPatchV2_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13825,7 +14845,7 @@ func ScanHostPatchV2_Task(ctx context.Context, r soap.RoundTripper, req *types.S type ScanHostPatch_TaskBody struct { Req *types.ScanHostPatch_Task `xml:"urn:vim25 ScanHostPatch_Task,omitempty"` - Res *types.ScanHostPatch_TaskResponse `xml:"urn:vim25 ScanHostPatch_TaskResponse,omitempty"` + Res *types.ScanHostPatch_TaskResponse `xml:"ScanHostPatch_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13845,7 +14865,7 @@ func ScanHostPatch_Task(ctx context.Context, r soap.RoundTripper, req *types.Sca type ScheduleReconcileDatastoreInventoryBody struct { Req *types.ScheduleReconcileDatastoreInventory `xml:"urn:vim25 ScheduleReconcileDatastoreInventory,omitempty"` - Res *types.ScheduleReconcileDatastoreInventoryResponse `xml:"urn:vim25 ScheduleReconcileDatastoreInventoryResponse,omitempty"` + Res *types.ScheduleReconcileDatastoreInventoryResponse `xml:"ScheduleReconcileDatastoreInventoryResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13865,7 +14885,7 @@ func ScheduleReconcileDatastoreInventory(ctx context.Context, r soap.RoundTrippe type SearchDatastoreSubFolders_TaskBody struct { Req *types.SearchDatastoreSubFolders_Task `xml:"urn:vim25 SearchDatastoreSubFolders_Task,omitempty"` - Res *types.SearchDatastoreSubFolders_TaskResponse `xml:"urn:vim25 SearchDatastoreSubFolders_TaskResponse,omitempty"` + Res *types.SearchDatastoreSubFolders_TaskResponse `xml:"SearchDatastoreSubFolders_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13885,7 +14905,7 @@ func SearchDatastoreSubFolders_Task(ctx context.Context, r soap.RoundTripper, re type SearchDatastore_TaskBody struct { Req *types.SearchDatastore_Task `xml:"urn:vim25 SearchDatastore_Task,omitempty"` - Res *types.SearchDatastore_TaskResponse `xml:"urn:vim25 SearchDatastore_TaskResponse,omitempty"` + Res *types.SearchDatastore_TaskResponse `xml:"SearchDatastore_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13905,7 +14925,7 @@ func SearchDatastore_Task(ctx context.Context, r soap.RoundTripper, req *types.S type SelectActivePartitionBody struct { Req *types.SelectActivePartition `xml:"urn:vim25 SelectActivePartition,omitempty"` - Res *types.SelectActivePartitionResponse `xml:"urn:vim25 SelectActivePartitionResponse,omitempty"` + Res *types.SelectActivePartitionResponse `xml:"SelectActivePartitionResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13925,7 +14945,7 @@ func SelectActivePartition(ctx context.Context, r soap.RoundTripper, req *types. 
type SelectVnicBody struct { Req *types.SelectVnic `xml:"urn:vim25 SelectVnic,omitempty"` - Res *types.SelectVnicResponse `xml:"urn:vim25 SelectVnicResponse,omitempty"` + Res *types.SelectVnicResponse `xml:"SelectVnicResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13945,7 +14965,7 @@ func SelectVnic(ctx context.Context, r soap.RoundTripper, req *types.SelectVnic) type SelectVnicForNicTypeBody struct { Req *types.SelectVnicForNicType `xml:"urn:vim25 SelectVnicForNicType,omitempty"` - Res *types.SelectVnicForNicTypeResponse `xml:"urn:vim25 SelectVnicForNicTypeResponse,omitempty"` + Res *types.SelectVnicForNicTypeResponse `xml:"SelectVnicForNicTypeResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13965,7 +14985,7 @@ func SelectVnicForNicType(ctx context.Context, r soap.RoundTripper, req *types.S type SendNMIBody struct { Req *types.SendNMI `xml:"urn:vim25 SendNMI,omitempty"` - Res *types.SendNMIResponse `xml:"urn:vim25 SendNMIResponse,omitempty"` + Res *types.SendNMIResponse `xml:"SendNMIResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -13985,7 +15005,7 @@ func SendNMI(ctx context.Context, r soap.RoundTripper, req *types.SendNMI) (*typ type SendTestNotificationBody struct { Req *types.SendTestNotification `xml:"urn:vim25 SendTestNotification,omitempty"` - Res *types.SendTestNotificationResponse `xml:"urn:vim25 SendTestNotificationResponse,omitempty"` + Res *types.SendTestNotificationResponse `xml:"SendTestNotificationResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14005,7 +15025,7 @@ func SendTestNotification(ctx context.Context, r soap.RoundTripper, req *types.S type SessionIsActiveBody struct { Req *types.SessionIsActive `xml:"urn:vim25 SessionIsActive,omitempty"` - Res *types.SessionIsActiveResponse `xml:"urn:vim25 SessionIsActiveResponse,omitempty"` + Res *types.SessionIsActiveResponse `xml:"SessionIsActiveResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14025,7 +15045,7 @@ func SessionIsActive(ctx context.Context, r soap.RoundTripper, req *types.Sessio type SetCollectorPageSizeBody struct { Req *types.SetCollectorPageSize `xml:"urn:vim25 SetCollectorPageSize,omitempty"` - Res *types.SetCollectorPageSizeResponse `xml:"urn:vim25 SetCollectorPageSizeResponse,omitempty"` + Res *types.SetCollectorPageSizeResponse `xml:"SetCollectorPageSizeResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14043,9 +15063,49 @@ func SetCollectorPageSize(ctx context.Context, r soap.RoundTripper, req *types.S return resBody.Res, nil } +type SetCryptoModeBody struct { + Req *types.SetCryptoMode `xml:"urn:vim25 SetCryptoMode,omitempty"` + Res *types.SetCryptoModeResponse `xml:"SetCryptoModeResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *SetCryptoModeBody) Fault() *soap.Fault { return b.Fault_ } + +func SetCryptoMode(ctx context.Context, r soap.RoundTripper, req *types.SetCryptoMode) (*types.SetCryptoModeResponse, error) { + var reqBody, resBody SetCryptoModeBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + +type SetDefaultKmsClusterBody struct { + Req 
*types.SetDefaultKmsCluster `xml:"urn:vim25 SetDefaultKmsCluster,omitempty"` + Res *types.SetDefaultKmsClusterResponse `xml:"SetDefaultKmsClusterResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *SetDefaultKmsClusterBody) Fault() *soap.Fault { return b.Fault_ } + +func SetDefaultKmsCluster(ctx context.Context, r soap.RoundTripper, req *types.SetDefaultKmsCluster) (*types.SetDefaultKmsClusterResponse, error) { + var reqBody, resBody SetDefaultKmsClusterBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type SetDisplayTopologyBody struct { Req *types.SetDisplayTopology `xml:"urn:vim25 SetDisplayTopology,omitempty"` - Res *types.SetDisplayTopologyResponse `xml:"urn:vim25 SetDisplayTopologyResponse,omitempty"` + Res *types.SetDisplayTopologyResponse `xml:"SetDisplayTopologyResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14065,7 +15125,7 @@ func SetDisplayTopology(ctx context.Context, r soap.RoundTripper, req *types.Set type SetEntityPermissionsBody struct { Req *types.SetEntityPermissions `xml:"urn:vim25 SetEntityPermissions,omitempty"` - Res *types.SetEntityPermissionsResponse `xml:"urn:vim25 SetEntityPermissionsResponse,omitempty"` + Res *types.SetEntityPermissionsResponse `xml:"SetEntityPermissionsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14085,7 +15145,7 @@ func SetEntityPermissions(ctx context.Context, r soap.RoundTripper, req *types.S type SetExtensionCertificateBody struct { Req *types.SetExtensionCertificate `xml:"urn:vim25 SetExtensionCertificate,omitempty"` - Res *types.SetExtensionCertificateResponse `xml:"urn:vim25 SetExtensionCertificateResponse,omitempty"` + Res *types.SetExtensionCertificateResponse `xml:"SetExtensionCertificateResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14105,7 +15165,7 @@ func SetExtensionCertificate(ctx context.Context, r soap.RoundTripper, req *type type SetFieldBody struct { Req *types.SetField `xml:"urn:vim25 SetField,omitempty"` - Res *types.SetFieldResponse `xml:"urn:vim25 SetFieldResponse,omitempty"` + Res *types.SetFieldResponse `xml:"SetFieldResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14125,7 +15185,7 @@ func SetField(ctx context.Context, r soap.RoundTripper, req *types.SetField) (*t type SetLicenseEditionBody struct { Req *types.SetLicenseEdition `xml:"urn:vim25 SetLicenseEdition,omitempty"` - Res *types.SetLicenseEditionResponse `xml:"urn:vim25 SetLicenseEditionResponse,omitempty"` + Res *types.SetLicenseEditionResponse `xml:"SetLicenseEditionResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14145,7 +15205,7 @@ func SetLicenseEdition(ctx context.Context, r soap.RoundTripper, req *types.SetL type SetLocaleBody struct { Req *types.SetLocale `xml:"urn:vim25 SetLocale,omitempty"` - Res *types.SetLocaleResponse `xml:"urn:vim25 SetLocaleResponse,omitempty"` + Res *types.SetLocaleResponse `xml:"SetLocaleResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14165,7 +15225,7 @@ func SetLocale(ctx context.Context, r soap.RoundTripper, req *types.SetLocale) ( type SetMultipathLunPolicyBody struct { Req 
*types.SetMultipathLunPolicy `xml:"urn:vim25 SetMultipathLunPolicy,omitempty"` - Res *types.SetMultipathLunPolicyResponse `xml:"urn:vim25 SetMultipathLunPolicyResponse,omitempty"` + Res *types.SetMultipathLunPolicyResponse `xml:"SetMultipathLunPolicyResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14185,7 +15245,7 @@ func SetMultipathLunPolicy(ctx context.Context, r soap.RoundTripper, req *types. type SetNFSUserBody struct { Req *types.SetNFSUser `xml:"urn:vim25 SetNFSUser,omitempty"` - Res *types.SetNFSUserResponse `xml:"urn:vim25 SetNFSUserResponse,omitempty"` + Res *types.SetNFSUserResponse `xml:"SetNFSUserResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14205,7 +15265,7 @@ func SetNFSUser(ctx context.Context, r soap.RoundTripper, req *types.SetNFSUser) type SetPublicKeyBody struct { Req *types.SetPublicKey `xml:"urn:vim25 SetPublicKey,omitempty"` - Res *types.SetPublicKeyResponse `xml:"urn:vim25 SetPublicKeyResponse,omitempty"` + Res *types.SetPublicKeyResponse `xml:"SetPublicKeyResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14225,7 +15285,7 @@ func SetPublicKey(ctx context.Context, r soap.RoundTripper, req *types.SetPublic type SetRegistryValueInGuestBody struct { Req *types.SetRegistryValueInGuest `xml:"urn:vim25 SetRegistryValueInGuest,omitempty"` - Res *types.SetRegistryValueInGuestResponse `xml:"urn:vim25 SetRegistryValueInGuestResponse,omitempty"` + Res *types.SetRegistryValueInGuestResponse `xml:"SetRegistryValueInGuestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14245,7 +15305,7 @@ func SetRegistryValueInGuest(ctx context.Context, r soap.RoundTripper, req *type type SetScreenResolutionBody struct { Req *types.SetScreenResolution `xml:"urn:vim25 SetScreenResolution,omitempty"` - Res *types.SetScreenResolutionResponse `xml:"urn:vim25 SetScreenResolutionResponse,omitempty"` + Res *types.SetScreenResolutionResponse `xml:"SetScreenResolutionResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14265,7 +15325,7 @@ func SetScreenResolution(ctx context.Context, r soap.RoundTripper, req *types.Se type SetTaskDescriptionBody struct { Req *types.SetTaskDescription `xml:"urn:vim25 SetTaskDescription,omitempty"` - Res *types.SetTaskDescriptionResponse `xml:"urn:vim25 SetTaskDescriptionResponse,omitempty"` + Res *types.SetTaskDescriptionResponse `xml:"SetTaskDescriptionResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14285,7 +15345,7 @@ func SetTaskDescription(ctx context.Context, r soap.RoundTripper, req *types.Set type SetTaskStateBody struct { Req *types.SetTaskState `xml:"urn:vim25 SetTaskState,omitempty"` - Res *types.SetTaskStateResponse `xml:"urn:vim25 SetTaskStateResponse,omitempty"` + Res *types.SetTaskStateResponse `xml:"SetTaskStateResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14305,7 +15365,7 @@ func SetTaskState(ctx context.Context, r soap.RoundTripper, req *types.SetTaskSt type SetVStorageObjectControlFlagsBody struct { Req *types.SetVStorageObjectControlFlags `xml:"urn:vim25 SetVStorageObjectControlFlags,omitempty"` - Res *types.SetVStorageObjectControlFlagsResponse `xml:"urn:vim25 SetVStorageObjectControlFlagsResponse,omitempty"` + Res 
*types.SetVStorageObjectControlFlagsResponse `xml:"SetVStorageObjectControlFlagsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14325,7 +15385,7 @@ func SetVStorageObjectControlFlags(ctx context.Context, r soap.RoundTripper, req type SetVirtualDiskUuidBody struct { Req *types.SetVirtualDiskUuid `xml:"urn:vim25 SetVirtualDiskUuid,omitempty"` - Res *types.SetVirtualDiskUuidResponse `xml:"urn:vim25 SetVirtualDiskUuidResponse,omitempty"` + Res *types.SetVirtualDiskUuidResponse `xml:"SetVirtualDiskUuidResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14345,7 +15405,7 @@ func SetVirtualDiskUuid(ctx context.Context, r soap.RoundTripper, req *types.Set type ShrinkVirtualDisk_TaskBody struct { Req *types.ShrinkVirtualDisk_Task `xml:"urn:vim25 ShrinkVirtualDisk_Task,omitempty"` - Res *types.ShrinkVirtualDisk_TaskResponse `xml:"urn:vim25 ShrinkVirtualDisk_TaskResponse,omitempty"` + Res *types.ShrinkVirtualDisk_TaskResponse `xml:"ShrinkVirtualDisk_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14365,7 +15425,7 @@ func ShrinkVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *types type ShutdownGuestBody struct { Req *types.ShutdownGuest `xml:"urn:vim25 ShutdownGuest,omitempty"` - Res *types.ShutdownGuestResponse `xml:"urn:vim25 ShutdownGuestResponse,omitempty"` + Res *types.ShutdownGuestResponse `xml:"ShutdownGuestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14385,7 +15445,7 @@ func ShutdownGuest(ctx context.Context, r soap.RoundTripper, req *types.Shutdown type ShutdownHost_TaskBody struct { Req *types.ShutdownHost_Task `xml:"urn:vim25 ShutdownHost_Task,omitempty"` - Res *types.ShutdownHost_TaskResponse `xml:"urn:vim25 ShutdownHost_TaskResponse,omitempty"` + Res *types.ShutdownHost_TaskResponse `xml:"ShutdownHost_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14405,7 +15465,7 @@ func ShutdownHost_Task(ctx context.Context, r soap.RoundTripper, req *types.Shut type StageHostPatch_TaskBody struct { Req *types.StageHostPatch_Task `xml:"urn:vim25 StageHostPatch_Task,omitempty"` - Res *types.StageHostPatch_TaskResponse `xml:"urn:vim25 StageHostPatch_TaskResponse,omitempty"` + Res *types.StageHostPatch_TaskResponse `xml:"StageHostPatch_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14425,7 +15485,7 @@ func StageHostPatch_Task(ctx context.Context, r soap.RoundTripper, req *types.St type StampAllRulesWithUuid_TaskBody struct { Req *types.StampAllRulesWithUuid_Task `xml:"urn:vim25 StampAllRulesWithUuid_Task,omitempty"` - Res *types.StampAllRulesWithUuid_TaskResponse `xml:"urn:vim25 StampAllRulesWithUuid_TaskResponse,omitempty"` + Res *types.StampAllRulesWithUuid_TaskResponse `xml:"StampAllRulesWithUuid_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14445,7 +15505,7 @@ func StampAllRulesWithUuid_Task(ctx context.Context, r soap.RoundTripper, req *t type StandbyGuestBody struct { Req *types.StandbyGuest `xml:"urn:vim25 StandbyGuest,omitempty"` - Res *types.StandbyGuestResponse `xml:"urn:vim25 StandbyGuestResponse,omitempty"` + Res *types.StandbyGuestResponse `xml:"StandbyGuestResponse,omitempty"` Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14463,9 +15523,29 @@ func StandbyGuest(ctx context.Context, r soap.RoundTripper, req *types.StandbyGu return resBody.Res, nil } +type StartGuestNetwork_TaskBody struct { + Req *types.StartGuestNetwork_Task `xml:"urn:vim25 StartGuestNetwork_Task,omitempty"` + Res *types.StartGuestNetwork_TaskResponse `xml:"StartGuestNetwork_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *StartGuestNetwork_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func StartGuestNetwork_Task(ctx context.Context, r soap.RoundTripper, req *types.StartGuestNetwork_Task) (*types.StartGuestNetwork_TaskResponse, error) { + var reqBody, resBody StartGuestNetwork_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type StartProgramInGuestBody struct { Req *types.StartProgramInGuest `xml:"urn:vim25 StartProgramInGuest,omitempty"` - Res *types.StartProgramInGuestResponse `xml:"urn:vim25 StartProgramInGuestResponse,omitempty"` + Res *types.StartProgramInGuestResponse `xml:"StartProgramInGuestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14485,7 +15565,7 @@ func StartProgramInGuest(ctx context.Context, r soap.RoundTripper, req *types.St type StartRecording_TaskBody struct { Req *types.StartRecording_Task `xml:"urn:vim25 StartRecording_Task,omitempty"` - Res *types.StartRecording_TaskResponse `xml:"urn:vim25 StartRecording_TaskResponse,omitempty"` + Res *types.StartRecording_TaskResponse `xml:"StartRecording_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14505,7 +15585,7 @@ func StartRecording_Task(ctx context.Context, r soap.RoundTripper, req *types.St type StartReplaying_TaskBody struct { Req *types.StartReplaying_Task `xml:"urn:vim25 StartReplaying_Task,omitempty"` - Res *types.StartReplaying_TaskResponse `xml:"urn:vim25 StartReplaying_TaskResponse,omitempty"` + Res *types.StartReplaying_TaskResponse `xml:"StartReplaying_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14525,7 +15605,7 @@ func StartReplaying_Task(ctx context.Context, r soap.RoundTripper, req *types.St type StartServiceBody struct { Req *types.StartService `xml:"urn:vim25 StartService,omitempty"` - Res *types.StartServiceResponse `xml:"urn:vim25 StartServiceResponse,omitempty"` + Res *types.StartServiceResponse `xml:"StartServiceResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14545,7 +15625,7 @@ func StartService(ctx context.Context, r soap.RoundTripper, req *types.StartServ type StopRecording_TaskBody struct { Req *types.StopRecording_Task `xml:"urn:vim25 StopRecording_Task,omitempty"` - Res *types.StopRecording_TaskResponse `xml:"urn:vim25 StopRecording_TaskResponse,omitempty"` + Res *types.StopRecording_TaskResponse `xml:"StopRecording_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14565,7 +15645,7 @@ func StopRecording_Task(ctx context.Context, r soap.RoundTripper, req *types.Sto type StopReplaying_TaskBody struct { Req *types.StopReplaying_Task `xml:"urn:vim25 StopReplaying_Task,omitempty"` - Res *types.StopReplaying_TaskResponse `xml:"urn:vim25 
StopReplaying_TaskResponse,omitempty"` + Res *types.StopReplaying_TaskResponse `xml:"StopReplaying_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14585,7 +15665,7 @@ func StopReplaying_Task(ctx context.Context, r soap.RoundTripper, req *types.Sto type StopServiceBody struct { Req *types.StopService `xml:"urn:vim25 StopService,omitempty"` - Res *types.StopServiceResponse `xml:"urn:vim25 StopServiceResponse,omitempty"` + Res *types.StopServiceResponse `xml:"StopServiceResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14605,7 +15685,7 @@ func StopService(ctx context.Context, r soap.RoundTripper, req *types.StopServic type SuspendVApp_TaskBody struct { Req *types.SuspendVApp_Task `xml:"urn:vim25 SuspendVApp_Task,omitempty"` - Res *types.SuspendVApp_TaskResponse `xml:"urn:vim25 SuspendVApp_TaskResponse,omitempty"` + Res *types.SuspendVApp_TaskResponse `xml:"SuspendVApp_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14625,7 +15705,7 @@ func SuspendVApp_Task(ctx context.Context, r soap.RoundTripper, req *types.Suspe type SuspendVM_TaskBody struct { Req *types.SuspendVM_Task `xml:"urn:vim25 SuspendVM_Task,omitempty"` - Res *types.SuspendVM_TaskResponse `xml:"urn:vim25 SuspendVM_TaskResponse,omitempty"` + Res *types.SuspendVM_TaskResponse `xml:"SuspendVM_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14645,7 +15725,7 @@ func SuspendVM_Task(ctx context.Context, r soap.RoundTripper, req *types.Suspend type TerminateFaultTolerantVM_TaskBody struct { Req *types.TerminateFaultTolerantVM_Task `xml:"urn:vim25 TerminateFaultTolerantVM_Task,omitempty"` - Res *types.TerminateFaultTolerantVM_TaskResponse `xml:"urn:vim25 TerminateFaultTolerantVM_TaskResponse,omitempty"` + Res *types.TerminateFaultTolerantVM_TaskResponse `xml:"TerminateFaultTolerantVM_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14665,7 +15745,7 @@ func TerminateFaultTolerantVM_Task(ctx context.Context, r soap.RoundTripper, req type TerminateProcessInGuestBody struct { Req *types.TerminateProcessInGuest `xml:"urn:vim25 TerminateProcessInGuest,omitempty"` - Res *types.TerminateProcessInGuestResponse `xml:"urn:vim25 TerminateProcessInGuestResponse,omitempty"` + Res *types.TerminateProcessInGuestResponse `xml:"TerminateProcessInGuestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14685,7 +15765,7 @@ func TerminateProcessInGuest(ctx context.Context, r soap.RoundTripper, req *type type TerminateSessionBody struct { Req *types.TerminateSession `xml:"urn:vim25 TerminateSession,omitempty"` - Res *types.TerminateSessionResponse `xml:"urn:vim25 TerminateSessionResponse,omitempty"` + Res *types.TerminateSessionResponse `xml:"TerminateSessionResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14705,7 +15785,7 @@ func TerminateSession(ctx context.Context, r soap.RoundTripper, req *types.Termi type TerminateVMBody struct { Req *types.TerminateVM `xml:"urn:vim25 TerminateVM,omitempty"` - Res *types.TerminateVMResponse `xml:"urn:vim25 TerminateVMResponse,omitempty"` + Res *types.TerminateVMResponse `xml:"TerminateVMResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ 
Fault,omitempty"` } @@ -14723,9 +15803,29 @@ func TerminateVM(ctx context.Context, r soap.RoundTripper, req *types.TerminateV return resBody.Res, nil } +type TestTimeServiceBody struct { + Req *types.TestTimeService `xml:"urn:vim25 TestTimeService,omitempty"` + Res *types.TestTimeServiceResponse `xml:"TestTimeServiceResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *TestTimeServiceBody) Fault() *soap.Fault { return b.Fault_ } + +func TestTimeService(ctx context.Context, r soap.RoundTripper, req *types.TestTimeService) (*types.TestTimeServiceResponse, error) { + var reqBody, resBody TestTimeServiceBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type TurnDiskLocatorLedOff_TaskBody struct { Req *types.TurnDiskLocatorLedOff_Task `xml:"urn:vim25 TurnDiskLocatorLedOff_Task,omitempty"` - Res *types.TurnDiskLocatorLedOff_TaskResponse `xml:"urn:vim25 TurnDiskLocatorLedOff_TaskResponse,omitempty"` + Res *types.TurnDiskLocatorLedOff_TaskResponse `xml:"TurnDiskLocatorLedOff_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14745,7 +15845,7 @@ func TurnDiskLocatorLedOff_Task(ctx context.Context, r soap.RoundTripper, req *t type TurnDiskLocatorLedOn_TaskBody struct { Req *types.TurnDiskLocatorLedOn_Task `xml:"urn:vim25 TurnDiskLocatorLedOn_Task,omitempty"` - Res *types.TurnDiskLocatorLedOn_TaskResponse `xml:"urn:vim25 TurnDiskLocatorLedOn_TaskResponse,omitempty"` + Res *types.TurnDiskLocatorLedOn_TaskResponse `xml:"TurnDiskLocatorLedOn_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14765,7 +15865,7 @@ func TurnDiskLocatorLedOn_Task(ctx context.Context, r soap.RoundTripper, req *ty type TurnOffFaultToleranceForVM_TaskBody struct { Req *types.TurnOffFaultToleranceForVM_Task `xml:"urn:vim25 TurnOffFaultToleranceForVM_Task,omitempty"` - Res *types.TurnOffFaultToleranceForVM_TaskResponse `xml:"urn:vim25 TurnOffFaultToleranceForVM_TaskResponse,omitempty"` + Res *types.TurnOffFaultToleranceForVM_TaskResponse `xml:"TurnOffFaultToleranceForVM_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14785,7 +15885,7 @@ func TurnOffFaultToleranceForVM_Task(ctx context.Context, r soap.RoundTripper, r type UnassignUserFromGroupBody struct { Req *types.UnassignUserFromGroup `xml:"urn:vim25 UnassignUserFromGroup,omitempty"` - Res *types.UnassignUserFromGroupResponse `xml:"urn:vim25 UnassignUserFromGroupResponse,omitempty"` + Res *types.UnassignUserFromGroupResponse `xml:"UnassignUserFromGroupResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14805,7 +15905,7 @@ func UnassignUserFromGroup(ctx context.Context, r soap.RoundTripper, req *types. 
type UnbindVnicBody struct { Req *types.UnbindVnic `xml:"urn:vim25 UnbindVnic,omitempty"` - Res *types.UnbindVnicResponse `xml:"urn:vim25 UnbindVnicResponse,omitempty"` + Res *types.UnbindVnicResponse `xml:"UnbindVnicResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14825,7 +15925,7 @@ func UnbindVnic(ctx context.Context, r soap.RoundTripper, req *types.UnbindVnic) type UninstallHostPatch_TaskBody struct { Req *types.UninstallHostPatch_Task `xml:"urn:vim25 UninstallHostPatch_Task,omitempty"` - Res *types.UninstallHostPatch_TaskResponse `xml:"urn:vim25 UninstallHostPatch_TaskResponse,omitempty"` + Res *types.UninstallHostPatch_TaskResponse `xml:"UninstallHostPatch_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14845,7 +15945,7 @@ func UninstallHostPatch_Task(ctx context.Context, r soap.RoundTripper, req *type type UninstallIoFilter_TaskBody struct { Req *types.UninstallIoFilter_Task `xml:"urn:vim25 UninstallIoFilter_Task,omitempty"` - Res *types.UninstallIoFilter_TaskResponse `xml:"urn:vim25 UninstallIoFilter_TaskResponse,omitempty"` + Res *types.UninstallIoFilter_TaskResponse `xml:"UninstallIoFilter_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14865,7 +15965,7 @@ func UninstallIoFilter_Task(ctx context.Context, r soap.RoundTripper, req *types type UninstallServiceBody struct { Req *types.UninstallService `xml:"urn:vim25 UninstallService,omitempty"` - Res *types.UninstallServiceResponse `xml:"urn:vim25 UninstallServiceResponse,omitempty"` + Res *types.UninstallServiceResponse `xml:"UninstallServiceResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14885,7 +15985,7 @@ func UninstallService(ctx context.Context, r soap.RoundTripper, req *types.Unins type UnmapVmfsVolumeEx_TaskBody struct { Req *types.UnmapVmfsVolumeEx_Task `xml:"urn:vim25 UnmapVmfsVolumeEx_Task,omitempty"` - Res *types.UnmapVmfsVolumeEx_TaskResponse `xml:"urn:vim25 UnmapVmfsVolumeEx_TaskResponse,omitempty"` + Res *types.UnmapVmfsVolumeEx_TaskResponse `xml:"UnmapVmfsVolumeEx_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14903,9 +16003,29 @@ func UnmapVmfsVolumeEx_Task(ctx context.Context, r soap.RoundTripper, req *types return resBody.Res, nil } +type UnmarkServiceProviderEntitiesBody struct { + Req *types.UnmarkServiceProviderEntities `xml:"urn:vim25 UnmarkServiceProviderEntities,omitempty"` + Res *types.UnmarkServiceProviderEntitiesResponse `xml:"UnmarkServiceProviderEntitiesResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UnmarkServiceProviderEntitiesBody) Fault() *soap.Fault { return b.Fault_ } + +func UnmarkServiceProviderEntities(ctx context.Context, r soap.RoundTripper, req *types.UnmarkServiceProviderEntities) (*types.UnmarkServiceProviderEntitiesResponse, error) { + var reqBody, resBody UnmarkServiceProviderEntitiesBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type UnmountDiskMapping_TaskBody struct { Req *types.UnmountDiskMapping_Task `xml:"urn:vim25 UnmountDiskMapping_Task,omitempty"` - Res *types.UnmountDiskMapping_TaskResponse `xml:"urn:vim25 UnmountDiskMapping_TaskResponse,omitempty"` + Res 
*types.UnmountDiskMapping_TaskResponse `xml:"UnmountDiskMapping_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14925,7 +16045,7 @@ func UnmountDiskMapping_Task(ctx context.Context, r soap.RoundTripper, req *type type UnmountForceMountedVmfsVolumeBody struct { Req *types.UnmountForceMountedVmfsVolume `xml:"urn:vim25 UnmountForceMountedVmfsVolume,omitempty"` - Res *types.UnmountForceMountedVmfsVolumeResponse `xml:"urn:vim25 UnmountForceMountedVmfsVolumeResponse,omitempty"` + Res *types.UnmountForceMountedVmfsVolumeResponse `xml:"UnmountForceMountedVmfsVolumeResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14945,7 +16065,7 @@ func UnmountForceMountedVmfsVolume(ctx context.Context, r soap.RoundTripper, req type UnmountToolsInstallerBody struct { Req *types.UnmountToolsInstaller `xml:"urn:vim25 UnmountToolsInstaller,omitempty"` - Res *types.UnmountToolsInstallerResponse `xml:"urn:vim25 UnmountToolsInstallerResponse,omitempty"` + Res *types.UnmountToolsInstallerResponse `xml:"UnmountToolsInstallerResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14965,7 +16085,7 @@ func UnmountToolsInstaller(ctx context.Context, r soap.RoundTripper, req *types. type UnmountVffsVolumeBody struct { Req *types.UnmountVffsVolume `xml:"urn:vim25 UnmountVffsVolume,omitempty"` - Res *types.UnmountVffsVolumeResponse `xml:"urn:vim25 UnmountVffsVolumeResponse,omitempty"` + Res *types.UnmountVffsVolumeResponse `xml:"UnmountVffsVolumeResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -14985,7 +16105,7 @@ func UnmountVffsVolume(ctx context.Context, r soap.RoundTripper, req *types.Unmo type UnmountVmfsVolumeBody struct { Req *types.UnmountVmfsVolume `xml:"urn:vim25 UnmountVmfsVolume,omitempty"` - Res *types.UnmountVmfsVolumeResponse `xml:"urn:vim25 UnmountVmfsVolumeResponse,omitempty"` + Res *types.UnmountVmfsVolumeResponse `xml:"UnmountVmfsVolumeResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15005,7 +16125,7 @@ func UnmountVmfsVolume(ctx context.Context, r soap.RoundTripper, req *types.Unmo type UnmountVmfsVolumeEx_TaskBody struct { Req *types.UnmountVmfsVolumeEx_Task `xml:"urn:vim25 UnmountVmfsVolumeEx_Task,omitempty"` - Res *types.UnmountVmfsVolumeEx_TaskResponse `xml:"urn:vim25 UnmountVmfsVolumeEx_TaskResponse,omitempty"` + Res *types.UnmountVmfsVolumeEx_TaskResponse `xml:"UnmountVmfsVolumeEx_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15025,7 +16145,7 @@ func UnmountVmfsVolumeEx_Task(ctx context.Context, r soap.RoundTripper, req *typ type UnregisterAndDestroy_TaskBody struct { Req *types.UnregisterAndDestroy_Task `xml:"urn:vim25 UnregisterAndDestroy_Task,omitempty"` - Res *types.UnregisterAndDestroy_TaskResponse `xml:"urn:vim25 UnregisterAndDestroy_TaskResponse,omitempty"` + Res *types.UnregisterAndDestroy_TaskResponse `xml:"UnregisterAndDestroy_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15045,7 +16165,7 @@ func UnregisterAndDestroy_Task(ctx context.Context, r soap.RoundTripper, req *ty type UnregisterExtensionBody struct { Req *types.UnregisterExtension `xml:"urn:vim25 UnregisterExtension,omitempty"` - Res *types.UnregisterExtensionResponse `xml:"urn:vim25 
UnregisterExtensionResponse,omitempty"` + Res *types.UnregisterExtensionResponse `xml:"UnregisterExtensionResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15065,7 +16185,7 @@ func UnregisterExtension(ctx context.Context, r soap.RoundTripper, req *types.Un type UnregisterHealthUpdateProviderBody struct { Req *types.UnregisterHealthUpdateProvider `xml:"urn:vim25 UnregisterHealthUpdateProvider,omitempty"` - Res *types.UnregisterHealthUpdateProviderResponse `xml:"urn:vim25 UnregisterHealthUpdateProviderResponse,omitempty"` + Res *types.UnregisterHealthUpdateProviderResponse `xml:"UnregisterHealthUpdateProviderResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15083,9 +16203,29 @@ func UnregisterHealthUpdateProvider(ctx context.Context, r soap.RoundTripper, re return resBody.Res, nil } +type UnregisterKmsClusterBody struct { + Req *types.UnregisterKmsCluster `xml:"urn:vim25 UnregisterKmsCluster,omitempty"` + Res *types.UnregisterKmsClusterResponse `xml:"UnregisterKmsClusterResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UnregisterKmsClusterBody) Fault() *soap.Fault { return b.Fault_ } + +func UnregisterKmsCluster(ctx context.Context, r soap.RoundTripper, req *types.UnregisterKmsCluster) (*types.UnregisterKmsClusterResponse, error) { + var reqBody, resBody UnregisterKmsClusterBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type UnregisterVMBody struct { Req *types.UnregisterVM `xml:"urn:vim25 UnregisterVM,omitempty"` - Res *types.UnregisterVMResponse `xml:"urn:vim25 UnregisterVMResponse,omitempty"` + Res *types.UnregisterVMResponse `xml:"UnregisterVMResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15105,7 +16245,7 @@ func UnregisterVM(ctx context.Context, r soap.RoundTripper, req *types.Unregiste type UpdateAnswerFile_TaskBody struct { Req *types.UpdateAnswerFile_Task `xml:"urn:vim25 UpdateAnswerFile_Task,omitempty"` - Res *types.UpdateAnswerFile_TaskResponse `xml:"urn:vim25 UpdateAnswerFile_TaskResponse,omitempty"` + Res *types.UpdateAnswerFile_TaskResponse `xml:"UpdateAnswerFile_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15123,9 +16263,29 @@ func UpdateAnswerFile_Task(ctx context.Context, r soap.RoundTripper, req *types. 
return resBody.Res, nil } +type UpdateAssignableHardwareConfigBody struct { + Req *types.UpdateAssignableHardwareConfig `xml:"urn:vim25 UpdateAssignableHardwareConfig,omitempty"` + Res *types.UpdateAssignableHardwareConfigResponse `xml:"UpdateAssignableHardwareConfigResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateAssignableHardwareConfigBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateAssignableHardwareConfig(ctx context.Context, r soap.RoundTripper, req *types.UpdateAssignableHardwareConfig) (*types.UpdateAssignableHardwareConfigResponse, error) { + var reqBody, resBody UpdateAssignableHardwareConfigBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type UpdateAssignedLicenseBody struct { Req *types.UpdateAssignedLicense `xml:"urn:vim25 UpdateAssignedLicense,omitempty"` - Res *types.UpdateAssignedLicenseResponse `xml:"urn:vim25 UpdateAssignedLicenseResponse,omitempty"` + Res *types.UpdateAssignedLicenseResponse `xml:"UpdateAssignedLicenseResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15145,7 +16305,7 @@ func UpdateAssignedLicense(ctx context.Context, r soap.RoundTripper, req *types. type UpdateAuthorizationRoleBody struct { Req *types.UpdateAuthorizationRole `xml:"urn:vim25 UpdateAuthorizationRole,omitempty"` - Res *types.UpdateAuthorizationRoleResponse `xml:"urn:vim25 UpdateAuthorizationRoleResponse,omitempty"` + Res *types.UpdateAuthorizationRoleResponse `xml:"UpdateAuthorizationRoleResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15165,7 +16325,7 @@ func UpdateAuthorizationRole(ctx context.Context, r soap.RoundTripper, req *type type UpdateBootDeviceBody struct { Req *types.UpdateBootDevice `xml:"urn:vim25 UpdateBootDevice,omitempty"` - Res *types.UpdateBootDeviceResponse `xml:"urn:vim25 UpdateBootDeviceResponse,omitempty"` + Res *types.UpdateBootDeviceResponse `xml:"UpdateBootDeviceResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15185,7 +16345,7 @@ func UpdateBootDevice(ctx context.Context, r soap.RoundTripper, req *types.Updat type UpdateChildResourceConfigurationBody struct { Req *types.UpdateChildResourceConfiguration `xml:"urn:vim25 UpdateChildResourceConfiguration,omitempty"` - Res *types.UpdateChildResourceConfigurationResponse `xml:"urn:vim25 UpdateChildResourceConfigurationResponse,omitempty"` + Res *types.UpdateChildResourceConfigurationResponse `xml:"UpdateChildResourceConfigurationResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15205,7 +16365,7 @@ func UpdateChildResourceConfiguration(ctx context.Context, r soap.RoundTripper, type UpdateClusterProfileBody struct { Req *types.UpdateClusterProfile `xml:"urn:vim25 UpdateClusterProfile,omitempty"` - Res *types.UpdateClusterProfileResponse `xml:"urn:vim25 UpdateClusterProfileResponse,omitempty"` + Res *types.UpdateClusterProfileResponse `xml:"UpdateClusterProfileResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15225,7 +16385,7 @@ func UpdateClusterProfile(ctx context.Context, r soap.RoundTripper, req *types.U type UpdateConfigBody struct { Req *types.UpdateConfig `xml:"urn:vim25 UpdateConfig,omitempty"` - Res 
*types.UpdateConfigResponse `xml:"urn:vim25 UpdateConfigResponse,omitempty"` + Res *types.UpdateConfigResponse `xml:"UpdateConfigResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15245,7 +16405,7 @@ func UpdateConfig(ctx context.Context, r soap.RoundTripper, req *types.UpdateCon type UpdateConsoleIpRouteConfigBody struct { Req *types.UpdateConsoleIpRouteConfig `xml:"urn:vim25 UpdateConsoleIpRouteConfig,omitempty"` - Res *types.UpdateConsoleIpRouteConfigResponse `xml:"urn:vim25 UpdateConsoleIpRouteConfigResponse,omitempty"` + Res *types.UpdateConsoleIpRouteConfigResponse `xml:"UpdateConsoleIpRouteConfigResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15265,7 +16425,7 @@ func UpdateConsoleIpRouteConfig(ctx context.Context, r soap.RoundTripper, req *t type UpdateCounterLevelMappingBody struct { Req *types.UpdateCounterLevelMapping `xml:"urn:vim25 UpdateCounterLevelMapping,omitempty"` - Res *types.UpdateCounterLevelMappingResponse `xml:"urn:vim25 UpdateCounterLevelMappingResponse,omitempty"` + Res *types.UpdateCounterLevelMappingResponse `xml:"UpdateCounterLevelMappingResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15285,7 +16445,7 @@ func UpdateCounterLevelMapping(ctx context.Context, r soap.RoundTripper, req *ty type UpdateDVSHealthCheckConfig_TaskBody struct { Req *types.UpdateDVSHealthCheckConfig_Task `xml:"urn:vim25 UpdateDVSHealthCheckConfig_Task,omitempty"` - Res *types.UpdateDVSHealthCheckConfig_TaskResponse `xml:"urn:vim25 UpdateDVSHealthCheckConfig_TaskResponse,omitempty"` + Res *types.UpdateDVSHealthCheckConfig_TaskResponse `xml:"UpdateDVSHealthCheckConfig_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15305,7 +16465,7 @@ func UpdateDVSHealthCheckConfig_Task(ctx context.Context, r soap.RoundTripper, r type UpdateDVSLacpGroupConfig_TaskBody struct { Req *types.UpdateDVSLacpGroupConfig_Task `xml:"urn:vim25 UpdateDVSLacpGroupConfig_Task,omitempty"` - Res *types.UpdateDVSLacpGroupConfig_TaskResponse `xml:"urn:vim25 UpdateDVSLacpGroupConfig_TaskResponse,omitempty"` + Res *types.UpdateDVSLacpGroupConfig_TaskResponse `xml:"UpdateDVSLacpGroupConfig_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15325,7 +16485,7 @@ func UpdateDVSLacpGroupConfig_Task(ctx context.Context, r soap.RoundTripper, req type UpdateDateTimeBody struct { Req *types.UpdateDateTime `xml:"urn:vim25 UpdateDateTime,omitempty"` - Res *types.UpdateDateTimeResponse `xml:"urn:vim25 UpdateDateTimeResponse,omitempty"` + Res *types.UpdateDateTimeResponse `xml:"UpdateDateTimeResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15345,7 +16505,7 @@ func UpdateDateTime(ctx context.Context, r soap.RoundTripper, req *types.UpdateD type UpdateDateTimeConfigBody struct { Req *types.UpdateDateTimeConfig `xml:"urn:vim25 UpdateDateTimeConfig,omitempty"` - Res *types.UpdateDateTimeConfigResponse `xml:"urn:vim25 UpdateDateTimeConfigResponse,omitempty"` + Res *types.UpdateDateTimeConfigResponse `xml:"UpdateDateTimeConfigResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15365,7 +16525,7 @@ func UpdateDateTimeConfig(ctx context.Context, r soap.RoundTripper, req *types.U type UpdateDefaultPolicyBody struct { 
Req *types.UpdateDefaultPolicy `xml:"urn:vim25 UpdateDefaultPolicy,omitempty"` - Res *types.UpdateDefaultPolicyResponse `xml:"urn:vim25 UpdateDefaultPolicyResponse,omitempty"` + Res *types.UpdateDefaultPolicyResponse `xml:"UpdateDefaultPolicyResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15385,7 +16545,7 @@ func UpdateDefaultPolicy(ctx context.Context, r soap.RoundTripper, req *types.Up type UpdateDiskPartitionsBody struct { Req *types.UpdateDiskPartitions `xml:"urn:vim25 UpdateDiskPartitions,omitempty"` - Res *types.UpdateDiskPartitionsResponse `xml:"urn:vim25 UpdateDiskPartitionsResponse,omitempty"` + Res *types.UpdateDiskPartitionsResponse `xml:"UpdateDiskPartitionsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15405,7 +16565,7 @@ func UpdateDiskPartitions(ctx context.Context, r soap.RoundTripper, req *types.U type UpdateDnsConfigBody struct { Req *types.UpdateDnsConfig `xml:"urn:vim25 UpdateDnsConfig,omitempty"` - Res *types.UpdateDnsConfigResponse `xml:"urn:vim25 UpdateDnsConfigResponse,omitempty"` + Res *types.UpdateDnsConfigResponse `xml:"UpdateDnsConfigResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15425,7 +16585,7 @@ func UpdateDnsConfig(ctx context.Context, r soap.RoundTripper, req *types.Update type UpdateDvsCapabilityBody struct { Req *types.UpdateDvsCapability `xml:"urn:vim25 UpdateDvsCapability,omitempty"` - Res *types.UpdateDvsCapabilityResponse `xml:"urn:vim25 UpdateDvsCapabilityResponse,omitempty"` + Res *types.UpdateDvsCapabilityResponse `xml:"UpdateDvsCapabilityResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15445,7 +16605,7 @@ func UpdateDvsCapability(ctx context.Context, r soap.RoundTripper, req *types.Up type UpdateExtensionBody struct { Req *types.UpdateExtension `xml:"urn:vim25 UpdateExtension,omitempty"` - Res *types.UpdateExtensionResponse `xml:"urn:vim25 UpdateExtensionResponse,omitempty"` + Res *types.UpdateExtensionResponse `xml:"UpdateExtensionResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15465,7 +16625,7 @@ func UpdateExtension(ctx context.Context, r soap.RoundTripper, req *types.Update type UpdateFlagsBody struct { Req *types.UpdateFlags `xml:"urn:vim25 UpdateFlags,omitempty"` - Res *types.UpdateFlagsResponse `xml:"urn:vim25 UpdateFlagsResponse,omitempty"` + Res *types.UpdateFlagsResponse `xml:"UpdateFlagsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15485,7 +16645,7 @@ func UpdateFlags(ctx context.Context, r soap.RoundTripper, req *types.UpdateFlag type UpdateGraphicsConfigBody struct { Req *types.UpdateGraphicsConfig `xml:"urn:vim25 UpdateGraphicsConfig,omitempty"` - Res *types.UpdateGraphicsConfigResponse `xml:"urn:vim25 UpdateGraphicsConfigResponse,omitempty"` + Res *types.UpdateGraphicsConfigResponse `xml:"UpdateGraphicsConfigResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15503,29 +16663,9 @@ func UpdateGraphicsConfig(ctx context.Context, r soap.RoundTripper, req *types.U return resBody.Res, nil } -type UpdateHostCustomizations_TaskBody struct { - Req *types.UpdateHostCustomizations_Task `xml:"urn:vim25 UpdateHostCustomizations_Task,omitempty"` - Res *types.UpdateHostCustomizations_TaskResponse 
`xml:"urn:vim25 UpdateHostCustomizations_TaskResponse,omitempty"` - Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` -} - -func (b *UpdateHostCustomizations_TaskBody) Fault() *soap.Fault { return b.Fault_ } - -func UpdateHostCustomizations_Task(ctx context.Context, r soap.RoundTripper, req *types.UpdateHostCustomizations_Task) (*types.UpdateHostCustomizations_TaskResponse, error) { - var reqBody, resBody UpdateHostCustomizations_TaskBody - - reqBody.Req = req - - if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { - return nil, err - } - - return resBody.Res, nil -} - type UpdateHostImageAcceptanceLevelBody struct { Req *types.UpdateHostImageAcceptanceLevel `xml:"urn:vim25 UpdateHostImageAcceptanceLevel,omitempty"` - Res *types.UpdateHostImageAcceptanceLevelResponse `xml:"urn:vim25 UpdateHostImageAcceptanceLevelResponse,omitempty"` + Res *types.UpdateHostImageAcceptanceLevelResponse `xml:"UpdateHostImageAcceptanceLevelResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15545,7 +16685,7 @@ func UpdateHostImageAcceptanceLevel(ctx context.Context, r soap.RoundTripper, re type UpdateHostProfileBody struct { Req *types.UpdateHostProfile `xml:"urn:vim25 UpdateHostProfile,omitempty"` - Res *types.UpdateHostProfileResponse `xml:"urn:vim25 UpdateHostProfileResponse,omitempty"` + Res *types.UpdateHostProfileResponse `xml:"UpdateHostProfileResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15565,7 +16705,7 @@ func UpdateHostProfile(ctx context.Context, r soap.RoundTripper, req *types.Upda type UpdateHostSpecificationBody struct { Req *types.UpdateHostSpecification `xml:"urn:vim25 UpdateHostSpecification,omitempty"` - Res *types.UpdateHostSpecificationResponse `xml:"urn:vim25 UpdateHostSpecificationResponse,omitempty"` + Res *types.UpdateHostSpecificationResponse `xml:"UpdateHostSpecificationResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15585,7 +16725,7 @@ func UpdateHostSpecification(ctx context.Context, r soap.RoundTripper, req *type type UpdateHostSubSpecificationBody struct { Req *types.UpdateHostSubSpecification `xml:"urn:vim25 UpdateHostSubSpecification,omitempty"` - Res *types.UpdateHostSubSpecificationResponse `xml:"urn:vim25 UpdateHostSubSpecificationResponse,omitempty"` + Res *types.UpdateHostSubSpecificationResponse `xml:"UpdateHostSubSpecificationResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15603,9 +16743,29 @@ func UpdateHostSubSpecification(ctx context.Context, r soap.RoundTripper, req *t return resBody.Res, nil } +type UpdateHppMultipathLunPolicyBody struct { + Req *types.UpdateHppMultipathLunPolicy `xml:"urn:vim25 UpdateHppMultipathLunPolicy,omitempty"` + Res *types.UpdateHppMultipathLunPolicyResponse `xml:"UpdateHppMultipathLunPolicyResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateHppMultipathLunPolicyBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateHppMultipathLunPolicy(ctx context.Context, r soap.RoundTripper, req *types.UpdateHppMultipathLunPolicy) (*types.UpdateHppMultipathLunPolicyResponse, error) { + var reqBody, resBody UpdateHppMultipathLunPolicyBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type 
UpdateInternetScsiAdvancedOptionsBody struct { Req *types.UpdateInternetScsiAdvancedOptions `xml:"urn:vim25 UpdateInternetScsiAdvancedOptions,omitempty"` - Res *types.UpdateInternetScsiAdvancedOptionsResponse `xml:"urn:vim25 UpdateInternetScsiAdvancedOptionsResponse,omitempty"` + Res *types.UpdateInternetScsiAdvancedOptionsResponse `xml:"UpdateInternetScsiAdvancedOptionsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15625,7 +16785,7 @@ func UpdateInternetScsiAdvancedOptions(ctx context.Context, r soap.RoundTripper, type UpdateInternetScsiAliasBody struct { Req *types.UpdateInternetScsiAlias `xml:"urn:vim25 UpdateInternetScsiAlias,omitempty"` - Res *types.UpdateInternetScsiAliasResponse `xml:"urn:vim25 UpdateInternetScsiAliasResponse,omitempty"` + Res *types.UpdateInternetScsiAliasResponse `xml:"UpdateInternetScsiAliasResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15645,7 +16805,7 @@ func UpdateInternetScsiAlias(ctx context.Context, r soap.RoundTripper, req *type type UpdateInternetScsiAuthenticationPropertiesBody struct { Req *types.UpdateInternetScsiAuthenticationProperties `xml:"urn:vim25 UpdateInternetScsiAuthenticationProperties,omitempty"` - Res *types.UpdateInternetScsiAuthenticationPropertiesResponse `xml:"urn:vim25 UpdateInternetScsiAuthenticationPropertiesResponse,omitempty"` + Res *types.UpdateInternetScsiAuthenticationPropertiesResponse `xml:"UpdateInternetScsiAuthenticationPropertiesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15665,7 +16825,7 @@ func UpdateInternetScsiAuthenticationProperties(ctx context.Context, r soap.Roun type UpdateInternetScsiDigestPropertiesBody struct { Req *types.UpdateInternetScsiDigestProperties `xml:"urn:vim25 UpdateInternetScsiDigestProperties,omitempty"` - Res *types.UpdateInternetScsiDigestPropertiesResponse `xml:"urn:vim25 UpdateInternetScsiDigestPropertiesResponse,omitempty"` + Res *types.UpdateInternetScsiDigestPropertiesResponse `xml:"UpdateInternetScsiDigestPropertiesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15685,7 +16845,7 @@ func UpdateInternetScsiDigestProperties(ctx context.Context, r soap.RoundTripper type UpdateInternetScsiDiscoveryPropertiesBody struct { Req *types.UpdateInternetScsiDiscoveryProperties `xml:"urn:vim25 UpdateInternetScsiDiscoveryProperties,omitempty"` - Res *types.UpdateInternetScsiDiscoveryPropertiesResponse `xml:"urn:vim25 UpdateInternetScsiDiscoveryPropertiesResponse,omitempty"` + Res *types.UpdateInternetScsiDiscoveryPropertiesResponse `xml:"UpdateInternetScsiDiscoveryPropertiesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15705,7 +16865,7 @@ func UpdateInternetScsiDiscoveryProperties(ctx context.Context, r soap.RoundTrip type UpdateInternetScsiIPPropertiesBody struct { Req *types.UpdateInternetScsiIPProperties `xml:"urn:vim25 UpdateInternetScsiIPProperties,omitempty"` - Res *types.UpdateInternetScsiIPPropertiesResponse `xml:"urn:vim25 UpdateInternetScsiIPPropertiesResponse,omitempty"` + Res *types.UpdateInternetScsiIPPropertiesResponse `xml:"UpdateInternetScsiIPPropertiesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15725,7 +16885,7 @@ func UpdateInternetScsiIPProperties(ctx context.Context, r soap.RoundTripper, re type 
UpdateInternetScsiNameBody struct { Req *types.UpdateInternetScsiName `xml:"urn:vim25 UpdateInternetScsiName,omitempty"` - Res *types.UpdateInternetScsiNameResponse `xml:"urn:vim25 UpdateInternetScsiNameResponse,omitempty"` + Res *types.UpdateInternetScsiNameResponse `xml:"UpdateInternetScsiNameResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15745,7 +16905,7 @@ func UpdateInternetScsiName(ctx context.Context, r soap.RoundTripper, req *types type UpdateIpConfigBody struct { Req *types.UpdateIpConfig `xml:"urn:vim25 UpdateIpConfig,omitempty"` - Res *types.UpdateIpConfigResponse `xml:"urn:vim25 UpdateIpConfigResponse,omitempty"` + Res *types.UpdateIpConfigResponse `xml:"UpdateIpConfigResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15765,7 +16925,7 @@ func UpdateIpConfig(ctx context.Context, r soap.RoundTripper, req *types.UpdateI type UpdateIpPoolBody struct { Req *types.UpdateIpPool `xml:"urn:vim25 UpdateIpPool,omitempty"` - Res *types.UpdateIpPoolResponse `xml:"urn:vim25 UpdateIpPoolResponse,omitempty"` + Res *types.UpdateIpPoolResponse `xml:"UpdateIpPoolResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15785,7 +16945,7 @@ func UpdateIpPool(ctx context.Context, r soap.RoundTripper, req *types.UpdateIpP type UpdateIpRouteConfigBody struct { Req *types.UpdateIpRouteConfig `xml:"urn:vim25 UpdateIpRouteConfig,omitempty"` - Res *types.UpdateIpRouteConfigResponse `xml:"urn:vim25 UpdateIpRouteConfigResponse,omitempty"` + Res *types.UpdateIpRouteConfigResponse `xml:"UpdateIpRouteConfigResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15805,7 +16965,7 @@ func UpdateIpRouteConfig(ctx context.Context, r soap.RoundTripper, req *types.Up type UpdateIpRouteTableConfigBody struct { Req *types.UpdateIpRouteTableConfig `xml:"urn:vim25 UpdateIpRouteTableConfig,omitempty"` - Res *types.UpdateIpRouteTableConfigResponse `xml:"urn:vim25 UpdateIpRouteTableConfigResponse,omitempty"` + Res *types.UpdateIpRouteTableConfigResponse `xml:"UpdateIpRouteTableConfigResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15825,7 +16985,7 @@ func UpdateIpRouteTableConfig(ctx context.Context, r soap.RoundTripper, req *typ type UpdateIpmiBody struct { Req *types.UpdateIpmi `xml:"urn:vim25 UpdateIpmi,omitempty"` - Res *types.UpdateIpmiResponse `xml:"urn:vim25 UpdateIpmiResponse,omitempty"` + Res *types.UpdateIpmiResponse `xml:"UpdateIpmiResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15845,7 +17005,7 @@ func UpdateIpmi(ctx context.Context, r soap.RoundTripper, req *types.UpdateIpmi) type UpdateKmipServerBody struct { Req *types.UpdateKmipServer `xml:"urn:vim25 UpdateKmipServer,omitempty"` - Res *types.UpdateKmipServerResponse `xml:"urn:vim25 UpdateKmipServerResponse,omitempty"` + Res *types.UpdateKmipServerResponse `xml:"UpdateKmipServerResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15865,7 +17025,7 @@ func UpdateKmipServer(ctx context.Context, r soap.RoundTripper, req *types.Updat type UpdateKmsSignedCsrClientCertBody struct { Req *types.UpdateKmsSignedCsrClientCert `xml:"urn:vim25 UpdateKmsSignedCsrClientCert,omitempty"` - Res *types.UpdateKmsSignedCsrClientCertResponse `xml:"urn:vim25 
UpdateKmsSignedCsrClientCertResponse,omitempty"` + Res *types.UpdateKmsSignedCsrClientCertResponse `xml:"UpdateKmsSignedCsrClientCertResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15885,7 +17045,7 @@ func UpdateKmsSignedCsrClientCert(ctx context.Context, r soap.RoundTripper, req type UpdateLicenseBody struct { Req *types.UpdateLicense `xml:"urn:vim25 UpdateLicense,omitempty"` - Res *types.UpdateLicenseResponse `xml:"urn:vim25 UpdateLicenseResponse,omitempty"` + Res *types.UpdateLicenseResponse `xml:"UpdateLicenseResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15905,7 +17065,7 @@ func UpdateLicense(ctx context.Context, r soap.RoundTripper, req *types.UpdateLi type UpdateLicenseLabelBody struct { Req *types.UpdateLicenseLabel `xml:"urn:vim25 UpdateLicenseLabel,omitempty"` - Res *types.UpdateLicenseLabelResponse `xml:"urn:vim25 UpdateLicenseLabelResponse,omitempty"` + Res *types.UpdateLicenseLabelResponse `xml:"UpdateLicenseLabelResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15925,7 +17085,7 @@ func UpdateLicenseLabel(ctx context.Context, r soap.RoundTripper, req *types.Upd type UpdateLinkedChildrenBody struct { Req *types.UpdateLinkedChildren `xml:"urn:vim25 UpdateLinkedChildren,omitempty"` - Res *types.UpdateLinkedChildrenResponse `xml:"urn:vim25 UpdateLinkedChildrenResponse,omitempty"` + Res *types.UpdateLinkedChildrenResponse `xml:"UpdateLinkedChildrenResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15945,7 +17105,7 @@ func UpdateLinkedChildren(ctx context.Context, r soap.RoundTripper, req *types.U type UpdateLocalSwapDatastoreBody struct { Req *types.UpdateLocalSwapDatastore `xml:"urn:vim25 UpdateLocalSwapDatastore,omitempty"` - Res *types.UpdateLocalSwapDatastoreResponse `xml:"urn:vim25 UpdateLocalSwapDatastoreResponse,omitempty"` + Res *types.UpdateLocalSwapDatastoreResponse `xml:"UpdateLocalSwapDatastoreResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15965,7 +17125,7 @@ func UpdateLocalSwapDatastore(ctx context.Context, r soap.RoundTripper, req *typ type UpdateLockdownExceptionsBody struct { Req *types.UpdateLockdownExceptions `xml:"urn:vim25 UpdateLockdownExceptions,omitempty"` - Res *types.UpdateLockdownExceptionsResponse `xml:"urn:vim25 UpdateLockdownExceptionsResponse,omitempty"` + Res *types.UpdateLockdownExceptionsResponse `xml:"UpdateLockdownExceptionsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -15985,7 +17145,7 @@ func UpdateLockdownExceptions(ctx context.Context, r soap.RoundTripper, req *typ type UpdateModuleOptionStringBody struct { Req *types.UpdateModuleOptionString `xml:"urn:vim25 UpdateModuleOptionString,omitempty"` - Res *types.UpdateModuleOptionStringResponse `xml:"urn:vim25 UpdateModuleOptionStringResponse,omitempty"` + Res *types.UpdateModuleOptionStringResponse `xml:"UpdateModuleOptionStringResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16005,7 +17165,7 @@ func UpdateModuleOptionString(ctx context.Context, r soap.RoundTripper, req *typ type UpdateNetworkConfigBody struct { Req *types.UpdateNetworkConfig `xml:"urn:vim25 UpdateNetworkConfig,omitempty"` - Res *types.UpdateNetworkConfigResponse `xml:"urn:vim25 
UpdateNetworkConfigResponse,omitempty"` + Res *types.UpdateNetworkConfigResponse `xml:"UpdateNetworkConfigResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16025,7 +17185,7 @@ func UpdateNetworkConfig(ctx context.Context, r soap.RoundTripper, req *types.Up type UpdateNetworkResourcePoolBody struct { Req *types.UpdateNetworkResourcePool `xml:"urn:vim25 UpdateNetworkResourcePool,omitempty"` - Res *types.UpdateNetworkResourcePoolResponse `xml:"urn:vim25 UpdateNetworkResourcePoolResponse,omitempty"` + Res *types.UpdateNetworkResourcePoolResponse `xml:"UpdateNetworkResourcePoolResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16045,7 +17205,7 @@ func UpdateNetworkResourcePool(ctx context.Context, r soap.RoundTripper, req *ty type UpdateOptionsBody struct { Req *types.UpdateOptions `xml:"urn:vim25 UpdateOptions,omitempty"` - Res *types.UpdateOptionsResponse `xml:"urn:vim25 UpdateOptionsResponse,omitempty"` + Res *types.UpdateOptionsResponse `xml:"UpdateOptionsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16065,7 +17225,7 @@ func UpdateOptions(ctx context.Context, r soap.RoundTripper, req *types.UpdateOp type UpdatePassthruConfigBody struct { Req *types.UpdatePassthruConfig `xml:"urn:vim25 UpdatePassthruConfig,omitempty"` - Res *types.UpdatePassthruConfigResponse `xml:"urn:vim25 UpdatePassthruConfigResponse,omitempty"` + Res *types.UpdatePassthruConfigResponse `xml:"UpdatePassthruConfigResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16085,7 +17245,7 @@ func UpdatePassthruConfig(ctx context.Context, r soap.RoundTripper, req *types.U type UpdatePerfIntervalBody struct { Req *types.UpdatePerfInterval `xml:"urn:vim25 UpdatePerfInterval,omitempty"` - Res *types.UpdatePerfIntervalResponse `xml:"urn:vim25 UpdatePerfIntervalResponse,omitempty"` + Res *types.UpdatePerfIntervalResponse `xml:"UpdatePerfIntervalResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16105,7 +17265,7 @@ func UpdatePerfInterval(ctx context.Context, r soap.RoundTripper, req *types.Upd type UpdatePhysicalNicLinkSpeedBody struct { Req *types.UpdatePhysicalNicLinkSpeed `xml:"urn:vim25 UpdatePhysicalNicLinkSpeed,omitempty"` - Res *types.UpdatePhysicalNicLinkSpeedResponse `xml:"urn:vim25 UpdatePhysicalNicLinkSpeedResponse,omitempty"` + Res *types.UpdatePhysicalNicLinkSpeedResponse `xml:"UpdatePhysicalNicLinkSpeedResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16125,7 +17285,7 @@ func UpdatePhysicalNicLinkSpeed(ctx context.Context, r soap.RoundTripper, req *t type UpdatePortGroupBody struct { Req *types.UpdatePortGroup `xml:"urn:vim25 UpdatePortGroup,omitempty"` - Res *types.UpdatePortGroupResponse `xml:"urn:vim25 UpdatePortGroupResponse,omitempty"` + Res *types.UpdatePortGroupResponse `xml:"UpdatePortGroupResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16143,9 +17303,29 @@ func UpdatePortGroup(ctx context.Context, r soap.RoundTripper, req *types.Update return resBody.Res, nil } +type UpdateProductLockerLocation_TaskBody struct { + Req *types.UpdateProductLockerLocation_Task `xml:"urn:vim25 UpdateProductLockerLocation_Task,omitempty"` + Res *types.UpdateProductLockerLocation_TaskResponse 
`xml:"UpdateProductLockerLocation_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateProductLockerLocation_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateProductLockerLocation_Task(ctx context.Context, r soap.RoundTripper, req *types.UpdateProductLockerLocation_Task) (*types.UpdateProductLockerLocation_TaskResponse, error) { + var reqBody, resBody UpdateProductLockerLocation_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type UpdateProgressBody struct { Req *types.UpdateProgress `xml:"urn:vim25 UpdateProgress,omitempty"` - Res *types.UpdateProgressResponse `xml:"urn:vim25 UpdateProgressResponse,omitempty"` + Res *types.UpdateProgressResponse `xml:"UpdateProgressResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16165,7 +17345,7 @@ func UpdateProgress(ctx context.Context, r soap.RoundTripper, req *types.UpdateP type UpdateReferenceHostBody struct { Req *types.UpdateReferenceHost `xml:"urn:vim25 UpdateReferenceHost,omitempty"` - Res *types.UpdateReferenceHostResponse `xml:"urn:vim25 UpdateReferenceHostResponse,omitempty"` + Res *types.UpdateReferenceHostResponse `xml:"UpdateReferenceHostResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16185,7 +17365,7 @@ func UpdateReferenceHost(ctx context.Context, r soap.RoundTripper, req *types.Up type UpdateRulesetBody struct { Req *types.UpdateRuleset `xml:"urn:vim25 UpdateRuleset,omitempty"` - Res *types.UpdateRulesetResponse `xml:"urn:vim25 UpdateRulesetResponse,omitempty"` + Res *types.UpdateRulesetResponse `xml:"UpdateRulesetResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16205,7 +17385,7 @@ func UpdateRuleset(ctx context.Context, r soap.RoundTripper, req *types.UpdateRu type UpdateScsiLunDisplayNameBody struct { Req *types.UpdateScsiLunDisplayName `xml:"urn:vim25 UpdateScsiLunDisplayName,omitempty"` - Res *types.UpdateScsiLunDisplayNameResponse `xml:"urn:vim25 UpdateScsiLunDisplayNameResponse,omitempty"` + Res *types.UpdateScsiLunDisplayNameResponse `xml:"UpdateScsiLunDisplayNameResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16225,7 +17405,7 @@ func UpdateScsiLunDisplayName(ctx context.Context, r soap.RoundTripper, req *typ type UpdateSelfSignedClientCertBody struct { Req *types.UpdateSelfSignedClientCert `xml:"urn:vim25 UpdateSelfSignedClientCert,omitempty"` - Res *types.UpdateSelfSignedClientCertResponse `xml:"urn:vim25 UpdateSelfSignedClientCertResponse,omitempty"` + Res *types.UpdateSelfSignedClientCertResponse `xml:"UpdateSelfSignedClientCertResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16245,7 +17425,7 @@ func UpdateSelfSignedClientCert(ctx context.Context, r soap.RoundTripper, req *t type UpdateServiceConsoleVirtualNicBody struct { Req *types.UpdateServiceConsoleVirtualNic `xml:"urn:vim25 UpdateServiceConsoleVirtualNic,omitempty"` - Res *types.UpdateServiceConsoleVirtualNicResponse `xml:"urn:vim25 UpdateServiceConsoleVirtualNicResponse,omitempty"` + Res *types.UpdateServiceConsoleVirtualNicResponse `xml:"UpdateServiceConsoleVirtualNicResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ 
Fault,omitempty"` } @@ -16265,7 +17445,7 @@ func UpdateServiceConsoleVirtualNic(ctx context.Context, r soap.RoundTripper, re type UpdateServiceMessageBody struct { Req *types.UpdateServiceMessage `xml:"urn:vim25 UpdateServiceMessage,omitempty"` - Res *types.UpdateServiceMessageResponse `xml:"urn:vim25 UpdateServiceMessageResponse,omitempty"` + Res *types.UpdateServiceMessageResponse `xml:"UpdateServiceMessageResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16285,7 +17465,7 @@ func UpdateServiceMessage(ctx context.Context, r soap.RoundTripper, req *types.U type UpdateServicePolicyBody struct { Req *types.UpdateServicePolicy `xml:"urn:vim25 UpdateServicePolicy,omitempty"` - Res *types.UpdateServicePolicyResponse `xml:"urn:vim25 UpdateServicePolicyResponse,omitempty"` + Res *types.UpdateServicePolicyResponse `xml:"UpdateServicePolicyResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16305,7 +17485,7 @@ func UpdateServicePolicy(ctx context.Context, r soap.RoundTripper, req *types.Up type UpdateSoftwareInternetScsiEnabledBody struct { Req *types.UpdateSoftwareInternetScsiEnabled `xml:"urn:vim25 UpdateSoftwareInternetScsiEnabled,omitempty"` - Res *types.UpdateSoftwareInternetScsiEnabledResponse `xml:"urn:vim25 UpdateSoftwareInternetScsiEnabledResponse,omitempty"` + Res *types.UpdateSoftwareInternetScsiEnabledResponse `xml:"UpdateSoftwareInternetScsiEnabledResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16325,7 +17505,7 @@ func UpdateSoftwareInternetScsiEnabled(ctx context.Context, r soap.RoundTripper, type UpdateSystemResourcesBody struct { Req *types.UpdateSystemResources `xml:"urn:vim25 UpdateSystemResources,omitempty"` - Res *types.UpdateSystemResourcesResponse `xml:"urn:vim25 UpdateSystemResourcesResponse,omitempty"` + Res *types.UpdateSystemResourcesResponse `xml:"UpdateSystemResourcesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16345,7 +17525,7 @@ func UpdateSystemResources(ctx context.Context, r soap.RoundTripper, req *types. 
type UpdateSystemSwapConfigurationBody struct { Req *types.UpdateSystemSwapConfiguration `xml:"urn:vim25 UpdateSystemSwapConfiguration,omitempty"` - Res *types.UpdateSystemSwapConfigurationResponse `xml:"urn:vim25 UpdateSystemSwapConfigurationResponse,omitempty"` + Res *types.UpdateSystemSwapConfigurationResponse `xml:"UpdateSystemSwapConfigurationResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16365,7 +17545,7 @@ func UpdateSystemSwapConfiguration(ctx context.Context, r soap.RoundTripper, req type UpdateSystemUsersBody struct { Req *types.UpdateSystemUsers `xml:"urn:vim25 UpdateSystemUsers,omitempty"` - Res *types.UpdateSystemUsersResponse `xml:"urn:vim25 UpdateSystemUsersResponse,omitempty"` + Res *types.UpdateSystemUsersResponse `xml:"UpdateSystemUsersResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16385,7 +17565,7 @@ func UpdateSystemUsers(ctx context.Context, r soap.RoundTripper, req *types.Upda type UpdateUserBody struct { Req *types.UpdateUser `xml:"urn:vim25 UpdateUser,omitempty"` - Res *types.UpdateUserResponse `xml:"urn:vim25 UpdateUserResponse,omitempty"` + Res *types.UpdateUserResponse `xml:"UpdateUserResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16405,7 +17585,7 @@ func UpdateUser(ctx context.Context, r soap.RoundTripper, req *types.UpdateUser) type UpdateVAppConfigBody struct { Req *types.UpdateVAppConfig `xml:"urn:vim25 UpdateVAppConfig,omitempty"` - Res *types.UpdateVAppConfigResponse `xml:"urn:vim25 UpdateVAppConfigResponse,omitempty"` + Res *types.UpdateVAppConfigResponse `xml:"UpdateVAppConfigResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16425,7 +17605,7 @@ func UpdateVAppConfig(ctx context.Context, r soap.RoundTripper, req *types.Updat type UpdateVStorageInfrastructureObjectPolicy_TaskBody struct { Req *types.UpdateVStorageInfrastructureObjectPolicy_Task `xml:"urn:vim25 UpdateVStorageInfrastructureObjectPolicy_Task,omitempty"` - Res *types.UpdateVStorageInfrastructureObjectPolicy_TaskResponse `xml:"urn:vim25 UpdateVStorageInfrastructureObjectPolicy_TaskResponse,omitempty"` + Res *types.UpdateVStorageInfrastructureObjectPolicy_TaskResponse `xml:"UpdateVStorageInfrastructureObjectPolicy_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16443,9 +17623,29 @@ func UpdateVStorageInfrastructureObjectPolicy_Task(ctx context.Context, r soap.R return resBody.Res, nil } +type UpdateVStorageObjectCrypto_TaskBody struct { + Req *types.UpdateVStorageObjectCrypto_Task `xml:"urn:vim25 UpdateVStorageObjectCrypto_Task,omitempty"` + Res *types.UpdateVStorageObjectCrypto_TaskResponse `xml:"UpdateVStorageObjectCrypto_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *UpdateVStorageObjectCrypto_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func UpdateVStorageObjectCrypto_Task(ctx context.Context, r soap.RoundTripper, req *types.UpdateVStorageObjectCrypto_Task) (*types.UpdateVStorageObjectCrypto_TaskResponse, error) { + var reqBody, resBody UpdateVStorageObjectCrypto_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type UpdateVStorageObjectPolicy_TaskBody struct { Req 
*types.UpdateVStorageObjectPolicy_Task `xml:"urn:vim25 UpdateVStorageObjectPolicy_Task,omitempty"` - Res *types.UpdateVStorageObjectPolicy_TaskResponse `xml:"urn:vim25 UpdateVStorageObjectPolicy_TaskResponse,omitempty"` + Res *types.UpdateVStorageObjectPolicy_TaskResponse `xml:"UpdateVStorageObjectPolicy_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16465,7 +17665,7 @@ func UpdateVStorageObjectPolicy_Task(ctx context.Context, r soap.RoundTripper, r type UpdateVVolVirtualMachineFiles_TaskBody struct { Req *types.UpdateVVolVirtualMachineFiles_Task `xml:"urn:vim25 UpdateVVolVirtualMachineFiles_Task,omitempty"` - Res *types.UpdateVVolVirtualMachineFiles_TaskResponse `xml:"urn:vim25 UpdateVVolVirtualMachineFiles_TaskResponse,omitempty"` + Res *types.UpdateVVolVirtualMachineFiles_TaskResponse `xml:"UpdateVVolVirtualMachineFiles_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16485,7 +17685,7 @@ func UpdateVVolVirtualMachineFiles_Task(ctx context.Context, r soap.RoundTripper type UpdateVirtualMachineFiles_TaskBody struct { Req *types.UpdateVirtualMachineFiles_Task `xml:"urn:vim25 UpdateVirtualMachineFiles_Task,omitempty"` - Res *types.UpdateVirtualMachineFiles_TaskResponse `xml:"urn:vim25 UpdateVirtualMachineFiles_TaskResponse,omitempty"` + Res *types.UpdateVirtualMachineFiles_TaskResponse `xml:"UpdateVirtualMachineFiles_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16505,7 +17705,7 @@ func UpdateVirtualMachineFiles_Task(ctx context.Context, r soap.RoundTripper, re type UpdateVirtualNicBody struct { Req *types.UpdateVirtualNic `xml:"urn:vim25 UpdateVirtualNic,omitempty"` - Res *types.UpdateVirtualNicResponse `xml:"urn:vim25 UpdateVirtualNicResponse,omitempty"` + Res *types.UpdateVirtualNicResponse `xml:"UpdateVirtualNicResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16525,7 +17725,7 @@ func UpdateVirtualNic(ctx context.Context, r soap.RoundTripper, req *types.Updat type UpdateVirtualSwitchBody struct { Req *types.UpdateVirtualSwitch `xml:"urn:vim25 UpdateVirtualSwitch,omitempty"` - Res *types.UpdateVirtualSwitchResponse `xml:"urn:vim25 UpdateVirtualSwitchResponse,omitempty"` + Res *types.UpdateVirtualSwitchResponse `xml:"UpdateVirtualSwitchResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16545,7 +17745,7 @@ func UpdateVirtualSwitch(ctx context.Context, r soap.RoundTripper, req *types.Up type UpdateVmfsUnmapBandwidthBody struct { Req *types.UpdateVmfsUnmapBandwidth `xml:"urn:vim25 UpdateVmfsUnmapBandwidth,omitempty"` - Res *types.UpdateVmfsUnmapBandwidthResponse `xml:"urn:vim25 UpdateVmfsUnmapBandwidthResponse,omitempty"` + Res *types.UpdateVmfsUnmapBandwidthResponse `xml:"UpdateVmfsUnmapBandwidthResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16565,7 +17765,7 @@ func UpdateVmfsUnmapBandwidth(ctx context.Context, r soap.RoundTripper, req *typ type UpdateVmfsUnmapPriorityBody struct { Req *types.UpdateVmfsUnmapPriority `xml:"urn:vim25 UpdateVmfsUnmapPriority,omitempty"` - Res *types.UpdateVmfsUnmapPriorityResponse `xml:"urn:vim25 UpdateVmfsUnmapPriorityResponse,omitempty"` + Res *types.UpdateVmfsUnmapPriorityResponse `xml:"UpdateVmfsUnmapPriorityResponse,omitempty"` Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16585,7 +17785,7 @@ func UpdateVmfsUnmapPriority(ctx context.Context, r soap.RoundTripper, req *type type UpdateVsan_TaskBody struct { Req *types.UpdateVsan_Task `xml:"urn:vim25 UpdateVsan_Task,omitempty"` - Res *types.UpdateVsan_TaskResponse `xml:"urn:vim25 UpdateVsan_TaskResponse,omitempty"` + Res *types.UpdateVsan_TaskResponse `xml:"UpdateVsan_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16605,7 +17805,7 @@ func UpdateVsan_Task(ctx context.Context, r soap.RoundTripper, req *types.Update type UpgradeIoFilter_TaskBody struct { Req *types.UpgradeIoFilter_Task `xml:"urn:vim25 UpgradeIoFilter_Task,omitempty"` - Res *types.UpgradeIoFilter_TaskResponse `xml:"urn:vim25 UpgradeIoFilter_TaskResponse,omitempty"` + Res *types.UpgradeIoFilter_TaskResponse `xml:"UpgradeIoFilter_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16625,7 +17825,7 @@ func UpgradeIoFilter_Task(ctx context.Context, r soap.RoundTripper, req *types.U type UpgradeTools_TaskBody struct { Req *types.UpgradeTools_Task `xml:"urn:vim25 UpgradeTools_Task,omitempty"` - Res *types.UpgradeTools_TaskResponse `xml:"urn:vim25 UpgradeTools_TaskResponse,omitempty"` + Res *types.UpgradeTools_TaskResponse `xml:"UpgradeTools_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16645,7 +17845,7 @@ func UpgradeTools_Task(ctx context.Context, r soap.RoundTripper, req *types.Upgr type UpgradeVM_TaskBody struct { Req *types.UpgradeVM_Task `xml:"urn:vim25 UpgradeVM_Task,omitempty"` - Res *types.UpgradeVM_TaskResponse `xml:"urn:vim25 UpgradeVM_TaskResponse,omitempty"` + Res *types.UpgradeVM_TaskResponse `xml:"UpgradeVM_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16665,7 +17865,7 @@ func UpgradeVM_Task(ctx context.Context, r soap.RoundTripper, req *types.Upgrade type UpgradeVmLayoutBody struct { Req *types.UpgradeVmLayout `xml:"urn:vim25 UpgradeVmLayout,omitempty"` - Res *types.UpgradeVmLayoutResponse `xml:"urn:vim25 UpgradeVmLayoutResponse,omitempty"` + Res *types.UpgradeVmLayoutResponse `xml:"UpgradeVmLayoutResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16685,7 +17885,7 @@ func UpgradeVmLayout(ctx context.Context, r soap.RoundTripper, req *types.Upgrad type UpgradeVmfsBody struct { Req *types.UpgradeVmfs `xml:"urn:vim25 UpgradeVmfs,omitempty"` - Res *types.UpgradeVmfsResponse `xml:"urn:vim25 UpgradeVmfsResponse,omitempty"` + Res *types.UpgradeVmfsResponse `xml:"UpgradeVmfsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16705,7 +17905,7 @@ func UpgradeVmfs(ctx context.Context, r soap.RoundTripper, req *types.UpgradeVmf type UpgradeVsanObjectsBody struct { Req *types.UpgradeVsanObjects `xml:"urn:vim25 UpgradeVsanObjects,omitempty"` - Res *types.UpgradeVsanObjectsResponse `xml:"urn:vim25 UpgradeVsanObjectsResponse,omitempty"` + Res *types.UpgradeVsanObjectsResponse `xml:"UpgradeVsanObjectsResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16725,7 +17925,7 @@ func UpgradeVsanObjects(ctx context.Context, r soap.RoundTripper, req *types.Upg type UploadClientCertBody struct { Req *types.UploadClientCert `xml:"urn:vim25 
UploadClientCert,omitempty"` - Res *types.UploadClientCertResponse `xml:"urn:vim25 UploadClientCertResponse,omitempty"` + Res *types.UploadClientCertResponse `xml:"UploadClientCertResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16745,7 +17945,7 @@ func UploadClientCert(ctx context.Context, r soap.RoundTripper, req *types.Uploa type UploadKmipServerCertBody struct { Req *types.UploadKmipServerCert `xml:"urn:vim25 UploadKmipServerCert,omitempty"` - Res *types.UploadKmipServerCertResponse `xml:"urn:vim25 UploadKmipServerCertResponse,omitempty"` + Res *types.UploadKmipServerCertResponse `xml:"UploadKmipServerCertResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16763,9 +17963,29 @@ func UploadKmipServerCert(ctx context.Context, r soap.RoundTripper, req *types.U return resBody.Res, nil } +type VCenterUpdateVStorageObjectMetadataEx_TaskBody struct { + Req *types.VCenterUpdateVStorageObjectMetadataEx_Task `xml:"urn:vim25 VCenterUpdateVStorageObjectMetadataEx_Task,omitempty"` + Res *types.VCenterUpdateVStorageObjectMetadataEx_TaskResponse `xml:"VCenterUpdateVStorageObjectMetadataEx_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *VCenterUpdateVStorageObjectMetadataEx_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func VCenterUpdateVStorageObjectMetadataEx_Task(ctx context.Context, r soap.RoundTripper, req *types.VCenterUpdateVStorageObjectMetadataEx_Task) (*types.VCenterUpdateVStorageObjectMetadataEx_TaskResponse, error) { + var reqBody, resBody VCenterUpdateVStorageObjectMetadataEx_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type VStorageObjectCreateSnapshot_TaskBody struct { Req *types.VStorageObjectCreateSnapshot_Task `xml:"urn:vim25 VStorageObjectCreateSnapshot_Task,omitempty"` - Res *types.VStorageObjectCreateSnapshot_TaskResponse `xml:"urn:vim25 VStorageObjectCreateSnapshot_TaskResponse,omitempty"` + Res *types.VStorageObjectCreateSnapshot_TaskResponse `xml:"VStorageObjectCreateSnapshot_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16785,7 +18005,7 @@ func VStorageObjectCreateSnapshot_Task(ctx context.Context, r soap.RoundTripper, type ValidateCredentialsInGuestBody struct { Req *types.ValidateCredentialsInGuest `xml:"urn:vim25 ValidateCredentialsInGuest,omitempty"` - Res *types.ValidateCredentialsInGuestResponse `xml:"urn:vim25 ValidateCredentialsInGuestResponse,omitempty"` + Res *types.ValidateCredentialsInGuestResponse `xml:"ValidateCredentialsInGuestResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16803,9 +18023,29 @@ func ValidateCredentialsInGuest(ctx context.Context, r soap.RoundTripper, req *t return resBody.Res, nil } +type ValidateHCIConfigurationBody struct { + Req *types.ValidateHCIConfiguration `xml:"urn:vim25 ValidateHCIConfiguration,omitempty"` + Res *types.ValidateHCIConfigurationResponse `xml:"ValidateHCIConfigurationResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ValidateHCIConfigurationBody) Fault() *soap.Fault { return b.Fault_ } + +func ValidateHCIConfiguration(ctx context.Context, r soap.RoundTripper, req *types.ValidateHCIConfiguration) 
(*types.ValidateHCIConfigurationResponse, error) { + var reqBody, resBody ValidateHCIConfigurationBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type ValidateHostBody struct { Req *types.ValidateHost `xml:"urn:vim25 ValidateHost,omitempty"` - Res *types.ValidateHostResponse `xml:"urn:vim25 ValidateHostResponse,omitempty"` + Res *types.ValidateHostResponse `xml:"ValidateHostResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16825,7 +18065,7 @@ func ValidateHost(ctx context.Context, r soap.RoundTripper, req *types.ValidateH type ValidateHostProfileComposition_TaskBody struct { Req *types.ValidateHostProfileComposition_Task `xml:"urn:vim25 ValidateHostProfileComposition_Task,omitempty"` - Res *types.ValidateHostProfileComposition_TaskResponse `xml:"urn:vim25 ValidateHostProfileComposition_TaskResponse,omitempty"` + Res *types.ValidateHostProfileComposition_TaskResponse `xml:"ValidateHostProfileComposition_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16845,7 +18085,7 @@ func ValidateHostProfileComposition_Task(ctx context.Context, r soap.RoundTrippe type ValidateMigrationBody struct { Req *types.ValidateMigration `xml:"urn:vim25 ValidateMigration,omitempty"` - Res *types.ValidateMigrationResponse `xml:"urn:vim25 ValidateMigrationResponse,omitempty"` + Res *types.ValidateMigrationResponse `xml:"ValidateMigrationResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16865,7 +18105,7 @@ func ValidateMigration(ctx context.Context, r soap.RoundTripper, req *types.Vali type ValidateStoragePodConfigBody struct { Req *types.ValidateStoragePodConfig `xml:"urn:vim25 ValidateStoragePodConfig,omitempty"` - Res *types.ValidateStoragePodConfigResponse `xml:"urn:vim25 ValidateStoragePodConfigResponse,omitempty"` + Res *types.ValidateStoragePodConfigResponse `xml:"ValidateStoragePodConfigResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16883,9 +18123,29 @@ func ValidateStoragePodConfig(ctx context.Context, r soap.RoundTripper, req *typ return resBody.Res, nil } +type VstorageObjectVCenterQueryChangedDiskAreasBody struct { + Req *types.VstorageObjectVCenterQueryChangedDiskAreas `xml:"urn:vim25 VstorageObjectVCenterQueryChangedDiskAreas,omitempty"` + Res *types.VstorageObjectVCenterQueryChangedDiskAreasResponse `xml:"VstorageObjectVCenterQueryChangedDiskAreasResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *VstorageObjectVCenterQueryChangedDiskAreasBody) Fault() *soap.Fault { return b.Fault_ } + +func VstorageObjectVCenterQueryChangedDiskAreas(ctx context.Context, r soap.RoundTripper, req *types.VstorageObjectVCenterQueryChangedDiskAreas) (*types.VstorageObjectVCenterQueryChangedDiskAreasResponse, error) { + var reqBody, resBody VstorageObjectVCenterQueryChangedDiskAreasBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type WaitForUpdatesBody struct { Req *types.WaitForUpdates `xml:"urn:vim25 WaitForUpdates,omitempty"` - Res *types.WaitForUpdatesResponse `xml:"urn:vim25 WaitForUpdatesResponse,omitempty"` + Res *types.WaitForUpdatesResponse `xml:"WaitForUpdatesResponse,omitempty"` Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16905,7 +18165,7 @@ func WaitForUpdates(ctx context.Context, r soap.RoundTripper, req *types.WaitFor type WaitForUpdatesExBody struct { Req *types.WaitForUpdatesEx `xml:"urn:vim25 WaitForUpdatesEx,omitempty"` - Res *types.WaitForUpdatesExResponse `xml:"urn:vim25 WaitForUpdatesExResponse,omitempty"` + Res *types.WaitForUpdatesExResponse `xml:"WaitForUpdatesExResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16925,7 +18185,7 @@ func WaitForUpdatesEx(ctx context.Context, r soap.RoundTripper, req *types.WaitF type XmlToCustomizationSpecItemBody struct { Req *types.XmlToCustomizationSpecItem `xml:"urn:vim25 XmlToCustomizationSpecItem,omitempty"` - Res *types.XmlToCustomizationSpecItemResponse `xml:"urn:vim25 XmlToCustomizationSpecItemResponse,omitempty"` + Res *types.XmlToCustomizationSpecItemResponse `xml:"XmlToCustomizationSpecItemResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16945,7 +18205,7 @@ func XmlToCustomizationSpecItem(ctx context.Context, r soap.RoundTripper, req *t type ZeroFillVirtualDisk_TaskBody struct { Req *types.ZeroFillVirtualDisk_Task `xml:"urn:vim25 ZeroFillVirtualDisk_Task,omitempty"` - Res *types.ZeroFillVirtualDisk_TaskResponse `xml:"urn:vim25 ZeroFillVirtualDisk_TaskResponse,omitempty"` + Res *types.ZeroFillVirtualDisk_TaskResponse `xml:"ZeroFillVirtualDisk_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16965,7 +18225,7 @@ func ZeroFillVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *typ type ConfigureVcha_TaskBody struct { Req *types.ConfigureVcha_Task `xml:"urn:vim25 configureVcha_Task,omitempty"` - Res *types.ConfigureVcha_TaskResponse `xml:"urn:vim25 configureVcha_TaskResponse,omitempty"` + Res *types.ConfigureVcha_TaskResponse `xml:"configureVcha_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -16985,7 +18245,7 @@ func ConfigureVcha_Task(ctx context.Context, r soap.RoundTripper, req *types.Con type CreatePassiveNode_TaskBody struct { Req *types.CreatePassiveNode_Task `xml:"urn:vim25 createPassiveNode_Task,omitempty"` - Res *types.CreatePassiveNode_TaskResponse `xml:"urn:vim25 createPassiveNode_TaskResponse,omitempty"` + Res *types.CreatePassiveNode_TaskResponse `xml:"createPassiveNode_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -17005,7 +18265,7 @@ func CreatePassiveNode_Task(ctx context.Context, r soap.RoundTripper, req *types type CreateWitnessNode_TaskBody struct { Req *types.CreateWitnessNode_Task `xml:"urn:vim25 createWitnessNode_Task,omitempty"` - Res *types.CreateWitnessNode_TaskResponse `xml:"urn:vim25 createWitnessNode_TaskResponse,omitempty"` + Res *types.CreateWitnessNode_TaskResponse `xml:"createWitnessNode_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -17025,7 +18285,7 @@ func CreateWitnessNode_Task(ctx context.Context, r soap.RoundTripper, req *types type DeployVcha_TaskBody struct { Req *types.DeployVcha_Task `xml:"urn:vim25 deployVcha_Task,omitempty"` - Res *types.DeployVcha_TaskResponse `xml:"urn:vim25 deployVcha_TaskResponse,omitempty"` + Res *types.DeployVcha_TaskResponse `xml:"deployVcha_TaskResponse,omitempty"` Fault_ *soap.Fault 
`xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -17045,7 +18305,7 @@ func DeployVcha_Task(ctx context.Context, r soap.RoundTripper, req *types.Deploy type DestroyVcha_TaskBody struct { Req *types.DestroyVcha_Task `xml:"urn:vim25 destroyVcha_Task,omitempty"` - Res *types.DestroyVcha_TaskResponse `xml:"urn:vim25 destroyVcha_TaskResponse,omitempty"` + Res *types.DestroyVcha_TaskResponse `xml:"destroyVcha_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -17065,7 +18325,7 @@ func DestroyVcha_Task(ctx context.Context, r soap.RoundTripper, req *types.Destr type FetchSoftwarePackagesBody struct { Req *types.FetchSoftwarePackages `xml:"urn:vim25 fetchSoftwarePackages,omitempty"` - Res *types.FetchSoftwarePackagesResponse `xml:"urn:vim25 fetchSoftwarePackagesResponse,omitempty"` + Res *types.FetchSoftwarePackagesResponse `xml:"fetchSoftwarePackagesResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -17085,7 +18345,7 @@ func FetchSoftwarePackages(ctx context.Context, r soap.RoundTripper, req *types. type GetClusterModeBody struct { Req *types.GetClusterMode `xml:"urn:vim25 getClusterMode,omitempty"` - Res *types.GetClusterModeResponse `xml:"urn:vim25 getClusterModeResponse,omitempty"` + Res *types.GetClusterModeResponse `xml:"getClusterModeResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -17105,7 +18365,7 @@ func GetClusterMode(ctx context.Context, r soap.RoundTripper, req *types.GetClus type GetVchaConfigBody struct { Req *types.GetVchaConfig `xml:"urn:vim25 getVchaConfig,omitempty"` - Res *types.GetVchaConfigResponse `xml:"urn:vim25 getVchaConfigResponse,omitempty"` + Res *types.GetVchaConfigResponse `xml:"getVchaConfigResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -17125,7 +18385,7 @@ func GetVchaConfig(ctx context.Context, r soap.RoundTripper, req *types.GetVchaC type InitiateFailover_TaskBody struct { Req *types.InitiateFailover_Task `xml:"urn:vim25 initiateFailover_Task,omitempty"` - Res *types.InitiateFailover_TaskResponse `xml:"urn:vim25 initiateFailover_TaskResponse,omitempty"` + Res *types.InitiateFailover_TaskResponse `xml:"initiateFailover_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -17145,7 +18405,7 @@ func InitiateFailover_Task(ctx context.Context, r soap.RoundTripper, req *types. 
type InstallDateBody struct { Req *types.InstallDate `xml:"urn:vim25 installDate,omitempty"` - Res *types.InstallDateResponse `xml:"urn:vim25 installDateResponse,omitempty"` + Res *types.InstallDateResponse `xml:"installDateResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -17165,7 +18425,7 @@ func InstallDate(ctx context.Context, r soap.RoundTripper, req *types.InstallDat type PrepareVcha_TaskBody struct { Req *types.PrepareVcha_Task `xml:"urn:vim25 prepareVcha_Task,omitempty"` - Res *types.PrepareVcha_TaskResponse `xml:"urn:vim25 prepareVcha_TaskResponse,omitempty"` + Res *types.PrepareVcha_TaskResponse `xml:"prepareVcha_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -17185,7 +18445,7 @@ func PrepareVcha_Task(ctx context.Context, r soap.RoundTripper, req *types.Prepa type QueryDatacenterConfigOptionDescriptorBody struct { Req *types.QueryDatacenterConfigOptionDescriptor `xml:"urn:vim25 queryDatacenterConfigOptionDescriptor,omitempty"` - Res *types.QueryDatacenterConfigOptionDescriptorResponse `xml:"urn:vim25 queryDatacenterConfigOptionDescriptorResponse,omitempty"` + Res *types.QueryDatacenterConfigOptionDescriptorResponse `xml:"queryDatacenterConfigOptionDescriptorResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -17205,7 +18465,7 @@ func QueryDatacenterConfigOptionDescriptor(ctx context.Context, r soap.RoundTrip type ReloadVirtualMachineFromPath_TaskBody struct { Req *types.ReloadVirtualMachineFromPath_Task `xml:"urn:vim25 reloadVirtualMachineFromPath_Task,omitempty"` - Res *types.ReloadVirtualMachineFromPath_TaskResponse `xml:"urn:vim25 reloadVirtualMachineFromPath_TaskResponse,omitempty"` + Res *types.ReloadVirtualMachineFromPath_TaskResponse `xml:"reloadVirtualMachineFromPath_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -17225,7 +18485,7 @@ func ReloadVirtualMachineFromPath_Task(ctx context.Context, r soap.RoundTripper, type SetClusterMode_TaskBody struct { Req *types.SetClusterMode_Task `xml:"urn:vim25 setClusterMode_Task,omitempty"` - Res *types.SetClusterMode_TaskResponse `xml:"urn:vim25 setClusterMode_TaskResponse,omitempty"` + Res *types.SetClusterMode_TaskResponse `xml:"setClusterMode_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -17245,7 +18505,7 @@ func SetClusterMode_Task(ctx context.Context, r soap.RoundTripper, req *types.Se type SetCustomValueBody struct { Req *types.SetCustomValue `xml:"urn:vim25 setCustomValue,omitempty"` - Res *types.SetCustomValueResponse `xml:"urn:vim25 setCustomValueResponse,omitempty"` + Res *types.SetCustomValueResponse `xml:"setCustomValueResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } @@ -17265,7 +18525,7 @@ func SetCustomValue(ctx context.Context, r soap.RoundTripper, req *types.SetCust type UnregisterVApp_TaskBody struct { Req *types.UnregisterVApp_Task `xml:"urn:vim25 unregisterVApp_Task,omitempty"` - Res *types.UnregisterVApp_TaskResponse `xml:"urn:vim25 unregisterVApp_TaskResponse,omitempty"` + Res *types.UnregisterVApp_TaskResponse `xml:"unregisterVApp_TaskResponse,omitempty"` Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` } diff --git a/vendor/github.com/vmware/govmomi/vim25/methods/unreleased.go 
b/vendor/github.com/vmware/govmomi/vim25/methods/unreleased.go new file mode 100644 index 000000000..a0ffff8c3 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/methods/unreleased.go @@ -0,0 +1,44 @@ +/* + Copyright (c) 2022 VMware, Inc. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package methods + +import ( + "context" + + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" +) + +type PlaceVmsXClusterBody struct { + Req *types.PlaceVmsXCluster `xml:"urn:vim25 PlaceVmsXCluster,omitempty"` + Res *types.PlaceVmsXClusterResponse `xml:"PlaceVmsXClusterResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *PlaceVmsXClusterBody) Fault() *soap.Fault { return b.Fault_ } + +func PlaceVmsXCluster(ctx context.Context, r soap.RoundTripper, req *types.PlaceVmsXCluster) (*types.PlaceVmsXClusterResponse, error) { + var reqBody, resBody PlaceVmsXClusterBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} diff --git a/vendor/github.com/vmware/govmomi/vim25/mo/mo.go b/vendor/github.com/vmware/govmomi/vim25/mo/mo.go index 4f19988e3..427d579ee 100644 --- a/vendor/github.com/vmware/govmomi/vim25/mo/mo.go +++ b/vendor/github.com/vmware/govmomi/vim25/mo/mo.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2014-2018 VMware, Inc. All Rights Reserved. +Copyright (c) 2014-2021 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -79,12 +79,13 @@ func init() { type ClusterComputeResource struct { ComputeResource - Configuration types.ClusterConfigInfo `mo:"configuration"` - Recommendation []types.ClusterRecommendation `mo:"recommendation"` - DrsRecommendation []types.ClusterDrsRecommendation `mo:"drsRecommendation"` - MigrationHistory []types.ClusterDrsMigration `mo:"migrationHistory"` - ActionHistory []types.ClusterActionHistory `mo:"actionHistory"` - DrsFault []types.ClusterDrsFaults `mo:"drsFault"` + Configuration types.ClusterConfigInfo `mo:"configuration"` + Recommendation []types.ClusterRecommendation `mo:"recommendation"` + DrsRecommendation []types.ClusterDrsRecommendation `mo:"drsRecommendation"` + HciConfig *types.ClusterComputeResourceHCIConfigInfo `mo:"hciConfig"` + MigrationHistory []types.ClusterDrsMigration `mo:"migrationHistory"` + ActionHistory []types.ClusterActionHistory `mo:"actionHistory"` + DrsFault []types.ClusterDrsFaults `mo:"drsFault"` } func init() { @@ -128,6 +129,7 @@ type ComputeResource struct { Summary types.BaseComputeResourceSummary `mo:"summary"` EnvironmentBrowser *types.ManagedObjectReference `mo:"environmentBrowser"` ConfigurationEx types.BaseComputeResourceConfigInfo `mo:"configurationEx"` + LifecycleManaged *bool `mo:"lifecycleManaged"` } func (m *ComputeResource) Entity() *ManagedEntity { @@ -441,6 +443,7 @@ type Folder struct { ChildType []string `mo:"childType"` ChildEntity []types.ManagedObjectReference `mo:"childEntity"` + Namespace *string `mo:"namespace"` } func (m *Folder) Entity() *ManagedEntity { @@ -577,6 +580,21 @@ func init() { t["HostActiveDirectoryAuthentication"] = reflect.TypeOf((*HostActiveDirectoryAuthentication)(nil)).Elem() } +type HostAssignableHardwareManager struct { + Self types.ManagedObjectReference + + Binding []types.HostAssignableHardwareBinding `mo:"binding"` + Config types.HostAssignableHardwareConfig `mo:"config"` +} + +func (m HostAssignableHardwareManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["HostAssignableHardwareManager"] = reflect.TypeOf((*HostAssignableHardwareManager)(nil)).Elem() +} + type HostAuthenticationManager struct { Self types.ManagedObjectReference @@ -1265,10 +1283,10 @@ func init() { type Network struct { ManagedEntity - Name string `mo:"name"` Summary types.BaseNetworkSummary `mo:"summary"` Host []types.ManagedObjectReference `mo:"host"` Vm []types.ManagedObjectReference `mo:"vm"` + Name string `mo:"name"` } func (m *Network) Entity() *ManagedEntity { @@ -1444,6 +1462,7 @@ type ResourcePool struct { ResourcePool []types.ManagedObjectReference `mo:"resourcePool"` Vm []types.ManagedObjectReference `mo:"vm"` Config types.ResourceConfigSpec `mo:"config"` + Namespace *string `mo:"namespace"` ChildConfiguration []types.ResourceConfigSpec `mo:"childConfiguration"` } @@ -1556,6 +1575,18 @@ func init() { t["SimpleCommand"] = reflect.TypeOf((*SimpleCommand)(nil)).Elem() } +type SiteInfoManager struct { + Self types.ManagedObjectReference +} + +func (m SiteInfoManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["SiteInfoManager"] = reflect.TypeOf((*SiteInfoManager)(nil)).Elem() +} + type StoragePod struct { Folder @@ -1567,6 +1598,18 @@ func init() { t["StoragePod"] = reflect.TypeOf((*StoragePod)(nil)).Elem() } +type StorageQueryManager struct { + Self types.ManagedObjectReference +} + +func (m StorageQueryManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["StorageQueryManager"] = 
reflect.TypeOf((*StorageQueryManager)(nil)).Elem() +} + type StorageResourceManager struct { Self types.ManagedObjectReference } @@ -1615,6 +1658,18 @@ func init() { t["TaskManager"] = reflect.TypeOf((*TaskManager)(nil)).Elem() } +type TenantTenantManager struct { + Self types.ManagedObjectReference +} + +func (m TenantTenantManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["TenantTenantManager"] = reflect.TypeOf((*TenantTenantManager)(nil)).Elem() +} + type UserDirectory struct { Self types.ManagedObjectReference @@ -1744,6 +1799,18 @@ func init() { t["VirtualMachineCompatibilityChecker"] = reflect.TypeOf((*VirtualMachineCompatibilityChecker)(nil)).Elem() } +type VirtualMachineGuestCustomizationManager struct { + Self types.ManagedObjectReference +} + +func (m VirtualMachineGuestCustomizationManager) Reference() types.ManagedObjectReference { + return m.Self +} + +func init() { + t["VirtualMachineGuestCustomizationManager"] = reflect.TypeOf((*VirtualMachineGuestCustomizationManager)(nil)).Elem() +} + type VirtualMachineProvisioningChecker struct { Self types.ManagedObjectReference } diff --git a/vendor/github.com/vmware/govmomi/vim25/mo/retrieve.go b/vendor/github.com/vmware/govmomi/vim25/mo/retrieve.go index c470c0ac0..e877da063 100644 --- a/vendor/github.com/vmware/govmomi/vim25/mo/retrieve.go +++ b/vendor/github.com/vmware/govmomi/vim25/mo/retrieve.go @@ -46,7 +46,7 @@ func ignoreMissingProperty(ref types.ManagedObjectReference, p types.MissingProp // it returns the first fault it finds there as error. If the 'MissingSet' // field is empty, it returns a pointer to a reflect.Value. It handles contain // nested properties, such as 'guest.ipAddress' or 'config.hardware'. -func ObjectContentToType(o types.ObjectContent) (interface{}, error) { +func ObjectContentToType(o types.ObjectContent, ptr ...bool) (interface{}, error) { // Expect no properties in the missing set for _, p := range o.MissingSet { if ignoreMissingProperty(o.Obj, p) { @@ -62,6 +62,9 @@ func ObjectContentToType(o types.ObjectContent) (interface{}, error) { return nil, err } + if len(ptr) == 1 && ptr[0] { + return v.Interface(), nil + } return v.Elem().Interface(), nil } @@ -81,9 +84,9 @@ func ApplyPropertyChange(obj Reference, changes []types.PropertyChange) { } } -// LoadRetrievePropertiesResponse converts the response of a call to -// RetrieveProperties to one or more managed objects. -func LoadRetrievePropertiesResponse(res *types.RetrievePropertiesResponse, dst interface{}) error { +// LoadObjectContent converts the response of a call to +// RetrieveProperties{Ex} to one or more managed objects. 
+func LoadObjectContent(content []types.ObjectContent, dst interface{}) error { rt := reflect.TypeOf(dst) if rt == nil || rt.Kind() != reflect.Ptr { panic("need pointer") @@ -104,7 +107,7 @@ func LoadRetrievePropertiesResponse(res *types.RetrievePropertiesResponse, dst i } if isSlice { - for _, p := range res.Returnval { + for _, p := range content { v, err := ObjectContentToType(p) if err != nil { return err @@ -123,10 +126,10 @@ func LoadRetrievePropertiesResponse(res *types.RetrievePropertiesResponse, dst i rv.Set(reflect.Append(rv, reflect.ValueOf(v))) } } else { - switch len(res.Returnval) { + switch len(content) { case 0: case 1: - v, err := ObjectContentToType(res.Returnval[0]) + v, err := ObjectContentToType(content[0]) if err != nil { return err } @@ -160,7 +163,7 @@ func RetrievePropertiesForRequest(ctx context.Context, r soap.RoundTripper, req return err } - return LoadRetrievePropertiesResponse(res, dst) + return LoadObjectContent(res.Returnval, dst) } // RetrieveProperties retrieves the properties of the managed object specified @@ -188,3 +191,65 @@ func RetrieveProperties(ctx context.Context, r soap.RoundTripper, pc, obj types. return RetrievePropertiesForRequest(ctx, r, req, dst) } + +var morType = reflect.TypeOf((*types.ManagedObjectReference)(nil)).Elem() + +// References returns all non-nil moref field values in the given struct. +// Only Anonymous struct fields are followed by default. The optional follow +// param will follow any struct fields when true. +func References(s interface{}, follow ...bool) []types.ManagedObjectReference { + var refs []types.ManagedObjectReference + rval := reflect.ValueOf(s) + rtype := rval.Type() + + if rval.Kind() == reflect.Ptr { + rval = rval.Elem() + rtype = rval.Type() + } + + for i := 0; i < rval.NumField(); i++ { + val := rval.Field(i) + finfo := rtype.Field(i) + + if finfo.Anonymous { + refs = append(refs, References(val.Interface(), follow...)...) + continue + } + if finfo.Name == "Self" { + continue + } + + ftype := val.Type() + + if ftype.Kind() == reflect.Slice { + if ftype.Elem() == morType { + s := val.Interface().([]types.ManagedObjectReference) + for i := range s { + refs = append(refs, s[i]) + } + } + continue + } + + if ftype.Kind() == reflect.Ptr { + if val.IsNil() { + continue + } + val = val.Elem() + ftype = val.Type() + } + + if ftype == morType { + refs = append(refs, val.Interface().(types.ManagedObjectReference)) + continue + } + + if len(follow) != 0 && follow[0] { + if ftype.Kind() == reflect.Struct && val.CanSet() { + refs = append(refs, References(val.Interface(), follow...)...) + } + } + } + + return refs +} diff --git a/vendor/github.com/vmware/govmomi/vim25/mo/type_info.go b/vendor/github.com/vmware/govmomi/vim25/mo/type_info.go index 661f24228..aeb7d4a79 100644 --- a/vendor/github.com/vmware/govmomi/vim25/mo/type_info.go +++ b/vendor/github.com/vmware/govmomi/vim25/mo/type_info.go @@ -157,7 +157,7 @@ func (t *typeInfo) build(typ reflect.Type, fn string, fi []int) { var nilValue reflect.Value -// assignValue assignes a value 'pv' to the struct pointed to by 'val', given a +// assignValue assigns a value 'pv' to the struct pointed to by 'val', given a // slice of field indices. It recurses into the struct until it finds the field // specified by the indices. It creates new values for pointer types where // needed. 
@@ -256,3 +256,8 @@ func (t *typeInfo) LoadFromObjectContent(o types.ObjectContent) (reflect.Value, return v, nil } + +func IsManagedObjectType(kind string) bool { + _, ok := t[kind] + return ok +} diff --git a/vendor/github.com/vmware/govmomi/vim25/progress/reader.go b/vendor/github.com/vmware/govmomi/vim25/progress/reader.go index cb184cfe9..201333ee4 100644 --- a/vendor/github.com/vmware/govmomi/vim25/progress/reader.go +++ b/vendor/github.com/vmware/govmomi/vim25/progress/reader.go @@ -36,6 +36,9 @@ type readerReport struct { } func (r readerReport) Percentage() float32 { + if r.size <= 0 { + return 0 + } return 100.0 * float32(r.pos) / float32(r.size) } @@ -158,7 +161,7 @@ func bpsLoop(ch <-chan Report, dst *uint64) { // Setup timer for front of list to become stale. if e := l.Front(); e != nil { - dt := time.Second - time.Now().Sub(e.Value.(readerReport).t) + dt := time.Second - time.Since(e.Value.(readerReport).t) tch = time.After(dt) } diff --git a/vendor/github.com/vmware/govmomi/vim25/retry.go b/vendor/github.com/vmware/govmomi/vim25/retry.go index b8807eef3..bf663a101 100644 --- a/vendor/github.com/vmware/govmomi/vim25/retry.go +++ b/vendor/github.com/vmware/govmomi/vim25/retry.go @@ -18,8 +18,6 @@ package vim25 import ( "context" - "net" - "net/url" "time" "github.com/vmware/govmomi/vim25/soap" @@ -27,37 +25,40 @@ import ( type RetryFunc func(err error) (retry bool, delay time.Duration) -// TemporaryNetworkError returns a RetryFunc that retries up to a maximum of n -// times, only if the error returned by the RoundTrip function is a temporary -// network error (for example: a connect timeout). +// TemporaryNetworkError is deprecated. Use Retry() with RetryTemporaryNetworkError and retryAttempts instead. func TemporaryNetworkError(n int) RetryFunc { - return func(err error) (retry bool, delay time.Duration) { - var nerr net.Error - var ok bool - - // Never retry if this is not a network error. - switch rerr := err.(type) { - case *url.Error: - if nerr, ok = rerr.Err.(net.Error); !ok { + return func(err error) (bool, time.Duration) { + if IsTemporaryNetworkError(err) { + // Don't retry if we're out of tries. + if n--; n <= 0 { return false, 0 } - case net.Error: - nerr = rerr - default: - return false, 0 - } - - if !nerr.Temporary() { - return false, 0 + return true, 0 } + return false, 0 + } +} - // Don't retry if we're out of tries. - if n--; n <= 0 { - return false, 0 - } +// RetryTemporaryNetworkError returns a RetryFunc that returns IsTemporaryNetworkError(err) +func RetryTemporaryNetworkError(err error) (bool, time.Duration) { + return IsTemporaryNetworkError(err), 0 +} - return true, 0 +// IsTemporaryNetworkError returns false unless the error implements +// a Temporary() bool method such as url.Error and net.Error. +// Otherwise, returns the value of the Temporary() method. +func IsTemporaryNetworkError(err error) bool { + t, ok := err.(interface { + // Temporary is implemented by url.Error and net.Error + Temporary() bool + }) + + if !ok { + // Not a Temporary error. + return false } + + return t.Temporary() } type retry struct { @@ -66,7 +67,8 @@ type retry struct { // fn is a custom function that is called when an error occurs. // It returns whether or not to retry, and if so, how long to // delay before retrying. - fn RetryFunc + fn RetryFunc + maxRetryAttempts int } // Retry wraps the specified soap.RoundTripper and invokes the @@ -74,10 +76,16 @@ type retry struct { // retry the call, and if so, how long to wait before retrying. 
If // the result of this function is to not retry, the original error // is returned from the RoundTrip function. -func Retry(roundTripper soap.RoundTripper, fn RetryFunc) soap.RoundTripper { +// The soap.RoundTripper will return the original error if retryAttempts is specified and reached. +func Retry(roundTripper soap.RoundTripper, fn RetryFunc, retryAttempts ...int) soap.RoundTripper { r := &retry{ - roundTripper: roundTripper, - fn: fn, + roundTripper: roundTripper, + fn: fn, + maxRetryAttempts: 1, + } + + if len(retryAttempts) == 1 { + r.maxRetryAttempts = retryAttempts[0] } return r @@ -86,7 +94,7 @@ func Retry(roundTripper soap.RoundTripper, fn RetryFunc) soap.RoundTripper { func (r *retry) RoundTrip(ctx context.Context, req, res soap.HasFault) error { var err error - for { + for attempt := 0; attempt < r.maxRetryAttempts; attempt++ { err = r.roundTripper.RoundTrip(ctx, req, res) if err == nil { break diff --git a/vendor/github.com/vmware/govmomi/vim25/soap/client.go b/vendor/github.com/vmware/govmomi/vim25/soap/client.go index 90b9fbe16..424c20ce6 100644 --- a/vendor/github.com/vmware/govmomi/vim25/soap/client.go +++ b/vendor/github.com/vmware/govmomi/vim25/soap/client.go @@ -35,6 +35,7 @@ import ( "net/url" "os" "path/filepath" + "reflect" "regexp" "strings" "sync" @@ -70,9 +71,11 @@ type Client struct { Namespace string // Vim namespace Version string // Vim version + Types types.Func UserAgent string - cookie string + cookie string + insecureCookies bool } var schemeMatch = regexp.MustCompile(`^\w+://`) @@ -122,6 +125,8 @@ func NewClient(u *url.URL, insecure bool) *Client { u: u, k: insecure, d: newDebug(), + + Types: types.TypeFunc(), } // Initialize http.RoundTripper on client, so we can customize it below @@ -152,9 +157,17 @@ func NewClient(u *url.URL, insecure bool) *Client { c.u = c.URL() c.u.User = nil + if c.u.Scheme == "http" { + c.insecureCookies = os.Getenv("GOVMOMI_INSECURE_COOKIES") == "true" + } + return &c } +func (c *Client) DefaultTransport() *http.Transport { + return c.t +} + // NewServiceClient creates a NewClient with the given URL.Path and namespace. func (c *Client) NewServiceClient(path string, namespace string) *Client { vc := c.URL() @@ -169,7 +182,7 @@ func (c *Client) NewServiceClient(path string, namespace string) *Client { client := NewClient(u, c.k) client.Namespace = "urn:" + namespace - client.Transport.(*http.Transport).TLSClientConfig = c.Transport.(*http.Transport).TLSClientConfig + client.DefaultTransport().TLSClientConfig = c.DefaultTransport().TLSClientConfig if cert := c.Certificate(); cert != nil { client.SetCertificate(*cert) } @@ -195,19 +208,34 @@ func (c *Client) NewServiceClient(path string, namespace string) *Client { // Copy any query params (e.g. GOVMOMI_TUNNEL_PROXY_PORT used in testing) client.u.RawQuery = vc.RawQuery + client.UserAgent = c.UserAgent + + vimTypes := c.Types + client.Types = func(name string) (reflect.Type, bool) { + kind, ok := vimTypes(name) + if ok { + return kind, ok + } + // vim25/xml typeToString() does not have an option to include namespace prefix. + // Workaround this by re-trying the lookup with the namespace prefix. + return vimTypes(namespace + ":" + name) + } + return client } -// SetRootCAs defines the set of root certificate authorities -// that clients use when verifying server certificates. -// By default TLS uses the host's root CA set. 
+// SetRootCAs defines the set of PEM-encoded file locations of root certificate +// authorities the client uses when verifying server certificates instead of the +// TLS defaults which uses the host's root CA set. Multiple PEM file locations +// can be specified using the OS-specific PathListSeparator. // -// See: http.Client.Transport.TLSClientConfig.RootCAs -func (c *Client) SetRootCAs(file string) error { +// See: http.Client.Transport.TLSClientConfig.RootCAs and +// https://pkg.go.dev/os#PathListSeparator +func (c *Client) SetRootCAs(pemPaths string) error { pool := x509.NewCertPool() - for _, name := range filepath.SplitList(file) { - pem, err := ioutil.ReadFile(name) + for _, name := range filepath.SplitList(pemPaths) { + pem, err := ioutil.ReadFile(filepath.Clean(name)) if err != nil { return err } @@ -277,7 +305,7 @@ func (c *Client) LoadThumbprints(file string) error { } func (c *Client) loadThumbprints(name string) error { - f, err := os.Open(name) + f, err := os.Open(filepath.Clean(name)) if err != nil { if os.IsNotExist(err) { return nil @@ -346,7 +374,7 @@ func (c *Client) dialTLS(network string, addr string) (net.Conn, error) { if thumbprint != peer { _ = conn.Close() - return nil, fmt.Errorf("Host %q thumbprint does not match %q", addr, thumbprint) + return nil, fmt.Errorf("host %q thumbprint does not match %q", addr, thumbprint) } return conn, nil @@ -424,6 +452,7 @@ type marshaledClient struct { Cookies []*http.Cookie URL *url.URL Insecure bool + Version string } func (c *Client) MarshalJSON() ([]byte, error) { @@ -431,6 +460,7 @@ func (c *Client) MarshalJSON() ([]byte, error) { Cookies: c.Jar.Cookies(c.u), URL: c.u, Insecure: c.k, + Version: c.Version, } return json.Marshal(m) @@ -445,6 +475,7 @@ func (c *Client) UnmarshalJSON(b []byte) error { } *c = *NewClient(m.URL, m.Insecure) + c.Version = m.Version c.Jar.SetCookies(m.URL, m.Cookies) return nil @@ -452,6 +483,16 @@ func (c *Client) UnmarshalJSON(b []byte) error { type kindContext struct{} +func (c *Client) setInsecureCookies(res *http.Response) { + cookies := res.Cookies() + if len(cookies) != 0 { + for _, cookie := range cookies { + cookie.Secure = false + } + c.Jar.SetCookies(c.u, cookies) + } +} + func (c *Client) Do(ctx context.Context, req *http.Request, f func(*http.Response) error) error { if ctx == nil { ctx = context.Background() @@ -466,8 +507,9 @@ func (c *Client) Do(ctx context.Context, req *http.Request, f func(*http.Respons req.Header.Set(`User-Agent`, c.UserAgent) } + ext := "" if d.enabled() { - d.debugRequest(req) + ext = d.debugRequest(req) } tstart := time.Now() @@ -488,12 +530,16 @@ func (c *Client) Do(ctx context.Context, req *http.Request, f func(*http.Respons return err } - defer res.Body.Close() - if d.enabled() { - d.debugResponse(res) + d.debugResponse(res, ext) } + if c.insecureCookies { + c.setInsecureCookies(res) + } + + defer res.Body.Close() + return f(res) } @@ -511,6 +557,32 @@ func (c *Client) WithHeader(ctx context.Context, header Header) context.Context return context.WithValue(ctx, headerContext{}, header) } +type statusError struct { + res *http.Response +} + +// Temporary returns true for HTTP response codes that can be retried +// See vim25.IsTemporaryNetworkError +func (e *statusError) Temporary() bool { + switch e.res.StatusCode { + case http.StatusBadGateway: + return true + } + return false +} + +func (e *statusError) Error() string { + return e.res.Status +} + +func newStatusError(res *http.Response) error { + return &url.Error{ + Op: res.Request.Method, + URL: 
res.Request.URL.Path, + Err: &statusError{res}, + } +} + func (c *Client) RoundTrip(ctx context.Context, reqBody, resBody HasFault) error { var err error var b []byte @@ -566,11 +638,11 @@ func (c *Client) RoundTrip(ctx context.Context, reqBody, resBody HasFault) error case http.StatusInternalServerError: // Error, but typically includes a body explaining the error default: - return errors.New(res.Status) + return newStatusError(res) } dec := xml.NewDecoder(res.Body) - dec.TypeFunc = types.TypeFunc() + dec.TypeFunc = c.Types err = dec.Decode(&resEnv) if err != nil { return err @@ -681,7 +753,7 @@ func (c *Client) UploadFile(ctx context.Context, file string, u *url.URL, param return err } - f, err := os.Open(file) + f, err := os.Open(filepath.Clean(file)) if err != nil { return err } @@ -734,7 +806,7 @@ func (c *Client) Download(ctx context.Context, u *url.URL, param *Download) (io. switch res.StatusCode { case http.StatusOK: default: - err = errors.New(res.Status) + err = fmt.Errorf("download(%s): %s", u, res.Status) } if err != nil { @@ -758,7 +830,7 @@ func (c *Client) WriteFile(ctx context.Context, file string, src io.Reader, size if s != nil { pr := progress.NewReader(ctx, s, src, size) - src = pr + r = pr // Mark progress reader as done when returning from this function. defer func() { diff --git a/vendor/github.com/vmware/govmomi/vim25/soap/debug.go b/vendor/github.com/vmware/govmomi/vim25/soap/debug.go index 844581b99..bc5b90203 100644 --- a/vendor/github.com/vmware/govmomi/vim25/soap/debug.go +++ b/vendor/github.com/vmware/govmomi/vim25/soap/debug.go @@ -21,25 +21,19 @@ import ( "io" "net/http" "net/http/httputil" - "strings" "sync/atomic" "time" "github.com/vmware/govmomi/vim25/debug" ) -// teeReader wraps io.TeeReader and patches through the Close() function. -type teeReader struct { - io.Reader - io.Closer -} - -func newTeeReader(rc io.ReadCloser, w io.Writer) io.ReadCloser { - return teeReader{ - Reader: io.TeeReader(rc, w), - Closer: rc, +var ( + // Trace reads an http request or response from rc and writes to w. + // The content type (kind) should be one of "xml" or "json". + Trace = func(rc io.ReadCloser, w io.Writer, kind string) io.ReadCloser { + return debug.NewTeeReader(rc, w) } -} +) // debugRoundTrip contains state and logic needed to debug a single round trip. type debugRoundTrip struct { @@ -71,50 +65,52 @@ func (d *debugRoundTrip) newFile(suffix string) io.WriteCloser { } func (d *debugRoundTrip) ext(h http.Header) string { + const json = "application/json" ext := "xml" - if strings.Contains(h.Get("Content-Type"), "/json") { + if h.Get("Accept") == json || h.Get("Content-Type") == json { ext = "json" } return ext } -func (d *debugRoundTrip) debugRequest(req *http.Request) { +func (d *debugRoundTrip) debugRequest(req *http.Request) string { if d == nil { - return + return "" } - var wc io.WriteCloser - // Capture headers - wc = d.newFile("req.headers") + var wc io.WriteCloser = d.newFile("req.headers") b, _ := httputil.DumpRequest(req, false) wc.Write(b) wc.Close() + ext := d.ext(req.Header) // Capture body - wc = d.newFile("req." + d.ext(req.Header)) - req.Body = newTeeReader(req.Body, wc) + wc = d.newFile("req." 
+ ext) + if req.Body != nil { + req.Body = Trace(req.Body, wc, ext) + } // Delay closing until marked done d.cs = append(d.cs, wc) + + return ext } -func (d *debugRoundTrip) debugResponse(res *http.Response) { +func (d *debugRoundTrip) debugResponse(res *http.Response, ext string) { if d == nil { return } - var wc io.WriteCloser - // Capture headers - wc = d.newFile("res.headers") + var wc io.WriteCloser = d.newFile("res.headers") b, _ := httputil.DumpResponse(res, false) wc.Write(b) wc.Close() // Capture body - wc = d.newFile("res." + d.ext(res.Header)) - res.Body = newTeeReader(res.Body, wc) + wc = d.newFile("res." + ext) + res.Body = Trace(res.Body, wc, ext) // Delay closing until marked done d.cs = append(d.cs, wc) diff --git a/vendor/github.com/vmware/govmomi/vim25/soap/error.go b/vendor/github.com/vmware/govmomi/vim25/soap/error.go index 46111556c..1e1508733 100644 --- a/vendor/github.com/vmware/govmomi/vim25/soap/error.go +++ b/vendor/github.com/vmware/govmomi/vim25/soap/error.go @@ -17,6 +17,7 @@ limitations under the License. package soap import ( + "encoding/json" "fmt" "reflect" @@ -49,6 +50,15 @@ func (s soapFaultError) Error() string { return fmt.Sprintf("%s: %s", s.fault.Code, msg) } +func (s soapFaultError) MarshalJSON() ([]byte, error) { + out := struct { + Fault *Fault + }{ + Fault: s.fault, + } + return json.Marshal(out) +} + type vimFaultError struct { fault types.BaseMethodFault } diff --git a/vendor/github.com/vmware/govmomi/vim25/types/enum.go b/vendor/github.com/vmware/govmomi/vim25/types/enum.go index c8c597752..d997fe296 100644 --- a/vendor/github.com/vmware/govmomi/vim25/types/enum.go +++ b/vendor/github.com/vmware/govmomi/vim25/types/enum.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2014-2018 VMware, Inc. All Rights Reserved. +Copyright (c) 2014-2021 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -200,8 +200,7 @@ func init() { type CannotEnableVmcpForClusterReason string const ( - CannotEnableVmcpForClusterReasonAPDTimeoutDisabled = CannotEnableVmcpForClusterReason("APDTimeoutDisabled") - CannotEnableVmcpForClusterReasonIncompatibleHostVersion = CannotEnableVmcpForClusterReason("IncompatibleHostVersion") + CannotEnableVmcpForClusterReasonAPDTimeoutDisabled = CannotEnableVmcpForClusterReason("APDTimeoutDisabled") ) func init() { @@ -239,6 +238,8 @@ const ( CannotUseNetworkReasonMismatchedNetworkPolicies = CannotUseNetworkReason("MismatchedNetworkPolicies") CannotUseNetworkReasonMismatchedDvsVersionOrVendor = CannotUseNetworkReason("MismatchedDvsVersionOrVendor") CannotUseNetworkReasonVMotionToUnsupportedNetworkType = CannotUseNetworkReason("VMotionToUnsupportedNetworkType") + CannotUseNetworkReasonNetworkUnderMaintenance = CannotUseNetworkReason("NetworkUnderMaintenance") + CannotUseNetworkReasonMismatchedEnsMode = CannotUseNetworkReason("MismatchedEnsMode") ) func init() { @@ -259,6 +260,41 @@ func init() { t["CheckTestType"] = reflect.TypeOf((*CheckTestType)(nil)).Elem() } +type ClusterComputeResourceHCIWorkflowState string + +const ( + ClusterComputeResourceHCIWorkflowStateIn_progress = ClusterComputeResourceHCIWorkflowState("in_progress") + ClusterComputeResourceHCIWorkflowStateDone = ClusterComputeResourceHCIWorkflowState("done") + ClusterComputeResourceHCIWorkflowStateInvalid = ClusterComputeResourceHCIWorkflowState("invalid") +) + +func init() { + t["ClusterComputeResourceHCIWorkflowState"] = reflect.TypeOf((*ClusterComputeResourceHCIWorkflowState)(nil)).Elem() +} + +type ClusterComputeResourceVcsHealthStatus string + +const ( + ClusterComputeResourceVcsHealthStatusHealthy = ClusterComputeResourceVcsHealthStatus("healthy") + ClusterComputeResourceVcsHealthStatusDegraded = ClusterComputeResourceVcsHealthStatus("degraded") + ClusterComputeResourceVcsHealthStatusNonhealthy = ClusterComputeResourceVcsHealthStatus("nonhealthy") +) + +func init() { + t["ClusterComputeResourceVcsHealthStatus"] = reflect.TypeOf((*ClusterComputeResourceVcsHealthStatus)(nil)).Elem() +} + +type ClusterCryptoConfigInfoCryptoMode string + +const ( + ClusterCryptoConfigInfoCryptoModeOnDemand = ClusterCryptoConfigInfoCryptoMode("onDemand") + ClusterCryptoConfigInfoCryptoModeForceEnable = ClusterCryptoConfigInfoCryptoMode("forceEnable") +) + +func init() { + t["ClusterCryptoConfigInfoCryptoMode"] = reflect.TypeOf((*ClusterCryptoConfigInfoCryptoMode)(nil)).Elem() +} + type ClusterDasAamNodeStateDasState string const ( @@ -494,6 +530,34 @@ func init() { t["ConfigSpecOperation"] = reflect.TypeOf((*ConfigSpecOperation)(nil)).Elem() } +type CryptoManagerKmipCryptoKeyStatusKeyUnavailableReason string + +const ( + CryptoManagerKmipCryptoKeyStatusKeyUnavailableReasonKeyStateMissingInCache = CryptoManagerKmipCryptoKeyStatusKeyUnavailableReason("KeyStateMissingInCache") + CryptoManagerKmipCryptoKeyStatusKeyUnavailableReasonKeyStateClusterInvalid = CryptoManagerKmipCryptoKeyStatusKeyUnavailableReason("KeyStateClusterInvalid") + CryptoManagerKmipCryptoKeyStatusKeyUnavailableReasonKeyStateClusterUnreachable = CryptoManagerKmipCryptoKeyStatusKeyUnavailableReason("KeyStateClusterUnreachable") + CryptoManagerKmipCryptoKeyStatusKeyUnavailableReasonKeyStateMissingInKMS = CryptoManagerKmipCryptoKeyStatusKeyUnavailableReason("KeyStateMissingInKMS") + CryptoManagerKmipCryptoKeyStatusKeyUnavailableReasonKeyStateNotActiveOrEnabled = CryptoManagerKmipCryptoKeyStatusKeyUnavailableReason("KeyStateNotActiveOrEnabled") + 
CryptoManagerKmipCryptoKeyStatusKeyUnavailableReasonKeyStateManagedByTrustAuthority = CryptoManagerKmipCryptoKeyStatusKeyUnavailableReason("KeyStateManagedByTrustAuthority") +) + +func init() { + t["CryptoManagerKmipCryptoKeyStatusKeyUnavailableReason"] = reflect.TypeOf((*CryptoManagerKmipCryptoKeyStatusKeyUnavailableReason)(nil)).Elem() +} + +type CustomizationFailedReasonCode string + +const ( + CustomizationFailedReasonCodeUserDefinedScriptDisabled = CustomizationFailedReasonCode("userDefinedScriptDisabled") + CustomizationFailedReasonCodeCustomizationDisabled = CustomizationFailedReasonCode("customizationDisabled") + CustomizationFailedReasonCodeRawDataIsNotSupported = CustomizationFailedReasonCode("rawDataIsNotSupported") + CustomizationFailedReasonCodeWrongMetadataFormat = CustomizationFailedReasonCode("wrongMetadataFormat") +) + +func init() { + t["CustomizationFailedReasonCode"] = reflect.TypeOf((*CustomizationFailedReasonCode)(nil)).Elem() +} + type CustomizationLicenseDataMode string const ( @@ -576,6 +640,8 @@ const ( DasConfigFaultDasConfigFaultReasonCreateConfigVvolFailed = DasConfigFaultDasConfigFaultReason("CreateConfigVvolFailed") DasConfigFaultDasConfigFaultReasonVSanNotSupportedOnHost = DasConfigFaultDasConfigFaultReason("VSanNotSupportedOnHost") DasConfigFaultDasConfigFaultReasonDasNetworkMisconfiguration = DasConfigFaultDasConfigFaultReason("DasNetworkMisconfiguration") + DasConfigFaultDasConfigFaultReasonSetDesiredImageSpecFailed = DasConfigFaultDasConfigFaultReason("SetDesiredImageSpecFailed") + DasConfigFaultDasConfigFaultReasonApplyHAVibsOnClusterFailed = DasConfigFaultDasConfigFaultReason("ApplyHAVibsOnClusterFailed") ) func init() { @@ -703,6 +769,17 @@ func init() { t["DisallowedChangeByServiceDisallowedChange"] = reflect.TypeOf((*DisallowedChangeByServiceDisallowedChange)(nil)).Elem() } +type DistributedVirtualPortgroupBackingType string + +const ( + DistributedVirtualPortgroupBackingTypeStandard = DistributedVirtualPortgroupBackingType("standard") + DistributedVirtualPortgroupBackingTypeNsx = DistributedVirtualPortgroupBackingType("nsx") +) + +func init() { + t["DistributedVirtualPortgroupBackingType"] = reflect.TypeOf((*DistributedVirtualPortgroupBackingType)(nil)).Elem() +} + type DistributedVirtualPortgroupMetaTagName string const ( @@ -739,6 +816,8 @@ const ( DistributedVirtualSwitchHostInfrastructureTrafficClassHbr = DistributedVirtualSwitchHostInfrastructureTrafficClass("hbr") DistributedVirtualSwitchHostInfrastructureTrafficClassVsan = DistributedVirtualSwitchHostInfrastructureTrafficClass("vsan") DistributedVirtualSwitchHostInfrastructureTrafficClassVdp = DistributedVirtualSwitchHostInfrastructureTrafficClass("vdp") + DistributedVirtualSwitchHostInfrastructureTrafficClassBackupNfc = DistributedVirtualSwitchHostInfrastructureTrafficClass("backupNfc") + DistributedVirtualSwitchHostInfrastructureTrafficClassNvmetcp = DistributedVirtualSwitchHostInfrastructureTrafficClass("nvmetcp") ) func init() { @@ -760,6 +839,17 @@ func init() { t["DistributedVirtualSwitchHostMemberHostComponentState"] = reflect.TypeOf((*DistributedVirtualSwitchHostMemberHostComponentState)(nil)).Elem() } +type DistributedVirtualSwitchHostMemberTransportZoneType string + +const ( + DistributedVirtualSwitchHostMemberTransportZoneTypeVlan = DistributedVirtualSwitchHostMemberTransportZoneType("vlan") + DistributedVirtualSwitchHostMemberTransportZoneTypeOverlay = DistributedVirtualSwitchHostMemberTransportZoneType("overlay") +) + +func init() { + 
t["DistributedVirtualSwitchHostMemberTransportZoneType"] = reflect.TypeOf((*DistributedVirtualSwitchHostMemberTransportZoneType)(nil)).Elem() +} + type DistributedVirtualSwitchNetworkResourceControlVersion string const ( @@ -997,6 +1087,17 @@ func init() { t["FileSystemMountInfoVStorageSupportStatus"] = reflect.TypeOf((*FileSystemMountInfoVStorageSupportStatus)(nil)).Elem() } +type FolderDesiredHostState string + +const ( + FolderDesiredHostStateMaintenance = FolderDesiredHostState("maintenance") + FolderDesiredHostStateNon_maintenance = FolderDesiredHostState("non_maintenance") +) + +func init() { + t["FolderDesiredHostState"] = reflect.TypeOf((*FolderDesiredHostState)(nil)).Elem() +} + type FtIssuesOnHostHostSelectionType string const ( @@ -1033,6 +1134,20 @@ func init() { t["GuestInfoAppStateType"] = reflect.TypeOf((*GuestInfoAppStateType)(nil)).Elem() } +type GuestInfoCustomizationStatus string + +const ( + GuestInfoCustomizationStatusTOOLSDEPLOYPKG_IDLE = GuestInfoCustomizationStatus("TOOLSDEPLOYPKG_IDLE") + GuestInfoCustomizationStatusTOOLSDEPLOYPKG_PENDING = GuestInfoCustomizationStatus("TOOLSDEPLOYPKG_PENDING") + GuestInfoCustomizationStatusTOOLSDEPLOYPKG_RUNNING = GuestInfoCustomizationStatus("TOOLSDEPLOYPKG_RUNNING") + GuestInfoCustomizationStatusTOOLSDEPLOYPKG_SUCCEEDED = GuestInfoCustomizationStatus("TOOLSDEPLOYPKG_SUCCEEDED") + GuestInfoCustomizationStatusTOOLSDEPLOYPKG_FAILED = GuestInfoCustomizationStatus("TOOLSDEPLOYPKG_FAILED") +) + +func init() { + t["GuestInfoCustomizationStatus"] = reflect.TypeOf((*GuestInfoCustomizationStatus)(nil)).Elem() +} + type GuestOsDescriptorFirmwareType string const ( @@ -1213,6 +1328,7 @@ const ( HostCpuPackageVendorUnknown = HostCpuPackageVendor("unknown") HostCpuPackageVendorIntel = HostCpuPackageVendor("intel") HostCpuPackageVendorAmd = HostCpuPackageVendor("amd") + HostCpuPackageVendorHygon = HostCpuPackageVendor("hygon") ) func init() { @@ -1234,9 +1350,10 @@ func init() { type HostCryptoState string const ( - HostCryptoStateIncapable = HostCryptoState("incapable") - HostCryptoStatePrepared = HostCryptoState("prepared") - HostCryptoStateSafe = HostCryptoState("safe") + HostCryptoStateIncapable = HostCryptoState("incapable") + HostCryptoStatePrepared = HostCryptoState("prepared") + HostCryptoStateSafe = HostCryptoState("safe") + HostCryptoStatePendingIncapable = HostCryptoState("pendingIncapable") ) func init() { @@ -1260,6 +1377,17 @@ func init() { t["HostDasErrorEventHostDasErrorReason"] = reflect.TypeOf((*HostDasErrorEventHostDasErrorReason)(nil)).Elem() } +type HostDateTimeInfoProtocol string + +const ( + HostDateTimeInfoProtocolNtp = HostDateTimeInfoProtocol("ntp") + HostDateTimeInfoProtocolPtp = HostDateTimeInfoProtocol("ptp") +) + +func init() { + t["HostDateTimeInfoProtocol"] = reflect.TypeOf((*HostDateTimeInfoProtocol)(nil)).Elem() +} + type HostDigestInfoDigestMethodType string const ( @@ -1275,6 +1403,19 @@ func init() { t["HostDigestInfoDigestMethodType"] = reflect.TypeOf((*HostDigestInfoDigestMethodType)(nil)).Elem() } +type HostDigestVerificationSetting string + +const ( + HostDigestVerificationSettingDigestDisabled = HostDigestVerificationSetting("digestDisabled") + HostDigestVerificationSettingHeaderOnly = HostDigestVerificationSetting("headerOnly") + HostDigestVerificationSettingDataOnly = HostDigestVerificationSetting("dataOnly") + HostDigestVerificationSettingHeaderAndData = HostDigestVerificationSetting("headerAndData") +) + +func init() { + t["HostDigestVerificationSetting"] = 
reflect.TypeOf((*HostDigestVerificationSetting)(nil)).Elem() +} + type HostDisconnectedEventReasonCode string const ( @@ -1343,6 +1484,7 @@ const ( HostFileSystemVolumeFileSystemTypeVFFS = HostFileSystemVolumeFileSystemType("VFFS") HostFileSystemVolumeFileSystemTypeVVOL = HostFileSystemVolumeFileSystemType("VVOL") HostFileSystemVolumeFileSystemTypePMEM = HostFileSystemVolumeFileSystemType("PMEM") + HostFileSystemVolumeFileSystemTypeVsanD = HostFileSystemVolumeFileSystemType("vsanD") HostFileSystemVolumeFileSystemTypeOTHER = HostFileSystemVolumeFileSystemType("OTHER") ) @@ -1383,6 +1525,18 @@ func init() { t["HostFirewallRuleProtocol"] = reflect.TypeOf((*HostFirewallRuleProtocol)(nil)).Elem() } +type HostFruFruType string + +const ( + HostFruFruTypeUndefined = HostFruFruType("undefined") + HostFruFruTypeBoard = HostFruFruType("board") + HostFruFruTypeProduct = HostFruFruType("product") +) + +func init() { + t["HostFruFruType"] = reflect.TypeOf((*HostFruFruType)(nil)).Elem() +} + type HostGraphicsConfigGraphicsType string const ( @@ -1632,6 +1786,50 @@ func init() { t["HostLowLevelProvisioningManagerReloadTarget"] = reflect.TypeOf((*HostLowLevelProvisioningManagerReloadTarget)(nil)).Elem() } +type HostMaintenanceSpecPurpose string + +const ( + HostMaintenanceSpecPurposeHostUpgrade = HostMaintenanceSpecPurpose("hostUpgrade") +) + +func init() { + t["HostMaintenanceSpecPurpose"] = reflect.TypeOf((*HostMaintenanceSpecPurpose)(nil)).Elem() +} + +type HostMemoryTierFlags string + +const ( + HostMemoryTierFlagsMemoryTier = HostMemoryTierFlags("memoryTier") + HostMemoryTierFlagsPersistentTier = HostMemoryTierFlags("persistentTier") + HostMemoryTierFlagsCachingTier = HostMemoryTierFlags("cachingTier") +) + +func init() { + t["HostMemoryTierFlags"] = reflect.TypeOf((*HostMemoryTierFlags)(nil)).Elem() +} + +type HostMemoryTierType string + +const ( + HostMemoryTierTypeDRAM = HostMemoryTierType("DRAM") + HostMemoryTierTypePMem = HostMemoryTierType("PMem") +) + +func init() { + t["HostMemoryTierType"] = reflect.TypeOf((*HostMemoryTierType)(nil)).Elem() +} + +type HostMemoryTieringType string + +const ( + HostMemoryTieringTypeNoTiering = HostMemoryTieringType("noTiering") + HostMemoryTieringTypeHardwareTiering = HostMemoryTieringType("hardwareTiering") +) + +func init() { + t["HostMemoryTieringType"] = reflect.TypeOf((*HostMemoryTieringType)(nil)).Elem() +} + type HostMountInfoInaccessibleReason string const ( @@ -1725,12 +1923,66 @@ func init() { t["HostNumericSensorType"] = reflect.TypeOf((*HostNumericSensorType)(nil)).Elem() } +type HostNvmeDiscoveryLogSubsystemType string + +const ( + HostNvmeDiscoveryLogSubsystemTypeDiscovery = HostNvmeDiscoveryLogSubsystemType("discovery") + HostNvmeDiscoveryLogSubsystemTypeNvm = HostNvmeDiscoveryLogSubsystemType("nvm") +) + +func init() { + t["HostNvmeDiscoveryLogSubsystemType"] = reflect.TypeOf((*HostNvmeDiscoveryLogSubsystemType)(nil)).Elem() +} + +type HostNvmeDiscoveryLogTransportRequirements string + +const ( + HostNvmeDiscoveryLogTransportRequirementsSecureChannelRequired = HostNvmeDiscoveryLogTransportRequirements("secureChannelRequired") + HostNvmeDiscoveryLogTransportRequirementsSecureChannelNotRequired = HostNvmeDiscoveryLogTransportRequirements("secureChannelNotRequired") + HostNvmeDiscoveryLogTransportRequirementsRequirementsNotSpecified = HostNvmeDiscoveryLogTransportRequirements("requirementsNotSpecified") +) + +func init() { + t["HostNvmeDiscoveryLogTransportRequirements"] = reflect.TypeOf((*HostNvmeDiscoveryLogTransportRequirements)(nil)).Elem() +} 
+ +type HostNvmeTransportParametersNvmeAddressFamily string + +const ( + HostNvmeTransportParametersNvmeAddressFamilyIpv4 = HostNvmeTransportParametersNvmeAddressFamily("ipv4") + HostNvmeTransportParametersNvmeAddressFamilyIpv6 = HostNvmeTransportParametersNvmeAddressFamily("ipv6") + HostNvmeTransportParametersNvmeAddressFamilyInfiniBand = HostNvmeTransportParametersNvmeAddressFamily("infiniBand") + HostNvmeTransportParametersNvmeAddressFamilyFc = HostNvmeTransportParametersNvmeAddressFamily("fc") + HostNvmeTransportParametersNvmeAddressFamilyLoopback = HostNvmeTransportParametersNvmeAddressFamily("loopback") + HostNvmeTransportParametersNvmeAddressFamilyUnknown = HostNvmeTransportParametersNvmeAddressFamily("unknown") +) + +func init() { + t["HostNvmeTransportParametersNvmeAddressFamily"] = reflect.TypeOf((*HostNvmeTransportParametersNvmeAddressFamily)(nil)).Elem() +} + +type HostNvmeTransportType string + +const ( + HostNvmeTransportTypePcie = HostNvmeTransportType("pcie") + HostNvmeTransportTypeFibreChannel = HostNvmeTransportType("fibreChannel") + HostNvmeTransportTypeRdma = HostNvmeTransportType("rdma") + HostNvmeTransportTypeTcp = HostNvmeTransportType("tcp") + HostNvmeTransportTypeLoopback = HostNvmeTransportType("loopback") + HostNvmeTransportTypeUnsupported = HostNvmeTransportType("unsupported") +) + +func init() { + t["HostNvmeTransportType"] = reflect.TypeOf((*HostNvmeTransportType)(nil)).Elem() +} + type HostOpaqueSwitchOpaqueSwitchState string const ( - HostOpaqueSwitchOpaqueSwitchStateUp = HostOpaqueSwitchOpaqueSwitchState("up") - HostOpaqueSwitchOpaqueSwitchStateWarning = HostOpaqueSwitchOpaqueSwitchState("warning") - HostOpaqueSwitchOpaqueSwitchStateDown = HostOpaqueSwitchOpaqueSwitchState("down") + HostOpaqueSwitchOpaqueSwitchStateUp = HostOpaqueSwitchOpaqueSwitchState("up") + HostOpaqueSwitchOpaqueSwitchStateWarning = HostOpaqueSwitchOpaqueSwitchState("warning") + HostOpaqueSwitchOpaqueSwitchStateDown = HostOpaqueSwitchOpaqueSwitchState("down") + HostOpaqueSwitchOpaqueSwitchStateMaintenance = HostOpaqueSwitchOpaqueSwitchState("maintenance") ) func init() { @@ -1883,6 +2135,43 @@ func init() { t["HostProtocolEndpointProtocolEndpointType"] = reflect.TypeOf((*HostProtocolEndpointProtocolEndpointType)(nil)).Elem() } +type HostPtpConfigDeviceType string + +const ( + HostPtpConfigDeviceTypeNone = HostPtpConfigDeviceType("none") + HostPtpConfigDeviceTypeVirtualNic = HostPtpConfigDeviceType("virtualNic") + HostPtpConfigDeviceTypePciPassthruNic = HostPtpConfigDeviceType("pciPassthruNic") +) + +func init() { + t["HostPtpConfigDeviceType"] = reflect.TypeOf((*HostPtpConfigDeviceType)(nil)).Elem() +} + +type HostQualifiedNameType string + +const ( + HostQualifiedNameTypeNvmeQualifiedName = HostQualifiedNameType("nvmeQualifiedName") +) + +func init() { + t["HostQualifiedNameType"] = reflect.TypeOf((*HostQualifiedNameType)(nil)).Elem() +} + +type HostRdmaDeviceConnectionState string + +const ( + HostRdmaDeviceConnectionStateUnknown = HostRdmaDeviceConnectionState("unknown") + HostRdmaDeviceConnectionStateDown = HostRdmaDeviceConnectionState("down") + HostRdmaDeviceConnectionStateInit = HostRdmaDeviceConnectionState("init") + HostRdmaDeviceConnectionStateArmed = HostRdmaDeviceConnectionState("armed") + HostRdmaDeviceConnectionStateActive = HostRdmaDeviceConnectionState("active") + HostRdmaDeviceConnectionStateActiveDefer = HostRdmaDeviceConnectionState("activeDefer") +) + +func init() { + t["HostRdmaDeviceConnectionState"] = reflect.TypeOf((*HostRdmaDeviceConnectionState)(nil)).Elem() +} 
+ type HostReplayUnsupportedReason string const ( @@ -1911,6 +2200,29 @@ func init() { t["HostRuntimeInfoNetStackInstanceRuntimeInfoState"] = reflect.TypeOf((*HostRuntimeInfoNetStackInstanceRuntimeInfoState)(nil)).Elem() } +type HostRuntimeInfoStateEncryptionInfoProtectionMode string + +const ( + HostRuntimeInfoStateEncryptionInfoProtectionModeNone = HostRuntimeInfoStateEncryptionInfoProtectionMode("none") + HostRuntimeInfoStateEncryptionInfoProtectionModeTpm = HostRuntimeInfoStateEncryptionInfoProtectionMode("tpm") +) + +func init() { + t["HostRuntimeInfoStateEncryptionInfoProtectionMode"] = reflect.TypeOf((*HostRuntimeInfoStateEncryptionInfoProtectionMode)(nil)).Elem() +} + +type HostRuntimeInfoStatelessNvdsMigrationState string + +const ( + HostRuntimeInfoStatelessNvdsMigrationStateReady = HostRuntimeInfoStatelessNvdsMigrationState("ready") + HostRuntimeInfoStatelessNvdsMigrationStateNotNeeded = HostRuntimeInfoStatelessNvdsMigrationState("notNeeded") + HostRuntimeInfoStatelessNvdsMigrationStateUnknown = HostRuntimeInfoStatelessNvdsMigrationState("unknown") +) + +func init() { + t["HostRuntimeInfoStatelessNvdsMigrationState"] = reflect.TypeOf((*HostRuntimeInfoStatelessNvdsMigrationState)(nil)).Elem() +} + type HostServicePolicy string const ( @@ -1923,6 +2235,47 @@ func init() { t["HostServicePolicy"] = reflect.TypeOf((*HostServicePolicy)(nil)).Elem() } +type HostSevInfoSevState string + +const ( + HostSevInfoSevStateUninitialized = HostSevInfoSevState("uninitialized") + HostSevInfoSevStateInitialized = HostSevInfoSevState("initialized") + HostSevInfoSevStateWorking = HostSevInfoSevState("working") +) + +func init() { + t["HostSevInfoSevState"] = reflect.TypeOf((*HostSevInfoSevState)(nil)).Elem() +} + +type HostSgxInfoFlcModes string + +const ( + HostSgxInfoFlcModesOff = HostSgxInfoFlcModes("off") + HostSgxInfoFlcModesLocked = HostSgxInfoFlcModes("locked") + HostSgxInfoFlcModesUnlocked = HostSgxInfoFlcModes("unlocked") +) + +func init() { + t["HostSgxInfoFlcModes"] = reflect.TypeOf((*HostSgxInfoFlcModes)(nil)).Elem() +} + +type HostSgxInfoSgxStates string + +const ( + HostSgxInfoSgxStatesNotPresent = HostSgxInfoSgxStates("notPresent") + HostSgxInfoSgxStatesDisabledBIOS = HostSgxInfoSgxStates("disabledBIOS") + HostSgxInfoSgxStatesDisabledCFW101 = HostSgxInfoSgxStates("disabledCFW101") + HostSgxInfoSgxStatesDisabledCPUMismatch = HostSgxInfoSgxStates("disabledCPUMismatch") + HostSgxInfoSgxStatesDisabledNoFLC = HostSgxInfoSgxStates("disabledNoFLC") + HostSgxInfoSgxStatesDisabledNUMAUnsup = HostSgxInfoSgxStates("disabledNUMAUnsup") + HostSgxInfoSgxStatesDisabledMaxEPCRegs = HostSgxInfoSgxStates("disabledMaxEPCRegs") + HostSgxInfoSgxStatesEnabled = HostSgxInfoSgxStates("enabled") +) + +func init() { + t["HostSgxInfoSgxStates"] = reflect.TypeOf((*HostSgxInfoSgxStates)(nil)).Elem() +} + type HostSnmpAgentCapability string const ( @@ -1948,6 +2301,17 @@ func init() { t["HostStandbyMode"] = reflect.TypeOf((*HostStandbyMode)(nil)).Elem() } +type HostStorageProtocol string + +const ( + HostStorageProtocolScsi = HostStorageProtocol("scsi") + HostStorageProtocolNvme = HostStorageProtocol("nvme") +) + +func init() { + t["HostStorageProtocol"] = reflect.TypeOf((*HostStorageProtocol)(nil)).Elem() +} + type HostSystemConnectionState string const ( @@ -1963,9 +2327,11 @@ func init() { type HostSystemIdentificationInfoIdentifier string const ( - HostSystemIdentificationInfoIdentifierAssetTag = HostSystemIdentificationInfoIdentifier("AssetTag") - HostSystemIdentificationInfoIdentifierServiceTag = 
HostSystemIdentificationInfoIdentifier("ServiceTag") - HostSystemIdentificationInfoIdentifierOemSpecificString = HostSystemIdentificationInfoIdentifier("OemSpecificString") + HostSystemIdentificationInfoIdentifierAssetTag = HostSystemIdentificationInfoIdentifier("AssetTag") + HostSystemIdentificationInfoIdentifierServiceTag = HostSystemIdentificationInfoIdentifier("ServiceTag") + HostSystemIdentificationInfoIdentifierOemSpecificString = HostSystemIdentificationInfoIdentifier("OemSpecificString") + HostSystemIdentificationInfoIdentifierEnclosureSerialNumberTag = HostSystemIdentificationInfoIdentifier("EnclosureSerialNumberTag") + HostSystemIdentificationInfoIdentifierSerialNumberTag = HostSystemIdentificationInfoIdentifier("SerialNumberTag") ) func init() { @@ -2011,6 +2377,18 @@ func init() { t["HostTpmAttestationInfoAcceptanceStatus"] = reflect.TypeOf((*HostTpmAttestationInfoAcceptanceStatus)(nil)).Elem() } +type HostTrustAuthorityAttestationInfoAttestationStatus string + +const ( + HostTrustAuthorityAttestationInfoAttestationStatusAttested = HostTrustAuthorityAttestationInfoAttestationStatus("attested") + HostTrustAuthorityAttestationInfoAttestationStatusNotAttested = HostTrustAuthorityAttestationInfoAttestationStatus("notAttested") + HostTrustAuthorityAttestationInfoAttestationStatusUnknown = HostTrustAuthorityAttestationInfoAttestationStatus("unknown") +) + +func init() { + t["HostTrustAuthorityAttestationInfoAttestationStatus"] = reflect.TypeOf((*HostTrustAuthorityAttestationInfoAttestationStatus)(nil)).Elem() +} + type HostUnresolvedVmfsExtentUnresolvedReason string const ( @@ -2044,6 +2422,10 @@ const ( HostVirtualNicManagerNicTypeVsan = HostVirtualNicManagerNicType("vsan") HostVirtualNicManagerNicTypeVSphereProvisioning = HostVirtualNicManagerNicType("vSphereProvisioning") HostVirtualNicManagerNicTypeVsanWitness = HostVirtualNicManagerNicType("vsanWitness") + HostVirtualNicManagerNicTypeVSphereBackupNFC = HostVirtualNicManagerNicType("vSphereBackupNFC") + HostVirtualNicManagerNicTypePtp = HostVirtualNicManagerNicType("ptp") + HostVirtualNicManagerNicTypeNvmeTcp = HostVirtualNicManagerNicType("nvmeTcp") + HostVirtualNicManagerNicTypeNvmeRdma = HostVirtualNicManagerNicType("nvmeRdma") ) func init() { @@ -2187,6 +2569,7 @@ const ( IoFilterTypeInspection = IoFilterType("inspection") IoFilterTypeDatastoreIoControl = IoFilterType("datastoreIoControl") IoFilterTypeDataProvider = IoFilterType("dataProvider") + IoFilterTypeDataCapture = IoFilterType("dataCapture") ) func init() { @@ -2206,6 +2589,19 @@ func init() { t["IscsiPortInfoPathStatus"] = reflect.TypeOf((*IscsiPortInfoPathStatus)(nil)).Elem() } +type KmipClusterInfoKmsManagementType string + +const ( + KmipClusterInfoKmsManagementTypeUnknown = KmipClusterInfoKmsManagementType("unknown") + KmipClusterInfoKmsManagementTypeVCenter = KmipClusterInfoKmsManagementType("vCenter") + KmipClusterInfoKmsManagementTypeTrustAuthority = KmipClusterInfoKmsManagementType("trustAuthority") + KmipClusterInfoKmsManagementTypeNativeProvider = KmipClusterInfoKmsManagementType("nativeProvider") +) + +func init() { + t["KmipClusterInfoKmsManagementType"] = reflect.TypeOf((*KmipClusterInfoKmsManagementType)(nil)).Elem() +} + type LatencySensitivitySensitivityLevel string const ( @@ -2487,6 +2883,32 @@ func init() { t["NvdimmInterleaveSetState"] = reflect.TypeOf((*NvdimmInterleaveSetState)(nil)).Elem() } +type NvdimmNamespaceDetailsHealthStatus string + +const ( + NvdimmNamespaceDetailsHealthStatusNormal = NvdimmNamespaceDetailsHealthStatus("normal") + 
NvdimmNamespaceDetailsHealthStatusMissing = NvdimmNamespaceDetailsHealthStatus("missing") + NvdimmNamespaceDetailsHealthStatusLabelMissing = NvdimmNamespaceDetailsHealthStatus("labelMissing") + NvdimmNamespaceDetailsHealthStatusInterleaveBroken = NvdimmNamespaceDetailsHealthStatus("interleaveBroken") + NvdimmNamespaceDetailsHealthStatusLabelInconsistent = NvdimmNamespaceDetailsHealthStatus("labelInconsistent") +) + +func init() { + t["NvdimmNamespaceDetailsHealthStatus"] = reflect.TypeOf((*NvdimmNamespaceDetailsHealthStatus)(nil)).Elem() +} + +type NvdimmNamespaceDetailsState string + +const ( + NvdimmNamespaceDetailsStateInvalid = NvdimmNamespaceDetailsState("invalid") + NvdimmNamespaceDetailsStateNotInUse = NvdimmNamespaceDetailsState("notInUse") + NvdimmNamespaceDetailsStateInUse = NvdimmNamespaceDetailsState("inUse") +) + +func init() { + t["NvdimmNamespaceDetailsState"] = reflect.TypeOf((*NvdimmNamespaceDetailsState)(nil)).Elem() +} + type NvdimmNamespaceHealthStatus string const ( @@ -2651,6 +3073,7 @@ const ( PerformanceManagerUnitWatt = PerformanceManagerUnit("watt") PerformanceManagerUnitJoule = PerformanceManagerUnit("joule") PerformanceManagerUnitTeraBytes = PerformanceManagerUnit("teraBytes") + PerformanceManagerUnitCelsius = PerformanceManagerUnit("celsius") ) func init() { @@ -2841,6 +3264,10 @@ const ( RecommendationReasonCodeHostExitDegradation = RecommendationReasonCode("hostExitDegradation") RecommendationReasonCodeMaxVmsConstraint = RecommendationReasonCode("maxVmsConstraint") RecommendationReasonCodeFtConstraints = RecommendationReasonCode("ftConstraints") + RecommendationReasonCodeVmHostAffinityPolicy = RecommendationReasonCode("vmHostAffinityPolicy") + RecommendationReasonCodeVmHostAntiAffinityPolicy = RecommendationReasonCode("vmHostAntiAffinityPolicy") + RecommendationReasonCodeVmAntiAffinityPolicy = RecommendationReasonCode("vmAntiAffinityPolicy") + RecommendationReasonCodeBalanceVsanUsage = RecommendationReasonCode("balanceVsanUsage") ) func init() { @@ -2943,6 +3370,17 @@ func init() { t["ReplicationVmState"] = reflect.TypeOf((*ReplicationVmState)(nil)).Elem() } +type ResourceConfigSpecScaleSharesBehavior string + +const ( + ResourceConfigSpecScaleSharesBehaviorDisabled = ResourceConfigSpecScaleSharesBehavior("disabled") + ResourceConfigSpecScaleSharesBehaviorScaleCpuAndMemoryShares = ResourceConfigSpecScaleSharesBehavior("scaleCpuAndMemoryShares") +) + +func init() { + t["ResourceConfigSpecScaleSharesBehavior"] = reflect.TypeOf((*ResourceConfigSpecScaleSharesBehavior)(nil)).Elem() +} + type ScheduledHardwareUpgradeInfoHardwareUpgradePolicy string const ( @@ -3046,6 +3484,18 @@ func init() { t["ScsiLunVStorageSupportStatus"] = reflect.TypeOf((*ScsiLunVStorageSupportStatus)(nil)).Elem() } +type SessionManagerGenericServiceTicketTicketType string + +const ( + SessionManagerGenericServiceTicketTicketTypeHttpNfcServiceTicket = SessionManagerGenericServiceTicketTicketType("HttpNfcServiceTicket") + SessionManagerGenericServiceTicketTicketTypeHostServiceTicket = SessionManagerGenericServiceTicketTicketType("HostServiceTicket") + SessionManagerGenericServiceTicketTicketTypeVcServiceTicket = SessionManagerGenericServiceTicketTicketType("VcServiceTicket") +) + +func init() { + t["SessionManagerGenericServiceTicketTicketType"] = reflect.TypeOf((*SessionManagerGenericServiceTicketTicketType)(nil)).Elem() +} + type SessionManagerHttpServiceRequestSpecMethod string const ( @@ -3430,6 +3880,17 @@ func init() { t["VMwareUplinkLacpMode"] = 
reflect.TypeOf((*VMwareUplinkLacpMode)(nil)).Elem() } +type VMwareUplinkLacpTimeoutMode string + +const ( + VMwareUplinkLacpTimeoutModeFast = VMwareUplinkLacpTimeoutMode("fast") + VMwareUplinkLacpTimeoutModeSlow = VMwareUplinkLacpTimeoutMode("slow") +) + +func init() { + t["VMwareUplinkLacpTimeoutMode"] = reflect.TypeOf((*VMwareUplinkLacpTimeoutMode)(nil)).Elem() +} + type VStorageObjectConsumptionType string const ( @@ -3799,6 +4260,18 @@ func init() { t["VirtualMachineConfigInfoSwapPlacementType"] = reflect.TypeOf((*VirtualMachineConfigInfoSwapPlacementType)(nil)).Elem() } +type VirtualMachineConfigSpecEncryptedFtModes string + +const ( + VirtualMachineConfigSpecEncryptedFtModesFtEncryptionDisabled = VirtualMachineConfigSpecEncryptedFtModes("ftEncryptionDisabled") + VirtualMachineConfigSpecEncryptedFtModesFtEncryptionOpportunistic = VirtualMachineConfigSpecEncryptedFtModes("ftEncryptionOpportunistic") + VirtualMachineConfigSpecEncryptedFtModesFtEncryptionRequired = VirtualMachineConfigSpecEncryptedFtModes("ftEncryptionRequired") +) + +func init() { + t["VirtualMachineConfigSpecEncryptedFtModes"] = reflect.TypeOf((*VirtualMachineConfigSpecEncryptedFtModes)(nil)).Elem() +} + type VirtualMachineConfigSpecEncryptedVMotionModes string const ( @@ -4008,167 +4481,187 @@ func init() { type VirtualMachineGuestOsIdentifier string const ( - VirtualMachineGuestOsIdentifierDosGuest = VirtualMachineGuestOsIdentifier("dosGuest") - VirtualMachineGuestOsIdentifierWin31Guest = VirtualMachineGuestOsIdentifier("win31Guest") - VirtualMachineGuestOsIdentifierWin95Guest = VirtualMachineGuestOsIdentifier("win95Guest") - VirtualMachineGuestOsIdentifierWin98Guest = VirtualMachineGuestOsIdentifier("win98Guest") - VirtualMachineGuestOsIdentifierWinMeGuest = VirtualMachineGuestOsIdentifier("winMeGuest") - VirtualMachineGuestOsIdentifierWinNTGuest = VirtualMachineGuestOsIdentifier("winNTGuest") - VirtualMachineGuestOsIdentifierWin2000ProGuest = VirtualMachineGuestOsIdentifier("win2000ProGuest") - VirtualMachineGuestOsIdentifierWin2000ServGuest = VirtualMachineGuestOsIdentifier("win2000ServGuest") - VirtualMachineGuestOsIdentifierWin2000AdvServGuest = VirtualMachineGuestOsIdentifier("win2000AdvServGuest") - VirtualMachineGuestOsIdentifierWinXPHomeGuest = VirtualMachineGuestOsIdentifier("winXPHomeGuest") - VirtualMachineGuestOsIdentifierWinXPProGuest = VirtualMachineGuestOsIdentifier("winXPProGuest") - VirtualMachineGuestOsIdentifierWinXPPro64Guest = VirtualMachineGuestOsIdentifier("winXPPro64Guest") - VirtualMachineGuestOsIdentifierWinNetWebGuest = VirtualMachineGuestOsIdentifier("winNetWebGuest") - VirtualMachineGuestOsIdentifierWinNetStandardGuest = VirtualMachineGuestOsIdentifier("winNetStandardGuest") - VirtualMachineGuestOsIdentifierWinNetEnterpriseGuest = VirtualMachineGuestOsIdentifier("winNetEnterpriseGuest") - VirtualMachineGuestOsIdentifierWinNetDatacenterGuest = VirtualMachineGuestOsIdentifier("winNetDatacenterGuest") - VirtualMachineGuestOsIdentifierWinNetBusinessGuest = VirtualMachineGuestOsIdentifier("winNetBusinessGuest") - VirtualMachineGuestOsIdentifierWinNetStandard64Guest = VirtualMachineGuestOsIdentifier("winNetStandard64Guest") - VirtualMachineGuestOsIdentifierWinNetEnterprise64Guest = VirtualMachineGuestOsIdentifier("winNetEnterprise64Guest") - VirtualMachineGuestOsIdentifierWinLonghornGuest = VirtualMachineGuestOsIdentifier("winLonghornGuest") - VirtualMachineGuestOsIdentifierWinLonghorn64Guest = VirtualMachineGuestOsIdentifier("winLonghorn64Guest") - 
VirtualMachineGuestOsIdentifierWinNetDatacenter64Guest = VirtualMachineGuestOsIdentifier("winNetDatacenter64Guest") - VirtualMachineGuestOsIdentifierWinVistaGuest = VirtualMachineGuestOsIdentifier("winVistaGuest") - VirtualMachineGuestOsIdentifierWinVista64Guest = VirtualMachineGuestOsIdentifier("winVista64Guest") - VirtualMachineGuestOsIdentifierWindows7Guest = VirtualMachineGuestOsIdentifier("windows7Guest") - VirtualMachineGuestOsIdentifierWindows7_64Guest = VirtualMachineGuestOsIdentifier("windows7_64Guest") - VirtualMachineGuestOsIdentifierWindows7Server64Guest = VirtualMachineGuestOsIdentifier("windows7Server64Guest") - VirtualMachineGuestOsIdentifierWindows8Guest = VirtualMachineGuestOsIdentifier("windows8Guest") - VirtualMachineGuestOsIdentifierWindows8_64Guest = VirtualMachineGuestOsIdentifier("windows8_64Guest") - VirtualMachineGuestOsIdentifierWindows8Server64Guest = VirtualMachineGuestOsIdentifier("windows8Server64Guest") - VirtualMachineGuestOsIdentifierWindows9Guest = VirtualMachineGuestOsIdentifier("windows9Guest") - VirtualMachineGuestOsIdentifierWindows9_64Guest = VirtualMachineGuestOsIdentifier("windows9_64Guest") - VirtualMachineGuestOsIdentifierWindows9Server64Guest = VirtualMachineGuestOsIdentifier("windows9Server64Guest") - VirtualMachineGuestOsIdentifierWindowsHyperVGuest = VirtualMachineGuestOsIdentifier("windowsHyperVGuest") - VirtualMachineGuestOsIdentifierFreebsdGuest = VirtualMachineGuestOsIdentifier("freebsdGuest") - VirtualMachineGuestOsIdentifierFreebsd64Guest = VirtualMachineGuestOsIdentifier("freebsd64Guest") - VirtualMachineGuestOsIdentifierFreebsd11Guest = VirtualMachineGuestOsIdentifier("freebsd11Guest") - VirtualMachineGuestOsIdentifierFreebsd11_64Guest = VirtualMachineGuestOsIdentifier("freebsd11_64Guest") - VirtualMachineGuestOsIdentifierFreebsd12Guest = VirtualMachineGuestOsIdentifier("freebsd12Guest") - VirtualMachineGuestOsIdentifierFreebsd12_64Guest = VirtualMachineGuestOsIdentifier("freebsd12_64Guest") - VirtualMachineGuestOsIdentifierRedhatGuest = VirtualMachineGuestOsIdentifier("redhatGuest") - VirtualMachineGuestOsIdentifierRhel2Guest = VirtualMachineGuestOsIdentifier("rhel2Guest") - VirtualMachineGuestOsIdentifierRhel3Guest = VirtualMachineGuestOsIdentifier("rhel3Guest") - VirtualMachineGuestOsIdentifierRhel3_64Guest = VirtualMachineGuestOsIdentifier("rhel3_64Guest") - VirtualMachineGuestOsIdentifierRhel4Guest = VirtualMachineGuestOsIdentifier("rhel4Guest") - VirtualMachineGuestOsIdentifierRhel4_64Guest = VirtualMachineGuestOsIdentifier("rhel4_64Guest") - VirtualMachineGuestOsIdentifierRhel5Guest = VirtualMachineGuestOsIdentifier("rhel5Guest") - VirtualMachineGuestOsIdentifierRhel5_64Guest = VirtualMachineGuestOsIdentifier("rhel5_64Guest") - VirtualMachineGuestOsIdentifierRhel6Guest = VirtualMachineGuestOsIdentifier("rhel6Guest") - VirtualMachineGuestOsIdentifierRhel6_64Guest = VirtualMachineGuestOsIdentifier("rhel6_64Guest") - VirtualMachineGuestOsIdentifierRhel7Guest = VirtualMachineGuestOsIdentifier("rhel7Guest") - VirtualMachineGuestOsIdentifierRhel7_64Guest = VirtualMachineGuestOsIdentifier("rhel7_64Guest") - VirtualMachineGuestOsIdentifierRhel8_64Guest = VirtualMachineGuestOsIdentifier("rhel8_64Guest") - VirtualMachineGuestOsIdentifierCentosGuest = VirtualMachineGuestOsIdentifier("centosGuest") - VirtualMachineGuestOsIdentifierCentos64Guest = VirtualMachineGuestOsIdentifier("centos64Guest") - VirtualMachineGuestOsIdentifierCentos6Guest = VirtualMachineGuestOsIdentifier("centos6Guest") - VirtualMachineGuestOsIdentifierCentos6_64Guest = 
VirtualMachineGuestOsIdentifier("centos6_64Guest") - VirtualMachineGuestOsIdentifierCentos7Guest = VirtualMachineGuestOsIdentifier("centos7Guest") - VirtualMachineGuestOsIdentifierCentos7_64Guest = VirtualMachineGuestOsIdentifier("centos7_64Guest") - VirtualMachineGuestOsIdentifierCentos8_64Guest = VirtualMachineGuestOsIdentifier("centos8_64Guest") - VirtualMachineGuestOsIdentifierOracleLinuxGuest = VirtualMachineGuestOsIdentifier("oracleLinuxGuest") - VirtualMachineGuestOsIdentifierOracleLinux64Guest = VirtualMachineGuestOsIdentifier("oracleLinux64Guest") - VirtualMachineGuestOsIdentifierOracleLinux6Guest = VirtualMachineGuestOsIdentifier("oracleLinux6Guest") - VirtualMachineGuestOsIdentifierOracleLinux6_64Guest = VirtualMachineGuestOsIdentifier("oracleLinux6_64Guest") - VirtualMachineGuestOsIdentifierOracleLinux7Guest = VirtualMachineGuestOsIdentifier("oracleLinux7Guest") - VirtualMachineGuestOsIdentifierOracleLinux7_64Guest = VirtualMachineGuestOsIdentifier("oracleLinux7_64Guest") - VirtualMachineGuestOsIdentifierOracleLinux8_64Guest = VirtualMachineGuestOsIdentifier("oracleLinux8_64Guest") - VirtualMachineGuestOsIdentifierSuseGuest = VirtualMachineGuestOsIdentifier("suseGuest") - VirtualMachineGuestOsIdentifierSuse64Guest = VirtualMachineGuestOsIdentifier("suse64Guest") - VirtualMachineGuestOsIdentifierSlesGuest = VirtualMachineGuestOsIdentifier("slesGuest") - VirtualMachineGuestOsIdentifierSles64Guest = VirtualMachineGuestOsIdentifier("sles64Guest") - VirtualMachineGuestOsIdentifierSles10Guest = VirtualMachineGuestOsIdentifier("sles10Guest") - VirtualMachineGuestOsIdentifierSles10_64Guest = VirtualMachineGuestOsIdentifier("sles10_64Guest") - VirtualMachineGuestOsIdentifierSles11Guest = VirtualMachineGuestOsIdentifier("sles11Guest") - VirtualMachineGuestOsIdentifierSles11_64Guest = VirtualMachineGuestOsIdentifier("sles11_64Guest") - VirtualMachineGuestOsIdentifierSles12Guest = VirtualMachineGuestOsIdentifier("sles12Guest") - VirtualMachineGuestOsIdentifierSles12_64Guest = VirtualMachineGuestOsIdentifier("sles12_64Guest") - VirtualMachineGuestOsIdentifierSles15_64Guest = VirtualMachineGuestOsIdentifier("sles15_64Guest") - VirtualMachineGuestOsIdentifierNld9Guest = VirtualMachineGuestOsIdentifier("nld9Guest") - VirtualMachineGuestOsIdentifierOesGuest = VirtualMachineGuestOsIdentifier("oesGuest") - VirtualMachineGuestOsIdentifierSjdsGuest = VirtualMachineGuestOsIdentifier("sjdsGuest") - VirtualMachineGuestOsIdentifierMandrakeGuest = VirtualMachineGuestOsIdentifier("mandrakeGuest") - VirtualMachineGuestOsIdentifierMandrivaGuest = VirtualMachineGuestOsIdentifier("mandrivaGuest") - VirtualMachineGuestOsIdentifierMandriva64Guest = VirtualMachineGuestOsIdentifier("mandriva64Guest") - VirtualMachineGuestOsIdentifierTurboLinuxGuest = VirtualMachineGuestOsIdentifier("turboLinuxGuest") - VirtualMachineGuestOsIdentifierTurboLinux64Guest = VirtualMachineGuestOsIdentifier("turboLinux64Guest") - VirtualMachineGuestOsIdentifierUbuntuGuest = VirtualMachineGuestOsIdentifier("ubuntuGuest") - VirtualMachineGuestOsIdentifierUbuntu64Guest = VirtualMachineGuestOsIdentifier("ubuntu64Guest") - VirtualMachineGuestOsIdentifierDebian4Guest = VirtualMachineGuestOsIdentifier("debian4Guest") - VirtualMachineGuestOsIdentifierDebian4_64Guest = VirtualMachineGuestOsIdentifier("debian4_64Guest") - VirtualMachineGuestOsIdentifierDebian5Guest = VirtualMachineGuestOsIdentifier("debian5Guest") - VirtualMachineGuestOsIdentifierDebian5_64Guest = VirtualMachineGuestOsIdentifier("debian5_64Guest") - 
VirtualMachineGuestOsIdentifierDebian6Guest = VirtualMachineGuestOsIdentifier("debian6Guest") - VirtualMachineGuestOsIdentifierDebian6_64Guest = VirtualMachineGuestOsIdentifier("debian6_64Guest") - VirtualMachineGuestOsIdentifierDebian7Guest = VirtualMachineGuestOsIdentifier("debian7Guest") - VirtualMachineGuestOsIdentifierDebian7_64Guest = VirtualMachineGuestOsIdentifier("debian7_64Guest") - VirtualMachineGuestOsIdentifierDebian8Guest = VirtualMachineGuestOsIdentifier("debian8Guest") - VirtualMachineGuestOsIdentifierDebian8_64Guest = VirtualMachineGuestOsIdentifier("debian8_64Guest") - VirtualMachineGuestOsIdentifierDebian9Guest = VirtualMachineGuestOsIdentifier("debian9Guest") - VirtualMachineGuestOsIdentifierDebian9_64Guest = VirtualMachineGuestOsIdentifier("debian9_64Guest") - VirtualMachineGuestOsIdentifierDebian10Guest = VirtualMachineGuestOsIdentifier("debian10Guest") - VirtualMachineGuestOsIdentifierDebian10_64Guest = VirtualMachineGuestOsIdentifier("debian10_64Guest") - VirtualMachineGuestOsIdentifierAsianux3Guest = VirtualMachineGuestOsIdentifier("asianux3Guest") - VirtualMachineGuestOsIdentifierAsianux3_64Guest = VirtualMachineGuestOsIdentifier("asianux3_64Guest") - VirtualMachineGuestOsIdentifierAsianux4Guest = VirtualMachineGuestOsIdentifier("asianux4Guest") - VirtualMachineGuestOsIdentifierAsianux4_64Guest = VirtualMachineGuestOsIdentifier("asianux4_64Guest") - VirtualMachineGuestOsIdentifierAsianux5_64Guest = VirtualMachineGuestOsIdentifier("asianux5_64Guest") - VirtualMachineGuestOsIdentifierAsianux7_64Guest = VirtualMachineGuestOsIdentifier("asianux7_64Guest") - VirtualMachineGuestOsIdentifierAsianux8_64Guest = VirtualMachineGuestOsIdentifier("asianux8_64Guest") - VirtualMachineGuestOsIdentifierOpensuseGuest = VirtualMachineGuestOsIdentifier("opensuseGuest") - VirtualMachineGuestOsIdentifierOpensuse64Guest = VirtualMachineGuestOsIdentifier("opensuse64Guest") - VirtualMachineGuestOsIdentifierFedoraGuest = VirtualMachineGuestOsIdentifier("fedoraGuest") - VirtualMachineGuestOsIdentifierFedora64Guest = VirtualMachineGuestOsIdentifier("fedora64Guest") - VirtualMachineGuestOsIdentifierCoreos64Guest = VirtualMachineGuestOsIdentifier("coreos64Guest") - VirtualMachineGuestOsIdentifierVmwarePhoton64Guest = VirtualMachineGuestOsIdentifier("vmwarePhoton64Guest") - VirtualMachineGuestOsIdentifierOther24xLinuxGuest = VirtualMachineGuestOsIdentifier("other24xLinuxGuest") - VirtualMachineGuestOsIdentifierOther26xLinuxGuest = VirtualMachineGuestOsIdentifier("other26xLinuxGuest") - VirtualMachineGuestOsIdentifierOtherLinuxGuest = VirtualMachineGuestOsIdentifier("otherLinuxGuest") - VirtualMachineGuestOsIdentifierOther3xLinuxGuest = VirtualMachineGuestOsIdentifier("other3xLinuxGuest") - VirtualMachineGuestOsIdentifierOther4xLinuxGuest = VirtualMachineGuestOsIdentifier("other4xLinuxGuest") - VirtualMachineGuestOsIdentifierGenericLinuxGuest = VirtualMachineGuestOsIdentifier("genericLinuxGuest") - VirtualMachineGuestOsIdentifierOther24xLinux64Guest = VirtualMachineGuestOsIdentifier("other24xLinux64Guest") - VirtualMachineGuestOsIdentifierOther26xLinux64Guest = VirtualMachineGuestOsIdentifier("other26xLinux64Guest") - VirtualMachineGuestOsIdentifierOther3xLinux64Guest = VirtualMachineGuestOsIdentifier("other3xLinux64Guest") - VirtualMachineGuestOsIdentifierOther4xLinux64Guest = VirtualMachineGuestOsIdentifier("other4xLinux64Guest") - VirtualMachineGuestOsIdentifierOtherLinux64Guest = VirtualMachineGuestOsIdentifier("otherLinux64Guest") - VirtualMachineGuestOsIdentifierSolaris6Guest = 
VirtualMachineGuestOsIdentifier("solaris6Guest") - VirtualMachineGuestOsIdentifierSolaris7Guest = VirtualMachineGuestOsIdentifier("solaris7Guest") - VirtualMachineGuestOsIdentifierSolaris8Guest = VirtualMachineGuestOsIdentifier("solaris8Guest") - VirtualMachineGuestOsIdentifierSolaris9Guest = VirtualMachineGuestOsIdentifier("solaris9Guest") - VirtualMachineGuestOsIdentifierSolaris10Guest = VirtualMachineGuestOsIdentifier("solaris10Guest") - VirtualMachineGuestOsIdentifierSolaris10_64Guest = VirtualMachineGuestOsIdentifier("solaris10_64Guest") - VirtualMachineGuestOsIdentifierSolaris11_64Guest = VirtualMachineGuestOsIdentifier("solaris11_64Guest") - VirtualMachineGuestOsIdentifierOs2Guest = VirtualMachineGuestOsIdentifier("os2Guest") - VirtualMachineGuestOsIdentifierEComStationGuest = VirtualMachineGuestOsIdentifier("eComStationGuest") - VirtualMachineGuestOsIdentifierEComStation2Guest = VirtualMachineGuestOsIdentifier("eComStation2Guest") - VirtualMachineGuestOsIdentifierNetware4Guest = VirtualMachineGuestOsIdentifier("netware4Guest") - VirtualMachineGuestOsIdentifierNetware5Guest = VirtualMachineGuestOsIdentifier("netware5Guest") - VirtualMachineGuestOsIdentifierNetware6Guest = VirtualMachineGuestOsIdentifier("netware6Guest") - VirtualMachineGuestOsIdentifierOpenServer5Guest = VirtualMachineGuestOsIdentifier("openServer5Guest") - VirtualMachineGuestOsIdentifierOpenServer6Guest = VirtualMachineGuestOsIdentifier("openServer6Guest") - VirtualMachineGuestOsIdentifierUnixWare7Guest = VirtualMachineGuestOsIdentifier("unixWare7Guest") - VirtualMachineGuestOsIdentifierDarwinGuest = VirtualMachineGuestOsIdentifier("darwinGuest") - VirtualMachineGuestOsIdentifierDarwin64Guest = VirtualMachineGuestOsIdentifier("darwin64Guest") - VirtualMachineGuestOsIdentifierDarwin10Guest = VirtualMachineGuestOsIdentifier("darwin10Guest") - VirtualMachineGuestOsIdentifierDarwin10_64Guest = VirtualMachineGuestOsIdentifier("darwin10_64Guest") - VirtualMachineGuestOsIdentifierDarwin11Guest = VirtualMachineGuestOsIdentifier("darwin11Guest") - VirtualMachineGuestOsIdentifierDarwin11_64Guest = VirtualMachineGuestOsIdentifier("darwin11_64Guest") - VirtualMachineGuestOsIdentifierDarwin12_64Guest = VirtualMachineGuestOsIdentifier("darwin12_64Guest") - VirtualMachineGuestOsIdentifierDarwin13_64Guest = VirtualMachineGuestOsIdentifier("darwin13_64Guest") - VirtualMachineGuestOsIdentifierDarwin14_64Guest = VirtualMachineGuestOsIdentifier("darwin14_64Guest") - VirtualMachineGuestOsIdentifierDarwin15_64Guest = VirtualMachineGuestOsIdentifier("darwin15_64Guest") - VirtualMachineGuestOsIdentifierDarwin16_64Guest = VirtualMachineGuestOsIdentifier("darwin16_64Guest") - VirtualMachineGuestOsIdentifierDarwin17_64Guest = VirtualMachineGuestOsIdentifier("darwin17_64Guest") - VirtualMachineGuestOsIdentifierDarwin18_64Guest = VirtualMachineGuestOsIdentifier("darwin18_64Guest") - VirtualMachineGuestOsIdentifierVmkernelGuest = VirtualMachineGuestOsIdentifier("vmkernelGuest") - VirtualMachineGuestOsIdentifierVmkernel5Guest = VirtualMachineGuestOsIdentifier("vmkernel5Guest") - VirtualMachineGuestOsIdentifierVmkernel6Guest = VirtualMachineGuestOsIdentifier("vmkernel6Guest") - VirtualMachineGuestOsIdentifierVmkernel65Guest = VirtualMachineGuestOsIdentifier("vmkernel65Guest") - VirtualMachineGuestOsIdentifierOtherGuest = VirtualMachineGuestOsIdentifier("otherGuest") - VirtualMachineGuestOsIdentifierOtherGuest64 = VirtualMachineGuestOsIdentifier("otherGuest64") + VirtualMachineGuestOsIdentifierDosGuest = VirtualMachineGuestOsIdentifier("dosGuest") 
+ VirtualMachineGuestOsIdentifierWin31Guest = VirtualMachineGuestOsIdentifier("win31Guest") + VirtualMachineGuestOsIdentifierWin95Guest = VirtualMachineGuestOsIdentifier("win95Guest") + VirtualMachineGuestOsIdentifierWin98Guest = VirtualMachineGuestOsIdentifier("win98Guest") + VirtualMachineGuestOsIdentifierWinMeGuest = VirtualMachineGuestOsIdentifier("winMeGuest") + VirtualMachineGuestOsIdentifierWinNTGuest = VirtualMachineGuestOsIdentifier("winNTGuest") + VirtualMachineGuestOsIdentifierWin2000ProGuest = VirtualMachineGuestOsIdentifier("win2000ProGuest") + VirtualMachineGuestOsIdentifierWin2000ServGuest = VirtualMachineGuestOsIdentifier("win2000ServGuest") + VirtualMachineGuestOsIdentifierWin2000AdvServGuest = VirtualMachineGuestOsIdentifier("win2000AdvServGuest") + VirtualMachineGuestOsIdentifierWinXPHomeGuest = VirtualMachineGuestOsIdentifier("winXPHomeGuest") + VirtualMachineGuestOsIdentifierWinXPProGuest = VirtualMachineGuestOsIdentifier("winXPProGuest") + VirtualMachineGuestOsIdentifierWinXPPro64Guest = VirtualMachineGuestOsIdentifier("winXPPro64Guest") + VirtualMachineGuestOsIdentifierWinNetWebGuest = VirtualMachineGuestOsIdentifier("winNetWebGuest") + VirtualMachineGuestOsIdentifierWinNetStandardGuest = VirtualMachineGuestOsIdentifier("winNetStandardGuest") + VirtualMachineGuestOsIdentifierWinNetEnterpriseGuest = VirtualMachineGuestOsIdentifier("winNetEnterpriseGuest") + VirtualMachineGuestOsIdentifierWinNetDatacenterGuest = VirtualMachineGuestOsIdentifier("winNetDatacenterGuest") + VirtualMachineGuestOsIdentifierWinNetBusinessGuest = VirtualMachineGuestOsIdentifier("winNetBusinessGuest") + VirtualMachineGuestOsIdentifierWinNetStandard64Guest = VirtualMachineGuestOsIdentifier("winNetStandard64Guest") + VirtualMachineGuestOsIdentifierWinNetEnterprise64Guest = VirtualMachineGuestOsIdentifier("winNetEnterprise64Guest") + VirtualMachineGuestOsIdentifierWinLonghornGuest = VirtualMachineGuestOsIdentifier("winLonghornGuest") + VirtualMachineGuestOsIdentifierWinLonghorn64Guest = VirtualMachineGuestOsIdentifier("winLonghorn64Guest") + VirtualMachineGuestOsIdentifierWinNetDatacenter64Guest = VirtualMachineGuestOsIdentifier("winNetDatacenter64Guest") + VirtualMachineGuestOsIdentifierWinVistaGuest = VirtualMachineGuestOsIdentifier("winVistaGuest") + VirtualMachineGuestOsIdentifierWinVista64Guest = VirtualMachineGuestOsIdentifier("winVista64Guest") + VirtualMachineGuestOsIdentifierWindows7Guest = VirtualMachineGuestOsIdentifier("windows7Guest") + VirtualMachineGuestOsIdentifierWindows7_64Guest = VirtualMachineGuestOsIdentifier("windows7_64Guest") + VirtualMachineGuestOsIdentifierWindows7Server64Guest = VirtualMachineGuestOsIdentifier("windows7Server64Guest") + VirtualMachineGuestOsIdentifierWindows8Guest = VirtualMachineGuestOsIdentifier("windows8Guest") + VirtualMachineGuestOsIdentifierWindows8_64Guest = VirtualMachineGuestOsIdentifier("windows8_64Guest") + VirtualMachineGuestOsIdentifierWindows8Server64Guest = VirtualMachineGuestOsIdentifier("windows8Server64Guest") + VirtualMachineGuestOsIdentifierWindows9Guest = VirtualMachineGuestOsIdentifier("windows9Guest") + VirtualMachineGuestOsIdentifierWindows9_64Guest = VirtualMachineGuestOsIdentifier("windows9_64Guest") + VirtualMachineGuestOsIdentifierWindows9Server64Guest = VirtualMachineGuestOsIdentifier("windows9Server64Guest") + VirtualMachineGuestOsIdentifierWindowsHyperVGuest = VirtualMachineGuestOsIdentifier("windowsHyperVGuest") + VirtualMachineGuestOsIdentifierWindows2019srv_64Guest = 
VirtualMachineGuestOsIdentifier("windows2019srv_64Guest") + VirtualMachineGuestOsIdentifierWindows2019srvNext_64Guest = VirtualMachineGuestOsIdentifier("windows2019srvNext_64Guest") + VirtualMachineGuestOsIdentifierFreebsdGuest = VirtualMachineGuestOsIdentifier("freebsdGuest") + VirtualMachineGuestOsIdentifierFreebsd64Guest = VirtualMachineGuestOsIdentifier("freebsd64Guest") + VirtualMachineGuestOsIdentifierFreebsd11Guest = VirtualMachineGuestOsIdentifier("freebsd11Guest") + VirtualMachineGuestOsIdentifierFreebsd11_64Guest = VirtualMachineGuestOsIdentifier("freebsd11_64Guest") + VirtualMachineGuestOsIdentifierFreebsd12Guest = VirtualMachineGuestOsIdentifier("freebsd12Guest") + VirtualMachineGuestOsIdentifierFreebsd12_64Guest = VirtualMachineGuestOsIdentifier("freebsd12_64Guest") + VirtualMachineGuestOsIdentifierFreebsd13Guest = VirtualMachineGuestOsIdentifier("freebsd13Guest") + VirtualMachineGuestOsIdentifierFreebsd13_64Guest = VirtualMachineGuestOsIdentifier("freebsd13_64Guest") + VirtualMachineGuestOsIdentifierRedhatGuest = VirtualMachineGuestOsIdentifier("redhatGuest") + VirtualMachineGuestOsIdentifierRhel2Guest = VirtualMachineGuestOsIdentifier("rhel2Guest") + VirtualMachineGuestOsIdentifierRhel3Guest = VirtualMachineGuestOsIdentifier("rhel3Guest") + VirtualMachineGuestOsIdentifierRhel3_64Guest = VirtualMachineGuestOsIdentifier("rhel3_64Guest") + VirtualMachineGuestOsIdentifierRhel4Guest = VirtualMachineGuestOsIdentifier("rhel4Guest") + VirtualMachineGuestOsIdentifierRhel4_64Guest = VirtualMachineGuestOsIdentifier("rhel4_64Guest") + VirtualMachineGuestOsIdentifierRhel5Guest = VirtualMachineGuestOsIdentifier("rhel5Guest") + VirtualMachineGuestOsIdentifierRhel5_64Guest = VirtualMachineGuestOsIdentifier("rhel5_64Guest") + VirtualMachineGuestOsIdentifierRhel6Guest = VirtualMachineGuestOsIdentifier("rhel6Guest") + VirtualMachineGuestOsIdentifierRhel6_64Guest = VirtualMachineGuestOsIdentifier("rhel6_64Guest") + VirtualMachineGuestOsIdentifierRhel7Guest = VirtualMachineGuestOsIdentifier("rhel7Guest") + VirtualMachineGuestOsIdentifierRhel7_64Guest = VirtualMachineGuestOsIdentifier("rhel7_64Guest") + VirtualMachineGuestOsIdentifierRhel8_64Guest = VirtualMachineGuestOsIdentifier("rhel8_64Guest") + VirtualMachineGuestOsIdentifierRhel9_64Guest = VirtualMachineGuestOsIdentifier("rhel9_64Guest") + VirtualMachineGuestOsIdentifierCentosGuest = VirtualMachineGuestOsIdentifier("centosGuest") + VirtualMachineGuestOsIdentifierCentos64Guest = VirtualMachineGuestOsIdentifier("centos64Guest") + VirtualMachineGuestOsIdentifierCentos6Guest = VirtualMachineGuestOsIdentifier("centos6Guest") + VirtualMachineGuestOsIdentifierCentos6_64Guest = VirtualMachineGuestOsIdentifier("centos6_64Guest") + VirtualMachineGuestOsIdentifierCentos7Guest = VirtualMachineGuestOsIdentifier("centos7Guest") + VirtualMachineGuestOsIdentifierCentos7_64Guest = VirtualMachineGuestOsIdentifier("centos7_64Guest") + VirtualMachineGuestOsIdentifierCentos8_64Guest = VirtualMachineGuestOsIdentifier("centos8_64Guest") + VirtualMachineGuestOsIdentifierCentos9_64Guest = VirtualMachineGuestOsIdentifier("centos9_64Guest") + VirtualMachineGuestOsIdentifierOracleLinuxGuest = VirtualMachineGuestOsIdentifier("oracleLinuxGuest") + VirtualMachineGuestOsIdentifierOracleLinux64Guest = VirtualMachineGuestOsIdentifier("oracleLinux64Guest") + VirtualMachineGuestOsIdentifierOracleLinux6Guest = VirtualMachineGuestOsIdentifier("oracleLinux6Guest") + VirtualMachineGuestOsIdentifierOracleLinux6_64Guest = VirtualMachineGuestOsIdentifier("oracleLinux6_64Guest") + 
VirtualMachineGuestOsIdentifierOracleLinux7Guest = VirtualMachineGuestOsIdentifier("oracleLinux7Guest") + VirtualMachineGuestOsIdentifierOracleLinux7_64Guest = VirtualMachineGuestOsIdentifier("oracleLinux7_64Guest") + VirtualMachineGuestOsIdentifierOracleLinux8_64Guest = VirtualMachineGuestOsIdentifier("oracleLinux8_64Guest") + VirtualMachineGuestOsIdentifierOracleLinux9_64Guest = VirtualMachineGuestOsIdentifier("oracleLinux9_64Guest") + VirtualMachineGuestOsIdentifierSuseGuest = VirtualMachineGuestOsIdentifier("suseGuest") + VirtualMachineGuestOsIdentifierSuse64Guest = VirtualMachineGuestOsIdentifier("suse64Guest") + VirtualMachineGuestOsIdentifierSlesGuest = VirtualMachineGuestOsIdentifier("slesGuest") + VirtualMachineGuestOsIdentifierSles64Guest = VirtualMachineGuestOsIdentifier("sles64Guest") + VirtualMachineGuestOsIdentifierSles10Guest = VirtualMachineGuestOsIdentifier("sles10Guest") + VirtualMachineGuestOsIdentifierSles10_64Guest = VirtualMachineGuestOsIdentifier("sles10_64Guest") + VirtualMachineGuestOsIdentifierSles11Guest = VirtualMachineGuestOsIdentifier("sles11Guest") + VirtualMachineGuestOsIdentifierSles11_64Guest = VirtualMachineGuestOsIdentifier("sles11_64Guest") + VirtualMachineGuestOsIdentifierSles12Guest = VirtualMachineGuestOsIdentifier("sles12Guest") + VirtualMachineGuestOsIdentifierSles12_64Guest = VirtualMachineGuestOsIdentifier("sles12_64Guest") + VirtualMachineGuestOsIdentifierSles15_64Guest = VirtualMachineGuestOsIdentifier("sles15_64Guest") + VirtualMachineGuestOsIdentifierSles16_64Guest = VirtualMachineGuestOsIdentifier("sles16_64Guest") + VirtualMachineGuestOsIdentifierNld9Guest = VirtualMachineGuestOsIdentifier("nld9Guest") + VirtualMachineGuestOsIdentifierOesGuest = VirtualMachineGuestOsIdentifier("oesGuest") + VirtualMachineGuestOsIdentifierSjdsGuest = VirtualMachineGuestOsIdentifier("sjdsGuest") + VirtualMachineGuestOsIdentifierMandrakeGuest = VirtualMachineGuestOsIdentifier("mandrakeGuest") + VirtualMachineGuestOsIdentifierMandrivaGuest = VirtualMachineGuestOsIdentifier("mandrivaGuest") + VirtualMachineGuestOsIdentifierMandriva64Guest = VirtualMachineGuestOsIdentifier("mandriva64Guest") + VirtualMachineGuestOsIdentifierTurboLinuxGuest = VirtualMachineGuestOsIdentifier("turboLinuxGuest") + VirtualMachineGuestOsIdentifierTurboLinux64Guest = VirtualMachineGuestOsIdentifier("turboLinux64Guest") + VirtualMachineGuestOsIdentifierUbuntuGuest = VirtualMachineGuestOsIdentifier("ubuntuGuest") + VirtualMachineGuestOsIdentifierUbuntu64Guest = VirtualMachineGuestOsIdentifier("ubuntu64Guest") + VirtualMachineGuestOsIdentifierDebian4Guest = VirtualMachineGuestOsIdentifier("debian4Guest") + VirtualMachineGuestOsIdentifierDebian4_64Guest = VirtualMachineGuestOsIdentifier("debian4_64Guest") + VirtualMachineGuestOsIdentifierDebian5Guest = VirtualMachineGuestOsIdentifier("debian5Guest") + VirtualMachineGuestOsIdentifierDebian5_64Guest = VirtualMachineGuestOsIdentifier("debian5_64Guest") + VirtualMachineGuestOsIdentifierDebian6Guest = VirtualMachineGuestOsIdentifier("debian6Guest") + VirtualMachineGuestOsIdentifierDebian6_64Guest = VirtualMachineGuestOsIdentifier("debian6_64Guest") + VirtualMachineGuestOsIdentifierDebian7Guest = VirtualMachineGuestOsIdentifier("debian7Guest") + VirtualMachineGuestOsIdentifierDebian7_64Guest = VirtualMachineGuestOsIdentifier("debian7_64Guest") + VirtualMachineGuestOsIdentifierDebian8Guest = VirtualMachineGuestOsIdentifier("debian8Guest") + VirtualMachineGuestOsIdentifierDebian8_64Guest = VirtualMachineGuestOsIdentifier("debian8_64Guest") + 
VirtualMachineGuestOsIdentifierDebian9Guest = VirtualMachineGuestOsIdentifier("debian9Guest") + VirtualMachineGuestOsIdentifierDebian9_64Guest = VirtualMachineGuestOsIdentifier("debian9_64Guest") + VirtualMachineGuestOsIdentifierDebian10Guest = VirtualMachineGuestOsIdentifier("debian10Guest") + VirtualMachineGuestOsIdentifierDebian10_64Guest = VirtualMachineGuestOsIdentifier("debian10_64Guest") + VirtualMachineGuestOsIdentifierDebian11Guest = VirtualMachineGuestOsIdentifier("debian11Guest") + VirtualMachineGuestOsIdentifierDebian11_64Guest = VirtualMachineGuestOsIdentifier("debian11_64Guest") + VirtualMachineGuestOsIdentifierAsianux3Guest = VirtualMachineGuestOsIdentifier("asianux3Guest") + VirtualMachineGuestOsIdentifierAsianux3_64Guest = VirtualMachineGuestOsIdentifier("asianux3_64Guest") + VirtualMachineGuestOsIdentifierAsianux4Guest = VirtualMachineGuestOsIdentifier("asianux4Guest") + VirtualMachineGuestOsIdentifierAsianux4_64Guest = VirtualMachineGuestOsIdentifier("asianux4_64Guest") + VirtualMachineGuestOsIdentifierAsianux5_64Guest = VirtualMachineGuestOsIdentifier("asianux5_64Guest") + VirtualMachineGuestOsIdentifierAsianux7_64Guest = VirtualMachineGuestOsIdentifier("asianux7_64Guest") + VirtualMachineGuestOsIdentifierAsianux8_64Guest = VirtualMachineGuestOsIdentifier("asianux8_64Guest") + VirtualMachineGuestOsIdentifierAsianux9_64Guest = VirtualMachineGuestOsIdentifier("asianux9_64Guest") + VirtualMachineGuestOsIdentifierOpensuseGuest = VirtualMachineGuestOsIdentifier("opensuseGuest") + VirtualMachineGuestOsIdentifierOpensuse64Guest = VirtualMachineGuestOsIdentifier("opensuse64Guest") + VirtualMachineGuestOsIdentifierFedoraGuest = VirtualMachineGuestOsIdentifier("fedoraGuest") + VirtualMachineGuestOsIdentifierFedora64Guest = VirtualMachineGuestOsIdentifier("fedora64Guest") + VirtualMachineGuestOsIdentifierCoreos64Guest = VirtualMachineGuestOsIdentifier("coreos64Guest") + VirtualMachineGuestOsIdentifierVmwarePhoton64Guest = VirtualMachineGuestOsIdentifier("vmwarePhoton64Guest") + VirtualMachineGuestOsIdentifierOther24xLinuxGuest = VirtualMachineGuestOsIdentifier("other24xLinuxGuest") + VirtualMachineGuestOsIdentifierOther26xLinuxGuest = VirtualMachineGuestOsIdentifier("other26xLinuxGuest") + VirtualMachineGuestOsIdentifierOtherLinuxGuest = VirtualMachineGuestOsIdentifier("otherLinuxGuest") + VirtualMachineGuestOsIdentifierOther3xLinuxGuest = VirtualMachineGuestOsIdentifier("other3xLinuxGuest") + VirtualMachineGuestOsIdentifierOther4xLinuxGuest = VirtualMachineGuestOsIdentifier("other4xLinuxGuest") + VirtualMachineGuestOsIdentifierOther5xLinuxGuest = VirtualMachineGuestOsIdentifier("other5xLinuxGuest") + VirtualMachineGuestOsIdentifierGenericLinuxGuest = VirtualMachineGuestOsIdentifier("genericLinuxGuest") + VirtualMachineGuestOsIdentifierOther24xLinux64Guest = VirtualMachineGuestOsIdentifier("other24xLinux64Guest") + VirtualMachineGuestOsIdentifierOther26xLinux64Guest = VirtualMachineGuestOsIdentifier("other26xLinux64Guest") + VirtualMachineGuestOsIdentifierOther3xLinux64Guest = VirtualMachineGuestOsIdentifier("other3xLinux64Guest") + VirtualMachineGuestOsIdentifierOther4xLinux64Guest = VirtualMachineGuestOsIdentifier("other4xLinux64Guest") + VirtualMachineGuestOsIdentifierOther5xLinux64Guest = VirtualMachineGuestOsIdentifier("other5xLinux64Guest") + VirtualMachineGuestOsIdentifierOtherLinux64Guest = VirtualMachineGuestOsIdentifier("otherLinux64Guest") + VirtualMachineGuestOsIdentifierSolaris6Guest = VirtualMachineGuestOsIdentifier("solaris6Guest") + 
VirtualMachineGuestOsIdentifierSolaris7Guest = VirtualMachineGuestOsIdentifier("solaris7Guest") + VirtualMachineGuestOsIdentifierSolaris8Guest = VirtualMachineGuestOsIdentifier("solaris8Guest") + VirtualMachineGuestOsIdentifierSolaris9Guest = VirtualMachineGuestOsIdentifier("solaris9Guest") + VirtualMachineGuestOsIdentifierSolaris10Guest = VirtualMachineGuestOsIdentifier("solaris10Guest") + VirtualMachineGuestOsIdentifierSolaris10_64Guest = VirtualMachineGuestOsIdentifier("solaris10_64Guest") + VirtualMachineGuestOsIdentifierSolaris11_64Guest = VirtualMachineGuestOsIdentifier("solaris11_64Guest") + VirtualMachineGuestOsIdentifierOs2Guest = VirtualMachineGuestOsIdentifier("os2Guest") + VirtualMachineGuestOsIdentifierEComStationGuest = VirtualMachineGuestOsIdentifier("eComStationGuest") + VirtualMachineGuestOsIdentifierEComStation2Guest = VirtualMachineGuestOsIdentifier("eComStation2Guest") + VirtualMachineGuestOsIdentifierNetware4Guest = VirtualMachineGuestOsIdentifier("netware4Guest") + VirtualMachineGuestOsIdentifierNetware5Guest = VirtualMachineGuestOsIdentifier("netware5Guest") + VirtualMachineGuestOsIdentifierNetware6Guest = VirtualMachineGuestOsIdentifier("netware6Guest") + VirtualMachineGuestOsIdentifierOpenServer5Guest = VirtualMachineGuestOsIdentifier("openServer5Guest") + VirtualMachineGuestOsIdentifierOpenServer6Guest = VirtualMachineGuestOsIdentifier("openServer6Guest") + VirtualMachineGuestOsIdentifierUnixWare7Guest = VirtualMachineGuestOsIdentifier("unixWare7Guest") + VirtualMachineGuestOsIdentifierDarwinGuest = VirtualMachineGuestOsIdentifier("darwinGuest") + VirtualMachineGuestOsIdentifierDarwin64Guest = VirtualMachineGuestOsIdentifier("darwin64Guest") + VirtualMachineGuestOsIdentifierDarwin10Guest = VirtualMachineGuestOsIdentifier("darwin10Guest") + VirtualMachineGuestOsIdentifierDarwin10_64Guest = VirtualMachineGuestOsIdentifier("darwin10_64Guest") + VirtualMachineGuestOsIdentifierDarwin11Guest = VirtualMachineGuestOsIdentifier("darwin11Guest") + VirtualMachineGuestOsIdentifierDarwin11_64Guest = VirtualMachineGuestOsIdentifier("darwin11_64Guest") + VirtualMachineGuestOsIdentifierDarwin12_64Guest = VirtualMachineGuestOsIdentifier("darwin12_64Guest") + VirtualMachineGuestOsIdentifierDarwin13_64Guest = VirtualMachineGuestOsIdentifier("darwin13_64Guest") + VirtualMachineGuestOsIdentifierDarwin14_64Guest = VirtualMachineGuestOsIdentifier("darwin14_64Guest") + VirtualMachineGuestOsIdentifierDarwin15_64Guest = VirtualMachineGuestOsIdentifier("darwin15_64Guest") + VirtualMachineGuestOsIdentifierDarwin16_64Guest = VirtualMachineGuestOsIdentifier("darwin16_64Guest") + VirtualMachineGuestOsIdentifierDarwin17_64Guest = VirtualMachineGuestOsIdentifier("darwin17_64Guest") + VirtualMachineGuestOsIdentifierDarwin18_64Guest = VirtualMachineGuestOsIdentifier("darwin18_64Guest") + VirtualMachineGuestOsIdentifierDarwin19_64Guest = VirtualMachineGuestOsIdentifier("darwin19_64Guest") + VirtualMachineGuestOsIdentifierDarwin20_64Guest = VirtualMachineGuestOsIdentifier("darwin20_64Guest") + VirtualMachineGuestOsIdentifierDarwin21_64Guest = VirtualMachineGuestOsIdentifier("darwin21_64Guest") + VirtualMachineGuestOsIdentifierVmkernelGuest = VirtualMachineGuestOsIdentifier("vmkernelGuest") + VirtualMachineGuestOsIdentifierVmkernel5Guest = VirtualMachineGuestOsIdentifier("vmkernel5Guest") + VirtualMachineGuestOsIdentifierVmkernel6Guest = VirtualMachineGuestOsIdentifier("vmkernel6Guest") + VirtualMachineGuestOsIdentifierVmkernel65Guest = VirtualMachineGuestOsIdentifier("vmkernel65Guest") + 
VirtualMachineGuestOsIdentifierVmkernel7Guest = VirtualMachineGuestOsIdentifier("vmkernel7Guest") + VirtualMachineGuestOsIdentifierAmazonlinux2_64Guest = VirtualMachineGuestOsIdentifier("amazonlinux2_64Guest") + VirtualMachineGuestOsIdentifierAmazonlinux3_64Guest = VirtualMachineGuestOsIdentifier("amazonlinux3_64Guest") + VirtualMachineGuestOsIdentifierCrxPod1Guest = VirtualMachineGuestOsIdentifier("crxPod1Guest") + VirtualMachineGuestOsIdentifierOtherGuest = VirtualMachineGuestOsIdentifier("otherGuest") + VirtualMachineGuestOsIdentifierOtherGuest64 = VirtualMachineGuestOsIdentifier("otherGuest64") ) func init() { @@ -4358,6 +4851,17 @@ func init() { t["VirtualMachineScsiPassthroughType"] = reflect.TypeOf((*VirtualMachineScsiPassthroughType)(nil)).Elem() } +type VirtualMachineSgxInfoFlcModes string + +const ( + VirtualMachineSgxInfoFlcModesLocked = VirtualMachineSgxInfoFlcModes("locked") + VirtualMachineSgxInfoFlcModesUnlocked = VirtualMachineSgxInfoFlcModes("unlocked") +) + +func init() { + t["VirtualMachineSgxInfoFlcModes"] = reflect.TypeOf((*VirtualMachineSgxInfoFlcModes)(nil)).Elem() +} + type VirtualMachineStandbyActionType string const ( @@ -4383,11 +4887,12 @@ func init() { type VirtualMachineTicketType string const ( - VirtualMachineTicketTypeMks = VirtualMachineTicketType("mks") - VirtualMachineTicketTypeDevice = VirtualMachineTicketType("device") - VirtualMachineTicketTypeGuestControl = VirtualMachineTicketType("guestControl") - VirtualMachineTicketTypeWebmks = VirtualMachineTicketType("webmks") - VirtualMachineTicketTypeGuestIntegrity = VirtualMachineTicketType("guestIntegrity") + VirtualMachineTicketTypeMks = VirtualMachineTicketType("mks") + VirtualMachineTicketTypeDevice = VirtualMachineTicketType("device") + VirtualMachineTicketTypeGuestControl = VirtualMachineTicketType("guestControl") + VirtualMachineTicketTypeWebmks = VirtualMachineTicketType("webmks") + VirtualMachineTicketTypeGuestIntegrity = VirtualMachineTicketType("guestIntegrity") + VirtualMachineTicketTypeWebRemoteDevice = VirtualMachineTicketType("webRemoteDevice") ) func init() { @@ -4482,11 +4987,12 @@ func init() { type VirtualMachineUsbInfoSpeed string const ( - VirtualMachineUsbInfoSpeedLow = VirtualMachineUsbInfoSpeed("low") - VirtualMachineUsbInfoSpeedFull = VirtualMachineUsbInfoSpeed("full") - VirtualMachineUsbInfoSpeedHigh = VirtualMachineUsbInfoSpeed("high") - VirtualMachineUsbInfoSpeedSuperSpeed = VirtualMachineUsbInfoSpeed("superSpeed") - VirtualMachineUsbInfoSpeedUnknownSpeed = VirtualMachineUsbInfoSpeed("unknownSpeed") + VirtualMachineUsbInfoSpeedLow = VirtualMachineUsbInfoSpeed("low") + VirtualMachineUsbInfoSpeedFull = VirtualMachineUsbInfoSpeed("full") + VirtualMachineUsbInfoSpeedHigh = VirtualMachineUsbInfoSpeed("high") + VirtualMachineUsbInfoSpeedSuperSpeed = VirtualMachineUsbInfoSpeed("superSpeed") + VirtualMachineUsbInfoSpeedSuperSpeedPlus = VirtualMachineUsbInfoSpeed("superSpeedPlus") + VirtualMachineUsbInfoSpeedUnknownSpeed = VirtualMachineUsbInfoSpeed("unknownSpeed") ) func init() { @@ -4543,6 +5049,17 @@ func init() { t["VirtualMachineVideoCardUse3dRenderer"] = reflect.TypeOf((*VirtualMachineVideoCardUse3dRenderer)(nil)).Elem() } +type VirtualMachineVirtualPMemSnapshotMode string + +const ( + VirtualMachineVirtualPMemSnapshotModeIndependent_persistent = VirtualMachineVirtualPMemSnapshotMode("independent_persistent") + VirtualMachineVirtualPMemSnapshotModeIndependent_eraseonrevert = VirtualMachineVirtualPMemSnapshotMode("independent_eraseonrevert") +) + +func init() { + 
t["VirtualMachineVirtualPMemSnapshotMode"] = reflect.TypeOf((*VirtualMachineVirtualPMemSnapshotMode)(nil)).Elem() +} + type VirtualMachineWindowsQuiesceSpecVssBackupContext string const ( @@ -4665,6 +5182,7 @@ const ( VmFaultToleranceConfigIssueReasonForIssueHasEFIFirmware = VmFaultToleranceConfigIssueReasonForIssue("hasEFIFirmware") VmFaultToleranceConfigIssueReasonForIssueTooManyVCPUs = VmFaultToleranceConfigIssueReasonForIssue("tooManyVCPUs") VmFaultToleranceConfigIssueReasonForIssueTooMuchMemory = VmFaultToleranceConfigIssueReasonForIssue("tooMuchMemory") + VmFaultToleranceConfigIssueReasonForIssueUnsupportedPMemHAFailOver = VmFaultToleranceConfigIssueReasonForIssue("unsupportedPMemHAFailOver") ) func init() { diff --git a/vendor/github.com/vmware/govmomi/vim25/types/helpers.go b/vendor/github.com/vmware/govmomi/vim25/types/helpers.go index 95c49f333..5e6245e51 100644 --- a/vendor/github.com/vmware/govmomi/vim25/types/helpers.go +++ b/vendor/github.com/vmware/govmomi/vim25/types/helpers.go @@ -17,6 +17,7 @@ limitations under the License. package types import ( + "net/url" "reflect" "strings" "time" @@ -53,7 +54,7 @@ func (r ManagedObjectReference) String() string { func (r *ManagedObjectReference) FromString(o string) bool { s := strings.SplitN(o, ":", 2) - if len(s) < 2 { + if len(s) != 2 { return false } @@ -63,6 +64,11 @@ func (r *ManagedObjectReference) FromString(o string) bool { return true } +// Encode ManagedObjectReference for use with URL and File paths +func (r ManagedObjectReference) Encode() string { + return strings.Join([]string{r.Type, url.QueryEscape(r.Value)}, "-") +} + func (c *PerfCounterInfo) Name() string { return c.GroupInfo.GetElementDescription().Key + "." + c.NameInfo.GetElementDescription().Key + "." + string(c.RollupType) } diff --git a/vendor/github.com/vmware/govmomi/vim25/types/if.go b/vendor/github.com/vmware/govmomi/vim25/types/if.go index 7576198e6..6712beb8e 100644 --- a/vendor/github.com/vmware/govmomi/vim25/types/if.go +++ b/vendor/github.com/vmware/govmomi/vim25/types/if.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2014-2018 VMware, Inc. All Rights Reserved. +Copyright (c) 2014-2021 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -214,6 +214,18 @@ func init() { t["BaseClusterAction"] = reflect.TypeOf((*ClusterAction)(nil)).Elem() } +func (b *ClusterComputeResourceValidationResultBase) GetClusterComputeResourceValidationResultBase() *ClusterComputeResourceValidationResultBase { + return b +} + +type BaseClusterComputeResourceValidationResultBase interface { + GetClusterComputeResourceValidationResultBase() *ClusterComputeResourceValidationResultBase +} + +func init() { + t["BaseClusterComputeResourceValidationResultBase"] = reflect.TypeOf((*ClusterComputeResourceValidationResultBase)(nil)).Elem() +} + func (b *ClusterDasAdmissionControlInfo) GetClusterDasAdmissionControlInfo() *ClusterDasAdmissionControlInfo { return b } @@ -786,7 +798,6 @@ func (b *DvsFilterConfig) GetDvsFilterConfig() *DvsFilterConfig { return b } type BaseDvsFilterConfig interface { GetDvsFilterConfig() *DvsFilterConfig - GetDvsTrafficFilterConfig() *DvsTrafficFilterConfig } func init() { @@ -829,21 +840,12 @@ func (b *DvsNetworkRuleQualifier) GetDvsNetworkRuleQualifier() *DvsNetworkRuleQu type BaseDvsNetworkRuleQualifier interface { GetDvsNetworkRuleQualifier() *DvsNetworkRuleQualifier - GetDvsIpNetworkRuleQualifier() *DvsIpNetworkRuleQualifier } func init() { t["BaseDvsNetworkRuleQualifier"] = reflect.TypeOf((*DvsNetworkRuleQualifier)(nil)).Elem() } -func (b *DvsIpNetworkRuleQualifier) GetDvsIpNetworkRuleQualifier() *DvsIpNetworkRuleQualifier { - return b -} - -type BaseDvsIpNetworkRuleQualifier interface { - GetDvsIpNetworkRuleQualifier() *DvsIpNetworkRuleQualifier -} - func (b *DvsTrafficFilterConfig) GetDvsTrafficFilterConfig() *DvsTrafficFilterConfig { return b } type BaseDvsTrafficFilterConfig interface { @@ -1254,6 +1256,18 @@ func init() { t["BaseHostDasEvent"] = reflect.TypeOf((*HostDasEvent)(nil)).Elem() } +func (b *HostDataTransportConnectionInfo) GetHostDataTransportConnectionInfo() *HostDataTransportConnectionInfo { + return b +} + +type BaseHostDataTransportConnectionInfo interface { + GetHostDataTransportConnectionInfo() *HostDataTransportConnectionInfo +} + +func init() { + t["BaseHostDataTransportConnectionInfo"] = reflect.TypeOf((*HostDataTransportConnectionInfo)(nil)).Elem() +} + func (b *HostDatastoreConnectInfo) GetHostDatastoreConnectInfo() *HostDatastoreConnectInfo { return b } type BaseHostDatastoreConnectInfo interface { @@ -1356,6 +1370,16 @@ func init() { t["BaseHostHardwareElementInfo"] = reflect.TypeOf((*HostHardwareElementInfo)(nil)).Elem() } +func (b *HostHbaCreateSpec) GetHostHbaCreateSpec() *HostHbaCreateSpec { return b } + +type BaseHostHbaCreateSpec interface { + GetHostHbaCreateSpec() *HostHbaCreateSpec +} + +func init() { + t["BaseHostHbaCreateSpec"] = reflect.TypeOf((*HostHbaCreateSpec)(nil)).Elem() +} + func (b *HostHostBusAdapter) GetHostHostBusAdapter() *HostHostBusAdapter { return b } type BaseHostHostBusAdapter interface { @@ -1412,6 +1436,28 @@ func init() { t["BaseHostMultipathInfoLogicalUnitPolicy"] = reflect.TypeOf((*HostMultipathInfoLogicalUnitPolicy)(nil)).Elem() } +func (b *HostNvmeSpec) GetHostNvmeSpec() *HostNvmeSpec { return b } + +type BaseHostNvmeSpec interface { + GetHostNvmeSpec() *HostNvmeSpec +} + +func init() { + t["BaseHostNvmeSpec"] = reflect.TypeOf((*HostNvmeSpec)(nil)).Elem() +} + +func (b *HostNvmeTransportParameters) GetHostNvmeTransportParameters() *HostNvmeTransportParameters { + return b +} + +type BaseHostNvmeTransportParameters interface { + GetHostNvmeTransportParameters() *HostNvmeTransportParameters +} + +func init() { + t["BaseHostNvmeTransportParameters"] = 
reflect.TypeOf((*HostNvmeTransportParameters)(nil)).Elem() +} + func (b *HostPciPassthruConfig) GetHostPciPassthruConfig() *HostPciPassthruConfig { return b } type BaseHostPciPassthruConfig interface { @@ -1464,6 +1510,16 @@ func init() { t["BaseHostProfilesEntityCustomizations"] = reflect.TypeOf((*HostProfilesEntityCustomizations)(nil)).Elem() } +func (b *HostRdmaDeviceBacking) GetHostRdmaDeviceBacking() *HostRdmaDeviceBacking { return b } + +type BaseHostRdmaDeviceBacking interface { + GetHostRdmaDeviceBacking() *HostRdmaDeviceBacking +} + +func init() { + t["BaseHostRdmaDeviceBacking"] = reflect.TypeOf((*HostRdmaDeviceBacking)(nil)).Elem() +} + func (b *HostSriovDevicePoolInfo) GetHostSriovDevicePoolInfo() *HostSriovDevicePoolInfo { return b } type BaseHostSriovDevicePoolInfo interface { @@ -1496,6 +1552,18 @@ func init() { t["BaseHostTargetTransport"] = reflect.TypeOf((*HostTargetTransport)(nil)).Elem() } +func (b *HostTpmBootSecurityOptionEventDetails) GetHostTpmBootSecurityOptionEventDetails() *HostTpmBootSecurityOptionEventDetails { + return b +} + +type BaseHostTpmBootSecurityOptionEventDetails interface { + GetHostTpmBootSecurityOptionEventDetails() *HostTpmBootSecurityOptionEventDetails +} + +func init() { + t["BaseHostTpmBootSecurityOptionEventDetails"] = reflect.TypeOf((*HostTpmBootSecurityOptionEventDetails)(nil)).Elem() +} + func (b *HostTpmEventDetails) GetHostTpmEventDetails() *HostTpmEventDetails { return b } type BaseHostTpmEventDetails interface { @@ -2966,6 +3034,18 @@ func init() { t["BaseVirtualHardwareCompatibilityIssue"] = reflect.TypeOf((*VirtualHardwareCompatibilityIssue)(nil)).Elem() } +func (b *VirtualMachineBaseIndependentFilterSpec) GetVirtualMachineBaseIndependentFilterSpec() *VirtualMachineBaseIndependentFilterSpec { + return b +} + +type BaseVirtualMachineBaseIndependentFilterSpec interface { + GetVirtualMachineBaseIndependentFilterSpec() *VirtualMachineBaseIndependentFilterSpec +} + +func init() { + t["BaseVirtualMachineBaseIndependentFilterSpec"] = reflect.TypeOf((*VirtualMachineBaseIndependentFilterSpec)(nil)).Elem() +} + func (b *VirtualMachineBootOptionsBootableDevice) GetVirtualMachineBootOptionsBootableDevice() *VirtualMachineBootOptionsBootableDevice { return b } @@ -2978,6 +3058,16 @@ func init() { t["BaseVirtualMachineBootOptionsBootableDevice"] = reflect.TypeOf((*VirtualMachineBootOptionsBootableDevice)(nil)).Elem() } +func (b *VirtualMachineConnection) GetVirtualMachineConnection() *VirtualMachineConnection { return b } + +type BaseVirtualMachineConnection interface { + GetVirtualMachineConnection() *VirtualMachineConnection +} + +func init() { + t["BaseVirtualMachineConnection"] = reflect.TypeOf((*VirtualMachineConnection)(nil)).Elem() +} + func (b *VirtualMachineDeviceRuntimeInfoDeviceRuntimeState) GetVirtualMachineDeviceRuntimeInfoDeviceRuntimeState() *VirtualMachineDeviceRuntimeInfoDeviceRuntimeState { return b } diff --git a/vendor/github.com/vmware/govmomi/vim25/types/types.go b/vendor/github.com/vmware/govmomi/vim25/types/types.go index e990e273e..d9c0f4b59 100644 --- a/vendor/github.com/vmware/govmomi/vim25/types/types.go +++ b/vendor/github.com/vmware/govmomi/vim25/types/types.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2014-2018 VMware, Inc. All Rights Reserved. +Copyright (c) 2014-2021 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -21,6 +21,23 @@ import ( "time" ) +type AbandonHciWorkflow AbandonHciWorkflowRequestType + +func init() { + t["AbandonHciWorkflow"] = reflect.TypeOf((*AbandonHciWorkflow)(nil)).Elem() +} + +type AbandonHciWorkflowRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["AbandonHciWorkflowRequestType"] = reflect.TypeOf((*AbandonHciWorkflowRequestType)(nil)).Elem() +} + +type AbandonHciWorkflowResponse struct { +} + type AbdicateDomOwnership AbdicateDomOwnershipRequestType func init() { @@ -40,6 +57,26 @@ type AbdicateDomOwnershipResponse struct { Returnval []string `xml:"returnval,omitempty"` } +type AbortCustomizationRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Auth BaseGuestAuthentication `xml:"auth,typeattr"` +} + +func init() { + t["AbortCustomizationRequestType"] = reflect.TypeOf((*AbortCustomizationRequestType)(nil)).Elem() +} + +type AbortCustomization_Task AbortCustomizationRequestType + +func init() { + t["AbortCustomization_Task"] = reflect.TypeOf((*AbortCustomization_Task)(nil)).Elem() +} + +type AbortCustomization_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + type AboutInfo struct { DynamicData @@ -47,6 +84,7 @@ type AboutInfo struct { FullName string `xml:"fullName"` Vendor string `xml:"vendor"` Version string `xml:"version"` + PatchLevel string `xml:"patchLevel,omitempty"` Build string `xml:"build"` LocaleVersion string `xml:"localeVersion,omitempty"` LocaleBuild string `xml:"localeBuild,omitempty"` @@ -1028,6 +1066,7 @@ type AlarmState struct { AcknowledgedByUser string `xml:"acknowledgedByUser,omitempty"` AcknowledgedTime *time.Time `xml:"acknowledgedTime"` EventKey int32 `xml:"eventKey,omitempty"` + Disabled *bool `xml:"disabled"` } func init() { @@ -1709,6 +1748,70 @@ func init() { t["ArrayOfClusterAttemptedVmInfo"] = reflect.TypeOf((*ArrayOfClusterAttemptedVmInfo)(nil)).Elem() } +type ArrayOfClusterComputeResourceDVSSetting struct { + ClusterComputeResourceDVSSetting []ClusterComputeResourceDVSSetting `xml:"ClusterComputeResourceDVSSetting,omitempty"` +} + +func init() { + t["ArrayOfClusterComputeResourceDVSSetting"] = reflect.TypeOf((*ArrayOfClusterComputeResourceDVSSetting)(nil)).Elem() +} + +type ArrayOfClusterComputeResourceDVSSettingDVPortgroupToServiceMapping struct { + ClusterComputeResourceDVSSettingDVPortgroupToServiceMapping []ClusterComputeResourceDVSSettingDVPortgroupToServiceMapping `xml:"ClusterComputeResourceDVSSettingDVPortgroupToServiceMapping,omitempty"` +} + +func init() { + t["ArrayOfClusterComputeResourceDVSSettingDVPortgroupToServiceMapping"] = reflect.TypeOf((*ArrayOfClusterComputeResourceDVSSettingDVPortgroupToServiceMapping)(nil)).Elem() +} + +type ArrayOfClusterComputeResourceDvsProfile struct { + ClusterComputeResourceDvsProfile []ClusterComputeResourceDvsProfile `xml:"ClusterComputeResourceDvsProfile,omitempty"` +} + +func init() { + t["ArrayOfClusterComputeResourceDvsProfile"] = reflect.TypeOf((*ArrayOfClusterComputeResourceDvsProfile)(nil)).Elem() +} + +type ArrayOfClusterComputeResourceDvsProfileDVPortgroupSpecToServiceMapping struct { + ClusterComputeResourceDvsProfileDVPortgroupSpecToServiceMapping []ClusterComputeResourceDvsProfileDVPortgroupSpecToServiceMapping `xml:"ClusterComputeResourceDvsProfileDVPortgroupSpecToServiceMapping,omitempty"` +} + +func init() { + t["ArrayOfClusterComputeResourceDvsProfileDVPortgroupSpecToServiceMapping"] = 
reflect.TypeOf((*ArrayOfClusterComputeResourceDvsProfileDVPortgroupSpecToServiceMapping)(nil)).Elem() +} + +type ArrayOfClusterComputeResourceHostConfigurationInput struct { + ClusterComputeResourceHostConfigurationInput []ClusterComputeResourceHostConfigurationInput `xml:"ClusterComputeResourceHostConfigurationInput,omitempty"` +} + +func init() { + t["ArrayOfClusterComputeResourceHostConfigurationInput"] = reflect.TypeOf((*ArrayOfClusterComputeResourceHostConfigurationInput)(nil)).Elem() +} + +type ArrayOfClusterComputeResourceHostVmkNicInfo struct { + ClusterComputeResourceHostVmkNicInfo []ClusterComputeResourceHostVmkNicInfo `xml:"ClusterComputeResourceHostVmkNicInfo,omitempty"` +} + +func init() { + t["ArrayOfClusterComputeResourceHostVmkNicInfo"] = reflect.TypeOf((*ArrayOfClusterComputeResourceHostVmkNicInfo)(nil)).Elem() +} + +type ArrayOfClusterComputeResourceValidationResultBase struct { + ClusterComputeResourceValidationResultBase []BaseClusterComputeResourceValidationResultBase `xml:"ClusterComputeResourceValidationResultBase,omitempty,typeattr"` +} + +func init() { + t["ArrayOfClusterComputeResourceValidationResultBase"] = reflect.TypeOf((*ArrayOfClusterComputeResourceValidationResultBase)(nil)).Elem() +} + +type ArrayOfClusterComputeResourceVcsSlots struct { + ClusterComputeResourceVcsSlots []ClusterComputeResourceVcsSlots `xml:"ClusterComputeResourceVcsSlots,omitempty"` +} + +func init() { + t["ArrayOfClusterComputeResourceVcsSlots"] = reflect.TypeOf((*ArrayOfClusterComputeResourceVcsSlots)(nil)).Elem() +} + type ArrayOfClusterDasAamNodeState struct { ClusterDasAamNodeState []ClusterDasAamNodeState `xml:"ClusterDasAamNodeState,omitempty"` } @@ -1749,6 +1852,14 @@ func init() { t["ArrayOfClusterDasVmConfigSpec"] = reflect.TypeOf((*ArrayOfClusterDasVmConfigSpec)(nil)).Elem() } +type ArrayOfClusterDatastoreUpdateSpec struct { + ClusterDatastoreUpdateSpec []ClusterDatastoreUpdateSpec `xml:"ClusterDatastoreUpdateSpec,omitempty"` +} + +func init() { + t["ArrayOfClusterDatastoreUpdateSpec"] = reflect.TypeOf((*ArrayOfClusterDatastoreUpdateSpec)(nil)).Elem() +} + type ArrayOfClusterDpmHostConfigInfo struct { ClusterDpmHostConfigInfo []ClusterDpmHostConfigInfo `xml:"ClusterDpmHostConfigInfo,omitempty"` } @@ -1893,6 +2004,14 @@ func init() { t["ArrayOfClusterRuleSpec"] = reflect.TypeOf((*ArrayOfClusterRuleSpec)(nil)).Elem() } +type ArrayOfClusterTagCategoryUpdateSpec struct { + ClusterTagCategoryUpdateSpec []ClusterTagCategoryUpdateSpec `xml:"ClusterTagCategoryUpdateSpec,omitempty"` +} + +func init() { + t["ArrayOfClusterTagCategoryUpdateSpec"] = reflect.TypeOf((*ArrayOfClusterTagCategoryUpdateSpec)(nil)).Elem() +} + type ArrayOfClusterVmOrchestrationInfo struct { ClusterVmOrchestrationInfo []ClusterVmOrchestrationInfo `xml:"ClusterVmOrchestrationInfo,omitempty"` } @@ -1989,6 +2108,14 @@ func init() { t["ArrayOfCryptoManagerKmipClusterStatus"] = reflect.TypeOf((*ArrayOfCryptoManagerKmipClusterStatus)(nil)).Elem() } +type ArrayOfCryptoManagerKmipCryptoKeyStatus struct { + CryptoManagerKmipCryptoKeyStatus []CryptoManagerKmipCryptoKeyStatus `xml:"CryptoManagerKmipCryptoKeyStatus,omitempty"` +} + +func init() { + t["ArrayOfCryptoManagerKmipCryptoKeyStatus"] = reflect.TypeOf((*ArrayOfCryptoManagerKmipCryptoKeyStatus)(nil)).Elem() +} + type ArrayOfCryptoManagerKmipServerStatus struct { CryptoManagerKmipServerStatus []CryptoManagerKmipServerStatus `xml:"CryptoManagerKmipServerStatus,omitempty"` } @@ -2093,6 +2220,14 @@ func init() { t["ArrayOfDasHeartbeatDatastoreInfo"] = 
reflect.TypeOf((*ArrayOfDasHeartbeatDatastoreInfo)(nil)).Elem() } +type ArrayOfDatacenterBasicConnectInfo struct { + DatacenterBasicConnectInfo []DatacenterBasicConnectInfo `xml:"DatacenterBasicConnectInfo,omitempty"` +} + +func init() { + t["ArrayOfDatacenterBasicConnectInfo"] = reflect.TypeOf((*ArrayOfDatacenterBasicConnectInfo)(nil)).Elem() +} + type ArrayOfDatacenterMismatchArgument struct { DatacenterMismatchArgument []DatacenterMismatchArgument `xml:"DatacenterMismatchArgument,omitempty"` } @@ -2125,6 +2260,14 @@ func init() { t["ArrayOfDatastoreVVolContainerFailoverPair"] = reflect.TypeOf((*ArrayOfDatastoreVVolContainerFailoverPair)(nil)).Elem() } +type ArrayOfDesiredSoftwareSpecComponentSpec struct { + DesiredSoftwareSpecComponentSpec []DesiredSoftwareSpecComponentSpec `xml:"DesiredSoftwareSpecComponentSpec,omitempty"` +} + +func init() { + t["ArrayOfDesiredSoftwareSpecComponentSpec"] = reflect.TypeOf((*ArrayOfDesiredSoftwareSpecComponentSpec)(nil)).Elem() +} + type ArrayOfDiagnosticManagerBundleInfo struct { DiagnosticManagerBundleInfo []DiagnosticManagerBundleInfo `xml:"DiagnosticManagerBundleInfo,omitempty"` } @@ -2165,6 +2308,14 @@ func init() { t["ArrayOfDistributedVirtualPortgroupInfo"] = reflect.TypeOf((*ArrayOfDistributedVirtualPortgroupInfo)(nil)).Elem() } +type ArrayOfDistributedVirtualPortgroupProblem struct { + DistributedVirtualPortgroupProblem []DistributedVirtualPortgroupProblem `xml:"DistributedVirtualPortgroupProblem,omitempty"` +} + +func init() { + t["ArrayOfDistributedVirtualPortgroupProblem"] = reflect.TypeOf((*ArrayOfDistributedVirtualPortgroupProblem)(nil)).Elem() +} + type ArrayOfDistributedVirtualSwitchHostMember struct { DistributedVirtualSwitchHostMember []DistributedVirtualSwitchHostMember `xml:"DistributedVirtualSwitchHostMember,omitempty"` } @@ -2189,6 +2340,14 @@ func init() { t["ArrayOfDistributedVirtualSwitchHostMemberPnicSpec"] = reflect.TypeOf((*ArrayOfDistributedVirtualSwitchHostMemberPnicSpec)(nil)).Elem() } +type ArrayOfDistributedVirtualSwitchHostMemberTransportZoneInfo struct { + DistributedVirtualSwitchHostMemberTransportZoneInfo []DistributedVirtualSwitchHostMemberTransportZoneInfo `xml:"DistributedVirtualSwitchHostMemberTransportZoneInfo,omitempty"` +} + +func init() { + t["ArrayOfDistributedVirtualSwitchHostMemberTransportZoneInfo"] = reflect.TypeOf((*ArrayOfDistributedVirtualSwitchHostMemberTransportZoneInfo)(nil)).Elem() +} + type ArrayOfDistributedVirtualSwitchHostProductSpec struct { DistributedVirtualSwitchHostProductSpec []DistributedVirtualSwitchHostProductSpec `xml:"DistributedVirtualSwitchHostProductSpec,omitempty"` } @@ -2557,6 +2716,14 @@ func init() { t["ArrayOfFcoeConfigVlanRange"] = reflect.TypeOf((*ArrayOfFcoeConfigVlanRange)(nil)).Elem() } +type ArrayOfFeatureEVCMode struct { + FeatureEVCMode []FeatureEVCMode `xml:"FeatureEVCMode,omitempty"` +} + +func init() { + t["ArrayOfFeatureEVCMode"] = reflect.TypeOf((*ArrayOfFeatureEVCMode)(nil)).Elem() +} + type ArrayOfFileInfo struct { FileInfo []BaseFileInfo `xml:"FileInfo,omitempty,typeattr"` } @@ -2581,6 +2748,22 @@ func init() { t["ArrayOfFirewallProfileRulesetProfile"] = reflect.TypeOf((*ArrayOfFirewallProfileRulesetProfile)(nil)).Elem() } +type ArrayOfFolderFailedHostResult struct { + FolderFailedHostResult []FolderFailedHostResult `xml:"FolderFailedHostResult,omitempty"` +} + +func init() { + t["ArrayOfFolderFailedHostResult"] = reflect.TypeOf((*ArrayOfFolderFailedHostResult)(nil)).Elem() +} + +type ArrayOfFolderNewHostSpec struct { + FolderNewHostSpec []FolderNewHostSpec 
`xml:"FolderNewHostSpec,omitempty"` +} + +func init() { + t["ArrayOfFolderNewHostSpec"] = reflect.TypeOf((*ArrayOfFolderNewHostSpec)(nil)).Elem() +} + type ArrayOfGuestAliases struct { GuestAliases []GuestAliases `xml:"GuestAliases,omitempty"` } @@ -2629,6 +2812,14 @@ func init() { t["ArrayOfGuestInfoNamespaceGenerationInfo"] = reflect.TypeOf((*ArrayOfGuestInfoNamespaceGenerationInfo)(nil)).Elem() } +type ArrayOfGuestInfoVirtualDiskMapping struct { + GuestInfoVirtualDiskMapping []GuestInfoVirtualDiskMapping `xml:"GuestInfoVirtualDiskMapping,omitempty"` +} + +func init() { + t["ArrayOfGuestInfoVirtualDiskMapping"] = reflect.TypeOf((*ArrayOfGuestInfoVirtualDiskMapping)(nil)).Elem() +} + type ArrayOfGuestMappedAliases struct { GuestMappedAliases []GuestMappedAliases `xml:"GuestMappedAliases,omitempty"` } @@ -2733,6 +2924,22 @@ func init() { t["ArrayOfHostActiveDirectory"] = reflect.TypeOf((*ArrayOfHostActiveDirectory)(nil)).Elem() } +type ArrayOfHostAssignableHardwareBinding struct { + HostAssignableHardwareBinding []HostAssignableHardwareBinding `xml:"HostAssignableHardwareBinding,omitempty"` +} + +func init() { + t["ArrayOfHostAssignableHardwareBinding"] = reflect.TypeOf((*ArrayOfHostAssignableHardwareBinding)(nil)).Elem() +} + +type ArrayOfHostAssignableHardwareConfigAttributeOverride struct { + HostAssignableHardwareConfigAttributeOverride []HostAssignableHardwareConfigAttributeOverride `xml:"HostAssignableHardwareConfigAttributeOverride,omitempty"` +} + +func init() { + t["ArrayOfHostAssignableHardwareConfigAttributeOverride"] = reflect.TypeOf((*ArrayOfHostAssignableHardwareConfigAttributeOverride)(nil)).Elem() +} + type ArrayOfHostAuthenticationStoreInfo struct { HostAuthenticationStoreInfo []BaseHostAuthenticationStoreInfo `xml:"HostAuthenticationStoreInfo,omitempty,typeattr"` } @@ -2765,6 +2972,14 @@ func init() { t["ArrayOfHostConnectInfoNetworkInfo"] = reflect.TypeOf((*ArrayOfHostConnectInfoNetworkInfo)(nil)).Elem() } +type ArrayOfHostConnectSpec struct { + HostConnectSpec []HostConnectSpec `xml:"HostConnectSpec,omitempty"` +} + +func init() { + t["ArrayOfHostConnectSpec"] = reflect.TypeOf((*ArrayOfHostConnectSpec)(nil)).Elem() +} + type ArrayOfHostCpuIdInfo struct { HostCpuIdInfo []HostCpuIdInfo `xml:"HostCpuIdInfo,omitempty"` } @@ -3117,6 +3332,14 @@ func init() { t["ArrayOfHostMemberRuntimeInfo"] = reflect.TypeOf((*ArrayOfHostMemberRuntimeInfo)(nil)).Elem() } +type ArrayOfHostMemoryTierInfo struct { + HostMemoryTierInfo []HostMemoryTierInfo `xml:"HostMemoryTierInfo,omitempty"` +} + +func init() { + t["ArrayOfHostMemoryTierInfo"] = reflect.TypeOf((*ArrayOfHostMemoryTierInfo)(nil)).Elem() +} + type ArrayOfHostMultipathInfoLogicalUnit struct { HostMultipathInfoLogicalUnit []HostMultipathInfoLogicalUnit `xml:"HostMultipathInfoLogicalUnit,omitempty"` } @@ -3205,6 +3428,54 @@ func init() { t["ArrayOfHostNumericSensorInfo"] = reflect.TypeOf((*ArrayOfHostNumericSensorInfo)(nil)).Elem() } +type ArrayOfHostNvmeConnectSpec struct { + HostNvmeConnectSpec []HostNvmeConnectSpec `xml:"HostNvmeConnectSpec,omitempty"` +} + +func init() { + t["ArrayOfHostNvmeConnectSpec"] = reflect.TypeOf((*ArrayOfHostNvmeConnectSpec)(nil)).Elem() +} + +type ArrayOfHostNvmeController struct { + HostNvmeController []HostNvmeController `xml:"HostNvmeController,omitempty"` +} + +func init() { + t["ArrayOfHostNvmeController"] = reflect.TypeOf((*ArrayOfHostNvmeController)(nil)).Elem() +} + +type ArrayOfHostNvmeDisconnectSpec struct { + HostNvmeDisconnectSpec []HostNvmeDisconnectSpec 
`xml:"HostNvmeDisconnectSpec,omitempty"` +} + +func init() { + t["ArrayOfHostNvmeDisconnectSpec"] = reflect.TypeOf((*ArrayOfHostNvmeDisconnectSpec)(nil)).Elem() +} + +type ArrayOfHostNvmeDiscoveryLogEntry struct { + HostNvmeDiscoveryLogEntry []HostNvmeDiscoveryLogEntry `xml:"HostNvmeDiscoveryLogEntry,omitempty"` +} + +func init() { + t["ArrayOfHostNvmeDiscoveryLogEntry"] = reflect.TypeOf((*ArrayOfHostNvmeDiscoveryLogEntry)(nil)).Elem() +} + +type ArrayOfHostNvmeNamespace struct { + HostNvmeNamespace []HostNvmeNamespace `xml:"HostNvmeNamespace,omitempty"` +} + +func init() { + t["ArrayOfHostNvmeNamespace"] = reflect.TypeOf((*ArrayOfHostNvmeNamespace)(nil)).Elem() +} + +type ArrayOfHostNvmeTopologyInterface struct { + HostNvmeTopologyInterface []HostNvmeTopologyInterface `xml:"HostNvmeTopologyInterface,omitempty"` +} + +func init() { + t["ArrayOfHostNvmeTopologyInterface"] = reflect.TypeOf((*ArrayOfHostNvmeTopologyInterface)(nil)).Elem() +} + type ArrayOfHostOpaqueNetworkInfo struct { HostOpaqueNetworkInfo []HostOpaqueNetworkInfo `xml:"HostOpaqueNetworkInfo,omitempty"` } @@ -3389,14 +3660,6 @@ func init() { t["ArrayOfHostProfileManagerCompositionValidationResultResultElement"] = reflect.TypeOf((*ArrayOfHostProfileManagerCompositionValidationResultResultElement)(nil)).Elem() } -type ArrayOfHostProfileManagerHostToConfigSpecMap struct { - HostProfileManagerHostToConfigSpecMap []HostProfileManagerHostToConfigSpecMap `xml:"HostProfileManagerHostToConfigSpecMap,omitempty"` -} - -func init() { - t["ArrayOfHostProfileManagerHostToConfigSpecMap"] = reflect.TypeOf((*ArrayOfHostProfileManagerHostToConfigSpecMap)(nil)).Elem() -} - type ArrayOfHostProfilesEntityCustomizations struct { HostProfilesEntityCustomizations []BaseHostProfilesEntityCustomizations `xml:"HostProfilesEntityCustomizations,omitempty,typeattr"` } @@ -3437,6 +3700,30 @@ func init() { t["ArrayOfHostProxySwitchHostLagConfig"] = reflect.TypeOf((*ArrayOfHostProxySwitchHostLagConfig)(nil)).Elem() } +type ArrayOfHostPtpConfigPtpPort struct { + HostPtpConfigPtpPort []HostPtpConfigPtpPort `xml:"HostPtpConfigPtpPort,omitempty"` +} + +func init() { + t["ArrayOfHostPtpConfigPtpPort"] = reflect.TypeOf((*ArrayOfHostPtpConfigPtpPort)(nil)).Elem() +} + +type ArrayOfHostQualifiedName struct { + HostQualifiedName []HostQualifiedName `xml:"HostQualifiedName,omitempty"` +} + +func init() { + t["ArrayOfHostQualifiedName"] = reflect.TypeOf((*ArrayOfHostQualifiedName)(nil)).Elem() +} + +type ArrayOfHostRdmaDevice struct { + HostRdmaDevice []HostRdmaDevice `xml:"HostRdmaDevice,omitempty"` +} + +func init() { + t["ArrayOfHostRdmaDevice"] = reflect.TypeOf((*ArrayOfHostRdmaDevice)(nil)).Elem() +} + type ArrayOfHostRuntimeInfoNetStackInstanceRuntimeInfo struct { HostRuntimeInfoNetStackInstanceRuntimeInfo []HostRuntimeInfoNetStackInstanceRuntimeInfo `xml:"HostRuntimeInfoNetStackInstanceRuntimeInfo,omitempty"` } @@ -3629,6 +3916,14 @@ func init() { t["ArrayOfHostTpmEventLogEntry"] = reflect.TypeOf((*ArrayOfHostTpmEventLogEntry)(nil)).Elem() } +type ArrayOfHostTrustAuthorityAttestationInfo struct { + HostTrustAuthorityAttestationInfo []HostTrustAuthorityAttestationInfo `xml:"HostTrustAuthorityAttestationInfo,omitempty"` +} + +func init() { + t["ArrayOfHostTrustAuthorityAttestationInfo"] = reflect.TypeOf((*ArrayOfHostTrustAuthorityAttestationInfo)(nil)).Elem() +} + type ArrayOfHostUnresolvedVmfsExtent struct { HostUnresolvedVmfsExtent []HostUnresolvedVmfsExtent `xml:"HostUnresolvedVmfsExtent,omitempty"` } @@ -3797,6 +4092,14 @@ func init() { 
t["ArrayOfHttpNfcLeaseManifestEntry"] = reflect.TypeOf((*ArrayOfHttpNfcLeaseManifestEntry)(nil)).Elem() } +type ArrayOfHttpNfcLeaseProbeResult struct { + HttpNfcLeaseProbeResult []HttpNfcLeaseProbeResult `xml:"HttpNfcLeaseProbeResult,omitempty"` +} + +func init() { + t["ArrayOfHttpNfcLeaseProbeResult"] = reflect.TypeOf((*ArrayOfHttpNfcLeaseProbeResult)(nil)).Elem() +} + type ArrayOfHttpNfcLeaseSourceFile struct { HttpNfcLeaseSourceFile []HttpNfcLeaseSourceFile `xml:"HttpNfcLeaseSourceFile,omitempty"` } @@ -4149,6 +4452,14 @@ func init() { t["ArrayOfNvdimmInterleaveSetInfo"] = reflect.TypeOf((*ArrayOfNvdimmInterleaveSetInfo)(nil)).Elem() } +type ArrayOfNvdimmNamespaceDetails struct { + NvdimmNamespaceDetails []NvdimmNamespaceDetails `xml:"NvdimmNamespaceDetails,omitempty"` +} + +func init() { + t["ArrayOfNvdimmNamespaceDetails"] = reflect.TypeOf((*ArrayOfNvdimmNamespaceDetails)(nil)).Elem() +} + type ArrayOfNvdimmNamespaceInfo struct { NvdimmNamespaceInfo []NvdimmNamespaceInfo `xml:"NvdimmNamespaceInfo,omitempty"` } @@ -4909,6 +5220,14 @@ func init() { t["ArrayOfUpdateVirtualMachineFilesResultFailedVmFileInfo"] = reflect.TypeOf((*ArrayOfUpdateVirtualMachineFilesResultFailedVmFileInfo)(nil)).Elem() } +type ArrayOfUri struct { + Uri []string `xml:"uri,omitempty"` +} + +func init() { + t["ArrayOfUri"] = reflect.TypeOf((*ArrayOfUri)(nil)).Elem() +} + type ArrayOfUsbScanCodeSpecKeyEvent struct { UsbScanCodeSpecKeyEvent []UsbScanCodeSpecKeyEvent `xml:"UsbScanCodeSpecKeyEvent,omitempty"` } @@ -5221,6 +5540,14 @@ func init() { t["ArrayOfVirtualDiskRuleSpec"] = reflect.TypeOf((*ArrayOfVirtualDiskRuleSpec)(nil)).Elem() } +type ArrayOfVirtualMachineBaseIndependentFilterSpec struct { + VirtualMachineBaseIndependentFilterSpec []BaseVirtualMachineBaseIndependentFilterSpec `xml:"VirtualMachineBaseIndependentFilterSpec,omitempty,typeattr"` +} + +func init() { + t["ArrayOfVirtualMachineBaseIndependentFilterSpec"] = reflect.TypeOf((*ArrayOfVirtualMachineBaseIndependentFilterSpec)(nil)).Elem() +} + type ArrayOfVirtualMachineBootOptionsBootableDevice struct { VirtualMachineBootOptionsBootableDevice []BaseVirtualMachineBootOptionsBootableDevice `xml:"VirtualMachineBootOptionsBootableDevice,omitempty,typeattr"` } @@ -5253,6 +5580,22 @@ func init() { t["ArrayOfVirtualMachineConfigOptionDescriptor"] = reflect.TypeOf((*ArrayOfVirtualMachineConfigOptionDescriptor)(nil)).Elem() } +type ArrayOfVirtualMachineConfigSpec struct { + VirtualMachineConfigSpec []VirtualMachineConfigSpec `xml:"VirtualMachineConfigSpec,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineConfigSpec"] = reflect.TypeOf((*ArrayOfVirtualMachineConfigSpec)(nil)).Elem() +} + +type ArrayOfVirtualMachineConnection struct { + VirtualMachineConnection []BaseVirtualMachineConnection `xml:"VirtualMachineConnection,omitempty,typeattr"` +} + +func init() { + t["ArrayOfVirtualMachineConnection"] = reflect.TypeOf((*ArrayOfVirtualMachineConnection)(nil)).Elem() +} + type ArrayOfVirtualMachineCpuIdInfoSpec struct { VirtualMachineCpuIdInfoSpec []VirtualMachineCpuIdInfoSpec `xml:"VirtualMachineCpuIdInfoSpec,omitempty"` } @@ -5293,6 +5636,14 @@ func init() { t["ArrayOfVirtualMachineDisplayTopology"] = reflect.TypeOf((*ArrayOfVirtualMachineDisplayTopology)(nil)).Elem() } +type ArrayOfVirtualMachineDynamicPassthroughInfo struct { + VirtualMachineDynamicPassthroughInfo []VirtualMachineDynamicPassthroughInfo `xml:"VirtualMachineDynamicPassthroughInfo,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineDynamicPassthroughInfo"] = 
reflect.TypeOf((*ArrayOfVirtualMachineDynamicPassthroughInfo)(nil)).Elem() +} + type ArrayOfVirtualMachineFeatureRequirement struct { VirtualMachineFeatureRequirement []VirtualMachineFeatureRequirement `xml:"VirtualMachineFeatureRequirement,omitempty"` } @@ -5437,6 +5788,14 @@ func init() { t["ArrayOfVirtualMachinePciSharedGpuPassthroughInfo"] = reflect.TypeOf((*ArrayOfVirtualMachinePciSharedGpuPassthroughInfo)(nil)).Elem() } +type ArrayOfVirtualMachinePrecisionClockInfo struct { + VirtualMachinePrecisionClockInfo []VirtualMachinePrecisionClockInfo `xml:"VirtualMachinePrecisionClockInfo,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachinePrecisionClockInfo"] = reflect.TypeOf((*ArrayOfVirtualMachinePrecisionClockInfo)(nil)).Elem() +} + type ArrayOfVirtualMachineProfileDetailsDiskProfileDetails struct { VirtualMachineProfileDetailsDiskProfileDetails []VirtualMachineProfileDetailsDiskProfileDetails `xml:"VirtualMachineProfileDetailsDiskProfileDetails,omitempty"` } @@ -5461,6 +5820,14 @@ func init() { t["ArrayOfVirtualMachinePropertyRelation"] = reflect.TypeOf((*ArrayOfVirtualMachinePropertyRelation)(nil)).Elem() } +type ArrayOfVirtualMachineQuickStatsMemoryTierStats struct { + VirtualMachineQuickStatsMemoryTierStats []VirtualMachineQuickStatsMemoryTierStats `xml:"VirtualMachineQuickStatsMemoryTierStats,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineQuickStatsMemoryTierStats"] = reflect.TypeOf((*ArrayOfVirtualMachineQuickStatsMemoryTierStats)(nil)).Elem() +} + type ArrayOfVirtualMachineRelocateSpecDiskLocator struct { VirtualMachineRelocateSpecDiskLocator []VirtualMachineRelocateSpecDiskLocator `xml:"VirtualMachineRelocateSpecDiskLocator,omitempty"` } @@ -5557,6 +5924,14 @@ func init() { t["ArrayOfVirtualMachineVMCIDeviceFilterSpec"] = reflect.TypeOf((*ArrayOfVirtualMachineVMCIDeviceFilterSpec)(nil)).Elem() } +type ArrayOfVirtualMachineVcpuConfig struct { + VirtualMachineVcpuConfig []VirtualMachineVcpuConfig `xml:"VirtualMachineVcpuConfig,omitempty"` +} + +func init() { + t["ArrayOfVirtualMachineVcpuConfig"] = reflect.TypeOf((*ArrayOfVirtualMachineVcpuConfig)(nil)).Elem() +} + type ArrayOfVirtualNicManagerNetConfig struct { VirtualNicManagerNetConfig []VirtualNicManagerNetConfig `xml:"VirtualNicManagerNetConfig,omitempty"` } @@ -5565,6 +5940,14 @@ func init() { t["ArrayOfVirtualNicManagerNetConfig"] = reflect.TypeOf((*ArrayOfVirtualNicManagerNetConfig)(nil)).Elem() } +type ArrayOfVirtualPCIPassthroughAllowedDevice struct { + VirtualPCIPassthroughAllowedDevice []VirtualPCIPassthroughAllowedDevice `xml:"VirtualPCIPassthroughAllowedDevice,omitempty"` +} + +func init() { + t["ArrayOfVirtualPCIPassthroughAllowedDevice"] = reflect.TypeOf((*ArrayOfVirtualPCIPassthroughAllowedDevice)(nil)).Elem() +} + type ArrayOfVirtualSCSISharing struct { VirtualSCSISharing []VirtualSCSISharing `xml:"VirtualSCSISharing,omitempty"` } @@ -6112,6 +6495,8 @@ type BaseConfigInfo struct { NativeSnapshotSupported *bool `xml:"nativeSnapshotSupported"` ChangedBlockTrackingEnabled *bool `xml:"changedBlockTrackingEnabled"` Backing BaseBaseConfigInfoBackingInfo `xml:"backing,typeattr"` + Metadata []KeyValue `xml:"metadata,omitempty"` + Vclock *VslmVClockInfo `xml:"vclock,omitempty"` Iofilter []string `xml:"iofilter,omitempty"` } @@ -6146,6 +6531,7 @@ type BaseConfigInfoFileBackingInfo struct { BackingObjectId string `xml:"backingObjectId,omitempty"` Parent BaseBaseConfigInfoFileBackingInfo `xml:"parent,omitempty,typeattr"` DeltaSizeInMB int64 `xml:"deltaSizeInMB,omitempty"` + KeyId *CryptoKeyId 
`xml:"keyId,omitempty"` } func init() { @@ -6163,6 +6549,69 @@ func init() { t["BaseConfigInfoRawDiskMappingBackingInfo"] = reflect.TypeOf((*BaseConfigInfoRawDiskMappingBackingInfo)(nil)).Elem() } +type BatchAddHostsToClusterRequestType struct { + This ManagedObjectReference `xml:"_this"` + Cluster ManagedObjectReference `xml:"cluster"` + NewHosts []FolderNewHostSpec `xml:"newHosts,omitempty"` + ExistingHosts []ManagedObjectReference `xml:"existingHosts,omitempty"` + CompResSpec BaseComputeResourceConfigSpec `xml:"compResSpec,omitempty,typeattr"` + DesiredState string `xml:"desiredState,omitempty"` +} + +func init() { + t["BatchAddHostsToClusterRequestType"] = reflect.TypeOf((*BatchAddHostsToClusterRequestType)(nil)).Elem() +} + +type BatchAddHostsToCluster_Task BatchAddHostsToClusterRequestType + +func init() { + t["BatchAddHostsToCluster_Task"] = reflect.TypeOf((*BatchAddHostsToCluster_Task)(nil)).Elem() +} + +type BatchAddHostsToCluster_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type BatchAddStandaloneHostsRequestType struct { + This ManagedObjectReference `xml:"_this"` + NewHosts []FolderNewHostSpec `xml:"newHosts,omitempty"` + CompResSpec BaseComputeResourceConfigSpec `xml:"compResSpec,omitempty,typeattr"` + AddConnected bool `xml:"addConnected"` +} + +func init() { + t["BatchAddStandaloneHostsRequestType"] = reflect.TypeOf((*BatchAddStandaloneHostsRequestType)(nil)).Elem() +} + +type BatchAddStandaloneHosts_Task BatchAddStandaloneHostsRequestType + +func init() { + t["BatchAddStandaloneHosts_Task"] = reflect.TypeOf((*BatchAddStandaloneHosts_Task)(nil)).Elem() +} + +type BatchAddStandaloneHosts_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type BatchQueryConnectInfo BatchQueryConnectInfoRequestType + +func init() { + t["BatchQueryConnectInfo"] = reflect.TypeOf((*BatchQueryConnectInfo)(nil)).Elem() +} + +type BatchQueryConnectInfoRequestType struct { + This ManagedObjectReference `xml:"_this"` + HostSpecs []HostConnectSpec `xml:"hostSpecs,omitempty"` +} + +func init() { + t["BatchQueryConnectInfoRequestType"] = reflect.TypeOf((*BatchQueryConnectInfoRequestType)(nil)).Elem() +} + +type BatchQueryConnectInfoResponse struct { + Returnval []DatacenterBasicConnectInfo `xml:"returnval,omitempty"` +} + type BatchResult struct { DynamicData @@ -6873,12 +7322,20 @@ func init() { type Capability struct { DynamicData - ProvisioningSupported bool `xml:"provisioningSupported"` - MultiHostSupported bool `xml:"multiHostSupported"` - UserShellAccessSupported bool `xml:"userShellAccessSupported"` - SupportedEVCMode []EVCMode `xml:"supportedEVCMode,omitempty"` - NetworkBackupAndRestoreSupported *bool `xml:"networkBackupAndRestoreSupported"` - FtDrsWithoutEvcSupported *bool `xml:"ftDrsWithoutEvcSupported"` + ProvisioningSupported bool `xml:"provisioningSupported"` + MultiHostSupported bool `xml:"multiHostSupported"` + UserShellAccessSupported bool `xml:"userShellAccessSupported"` + SupportedEVCMode []EVCMode `xml:"supportedEVCMode,omitempty"` + SupportedEVCGraphicsMode []FeatureEVCMode `xml:"supportedEVCGraphicsMode,omitempty"` + NetworkBackupAndRestoreSupported *bool `xml:"networkBackupAndRestoreSupported"` + FtDrsWithoutEvcSupported *bool `xml:"ftDrsWithoutEvcSupported"` + HciWorkflowSupported *bool `xml:"hciWorkflowSupported"` + ComputePolicyVersion int32 `xml:"computePolicyVersion,omitempty"` + ClusterPlacementSupported *bool `xml:"clusterPlacementSupported"` + LifecycleManagementSupported *bool 
`xml:"lifecycleManagementSupported"` + HostSeedingSupported *bool `xml:"hostSeedingSupported"` + ScalableSharesSupported *bool `xml:"scalableSharesSupported"` + HadcsSupported *bool `xml:"hadcsSupported"` } func init() { @@ -7058,6 +7515,26 @@ func init() { type ChangeOwnerResponse struct { } +type ChangePassword ChangePasswordRequestType + +func init() { + t["ChangePassword"] = reflect.TypeOf((*ChangePassword)(nil)).Elem() +} + +type ChangePasswordRequestType struct { + This ManagedObjectReference `xml:"_this"` + User string `xml:"user"` + OldPassword string `xml:"oldPassword"` + NewPassword string `xml:"newPassword"` +} + +func init() { + t["ChangePasswordRequestType"] = reflect.TypeOf((*ChangePasswordRequestType)(nil)).Elem() +} + +type ChangePasswordResponse struct { +} + type ChangesInfoEventArgument struct { DynamicData @@ -7174,8 +7651,9 @@ type CheckCompliance_TaskResponse struct { } type CheckConfigureEvcModeRequestType struct { - This ManagedObjectReference `xml:"_this"` - EvcModeKey string `xml:"evcModeKey"` + This ManagedObjectReference `xml:"_this"` + EvcModeKey string `xml:"evcModeKey"` + EvcGraphicsModeKey string `xml:"evcGraphicsModeKey,omitempty"` } func init() { @@ -7724,23 +8202,208 @@ func init() { t["ClusterComplianceCheckedEvent"] = reflect.TypeOf((*ClusterComplianceCheckedEvent)(nil)).Elem() } +type ClusterComputeResourceClusterConfigResult struct { + DynamicData + + FailedHosts []FolderFailedHostResult `xml:"failedHosts,omitempty"` + ConfiguredHosts []ManagedObjectReference `xml:"configuredHosts,omitempty"` +} + +func init() { + t["ClusterComputeResourceClusterConfigResult"] = reflect.TypeOf((*ClusterComputeResourceClusterConfigResult)(nil)).Elem() +} + +type ClusterComputeResourceDVSConfigurationValidation struct { + ClusterComputeResourceValidationResultBase + + IsDvsValid bool `xml:"isDvsValid"` + IsDvpgValid bool `xml:"isDvpgValid"` +} + +func init() { + t["ClusterComputeResourceDVSConfigurationValidation"] = reflect.TypeOf((*ClusterComputeResourceDVSConfigurationValidation)(nil)).Elem() +} + +type ClusterComputeResourceDVSSetting struct { + DynamicData + + DvSwitch ManagedObjectReference `xml:"dvSwitch"` + PnicDevices []string `xml:"pnicDevices,omitempty"` + DvPortgroupSetting []ClusterComputeResourceDVSSettingDVPortgroupToServiceMapping `xml:"dvPortgroupSetting,omitempty"` +} + +func init() { + t["ClusterComputeResourceDVSSetting"] = reflect.TypeOf((*ClusterComputeResourceDVSSetting)(nil)).Elem() +} + +type ClusterComputeResourceDVSSettingDVPortgroupToServiceMapping struct { + DynamicData + + DvPortgroup ManagedObjectReference `xml:"dvPortgroup"` + Service string `xml:"service"` +} + +func init() { + t["ClusterComputeResourceDVSSettingDVPortgroupToServiceMapping"] = reflect.TypeOf((*ClusterComputeResourceDVSSettingDVPortgroupToServiceMapping)(nil)).Elem() +} + +type ClusterComputeResourceDvsProfile struct { + DynamicData + + DvsName string `xml:"dvsName,omitempty"` + DvSwitch *ManagedObjectReference `xml:"dvSwitch,omitempty"` + PnicDevices []string `xml:"pnicDevices,omitempty"` + DvPortgroupMapping []ClusterComputeResourceDvsProfileDVPortgroupSpecToServiceMapping `xml:"dvPortgroupMapping,omitempty"` +} + +func init() { + t["ClusterComputeResourceDvsProfile"] = reflect.TypeOf((*ClusterComputeResourceDvsProfile)(nil)).Elem() +} + +type ClusterComputeResourceDvsProfileDVPortgroupSpecToServiceMapping struct { + DynamicData + + DvPortgroupSpec *DVPortgroupConfigSpec `xml:"dvPortgroupSpec,omitempty"` + DvPortgroup *ManagedObjectReference 
`xml:"dvPortgroup,omitempty"` + Service string `xml:"service"` +} + +func init() { + t["ClusterComputeResourceDvsProfileDVPortgroupSpecToServiceMapping"] = reflect.TypeOf((*ClusterComputeResourceDvsProfileDVPortgroupSpecToServiceMapping)(nil)).Elem() +} + +type ClusterComputeResourceHCIConfigInfo struct { + DynamicData + + WorkflowState string `xml:"workflowState"` + DvsSetting []ClusterComputeResourceDVSSetting `xml:"dvsSetting,omitempty"` + ConfiguredHosts []ManagedObjectReference `xml:"configuredHosts,omitempty"` + HostConfigProfile *ClusterComputeResourceHostConfigurationProfile `xml:"hostConfigProfile,omitempty"` +} + +func init() { + t["ClusterComputeResourceHCIConfigInfo"] = reflect.TypeOf((*ClusterComputeResourceHCIConfigInfo)(nil)).Elem() +} + +type ClusterComputeResourceHCIConfigSpec struct { + DynamicData + + DvsProf []ClusterComputeResourceDvsProfile `xml:"dvsProf,omitempty"` + HostConfigProfile *ClusterComputeResourceHostConfigurationProfile `xml:"hostConfigProfile,omitempty"` + VSanConfigSpec *SDDCBase `xml:"vSanConfigSpec,omitempty"` + VcProf *ClusterComputeResourceVCProfile `xml:"vcProf,omitempty"` +} + +func init() { + t["ClusterComputeResourceHCIConfigSpec"] = reflect.TypeOf((*ClusterComputeResourceHCIConfigSpec)(nil)).Elem() +} + +type ClusterComputeResourceHostConfigurationInput struct { + DynamicData + + Host ManagedObjectReference `xml:"host"` + HostVmkNics []ClusterComputeResourceHostVmkNicInfo `xml:"hostVmkNics,omitempty"` + AllowedInNonMaintenanceMode *bool `xml:"allowedInNonMaintenanceMode"` +} + +func init() { + t["ClusterComputeResourceHostConfigurationInput"] = reflect.TypeOf((*ClusterComputeResourceHostConfigurationInput)(nil)).Elem() +} + +type ClusterComputeResourceHostConfigurationProfile struct { + DynamicData + + DateTimeConfig *HostDateTimeConfig `xml:"dateTimeConfig,omitempty"` + LockdownMode HostLockdownMode `xml:"lockdownMode,omitempty"` +} + +func init() { + t["ClusterComputeResourceHostConfigurationProfile"] = reflect.TypeOf((*ClusterComputeResourceHostConfigurationProfile)(nil)).Elem() +} + +type ClusterComputeResourceHostConfigurationValidation struct { + ClusterComputeResourceValidationResultBase + + Host ManagedObjectReference `xml:"host"` + IsDvsSettingValid *bool `xml:"isDvsSettingValid"` + IsVmknicSettingValid *bool `xml:"isVmknicSettingValid"` + IsNtpSettingValid *bool `xml:"isNtpSettingValid"` + IsLockdownModeValid *bool `xml:"isLockdownModeValid"` +} + +func init() { + t["ClusterComputeResourceHostConfigurationValidation"] = reflect.TypeOf((*ClusterComputeResourceHostConfigurationValidation)(nil)).Elem() +} + +type ClusterComputeResourceHostVmkNicInfo struct { + DynamicData + + NicSpec HostVirtualNicSpec `xml:"nicSpec"` + Service string `xml:"service"` +} + +func init() { + t["ClusterComputeResourceHostVmkNicInfo"] = reflect.TypeOf((*ClusterComputeResourceHostVmkNicInfo)(nil)).Elem() +} + type ClusterComputeResourceSummary struct { ComputeResourceSummary - CurrentFailoverLevel int32 `xml:"currentFailoverLevel"` - AdmissionControlInfo BaseClusterDasAdmissionControlInfo `xml:"admissionControlInfo,omitempty,typeattr"` - NumVmotions int32 `xml:"numVmotions"` - TargetBalance int32 `xml:"targetBalance,omitempty"` - CurrentBalance int32 `xml:"currentBalance,omitempty"` - UsageSummary *ClusterUsageSummary `xml:"usageSummary,omitempty"` - CurrentEVCModeKey string `xml:"currentEVCModeKey,omitempty"` - DasData BaseClusterDasData `xml:"dasData,omitempty,typeattr"` + CurrentFailoverLevel int32 `xml:"currentFailoverLevel"` + AdmissionControlInfo 
BaseClusterDasAdmissionControlInfo `xml:"admissionControlInfo,omitempty,typeattr"` + NumVmotions int32 `xml:"numVmotions"` + TargetBalance int32 `xml:"targetBalance,omitempty"` + CurrentBalance int32 `xml:"currentBalance,omitempty"` + DrsScore int32 `xml:"drsScore,omitempty"` + NumVmsPerDrsScoreBucket []int32 `xml:"numVmsPerDrsScoreBucket,omitempty"` + UsageSummary *ClusterUsageSummary `xml:"usageSummary,omitempty"` + CurrentEVCModeKey string `xml:"currentEVCModeKey,omitempty"` + CurrentEVCGraphicsModeKey string `xml:"currentEVCGraphicsModeKey,omitempty"` + DasData BaseClusterDasData `xml:"dasData,omitempty,typeattr"` + ClusterMaintenanceModeStatus string `xml:"clusterMaintenanceModeStatus,omitempty"` + VcsHealthStatus string `xml:"vcsHealthStatus,omitempty"` + VcsSlots []ClusterComputeResourceVcsSlots `xml:"vcsSlots,omitempty"` } func init() { t["ClusterComputeResourceSummary"] = reflect.TypeOf((*ClusterComputeResourceSummary)(nil)).Elem() } +type ClusterComputeResourceVCProfile struct { + DynamicData + + ClusterSpec *ClusterConfigSpecEx `xml:"clusterSpec,omitempty"` + EvcModeKey string `xml:"evcModeKey,omitempty"` + EvcGraphicsModeKey string `xml:"evcGraphicsModeKey,omitempty"` +} + +func init() { + t["ClusterComputeResourceVCProfile"] = reflect.TypeOf((*ClusterComputeResourceVCProfile)(nil)).Elem() +} + +type ClusterComputeResourceValidationResultBase struct { + DynamicData + + Info []LocalizableMessage `xml:"info,omitempty"` +} + +func init() { + t["ClusterComputeResourceValidationResultBase"] = reflect.TypeOf((*ClusterComputeResourceValidationResultBase)(nil)).Elem() +} + +type ClusterComputeResourceVcsSlots struct { + DynamicData + + SystemId string `xml:"systemId,omitempty"` + Host ManagedObjectReference `xml:"host"` + Datastore []ManagedObjectReference `xml:"datastore,omitempty"` + TotalSlots int32 `xml:"totalSlots"` +} + +func init() { + t["ClusterComputeResourceVcsSlots"] = reflect.TypeOf((*ClusterComputeResourceVcsSlots)(nil)).Elem() +} + type ClusterConfigInfo struct { DynamicData @@ -7758,6 +8421,7 @@ func init() { type ClusterConfigInfoEx struct { ComputeResourceConfigInfo + SystemVMsConfig *ClusterSystemVMsConfigInfo `xml:"systemVMsConfig,omitempty"` DasConfig ClusterDasConfigInfo `xml:"dasConfig"` DasVmConfig []ClusterDasVmConfigInfo `xml:"dasVmConfig,omitempty"` DrsConfig ClusterDrsConfigInfo `xml:"drsConfig"` @@ -7772,6 +8436,7 @@ type ClusterConfigInfoEx struct { Group []BaseClusterGroupInfo `xml:"group,omitempty,typeattr"` InfraUpdateHaConfig *ClusterInfraUpdateHaConfigInfo `xml:"infraUpdateHaConfig,omitempty"` ProactiveDrsConfig *ClusterProactiveDrsConfigInfo `xml:"proactiveDrsConfig,omitempty"` + CryptoConfig *ClusterCryptoConfigInfo `xml:"cryptoConfig,omitempty"` } func init() { @@ -7795,6 +8460,7 @@ func init() { type ClusterConfigSpecEx struct { ComputeResourceConfigSpec + SystemVMsConfig *ClusterSystemVMsConfigSpec `xml:"systemVMsConfig,omitempty"` DasConfig *ClusterDasConfigInfo `xml:"dasConfig,omitempty"` DasVmConfigSpec []ClusterDasVmConfigSpec `xml:"dasVmConfigSpec,omitempty"` DrsConfig *ClusterDrsConfigInfo `xml:"drsConfig,omitempty"` @@ -7809,6 +8475,8 @@ type ClusterConfigSpecEx struct { GroupSpec []ClusterGroupSpec `xml:"groupSpec,omitempty"` InfraUpdateHaConfig *ClusterInfraUpdateHaConfigInfo `xml:"infraUpdateHaConfig,omitempty"` ProactiveDrsConfig *ClusterProactiveDrsConfigInfo `xml:"proactiveDrsConfig,omitempty"` + InHciWorkflow *bool `xml:"inHciWorkflow"` + CryptoConfig *ClusterCryptoConfigInfo `xml:"cryptoConfig,omitempty"` } func init() { @@ -7825,6 
+8493,16 @@ func init() { t["ClusterCreatedEvent"] = reflect.TypeOf((*ClusterCreatedEvent)(nil)).Elem() } +type ClusterCryptoConfigInfo struct { + DynamicData + + CryptoMode string `xml:"cryptoMode,omitempty"` +} + +func init() { + t["ClusterCryptoConfigInfo"] = reflect.TypeOf((*ClusterCryptoConfigInfo)(nil)).Elem() +} + type ClusterDasAamHostInfo struct { ClusterDasHostInfo @@ -7860,7 +8538,8 @@ func init() { type ClusterDasAdmissionControlPolicy struct { DynamicData - ResourceReductionToToleratePercent int32 `xml:"resourceReductionToToleratePercent,omitempty"` + ResourceReductionToToleratePercent *int32 `xml:"resourceReductionToToleratePercent"` + PMemAdmissionControlEnabled *bool `xml:"pMemAdmissionControlEnabled"` } func init() { @@ -8049,6 +8728,16 @@ func init() { t["ClusterDasVmSettings"] = reflect.TypeOf((*ClusterDasVmSettings)(nil)).Elem() } +type ClusterDatastoreUpdateSpec struct { + ArrayUpdateSpec + + Datastore *ManagedObjectReference `xml:"datastore,omitempty"` +} + +func init() { + t["ClusterDatastoreUpdateSpec"] = reflect.TypeOf((*ClusterDatastoreUpdateSpec)(nil)).Elem() +} + type ClusterDependencyRuleInfo struct { ClusterRuleInfo @@ -8110,6 +8799,7 @@ type ClusterDrsConfigInfo struct { EnableVmBehaviorOverrides *bool `xml:"enableVmBehaviorOverrides"` DefaultVmBehavior DrsBehavior `xml:"defaultVmBehavior,omitempty"` VmotionRate int32 `xml:"vmotionRate,omitempty"` + ScaleDescendantsShares string `xml:"scaleDescendantsShares,omitempty"` Option []BaseOptionValue `xml:"option,omitempty,typeattr"` } @@ -8329,6 +9019,7 @@ type ClusterFailoverResourcesAdmissionControlInfo struct { CurrentCpuFailoverResourcesPercent int32 `xml:"currentCpuFailoverResourcesPercent"` CurrentMemoryFailoverResourcesPercent int32 `xml:"currentMemoryFailoverResourcesPercent"` + CurrentPMemFailoverResourcesPercent int32 `xml:"currentPMemFailoverResourcesPercent,omitempty"` } func init() { @@ -8338,10 +9029,12 @@ func init() { type ClusterFailoverResourcesAdmissionControlPolicy struct { ClusterDasAdmissionControlPolicy - CpuFailoverResourcesPercent int32 `xml:"cpuFailoverResourcesPercent"` - MemoryFailoverResourcesPercent int32 `xml:"memoryFailoverResourcesPercent"` - FailoverLevel int32 `xml:"failoverLevel,omitempty"` - AutoComputePercentages *bool `xml:"autoComputePercentages"` + CpuFailoverResourcesPercent int32 `xml:"cpuFailoverResourcesPercent"` + MemoryFailoverResourcesPercent int32 `xml:"memoryFailoverResourcesPercent"` + FailoverLevel int32 `xml:"failoverLevel,omitempty"` + AutoComputePercentages *bool `xml:"autoComputePercentages"` + PMemFailoverResourcesPercent int32 `xml:"pMemFailoverResourcesPercent,omitempty"` + AutoComputePMemFailoverResourcesPercent *bool `xml:"autoComputePMemFailoverResourcesPercent"` } func init() { @@ -8672,6 +9365,40 @@ func init() { t["ClusterStatusChangedEvent"] = reflect.TypeOf((*ClusterStatusChangedEvent)(nil)).Elem() } +type ClusterSystemVMsConfigInfo struct { + DynamicData + + AllowedDatastores []ManagedObjectReference `xml:"allowedDatastores,omitempty"` + NotAllowedDatastores []ManagedObjectReference `xml:"notAllowedDatastores,omitempty"` + DsTagCategoriesToExclude []string `xml:"dsTagCategoriesToExclude,omitempty"` +} + +func init() { + t["ClusterSystemVMsConfigInfo"] = reflect.TypeOf((*ClusterSystemVMsConfigInfo)(nil)).Elem() +} + +type ClusterSystemVMsConfigSpec struct { + DynamicData + + AllowedDatastores []ClusterDatastoreUpdateSpec `xml:"allowedDatastores,omitempty"` + NotAllowedDatastores []ClusterDatastoreUpdateSpec `xml:"notAllowedDatastores,omitempty"` + 
DsTagCategoriesToExclude []ClusterTagCategoryUpdateSpec `xml:"dsTagCategoriesToExclude,omitempty"` +} + +func init() { + t["ClusterSystemVMsConfigSpec"] = reflect.TypeOf((*ClusterSystemVMsConfigSpec)(nil)).Elem() +} + +type ClusterTagCategoryUpdateSpec struct { + ArrayUpdateSpec + + Category string `xml:"category,omitempty"` +} + +func init() { + t["ClusterTagCategoryUpdateSpec"] = reflect.TypeOf((*ClusterTagCategoryUpdateSpec)(nil)).Elem() +} + type ClusterUsageSummary struct { DynamicData @@ -8936,6 +9663,7 @@ type ComputeResourceConfigInfo struct { VmSwapPlacement string `xml:"vmSwapPlacement"` SpbmEnabled *bool `xml:"spbmEnabled"` DefaultHardwareVersionKey string `xml:"defaultHardwareVersionKey,omitempty"` + MaximumHardwareVersionKey string `xml:"maximumHardwareVersionKey,omitempty"` } func init() { @@ -8945,9 +9673,11 @@ func init() { type ComputeResourceConfigSpec struct { DynamicData - VmSwapPlacement string `xml:"vmSwapPlacement,omitempty"` - SpbmEnabled *bool `xml:"spbmEnabled"` - DefaultHardwareVersionKey string `xml:"defaultHardwareVersionKey,omitempty"` + VmSwapPlacement string `xml:"vmSwapPlacement,omitempty"` + SpbmEnabled *bool `xml:"spbmEnabled"` + DefaultHardwareVersionKey string `xml:"defaultHardwareVersionKey,omitempty"` + DesiredSoftwareSpec *DesiredSoftwareSpec `xml:"desiredSoftwareSpec,omitempty"` + MaximumHardwareVersionKey string `xml:"maximumHardwareVersionKey,omitempty"` } func init() { @@ -9013,6 +9743,7 @@ type ConfigTarget struct { NumCpus int32 `xml:"numCpus"` NumCpuCores int32 `xml:"numCpuCores"` NumNumaNodes int32 `xml:"numNumaNodes"` + MaxCpusPerHost int32 `xml:"maxCpusPerHost,omitempty"` SmcPresent *bool `xml:"smcPresent"` Datastore []VirtualMachineDatastoreInfo `xml:"datastore,omitempty"` Network []VirtualMachineNetworkInfo `xml:"network,omitempty"` @@ -9030,6 +9761,7 @@ type ConfigTarget struct { ScsiDisk []VirtualMachineScsiDiskDeviceInfo `xml:"scsiDisk,omitempty"` IdeDisk []VirtualMachineIdeDiskDeviceInfo `xml:"ideDisk,omitempty"` MaxMemMBOptimalPerf int32 `xml:"maxMemMBOptimalPerf"` + SupportedMaxMemMB int32 `xml:"supportedMaxMemMB,omitempty"` ResourcePool *ResourcePoolRuntimeInfo `xml:"resourcePool,omitempty"` AutoVmotion *bool `xml:"autoVmotion"` PciPassthrough []BaseVirtualMachinePciPassthroughInfo `xml:"pciPassthrough,omitempty,typeattr"` @@ -9037,6 +9769,10 @@ type ConfigTarget struct { VFlashModule []VirtualMachineVFlashModuleInfo `xml:"vFlashModule,omitempty"` SharedGpuPassthroughTypes []VirtualMachinePciSharedGpuPassthroughInfo `xml:"sharedGpuPassthroughTypes,omitempty"` AvailablePersistentMemoryReservationMB int64 `xml:"availablePersistentMemoryReservationMB,omitempty"` + DynamicPassthrough []VirtualMachineDynamicPassthroughInfo `xml:"dynamicPassthrough,omitempty"` + SgxTargetInfo *VirtualMachineSgxTargetInfo `xml:"sgxTargetInfo,omitempty"` + PrecisionClockInfo []VirtualMachinePrecisionClockInfo `xml:"precisionClockInfo,omitempty"` + SevSupported *bool `xml:"sevSupported"` } func init() { @@ -9101,8 +9837,9 @@ type ConfigureDatastorePrincipalResponse struct { } type ConfigureEvcModeRequestType struct { - This ManagedObjectReference `xml:"_this"` - EvcModeKey string `xml:"evcModeKey"` + This ManagedObjectReference `xml:"_this"` + EvcModeKey string `xml:"evcModeKey"` + EvcGraphicsModeKey string `xml:"evcGraphicsModeKey,omitempty"` } func init() { @@ -9119,6 +9856,26 @@ type ConfigureEvcMode_TaskResponse struct { Returnval ManagedObjectReference `xml:"returnval"` } +type ConfigureHCIRequestType struct { + This ManagedObjectReference 
`xml:"_this"` + ClusterSpec ClusterComputeResourceHCIConfigSpec `xml:"clusterSpec"` + HostInputs []ClusterComputeResourceHostConfigurationInput `xml:"hostInputs,omitempty"` +} + +func init() { + t["ConfigureHCIRequestType"] = reflect.TypeOf((*ConfigureHCIRequestType)(nil)).Elem() +} + +type ConfigureHCI_Task ConfigureHCIRequestType + +func init() { + t["ConfigureHCI_Task"] = reflect.TypeOf((*ConfigureHCI_Task)(nil)).Elem() +} + +type ConfigureHCI_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + type ConfigureHostCacheRequestType struct { This ManagedObjectReference `xml:"_this"` Spec HostCacheConfigurationSpec `xml:"spec"` @@ -9259,6 +10016,43 @@ func init() { t["ConflictingDatastoreFoundFault"] = reflect.TypeOf((*ConflictingDatastoreFoundFault)(nil)).Elem() } +type ConnectNvmeController ConnectNvmeControllerRequestType + +func init() { + t["ConnectNvmeController"] = reflect.TypeOf((*ConnectNvmeController)(nil)).Elem() +} + +type ConnectNvmeControllerExRequestType struct { + This ManagedObjectReference `xml:"_this"` + ConnectSpec []HostNvmeConnectSpec `xml:"connectSpec,omitempty"` +} + +func init() { + t["ConnectNvmeControllerExRequestType"] = reflect.TypeOf((*ConnectNvmeControllerExRequestType)(nil)).Elem() +} + +type ConnectNvmeControllerEx_Task ConnectNvmeControllerExRequestType + +func init() { + t["ConnectNvmeControllerEx_Task"] = reflect.TypeOf((*ConnectNvmeControllerEx_Task)(nil)).Elem() +} + +type ConnectNvmeControllerEx_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type ConnectNvmeControllerRequestType struct { + This ManagedObjectReference `xml:"_this"` + ConnectSpec HostNvmeConnectSpec `xml:"connectSpec"` +} + +func init() { + t["ConnectNvmeControllerRequestType"] = reflect.TypeOf((*ConnectNvmeControllerRequestType)(nil)).Elem() +} + +type ConnectNvmeControllerResponse struct { +} + type ConnectedIso struct { OvfExport @@ -9756,6 +10550,7 @@ type CreateDirectoryRequestType struct { Datastore ManagedObjectReference `xml:"datastore"` DisplayName string `xml:"displayName,omitempty"` Policy string `xml:"policy,omitempty"` + Size int64 `xml:"size,omitempty"` } func init() { @@ -10023,6 +10818,43 @@ type CreateNvdimmNamespace_TaskResponse struct { Returnval ManagedObjectReference `xml:"returnval"` } +type CreateNvdimmPMemNamespaceRequestType struct { + This ManagedObjectReference `xml:"_this"` + CreateSpec NvdimmPMemNamespaceCreateSpec `xml:"createSpec"` +} + +func init() { + t["CreateNvdimmPMemNamespaceRequestType"] = reflect.TypeOf((*CreateNvdimmPMemNamespaceRequestType)(nil)).Elem() +} + +type CreateNvdimmPMemNamespace_Task CreateNvdimmPMemNamespaceRequestType + +func init() { + t["CreateNvdimmPMemNamespace_Task"] = reflect.TypeOf((*CreateNvdimmPMemNamespace_Task)(nil)).Elem() +} + +type CreateNvdimmPMemNamespace_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type CreateNvmeOverRdmaAdapter CreateNvmeOverRdmaAdapterRequestType + +func init() { + t["CreateNvmeOverRdmaAdapter"] = reflect.TypeOf((*CreateNvmeOverRdmaAdapter)(nil)).Elem() +} + +type CreateNvmeOverRdmaAdapterRequestType struct { + This ManagedObjectReference `xml:"_this"` + RdmaDeviceName string `xml:"rdmaDeviceName"` +} + +func init() { + t["CreateNvmeOverRdmaAdapterRequestType"] = reflect.TypeOf((*CreateNvmeOverRdmaAdapterRequestType)(nil)).Elem() +} + +type CreateNvmeOverRdmaAdapterResponse struct { +} + type CreateObjectScheduledTask CreateObjectScheduledTaskRequestType func init() { @@ -10261,6 +11093,24 @@ type 
CreateSnapshot_TaskResponse struct { Returnval ManagedObjectReference `xml:"returnval"` } +type CreateSoftwareAdapter CreateSoftwareAdapterRequestType + +func init() { + t["CreateSoftwareAdapter"] = reflect.TypeOf((*CreateSoftwareAdapter)(nil)).Elem() +} + +type CreateSoftwareAdapterRequestType struct { + This ManagedObjectReference `xml:"_this"` + Spec BaseHostHbaCreateSpec `xml:"spec,typeattr"` +} + +func init() { + t["CreateSoftwareAdapterRequestType"] = reflect.TypeOf((*CreateSoftwareAdapterRequestType)(nil)).Elem() +} + +type CreateSoftwareAdapterResponse struct { +} + type CreateStoragePod CreateStoragePodRequestType func init() { @@ -10507,15 +11357,33 @@ func init() { type CryptoKeyResult struct { DynamicData - KeyId CryptoKeyId `xml:"keyId"` - Success bool `xml:"success"` - Reason string `xml:"reason,omitempty"` + KeyId CryptoKeyId `xml:"keyId"` + Success bool `xml:"success"` + Reason string `xml:"reason,omitempty"` + Fault *LocalizedMethodFault `xml:"fault,omitempty"` } func init() { t["CryptoKeyResult"] = reflect.TypeOf((*CryptoKeyResult)(nil)).Elem() } +type CryptoManagerHostDisable CryptoManagerHostDisableRequestType + +func init() { + t["CryptoManagerHostDisable"] = reflect.TypeOf((*CryptoManagerHostDisable)(nil)).Elem() +} + +type CryptoManagerHostDisableRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["CryptoManagerHostDisableRequestType"] = reflect.TypeOf((*CryptoManagerHostDisableRequestType)(nil)).Elem() +} + +type CryptoManagerHostDisableResponse struct { +} + type CryptoManagerHostEnable CryptoManagerHostEnableRequestType func init() { @@ -10573,6 +11441,8 @@ type CryptoManagerKmipClusterStatus struct { DynamicData ClusterId KeyProviderId `xml:"clusterId"` + OverallStatus ManagedEntityStatus `xml:"overallStatus,omitempty"` + ManagementType string `xml:"managementType,omitempty"` Servers []CryptoManagerKmipServerStatus `xml:"servers"` ClientCertInfo *CryptoManagerKmipCertificateInfo `xml:"clientCertInfo,omitempty"` } @@ -10581,6 +11451,21 @@ func init() { t["CryptoManagerKmipClusterStatus"] = reflect.TypeOf((*CryptoManagerKmipClusterStatus)(nil)).Elem() } +type CryptoManagerKmipCryptoKeyStatus struct { + DynamicData + + KeyId CryptoKeyId `xml:"keyId"` + KeyAvailable *bool `xml:"keyAvailable"` + Reason string `xml:"reason,omitempty"` + EncryptedVMs []ManagedObjectReference `xml:"encryptedVMs,omitempty"` + AffectedHosts []ManagedObjectReference `xml:"affectedHosts,omitempty"` + ReferencedByTags []string `xml:"referencedByTags,omitempty"` +} + +func init() { + t["CryptoManagerKmipCryptoKeyStatus"] = reflect.TypeOf((*CryptoManagerKmipCryptoKeyStatus)(nil)).Elem() +} + type CryptoManagerKmipServerCertInfo struct { DynamicData @@ -10821,6 +11706,17 @@ func init() { t["CustomizationAutoIpV6Generator"] = reflect.TypeOf((*CustomizationAutoIpV6Generator)(nil)).Elem() } +type CustomizationCloudinitPrep struct { + CustomizationIdentitySettings + + Metadata string `xml:"metadata"` + Userdata string `xml:"userdata,omitempty"` +} + +func init() { + t["CustomizationCloudinitPrep"] = reflect.TypeOf((*CustomizationCloudinitPrep)(nil)).Elem() +} + type CustomizationCustomIpGenerator struct { CustomizationIpGenerator @@ -10879,6 +11775,8 @@ func init() { type CustomizationFailed struct { CustomizationEvent + + Reason string `xml:"reason,omitempty"` } func init() { @@ -11064,6 +11962,7 @@ type CustomizationLinuxPrep struct { Domain string `xml:"domain"` TimeZone string `xml:"timeZone,omitempty"` HwClockUTC *bool `xml:"hwClockUTC"` + ScriptText string 
`xml:"scriptText,omitempty"` } func init() { @@ -11311,6 +12210,28 @@ func init() { t["CustomizationWinOptions"] = reflect.TypeOf((*CustomizationWinOptions)(nil)).Elem() } +type CustomizeGuestRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Auth BaseGuestAuthentication `xml:"auth,typeattr"` + Spec CustomizationSpec `xml:"spec"` + ConfigParams []BaseOptionValue `xml:"configParams,omitempty,typeattr"` +} + +func init() { + t["CustomizeGuestRequestType"] = reflect.TypeOf((*CustomizeGuestRequestType)(nil)).Elem() +} + +type CustomizeGuest_Task CustomizeGuestRequestType + +func init() { + t["CustomizeGuest_Task"] = reflect.TypeOf((*CustomizeGuest_Task)(nil)).Elem() +} + +type CustomizeGuest_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + type CustomizeVMRequestType struct { This ManagedObjectReference `xml:"_this"` Spec CustomizationSpec `xml:"spec"` @@ -11433,6 +12354,7 @@ type DVPortgroupConfigInfo struct { DefaultPortConfig BaseDVPortSetting `xml:"defaultPortConfig,omitempty,typeattr"` Description string `xml:"description,omitempty"` Type string `xml:"type"` + BackingType string `xml:"backingType,omitempty"` Policy BaseDVPortgroupPolicy `xml:"policy,typeattr"` PortNameFormat string `xml:"portNameFormat,omitempty"` Scope []ManagedObjectReference `xml:"scope,omitempty"` @@ -11441,6 +12363,10 @@ type DVPortgroupConfigInfo struct { AutoExpand *bool `xml:"autoExpand"` VmVnicNetworkResourcePoolKey string `xml:"vmVnicNetworkResourcePoolKey,omitempty"` Uplink *bool `xml:"uplink"` + TransportZoneUuid string `xml:"transportZoneUuid,omitempty"` + TransportZoneName string `xml:"transportZoneName,omitempty"` + LogicalSwitchUuid string `xml:"logicalSwitchUuid,omitempty"` + SegmentId string `xml:"segmentId,omitempty"` } func init() { @@ -11457,11 +12383,16 @@ type DVPortgroupConfigSpec struct { DefaultPortConfig BaseDVPortSetting `xml:"defaultPortConfig,omitempty,typeattr"` Description string `xml:"description,omitempty"` Type string `xml:"type,omitempty"` + BackingType string `xml:"backingType,omitempty"` Scope []ManagedObjectReference `xml:"scope,omitempty"` Policy BaseDVPortgroupPolicy `xml:"policy,omitempty,typeattr"` VendorSpecificConfig []DistributedVirtualSwitchKeyedOpaqueBlob `xml:"vendorSpecificConfig,omitempty"` AutoExpand *bool `xml:"autoExpand"` VmVnicNetworkResourcePoolKey string `xml:"vmVnicNetworkResourcePoolKey,omitempty"` + TransportZoneUuid string `xml:"transportZoneUuid,omitempty"` + TransportZoneName string `xml:"transportZoneName,omitempty"` + LogicalSwitchUuid string `xml:"logicalSwitchUuid,omitempty"` + SegmentId string `xml:"segmentId,omitempty"` } func init() { @@ -12187,10 +13118,28 @@ func init() { t["DatabaseSizeParam"] = reflect.TypeOf((*DatabaseSizeParam)(nil)).Elem() } +type DatacenterBasicConnectInfo struct { + DynamicData + + Hostname string `xml:"hostname,omitempty"` + Error *LocalizedMethodFault `xml:"error,omitempty"` + ServerIp string `xml:"serverIp,omitempty"` + NumVm int32 `xml:"numVm,omitempty"` + NumPoweredOnVm int32 `xml:"numPoweredOnVm,omitempty"` + HostProductInfo *AboutInfo `xml:"hostProductInfo,omitempty"` + HardwareVendor string `xml:"hardwareVendor,omitempty"` + HardwareModel string `xml:"hardwareModel,omitempty"` +} + +func init() { + t["DatacenterBasicConnectInfo"] = reflect.TypeOf((*DatacenterBasicConnectInfo)(nil)).Elem() +} + type DatacenterConfigInfo struct { DynamicData DefaultHardwareVersionKey string `xml:"defaultHardwareVersionKey,omitempty"` + 
MaximumHardwareVersionKey string `xml:"maximumHardwareVersionKey,omitempty"` } func init() { @@ -12201,6 +13150,7 @@ type DatacenterConfigSpec struct { DynamicData DefaultHardwareVersionKey string `xml:"defaultHardwareVersionKey,omitempty"` + MaximumHardwareVersionKey string `xml:"maximumHardwareVersionKey,omitempty"` } func init() { @@ -12288,6 +13238,7 @@ type DatastoreCapability struct { VsanSparseSupported *bool `xml:"vsanSparseSupported"` UpitSupported *bool `xml:"upitSupported"` VmdkExpandSupported *bool `xml:"vmdkExpandSupported"` + ClusteredVmdkSupported *bool `xml:"clusteredVmdkSupported"` } func init() { @@ -12459,6 +13410,7 @@ type DatastoreInfo struct { MaxMemoryFileSize int64 `xml:"maxMemoryFileSize,omitempty"` Timestamp *time.Time `xml:"timestamp"` ContainerId string `xml:"containerId,omitempty"` + AliasOf string `xml:"aliasOf,omitempty"` } func init() { @@ -12909,6 +13861,26 @@ type DeleteSnapshot_TaskResponse struct { Returnval ManagedObjectReference `xml:"returnval"` } +type DeleteVStorageObjectExRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` +} + +func init() { + t["DeleteVStorageObjectExRequestType"] = reflect.TypeOf((*DeleteVStorageObjectExRequestType)(nil)).Elem() +} + +type DeleteVStorageObjectEx_Task DeleteVStorageObjectExRequestType + +func init() { + t["DeleteVStorageObjectEx_Task"] = reflect.TypeOf((*DeleteVStorageObjectEx_Task)(nil)).Elem() +} + +type DeleteVStorageObjectEx_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + type DeleteVStorageObjectRequestType struct { This ManagedObjectReference `xml:"_this"` Id ID `xml:"id"` @@ -13069,6 +14041,50 @@ func init() { type DeselectVnicResponse struct { } +type DesiredSoftwareSpec struct { + DynamicData + + BaseImageSpec DesiredSoftwareSpecBaseImageSpec `xml:"baseImageSpec"` + VendorAddOnSpec *DesiredSoftwareSpecVendorAddOnSpec `xml:"vendorAddOnSpec,omitempty"` + Components []DesiredSoftwareSpecComponentSpec `xml:"components,omitempty"` +} + +func init() { + t["DesiredSoftwareSpec"] = reflect.TypeOf((*DesiredSoftwareSpec)(nil)).Elem() +} + +type DesiredSoftwareSpecBaseImageSpec struct { + DynamicData + + Version string `xml:"version"` +} + +func init() { + t["DesiredSoftwareSpecBaseImageSpec"] = reflect.TypeOf((*DesiredSoftwareSpecBaseImageSpec)(nil)).Elem() +} + +type DesiredSoftwareSpecComponentSpec struct { + DynamicData + + Name string `xml:"name"` + Version string `xml:"version,omitempty"` +} + +func init() { + t["DesiredSoftwareSpecComponentSpec"] = reflect.TypeOf((*DesiredSoftwareSpecComponentSpec)(nil)).Elem() +} + +type DesiredSoftwareSpecVendorAddOnSpec struct { + DynamicData + + Name string `xml:"name"` + Version string `xml:"version"` +} + +func init() { + t["DesiredSoftwareSpecVendorAddOnSpec"] = reflect.TypeOf((*DesiredSoftwareSpecVendorAddOnSpec)(nil)).Elem() +} + type DestinationSwitchFull struct { CannotAccessNetwork } @@ -13495,6 +14511,17 @@ func init() { t["DeviceUnsupportedForVmVersionFault"] = reflect.TypeOf((*DeviceUnsupportedForVmVersionFault)(nil)).Elem() } +type DiagnosticManagerAuditRecordResult struct { + DynamicData + + Records []string `xml:"records,omitempty"` + NextToken string `xml:"nextToken"` +} + +func init() { + t["DiagnosticManagerAuditRecordResult"] = reflect.TypeOf((*DiagnosticManagerAuditRecordResult)(nil)).Elem() +} + type DiagnosticManagerBundleInfo struct { DynamicData @@ -13575,6 +14602,43 @@ func init() { t["DisableAdminNotSupportedFault"] = 
reflect.TypeOf((*DisableAdminNotSupportedFault)(nil)).Elem() } +type DisableAlarm DisableAlarmRequestType + +func init() { + t["DisableAlarm"] = reflect.TypeOf((*DisableAlarm)(nil)).Elem() +} + +type DisableAlarmRequestType struct { + This ManagedObjectReference `xml:"_this"` + Alarm ManagedObjectReference `xml:"alarm"` + Entity ManagedObjectReference `xml:"entity"` +} + +func init() { + t["DisableAlarmRequestType"] = reflect.TypeOf((*DisableAlarmRequestType)(nil)).Elem() +} + +type DisableAlarmResponse struct { +} + +type DisableClusteredVmdkSupport DisableClusteredVmdkSupportRequestType + +func init() { + t["DisableClusteredVmdkSupport"] = reflect.TypeOf((*DisableClusteredVmdkSupport)(nil)).Elem() +} + +type DisableClusteredVmdkSupportRequestType struct { + This ManagedObjectReference `xml:"_this"` + Datastore ManagedObjectReference `xml:"datastore"` +} + +func init() { + t["DisableClusteredVmdkSupportRequestType"] = reflect.TypeOf((*DisableClusteredVmdkSupportRequestType)(nil)).Elem() +} + +type DisableClusteredVmdkSupportResponse struct { +} + type DisableEvcModeRequestType struct { This ManagedObjectReference `xml:"_this"` } @@ -13784,6 +14848,43 @@ type DisconnectHost_TaskResponse struct { Returnval ManagedObjectReference `xml:"returnval"` } +type DisconnectNvmeController DisconnectNvmeControllerRequestType + +func init() { + t["DisconnectNvmeController"] = reflect.TypeOf((*DisconnectNvmeController)(nil)).Elem() +} + +type DisconnectNvmeControllerExRequestType struct { + This ManagedObjectReference `xml:"_this"` + DisconnectSpec []HostNvmeDisconnectSpec `xml:"disconnectSpec,omitempty"` +} + +func init() { + t["DisconnectNvmeControllerExRequestType"] = reflect.TypeOf((*DisconnectNvmeControllerExRequestType)(nil)).Elem() +} + +type DisconnectNvmeControllerEx_Task DisconnectNvmeControllerExRequestType + +func init() { + t["DisconnectNvmeControllerEx_Task"] = reflect.TypeOf((*DisconnectNvmeControllerEx_Task)(nil)).Elem() +} + +type DisconnectNvmeControllerEx_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type DisconnectNvmeControllerRequestType struct { + This ManagedObjectReference `xml:"_this"` + DisconnectSpec HostNvmeDisconnectSpec `xml:"disconnectSpec"` +} + +func init() { + t["DisconnectNvmeControllerRequestType"] = reflect.TypeOf((*DisconnectNvmeControllerRequestType)(nil)).Elem() +} + +type DisconnectNvmeControllerResponse struct { +} + type DisconnectedHostsBlockingEVC struct { EVCConfigFault } @@ -13816,6 +14917,25 @@ func init() { type DiscoverFcoeHbasResponse struct { } +type DiscoverNvmeControllers DiscoverNvmeControllersRequestType + +func init() { + t["DiscoverNvmeControllers"] = reflect.TypeOf((*DiscoverNvmeControllers)(nil)).Elem() +} + +type DiscoverNvmeControllersRequestType struct { + This ManagedObjectReference `xml:"_this"` + DiscoverSpec HostNvmeDiscoverSpec `xml:"discoverSpec"` +} + +func init() { + t["DiscoverNvmeControllersRequestType"] = reflect.TypeOf((*DiscoverNvmeControllersRequestType)(nil)).Elem() +} + +type DiscoverNvmeControllersResponse struct { + Returnval HostNvmeDiscoveryLog `xml:"returnval"` +} + type DiskChangeExtent struct { DynamicData @@ -13839,6 +14959,17 @@ func init() { t["DiskChangeInfo"] = reflect.TypeOf((*DiskChangeInfo)(nil)).Elem() } +type DiskCryptoSpec struct { + DynamicData + + Parent *DiskCryptoSpec `xml:"parent,omitempty"` + Crypto BaseCryptoSpec `xml:"crypto,typeattr"` +} + +func init() { + t["DiskCryptoSpec"] = reflect.TypeOf((*DiskCryptoSpec)(nil)).Elem() +} + type DiskHasPartitions struct { 
VsanDiskFault } @@ -13972,6 +15103,8 @@ type DistributedVirtualPort struct { ConnectionCookie int32 `xml:"connectionCookie,omitempty"` LastStatusChange time.Time `xml:"lastStatusChange"` HostLocalPort *bool `xml:"hostLocalPort"` + ExternalId string `xml:"externalId,omitempty"` + SegmentPortId string `xml:"segmentPortId,omitempty"` } func init() { @@ -13989,12 +15122,37 @@ type DistributedVirtualPortgroupInfo struct { UplinkPortgroup bool `xml:"uplinkPortgroup"` Portgroup ManagedObjectReference `xml:"portgroup"` NetworkReservationSupported *bool `xml:"networkReservationSupported"` + BackingType string `xml:"backingType,omitempty"` + LogicalSwitchUuid string `xml:"logicalSwitchUuid,omitempty"` + SegmentId string `xml:"segmentId,omitempty"` } func init() { t["DistributedVirtualPortgroupInfo"] = reflect.TypeOf((*DistributedVirtualPortgroupInfo)(nil)).Elem() } +type DistributedVirtualPortgroupNsxPortgroupOperationResult struct { + DynamicData + + Portgroups []ManagedObjectReference `xml:"portgroups,omitempty"` + Problems []DistributedVirtualPortgroupProblem `xml:"problems,omitempty"` +} + +func init() { + t["DistributedVirtualPortgroupNsxPortgroupOperationResult"] = reflect.TypeOf((*DistributedVirtualPortgroupNsxPortgroupOperationResult)(nil)).Elem() +} + +type DistributedVirtualPortgroupProblem struct { + DynamicData + + LogicalSwitchUuid string `xml:"logicalSwitchUuid"` + Fault LocalizedMethodFault `xml:"fault"` +} + +func init() { + t["DistributedVirtualPortgroupProblem"] = reflect.TypeOf((*DistributedVirtualPortgroupProblem)(nil)).Elem() +} + type DistributedVirtualSwitchHostMember struct { DynamicData @@ -14021,10 +15179,15 @@ func init() { type DistributedVirtualSwitchHostMemberConfigInfo struct { DynamicData - Host *ManagedObjectReference `xml:"host,omitempty"` - MaxProxySwitchPorts int32 `xml:"maxProxySwitchPorts"` - VendorSpecificConfig []DistributedVirtualSwitchKeyedOpaqueBlob `xml:"vendorSpecificConfig,omitempty"` - Backing BaseDistributedVirtualSwitchHostMemberBacking `xml:"backing,typeattr"` + Host *ManagedObjectReference `xml:"host,omitempty"` + MaxProxySwitchPorts int32 `xml:"maxProxySwitchPorts"` + VendorSpecificConfig []DistributedVirtualSwitchKeyedOpaqueBlob `xml:"vendorSpecificConfig,omitempty"` + Backing BaseDistributedVirtualSwitchHostMemberBacking `xml:"backing,typeattr"` + NsxSwitch *bool `xml:"nsxSwitch"` + EnsEnabled *bool `xml:"ensEnabled"` + EnsInterruptEnabled *bool `xml:"ensInterruptEnabled"` + TransportZones []DistributedVirtualSwitchHostMemberTransportZoneInfo `xml:"transportZones,omitempty"` + NsxtUsedUplinkNames []string `xml:"nsxtUsedUplinkNames,omitempty"` } func init() { @@ -14078,6 +15241,17 @@ func init() { t["DistributedVirtualSwitchHostMemberRuntimeState"] = reflect.TypeOf((*DistributedVirtualSwitchHostMemberRuntimeState)(nil)).Elem() } +type DistributedVirtualSwitchHostMemberTransportZoneInfo struct { + DynamicData + + Uuid string `xml:"uuid"` + Type string `xml:"type"` +} + +func init() { + t["DistributedVirtualSwitchHostMemberTransportZoneInfo"] = reflect.TypeOf((*DistributedVirtualSwitchHostMemberTransportZoneInfo)(nil)).Elem() +} + type DistributedVirtualSwitchHostProductSpec struct { DynamicData @@ -14230,6 +15404,7 @@ type DistributedVirtualSwitchPortCriteria struct { Connected *bool `xml:"connected"` Active *bool `xml:"active"` UplinkPort *bool `xml:"uplinkPort"` + NsxPort *bool `xml:"nsxPort"` Scope *ManagedObjectReference `xml:"scope,omitempty"` PortgroupKey []string `xml:"portgroupKey,omitempty"` Inside *bool `xml:"inside"` @@ -14319,6 
+15494,43 @@ func init() { t["DomainNotFoundFault"] = reflect.TypeOf((*DomainNotFoundFault)(nil)).Elem() } +type DownloadDescriptionTree DownloadDescriptionTreeRequestType + +func init() { + t["DownloadDescriptionTree"] = reflect.TypeOf((*DownloadDescriptionTree)(nil)).Elem() +} + +type DownloadDescriptionTreeRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["DownloadDescriptionTreeRequestType"] = reflect.TypeOf((*DownloadDescriptionTreeRequestType)(nil)).Elem() +} + +type DownloadDescriptionTreeResponse struct { + Returnval []byte `xml:"returnval"` +} + +type DropConnections DropConnectionsRequestType + +func init() { + t["DropConnections"] = reflect.TypeOf((*DropConnections)(nil)).Elem() +} + +type DropConnectionsRequestType struct { + This ManagedObjectReference `xml:"_this"` + ListOfConnections []BaseVirtualMachineConnection `xml:"listOfConnections,omitempty,typeattr"` +} + +func init() { + t["DropConnectionsRequestType"] = reflect.TypeOf((*DropConnectionsRequestType)(nil)).Elem() +} + +type DropConnectionsResponse struct { + Returnval bool `xml:"returnval"` +} + type DrsDisabledEvent struct { ClusterEvent } @@ -15774,6 +16986,12 @@ func init() { t["ElementDescription"] = reflect.TypeOf((*ElementDescription)(nil)).Elem() } +type EnableAlarm EnableAlarmRequestType + +func init() { + t["EnableAlarm"] = reflect.TypeOf((*EnableAlarm)(nil)).Elem() +} + type EnableAlarmActions EnableAlarmActionsRequestType func init() { @@ -15793,6 +17011,37 @@ func init() { type EnableAlarmActionsResponse struct { } +type EnableAlarmRequestType struct { + This ManagedObjectReference `xml:"_this"` + Alarm ManagedObjectReference `xml:"alarm"` + Entity ManagedObjectReference `xml:"entity"` +} + +func init() { + t["EnableAlarmRequestType"] = reflect.TypeOf((*EnableAlarmRequestType)(nil)).Elem() +} + +type EnableAlarmResponse struct { +} + +type EnableClusteredVmdkSupport EnableClusteredVmdkSupportRequestType + +func init() { + t["EnableClusteredVmdkSupport"] = reflect.TypeOf((*EnableClusteredVmdkSupport)(nil)).Elem() +} + +type EnableClusteredVmdkSupportRequestType struct { + This ManagedObjectReference `xml:"_this"` + Datastore ManagedObjectReference `xml:"datastore"` +} + +func init() { + t["EnableClusteredVmdkSupportRequestType"] = reflect.TypeOf((*EnableClusteredVmdkSupportRequestType)(nil)).Elem() +} + +type EnableClusteredVmdkSupportResponse struct { +} + type EnableCrypto EnableCryptoRequestType func init() { @@ -16724,6 +17973,26 @@ type ExtendDisk_TaskResponse struct { Returnval ManagedObjectReference `xml:"returnval"` } +type ExtendHCIRequestType struct { + This ManagedObjectReference `xml:"_this"` + HostInputs []ClusterComputeResourceHostConfigurationInput `xml:"hostInputs,omitempty"` + VSanConfigSpec *SDDCBase `xml:"vSanConfigSpec,omitempty"` +} + +func init() { + t["ExtendHCIRequestType"] = reflect.TypeOf((*ExtendHCIRequestType)(nil)).Elem() +} + +type ExtendHCI_Task ExtendHCIRequestType + +func init() { + t["ExtendHCI_Task"] = reflect.TypeOf((*ExtendHCI_Task)(nil)).Elem() +} + +type ExtendHCI_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + type ExtendVffs ExtendVffsRequestType func init() { @@ -17400,6 +18669,18 @@ func init() { t["FcoeFaultPnicHasNoPortSetFault"] = reflect.TypeOf((*FcoeFaultPnicHasNoPortSetFault)(nil)).Elem() } +type FeatureEVCMode struct { + ElementDescription + + Mask []HostFeatureMask `xml:"mask,omitempty"` + Capability []HostFeatureCapability `xml:"capability,omitempty"` + Requirement 
[]VirtualMachineFeatureRequirement `xml:"requirement,omitempty"` +} + +func init() { + t["FeatureEVCMode"] = reflect.TypeOf((*FeatureEVCMode)(nil)).Elem() +} + type FeatureRequirementsNotMet struct { VirtualHardwareCompatibilityIssue @@ -17418,6 +18699,25 @@ func init() { t["FeatureRequirementsNotMetFault"] = reflect.TypeOf((*FeatureRequirementsNotMetFault)(nil)).Elem() } +type FetchAuditRecords FetchAuditRecordsRequestType + +func init() { + t["FetchAuditRecords"] = reflect.TypeOf((*FetchAuditRecords)(nil)).Elem() +} + +type FetchAuditRecordsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Token string `xml:"token,omitempty"` +} + +func init() { + t["FetchAuditRecordsRequestType"] = reflect.TypeOf((*FetchAuditRecordsRequestType)(nil)).Elem() +} + +type FetchAuditRecordsResponse struct { + Returnval DiagnosticManagerAuditRecordResult `xml:"returnval"` +} + type FetchDVPortKeys FetchDVPortKeysRequestType func init() { @@ -17993,6 +19293,29 @@ func init() { t["FloppyImageFileQuery"] = reflect.TypeOf((*FloppyImageFileQuery)(nil)).Elem() } +type FolderBatchAddHostsToClusterResult struct { + DynamicData + + HostsAddedToCluster []ManagedObjectReference `xml:"hostsAddedToCluster,omitempty"` + HostsFailedInventoryAdd []FolderFailedHostResult `xml:"hostsFailedInventoryAdd,omitempty"` + HostsFailedMoveToCluster []FolderFailedHostResult `xml:"hostsFailedMoveToCluster,omitempty"` +} + +func init() { + t["FolderBatchAddHostsToClusterResult"] = reflect.TypeOf((*FolderBatchAddHostsToClusterResult)(nil)).Elem() +} + +type FolderBatchAddStandaloneHostsResult struct { + DynamicData + + AddedHosts []ManagedObjectReference `xml:"addedHosts,omitempty"` + HostsFailedInventoryAdd []FolderFailedHostResult `xml:"hostsFailedInventoryAdd,omitempty"` +} + +func init() { + t["FolderBatchAddStandaloneHostsResult"] = reflect.TypeOf((*FolderBatchAddStandaloneHostsResult)(nil)).Elem() +} + type FolderEventArgument struct { EntityEventArgument @@ -18003,6 +19326,19 @@ func init() { t["FolderEventArgument"] = reflect.TypeOf((*FolderEventArgument)(nil)).Elem() } +type FolderFailedHostResult struct { + DynamicData + + HostName string `xml:"hostName,omitempty"` + Host *ManagedObjectReference `xml:"host,omitempty"` + Context LocalizableMessage `xml:"context"` + Fault LocalizedMethodFault `xml:"fault"` +} + +func init() { + t["FolderFailedHostResult"] = reflect.TypeOf((*FolderFailedHostResult)(nil)).Elem() +} + type FolderFileInfo struct { FileInfo } @@ -18019,6 +19355,17 @@ func init() { t["FolderFileQuery"] = reflect.TypeOf((*FolderFileQuery)(nil)).Elem() } +type FolderNewHostSpec struct { + DynamicData + + HostCnxSpec HostConnectSpec `xml:"hostCnxSpec"` + EsxLicense string `xml:"esxLicense,omitempty"` +} + +func init() { + t["FolderNewHostSpec"] = reflect.TypeOf((*FolderNewHostSpec)(nil)).Elem() +} + type FormatVffs FormatVffsRequestType func init() { @@ -18546,6 +19893,26 @@ type GetCustomizationSpecResponse struct { Returnval CustomizationSpecItem `xml:"returnval"` } +type GetDefaultKmsCluster GetDefaultKmsClusterRequestType + +func init() { + t["GetDefaultKmsCluster"] = reflect.TypeOf((*GetDefaultKmsCluster)(nil)).Elem() +} + +type GetDefaultKmsClusterRequestType struct { + This ManagedObjectReference `xml:"_this"` + Entity *ManagedObjectReference `xml:"entity,omitempty"` + DefaultsToParent *bool `xml:"defaultsToParent"` +} + +func init() { + t["GetDefaultKmsClusterRequestType"] = reflect.TypeOf((*GetDefaultKmsClusterRequestType)(nil)).Elem() +} + +type GetDefaultKmsClusterResponse struct { + Returnval 
*KeyProviderId `xml:"returnval,omitempty"` +} + type GetPublicKey GetPublicKeyRequestType func init() { @@ -18582,6 +19949,42 @@ type GetResourceUsageResponse struct { Returnval ClusterResourceUsageSummary `xml:"returnval"` } +type GetSiteInfo GetSiteInfoRequestType + +func init() { + t["GetSiteInfo"] = reflect.TypeOf((*GetSiteInfo)(nil)).Elem() +} + +type GetSiteInfoRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["GetSiteInfoRequestType"] = reflect.TypeOf((*GetSiteInfoRequestType)(nil)).Elem() +} + +type GetSiteInfoResponse struct { + Returnval SiteInfo `xml:"returnval"` +} + +type GetSystemVMsRestrictedDatastores GetSystemVMsRestrictedDatastoresRequestType + +func init() { + t["GetSystemVMsRestrictedDatastores"] = reflect.TypeOf((*GetSystemVMsRestrictedDatastores)(nil)).Elem() +} + +type GetSystemVMsRestrictedDatastoresRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["GetSystemVMsRestrictedDatastoresRequestType"] = reflect.TypeOf((*GetSystemVMsRestrictedDatastoresRequestType)(nil)).Elem() +} + +type GetSystemVMsRestrictedDatastoresResponse struct { + Returnval []ManagedObjectReference `xml:"returnval,omitempty"` +} + type GetVchaClusterHealth GetVchaClusterHealthRequestType func init() { @@ -18752,9 +20155,11 @@ func init() { type GuestDiskInfo struct { DynamicData - DiskPath string `xml:"diskPath,omitempty"` - Capacity int64 `xml:"capacity,omitempty"` - FreeSpace int64 `xml:"freeSpace,omitempty"` + DiskPath string `xml:"diskPath,omitempty"` + Capacity int64 `xml:"capacity,omitempty"` + FreeSpace int64 `xml:"freeSpace,omitempty"` + FilesystemType string `xml:"filesystemType,omitempty"` + Mappings []GuestInfoVirtualDiskMapping `xml:"mappings,omitempty"` } func init() { @@ -18812,12 +20217,27 @@ type GuestInfo struct { InteractiveGuestOperationsReady *bool `xml:"interactiveGuestOperationsReady"` GuestStateChangeSupported *bool `xml:"guestStateChangeSupported"` GenerationInfo []GuestInfoNamespaceGenerationInfo `xml:"generationInfo,omitempty"` + HwVersion string `xml:"hwVersion,omitempty"` + CustomizationInfo *GuestInfoCustomizationInfo `xml:"customizationInfo,omitempty"` } func init() { t["GuestInfo"] = reflect.TypeOf((*GuestInfo)(nil)).Elem() } +type GuestInfoCustomizationInfo struct { + DynamicData + + CustomizationStatus string `xml:"customizationStatus"` + StartTime *time.Time `xml:"startTime"` + EndTime *time.Time `xml:"endTime"` + ErrorMsg string `xml:"errorMsg,omitempty"` +} + +func init() { + t["GuestInfoCustomizationInfo"] = reflect.TypeOf((*GuestInfoCustomizationInfo)(nil)).Elem() +} + type GuestInfoNamespaceGenerationInfo struct { DynamicData @@ -18829,6 +20249,16 @@ func init() { t["GuestInfoNamespaceGenerationInfo"] = reflect.TypeOf((*GuestInfoNamespaceGenerationInfo)(nil)).Elem() } +type GuestInfoVirtualDiskMapping struct { + DynamicData + + Key int32 `xml:"key"` +} + +func init() { + t["GuestInfoVirtualDiskMapping"] = reflect.TypeOf((*GuestInfoVirtualDiskMapping)(nil)).Elem() +} + type GuestListFileInfo struct { DynamicData @@ -18973,7 +20403,9 @@ type GuestOsDescriptor struct { NumRecommendedCoresPerSocket int32 `xml:"numRecommendedCoresPerSocket,omitempty"` VvtdSupported *BoolOption `xml:"vvtdSupported,omitempty"` VbsSupported *BoolOption `xml:"vbsSupported,omitempty"` + VsgxSupported *BoolOption `xml:"vsgxSupported,omitempty"` SupportsTPM20 *bool `xml:"supportsTPM20"` + VwdtSupported *bool `xml:"vwdtSupported"` } func init() { @@ -19704,6 +21136,39 @@ func init() { t["HostApplyProfile"] = 
reflect.TypeOf((*HostApplyProfile)(nil)).Elem() } +type HostAssignableHardwareBinding struct { + DynamicData + + InstanceId string `xml:"instanceId"` + Vm ManagedObjectReference `xml:"vm"` +} + +func init() { + t["HostAssignableHardwareBinding"] = reflect.TypeOf((*HostAssignableHardwareBinding)(nil)).Elem() +} + +type HostAssignableHardwareConfig struct { + DynamicData + + AttributeOverride []HostAssignableHardwareConfigAttributeOverride `xml:"attributeOverride,omitempty"` +} + +func init() { + t["HostAssignableHardwareConfig"] = reflect.TypeOf((*HostAssignableHardwareConfig)(nil)).Elem() +} + +type HostAssignableHardwareConfigAttributeOverride struct { + DynamicData + + InstanceId string `xml:"instanceId"` + Name string `xml:"name"` + Value AnyType `xml:"value,typeattr"` +} + +func init() { + t["HostAssignableHardwareConfigAttributeOverride"] = reflect.TypeOf((*HostAssignableHardwareConfigAttributeOverride)(nil)).Elem() +} + type HostAuthenticationManagerInfo struct { DynamicData @@ -19925,6 +21390,29 @@ type HostCapability struct { VmCreateDateSupported *bool `xml:"vmCreateDateSupported"` Vmfs3EOLSupported *bool `xml:"vmfs3EOLSupported"` FtVmcpSupported *bool `xml:"ftVmcpSupported"` + QuickBootSupported *bool `xml:"quickBootSupported"` + EncryptedFtSupported *bool `xml:"encryptedFtSupported"` + AssignableHardwareSupported *bool `xml:"assignableHardwareSupported"` + SuspendToMemorySupported *bool `xml:"suspendToMemorySupported"` + UseFeatureReqsForOldHWv *bool `xml:"useFeatureReqsForOldHWv"` + MarkPerenniallyReservedSupported *bool `xml:"markPerenniallyReservedSupported"` + HppPspSupported *bool `xml:"hppPspSupported"` + DeviceRebindWithoutRebootSupported *bool `xml:"deviceRebindWithoutRebootSupported"` + StoragePolicyChangeSupported *bool `xml:"storagePolicyChangeSupported"` + PrecisionTimeProtocolSupported *bool `xml:"precisionTimeProtocolSupported"` + RemoteDeviceVMotionSupported *bool `xml:"remoteDeviceVMotionSupported"` + MaxSupportedVmMemory int32 `xml:"maxSupportedVmMemory,omitempty"` + AhDeviceHintsSupported *bool `xml:"ahDeviceHintsSupported"` + NvmeOverTcpSupported *bool `xml:"nvmeOverTcpSupported"` + NvmeStorageFabricServicesSupported *bool `xml:"nvmeStorageFabricServicesSupported"` + AssignHwPciConfigSupported *bool `xml:"assignHwPciConfigSupported"` + TimeConfigSupported *bool `xml:"timeConfigSupported"` + NvmeBatchOperationsSupported *bool `xml:"nvmeBatchOperationsSupported"` + PMemFailoverSupported *bool `xml:"pMemFailoverSupported"` + HostConfigEncryptionSupported *bool `xml:"hostConfigEncryptionSupported"` + PtpConfigSupported *bool `xml:"ptpConfigSupported"` + MaxSupportedPtpPorts int32 `xml:"maxSupportedPtpPorts,omitempty"` + PMemIndependentSnapshotSupported *bool `xml:"pMemIndependentSnapshotSupported"` } func init() { @@ -20221,12 +21709,15 @@ type HostConfigInfo struct { DomainList []string `xml:"domainList,omitempty"` ScriptCheckSum []byte `xml:"scriptCheckSum,omitempty"` HostConfigCheckSum []byte `xml:"hostConfigCheckSum,omitempty"` + DescriptionTreeCheckSum []byte `xml:"descriptionTreeCheckSum,omitempty"` GraphicsInfo []HostGraphicsInfo `xml:"graphicsInfo,omitempty"` SharedPassthruGpuTypes []string `xml:"sharedPassthruGpuTypes,omitempty"` GraphicsConfig *HostGraphicsConfig `xml:"graphicsConfig,omitempty"` SharedGpuCapabilities []HostSharedGpuCapabilities `xml:"sharedGpuCapabilities,omitempty"` IoFilterInfo []HostIoFilterInfo `xml:"ioFilterInfo,omitempty"` SriovDevicePool []BaseHostSriovDevicePoolInfo `xml:"sriovDevicePool,omitempty,typeattr"` + 
AssignableHardwareBinding []HostAssignableHardwareBinding `xml:"assignableHardwareBinding,omitempty"` + AssignableHardwareConfig *HostAssignableHardwareConfig `xml:"assignableHardwareConfig,omitempty"` } func init() { @@ -20274,6 +21765,7 @@ type HostConfigManager struct { CertificateManager *ManagedObjectReference `xml:"certificateManager,omitempty"` CryptoManager *ManagedObjectReference `xml:"cryptoManager,omitempty"` NvdimmSystem *ManagedObjectReference `xml:"nvdimmSystem,omitempty"` + AssignableHardwareManager *ManagedObjectReference `xml:"assignableHardwareManager,omitempty"` } func init() { @@ -20301,6 +21793,7 @@ type HostConfigSpec struct { ActiveDirectory []HostActiveDirectory `xml:"activeDirectory,omitempty"` GenericConfig []KeyAnyValue `xml:"genericConfig,omitempty"` GraphicsConfig *HostGraphicsConfig `xml:"graphicsConfig,omitempty"` + AssignableHardwareConfig *HostAssignableHardwareConfig `xml:"assignableHardwareConfig,omitempty"` } func init() { @@ -20574,6 +22067,16 @@ func init() { t["HostDasOkEvent"] = reflect.TypeOf((*HostDasOkEvent)(nil)).Elem() } +type HostDataTransportConnectionInfo struct { + DynamicData + + StaticMemoryConsumed int64 `xml:"staticMemoryConsumed"` +} + +func init() { + t["HostDataTransportConnectionInfo"] = reflect.TypeOf((*HostDataTransportConnectionInfo)(nil)).Elem() +} + type HostDatastoreBrowserSearchResults struct { DynamicData @@ -20668,8 +22171,14 @@ func init() { type HostDateTimeConfig struct { DynamicData - TimeZone string `xml:"timeZone,omitempty"` - NtpConfig *HostNtpConfig `xml:"ntpConfig,omitempty"` + TimeZone string `xml:"timeZone,omitempty"` + NtpConfig *HostNtpConfig `xml:"ntpConfig,omitempty"` + PtpConfig *HostPtpConfig `xml:"ptpConfig,omitempty"` + Protocol string `xml:"protocol,omitempty"` + Enabled *bool `xml:"enabled"` + DisableEvents *bool `xml:"disableEvents"` + DisableFallback *bool `xml:"disableFallback"` + ResetToFactoryDefaults *bool `xml:"resetToFactoryDefaults"` } func init() { @@ -20679,14 +22188,38 @@ func init() { type HostDateTimeInfo struct { DynamicData - TimeZone HostDateTimeSystemTimeZone `xml:"timeZone"` - NtpConfig *HostNtpConfig `xml:"ntpConfig,omitempty"` + TimeZone HostDateTimeSystemTimeZone `xml:"timeZone"` + SystemClockProtocol string `xml:"systemClockProtocol,omitempty"` + NtpConfig *HostNtpConfig `xml:"ntpConfig,omitempty"` + PtpConfig *HostPtpConfig `xml:"ptpConfig,omitempty"` + Enabled *bool `xml:"enabled"` + DisableEvents *bool `xml:"disableEvents"` + DisableFallback *bool `xml:"disableFallback"` + InFallbackState *bool `xml:"inFallbackState"` + ServiceSync *bool `xml:"serviceSync"` + LastSyncTime *time.Time `xml:"lastSyncTime"` + RemoteNtpServer string `xml:"remoteNtpServer,omitempty"` + NtpRunTime int64 `xml:"ntpRunTime,omitempty"` + PtpRunTime int64 `xml:"ptpRunTime,omitempty"` + NtpDuration string `xml:"ntpDuration,omitempty"` + PtpDuration string `xml:"ptpDuration,omitempty"` } func init() { t["HostDateTimeInfo"] = reflect.TypeOf((*HostDateTimeInfo)(nil)).Elem() } +type HostDateTimeSystemServiceTestResult struct { + DynamicData + + WorkingNormally bool `xml:"workingNormally"` + Report []string `xml:"report,omitempty"` +} + +func init() { + t["HostDateTimeSystemServiceTestResult"] = reflect.TypeOf((*HostDateTimeSystemServiceTestResult)(nil)).Elem() +} + type HostDateTimeSystemTimeZone struct { DynamicData @@ -20700,6 +22233,26 @@ func init() { t["HostDateTimeSystemTimeZone"] = reflect.TypeOf((*HostDateTimeSystemTimeZone)(nil)).Elem() } +type HostDeleteVStorageObjectExRequestType struct { + This 
ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` +} + +func init() { + t["HostDeleteVStorageObjectExRequestType"] = reflect.TypeOf((*HostDeleteVStorageObjectExRequestType)(nil)).Elem() +} + +type HostDeleteVStorageObjectEx_Task HostDeleteVStorageObjectExRequestType + +func init() { + t["HostDeleteVStorageObjectEx_Task"] = reflect.TypeOf((*HostDeleteVStorageObjectEx_Task)(nil)).Elem() +} + +type HostDeleteVStorageObjectEx_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + type HostDeleteVStorageObjectRequestType struct { This ManagedObjectReference `xml:"_this"` Id ID `xml:"id"` @@ -21512,6 +23065,10 @@ type HostHardwareInfo struct { BiosInfo *HostBIOSInfo `xml:"biosInfo,omitempty"` ReliableMemoryInfo *HostReliableMemoryInfo `xml:"reliableMemoryInfo,omitempty"` PersistentMemoryInfo *HostPersistentMemoryInfo `xml:"persistentMemoryInfo,omitempty"` + SgxInfo *HostSgxInfo `xml:"sgxInfo,omitempty"` + SevInfo *HostSevInfo `xml:"sevInfo,omitempty"` + MemoryTieringType string `xml:"memoryTieringType,omitempty"` + MemoryTierInfo []HostMemoryTierInfo `xml:"memoryTierInfo,omitempty"` } func init() { @@ -21569,16 +23126,25 @@ func init() { t["HostHasComponentFailureFault"] = reflect.TypeOf((*HostHasComponentFailureFault)(nil)).Elem() } +type HostHbaCreateSpec struct { + DynamicData +} + +func init() { + t["HostHbaCreateSpec"] = reflect.TypeOf((*HostHbaCreateSpec)(nil)).Elem() +} + type HostHostBusAdapter struct { DynamicData - Key string `xml:"key,omitempty"` - Device string `xml:"device"` - Bus int32 `xml:"bus"` - Status string `xml:"status"` - Model string `xml:"model"` - Driver string `xml:"driver,omitempty"` - Pci string `xml:"pci,omitempty"` + Key string `xml:"key,omitempty"` + Device string `xml:"device"` + Bus int32 `xml:"bus"` + Status string `xml:"status"` + Model string `xml:"model"` + Driver string `xml:"driver,omitempty"` + Pci string `xml:"pci,omitempty"` + StorageProtocol string `xml:"storageProtocol,omitempty"` } func init() { @@ -22239,19 +23805,21 @@ func init() { type HostListSummary struct { DynamicData - Host *ManagedObjectReference `xml:"host,omitempty"` - Hardware *HostHardwareSummary `xml:"hardware,omitempty"` - Runtime *HostRuntimeInfo `xml:"runtime,omitempty"` - Config HostConfigSummary `xml:"config"` - QuickStats HostListSummaryQuickStats `xml:"quickStats"` - OverallStatus ManagedEntityStatus `xml:"overallStatus"` - RebootRequired bool `xml:"rebootRequired"` - CustomValue []BaseCustomFieldValue `xml:"customValue,omitempty,typeattr"` - ManagementServerIp string `xml:"managementServerIp,omitempty"` - MaxEVCModeKey string `xml:"maxEVCModeKey,omitempty"` - CurrentEVCModeKey string `xml:"currentEVCModeKey,omitempty"` - Gateway *HostListSummaryGatewaySummary `xml:"gateway,omitempty"` - TpmAttestation *HostTpmAttestationInfo `xml:"tpmAttestation,omitempty"` + Host *ManagedObjectReference `xml:"host,omitempty"` + Hardware *HostHardwareSummary `xml:"hardware,omitempty"` + Runtime *HostRuntimeInfo `xml:"runtime,omitempty"` + Config HostConfigSummary `xml:"config"` + QuickStats HostListSummaryQuickStats `xml:"quickStats"` + OverallStatus ManagedEntityStatus `xml:"overallStatus"` + RebootRequired bool `xml:"rebootRequired"` + CustomValue []BaseCustomFieldValue `xml:"customValue,omitempty,typeattr"` + ManagementServerIp string `xml:"managementServerIp,omitempty"` + MaxEVCModeKey string `xml:"maxEVCModeKey,omitempty"` + CurrentEVCModeKey string `xml:"currentEVCModeKey,omitempty"` + 
CurrentEVCGraphicsModeKey string `xml:"currentEVCGraphicsModeKey,omitempty"` + Gateway *HostListSummaryGatewaySummary `xml:"gateway,omitempty"` + TpmAttestation *HostTpmAttestationInfo `xml:"tpmAttestation,omitempty"` + TrustAuthorityAttestationInfos []HostTrustAuthorityAttestationInfo `xml:"trustAuthorityAttestationInfos,omitempty"` } func init() { @@ -22446,6 +24014,7 @@ type HostMaintenanceSpec struct { DynamicData VsanMode *VsanHostDecommissionMode `xml:"vsanMode,omitempty"` + Purpose string `xml:"purpose,omitempty"` } func init() { @@ -22468,6 +24037,8 @@ type HostMemberRuntimeInfo struct { Host ManagedObjectReference `xml:"host"` Status string `xml:"status,omitempty"` StatusDetail string `xml:"statusDetail,omitempty"` + NsxtStatus string `xml:"nsxtStatus,omitempty"` + NsxtStatusDetail string `xml:"nsxtStatusDetail,omitempty"` HealthCheckResult []BaseHostMemberHealthCheckResult `xml:"healthCheckResult,omitempty,typeattr"` } @@ -22503,6 +24074,19 @@ func init() { t["HostMemorySpec"] = reflect.TypeOf((*HostMemorySpec)(nil)).Elem() } +type HostMemoryTierInfo struct { + DynamicData + + Name string `xml:"name"` + Type string `xml:"type"` + Flags []string `xml:"flags,omitempty"` + Size int64 `xml:"size"` +} + +func init() { + t["HostMemoryTierInfo"] = reflect.TypeOf((*HostMemoryTierInfo)(nil)).Elem() +} + type HostMissingNetworksEvent struct { HostDasEvent @@ -22558,6 +24142,20 @@ func init() { t["HostMultipathInfoFixedLogicalUnitPolicy"] = reflect.TypeOf((*HostMultipathInfoFixedLogicalUnitPolicy)(nil)).Elem() } +type HostMultipathInfoHppLogicalUnitPolicy struct { + HostMultipathInfoLogicalUnitPolicy + + Bytes int64 `xml:"bytes,omitempty"` + Iops int64 `xml:"iops,omitempty"` + Path string `xml:"path,omitempty"` + LatencyEvalTime int64 `xml:"latencyEvalTime,omitempty"` + SamplingIosPerPath int64 `xml:"samplingIosPerPath,omitempty"` +} + +func init() { + t["HostMultipathInfoHppLogicalUnitPolicy"] = reflect.TypeOf((*HostMultipathInfoHppLogicalUnitPolicy)(nil)).Elem() +} + type HostMultipathInfoLogicalUnit struct { DynamicData @@ -22772,6 +24370,7 @@ type HostNetCapabilities struct { DnsConfigSupported bool `xml:"dnsConfigSupported"` DhcpOnVnicSupported bool `xml:"dhcpOnVnicSupported"` IpV6Supported *bool `xml:"ipV6Supported"` + BackupNfcNiocSupported *bool `xml:"backupNfcNiocSupported"` } func init() { @@ -22824,6 +24423,7 @@ type HostNetworkConfig struct { Nat []HostNatServiceConfig `xml:"nat,omitempty"` IpV6Enabled *bool `xml:"ipV6Enabled"` NetStackSpec []HostNetworkConfigNetStackSpec `xml:"netStackSpec,omitempty"` + MigrationStatus string `xml:"migrationStatus,omitempty"` } func init() { @@ -22855,23 +24455,27 @@ func init() { type HostNetworkInfo struct { DynamicData - Vswitch []HostVirtualSwitch `xml:"vswitch,omitempty"` - ProxySwitch []HostProxySwitch `xml:"proxySwitch,omitempty"` - Portgroup []HostPortGroup `xml:"portgroup,omitempty"` - Pnic []PhysicalNic `xml:"pnic,omitempty"` - Vnic []HostVirtualNic `xml:"vnic,omitempty"` - ConsoleVnic []HostVirtualNic `xml:"consoleVnic,omitempty"` - DnsConfig BaseHostDnsConfig `xml:"dnsConfig,omitempty,typeattr"` - IpRouteConfig BaseHostIpRouteConfig `xml:"ipRouteConfig,omitempty,typeattr"` - ConsoleIpRouteConfig BaseHostIpRouteConfig `xml:"consoleIpRouteConfig,omitempty,typeattr"` - RouteTableInfo *HostIpRouteTableInfo `xml:"routeTableInfo,omitempty"` - Dhcp []HostDhcpService `xml:"dhcp,omitempty"` - Nat []HostNatService `xml:"nat,omitempty"` - IpV6Enabled *bool `xml:"ipV6Enabled"` - AtBootIpV6Enabled *bool `xml:"atBootIpV6Enabled"` - 
NetStackInstance []HostNetStackInstance `xml:"netStackInstance,omitempty"` - OpaqueSwitch []HostOpaqueSwitch `xml:"opaqueSwitch,omitempty"` - OpaqueNetwork []HostOpaqueNetworkInfo `xml:"opaqueNetwork,omitempty"` + Vswitch []HostVirtualSwitch `xml:"vswitch,omitempty"` + ProxySwitch []HostProxySwitch `xml:"proxySwitch,omitempty"` + Portgroup []HostPortGroup `xml:"portgroup,omitempty"` + Pnic []PhysicalNic `xml:"pnic,omitempty"` + RdmaDevice []HostRdmaDevice `xml:"rdmaDevice,omitempty"` + Vnic []HostVirtualNic `xml:"vnic,omitempty"` + ConsoleVnic []HostVirtualNic `xml:"consoleVnic,omitempty"` + DnsConfig BaseHostDnsConfig `xml:"dnsConfig,omitempty,typeattr"` + IpRouteConfig BaseHostIpRouteConfig `xml:"ipRouteConfig,omitempty,typeattr"` + ConsoleIpRouteConfig BaseHostIpRouteConfig `xml:"consoleIpRouteConfig,omitempty,typeattr"` + RouteTableInfo *HostIpRouteTableInfo `xml:"routeTableInfo,omitempty"` + Dhcp []HostDhcpService `xml:"dhcp,omitempty"` + Nat []HostNatService `xml:"nat,omitempty"` + IpV6Enabled *bool `xml:"ipV6Enabled"` + AtBootIpV6Enabled *bool `xml:"atBootIpV6Enabled"` + NetStackInstance []HostNetStackInstance `xml:"netStackInstance,omitempty"` + OpaqueSwitch []HostOpaqueSwitch `xml:"opaqueSwitch,omitempty"` + OpaqueNetwork []HostOpaqueNetworkInfo `xml:"opaqueNetwork,omitempty"` + NsxTransportNodeId string `xml:"nsxTransportNodeId,omitempty"` + NvdsToVdsMigrationRequired *bool `xml:"nvdsToVdsMigrationRequired"` + MigrationStatus string `xml:"migrationStatus,omitempty"` } func init() { @@ -22934,6 +24538,16 @@ func init() { t["HostNewNetworkConnectInfo"] = reflect.TypeOf((*HostNewNetworkConnectInfo)(nil)).Elem() } +type HostNfcConnectionInfo struct { + HostDataTransportConnectionInfo + + StreamingMemoryConsumed int64 `xml:"streamingMemoryConsumed,omitempty"` +} + +func init() { + t["HostNfcConnectionInfo"] = reflect.TypeOf((*HostNfcConnectionInfo)(nil)).Elem() +} + type HostNicFailureCriteria struct { DynamicData @@ -23101,6 +24715,197 @@ func init() { t["HostNumericSensorInfo"] = reflect.TypeOf((*HostNumericSensorInfo)(nil)).Elem() } +type HostNvmeConnectSpec struct { + HostNvmeSpec + + Subnqn string `xml:"subnqn"` + ControllerId int32 `xml:"controllerId,omitempty"` + AdminQueueSize int32 `xml:"adminQueueSize,omitempty"` + KeepAliveTimeout int32 `xml:"keepAliveTimeout,omitempty"` +} + +func init() { + t["HostNvmeConnectSpec"] = reflect.TypeOf((*HostNvmeConnectSpec)(nil)).Elem() +} + +type HostNvmeController struct { + DynamicData + + Key string `xml:"key"` + ControllerNumber int32 `xml:"controllerNumber"` + Subnqn string `xml:"subnqn"` + Name string `xml:"name"` + AssociatedAdapter string `xml:"associatedAdapter"` + TransportType string `xml:"transportType"` + FusedOperationSupported bool `xml:"fusedOperationSupported"` + NumberOfQueues int32 `xml:"numberOfQueues"` + QueueSize int32 `xml:"queueSize"` + AttachedNamespace []HostNvmeNamespace `xml:"attachedNamespace,omitempty"` + VendorId string `xml:"vendorId,omitempty"` + Model string `xml:"model,omitempty"` + SerialNumber string `xml:"serialNumber,omitempty"` + FirmwareVersion string `xml:"firmwareVersion,omitempty"` +} + +func init() { + t["HostNvmeController"] = reflect.TypeOf((*HostNvmeController)(nil)).Elem() +} + +type HostNvmeDisconnectSpec struct { + DynamicData + + HbaName string `xml:"hbaName"` + Subnqn string `xml:"subnqn,omitempty"` + ControllerNumber int32 `xml:"controllerNumber,omitempty"` +} + +func init() { + t["HostNvmeDisconnectSpec"] = reflect.TypeOf((*HostNvmeDisconnectSpec)(nil)).Elem() +} + +type 
HostNvmeDiscoverSpec struct { + HostNvmeSpec + + AutoConnect *bool `xml:"autoConnect"` + RootDiscoveryController *bool `xml:"rootDiscoveryController"` +} + +func init() { + t["HostNvmeDiscoverSpec"] = reflect.TypeOf((*HostNvmeDiscoverSpec)(nil)).Elem() +} + +type HostNvmeDiscoveryLog struct { + DynamicData + + Entry []HostNvmeDiscoveryLogEntry `xml:"entry,omitempty"` + Complete bool `xml:"complete"` +} + +func init() { + t["HostNvmeDiscoveryLog"] = reflect.TypeOf((*HostNvmeDiscoveryLog)(nil)).Elem() +} + +type HostNvmeDiscoveryLogEntry struct { + DynamicData + + Subnqn string `xml:"subnqn"` + SubsystemType string `xml:"subsystemType"` + SubsystemPortId int32 `xml:"subsystemPortId"` + ControllerId int32 `xml:"controllerId"` + AdminQueueMaxSize int32 `xml:"adminQueueMaxSize"` + TransportParameters BaseHostNvmeTransportParameters `xml:"transportParameters,typeattr"` + TransportRequirements string `xml:"transportRequirements"` + Connected bool `xml:"connected"` +} + +func init() { + t["HostNvmeDiscoveryLogEntry"] = reflect.TypeOf((*HostNvmeDiscoveryLogEntry)(nil)).Elem() +} + +type HostNvmeNamespace struct { + DynamicData + + Key string `xml:"key"` + Name string `xml:"name"` + Id int32 `xml:"id"` + BlockSize int32 `xml:"blockSize"` + CapacityInBlocks int64 `xml:"capacityInBlocks"` +} + +func init() { + t["HostNvmeNamespace"] = reflect.TypeOf((*HostNvmeNamespace)(nil)).Elem() +} + +type HostNvmeOpaqueTransportParameters struct { + HostNvmeTransportParameters + + Trtype string `xml:"trtype"` + Traddr string `xml:"traddr"` + Adrfam string `xml:"adrfam"` + Trsvcid string `xml:"trsvcid"` + Tsas []byte `xml:"tsas"` +} + +func init() { + t["HostNvmeOpaqueTransportParameters"] = reflect.TypeOf((*HostNvmeOpaqueTransportParameters)(nil)).Elem() +} + +type HostNvmeOverFibreChannelParameters struct { + HostNvmeTransportParameters + + NodeWorldWideName int64 `xml:"nodeWorldWideName"` + PortWorldWideName int64 `xml:"portWorldWideName"` +} + +func init() { + t["HostNvmeOverFibreChannelParameters"] = reflect.TypeOf((*HostNvmeOverFibreChannelParameters)(nil)).Elem() +} + +type HostNvmeOverRdmaParameters struct { + HostNvmeTransportParameters + + Address string `xml:"address"` + AddressFamily string `xml:"addressFamily,omitempty"` + PortNumber int32 `xml:"portNumber,omitempty"` +} + +func init() { + t["HostNvmeOverRdmaParameters"] = reflect.TypeOf((*HostNvmeOverRdmaParameters)(nil)).Elem() +} + +type HostNvmeOverTcpParameters struct { + HostNvmeTransportParameters + + Address string `xml:"address"` + PortNumber int32 `xml:"portNumber,omitempty"` + DigestVerification string `xml:"digestVerification,omitempty"` +} + +func init() { + t["HostNvmeOverTcpParameters"] = reflect.TypeOf((*HostNvmeOverTcpParameters)(nil)).Elem() +} + +type HostNvmeSpec struct { + DynamicData + + HbaName string `xml:"hbaName"` + TransportParameters BaseHostNvmeTransportParameters `xml:"transportParameters,typeattr"` +} + +func init() { + t["HostNvmeSpec"] = reflect.TypeOf((*HostNvmeSpec)(nil)).Elem() +} + +type HostNvmeTopology struct { + DynamicData + + Adapter []HostNvmeTopologyInterface `xml:"adapter,omitempty"` +} + +func init() { + t["HostNvmeTopology"] = reflect.TypeOf((*HostNvmeTopology)(nil)).Elem() +} + +type HostNvmeTopologyInterface struct { + DynamicData + + Key string `xml:"key"` + Adapter string `xml:"adapter"` + ConnectedController []HostNvmeController `xml:"connectedController,omitempty"` +} + +func init() { + t["HostNvmeTopologyInterface"] = reflect.TypeOf((*HostNvmeTopologyInterface)(nil)).Elem() +} + +type 
HostNvmeTransportParameters struct { + DynamicData +} + +func init() { + t["HostNvmeTransportParameters"] = reflect.TypeOf((*HostNvmeTransportParameters)(nil)).Elem() +} + type HostOpaqueNetworkInfo struct { DynamicData @@ -23283,6 +25088,8 @@ type HostPciPassthruConfig struct { Id string `xml:"id"` PassthruEnabled bool `xml:"passthruEnabled"` + ApplyNow *bool `xml:"applyNow"` + HardwareLabel string `xml:"hardwareLabel,omitempty"` } func init() { @@ -23297,12 +25104,29 @@ type HostPciPassthruInfo struct { PassthruEnabled bool `xml:"passthruEnabled"` PassthruCapable bool `xml:"passthruCapable"` PassthruActive bool `xml:"passthruActive"` + HardwareLabel string `xml:"hardwareLabel,omitempty"` } func init() { t["HostPciPassthruInfo"] = reflect.TypeOf((*HostPciPassthruInfo)(nil)).Elem() } +type HostPcieHba struct { + HostHostBusAdapter +} + +func init() { + t["HostPcieHba"] = reflect.TypeOf((*HostPcieHba)(nil)).Elem() +} + +type HostPcieTargetTransport struct { + HostTargetTransport +} + +func init() { + t["HostPcieTargetTransport"] = reflect.TypeOf((*HostPcieTargetTransport)(nil)).Elem() +} + type HostPersistentMemoryInfo struct { DynamicData @@ -23738,18 +25562,25 @@ func init() { type HostProxySwitch struct { DynamicData - DvsUuid string `xml:"dvsUuid"` - DvsName string `xml:"dvsName"` - Key string `xml:"key"` - NumPorts int32 `xml:"numPorts"` - ConfigNumPorts int32 `xml:"configNumPorts,omitempty"` - NumPortsAvailable int32 `xml:"numPortsAvailable"` - UplinkPort []KeyValue `xml:"uplinkPort,omitempty"` - Mtu int32 `xml:"mtu,omitempty"` - Pnic []string `xml:"pnic,omitempty"` - Spec HostProxySwitchSpec `xml:"spec"` - HostLag []HostProxySwitchHostLagConfig `xml:"hostLag,omitempty"` - NetworkReservationSupported *bool `xml:"networkReservationSupported"` + DvsUuid string `xml:"dvsUuid"` + DvsName string `xml:"dvsName"` + Key string `xml:"key"` + NumPorts int32 `xml:"numPorts"` + ConfigNumPorts int32 `xml:"configNumPorts,omitempty"` + NumPortsAvailable int32 `xml:"numPortsAvailable"` + UplinkPort []KeyValue `xml:"uplinkPort,omitempty"` + Mtu int32 `xml:"mtu,omitempty"` + Pnic []string `xml:"pnic,omitempty"` + Spec HostProxySwitchSpec `xml:"spec"` + HostLag []HostProxySwitchHostLagConfig `xml:"hostLag,omitempty"` + NetworkReservationSupported *bool `xml:"networkReservationSupported"` + NsxtEnabled *bool `xml:"nsxtEnabled"` + EnsEnabled *bool `xml:"ensEnabled"` + EnsInterruptEnabled *bool `xml:"ensInterruptEnabled"` + TransportZones []DistributedVirtualSwitchHostMemberTransportZoneInfo `xml:"transportZones,omitempty"` + NsxUsedUplinkPort []string `xml:"nsxUsedUplinkPort,omitempty"` + NsxtStatus string `xml:"nsxtStatus,omitempty"` + NsxtStatusDetail string `xml:"nsxtStatusDetail,omitempty"` } func init() { @@ -23790,6 +25621,117 @@ func init() { t["HostProxySwitchSpec"] = reflect.TypeOf((*HostProxySwitchSpec)(nil)).Elem() } +type HostPtpConfig struct { + DynamicData + + Domain int32 `xml:"domain,omitempty"` + Port []HostPtpConfigPtpPort `xml:"port,omitempty"` +} + +func init() { + t["HostPtpConfig"] = reflect.TypeOf((*HostPtpConfig)(nil)).Elem() +} + +type HostPtpConfigPtpPort struct { + DynamicData + + Index int32 `xml:"index"` + DeviceType string `xml:"deviceType,omitempty"` + Device string `xml:"device,omitempty"` + IpConfig *HostIpConfig `xml:"ipConfig,omitempty"` +} + +func init() { + t["HostPtpConfigPtpPort"] = reflect.TypeOf((*HostPtpConfigPtpPort)(nil)).Elem() +} + +type HostQualifiedName struct { + DynamicData + + Value string `xml:"value"` + Type string `xml:"type"` +} + +func init() { + 
t["HostQualifiedName"] = reflect.TypeOf((*HostQualifiedName)(nil)).Elem() +} + +type HostRdmaDevice struct { + DynamicData + + Key string `xml:"key"` + Device string `xml:"device"` + Driver string `xml:"driver,omitempty"` + Description string `xml:"description,omitempty"` + Backing BaseHostRdmaDeviceBacking `xml:"backing,omitempty,typeattr"` + ConnectionInfo HostRdmaDeviceConnectionInfo `xml:"connectionInfo"` + Capability HostRdmaDeviceCapability `xml:"capability"` +} + +func init() { + t["HostRdmaDevice"] = reflect.TypeOf((*HostRdmaDevice)(nil)).Elem() +} + +type HostRdmaDeviceBacking struct { + DynamicData +} + +func init() { + t["HostRdmaDeviceBacking"] = reflect.TypeOf((*HostRdmaDeviceBacking)(nil)).Elem() +} + +type HostRdmaDeviceCapability struct { + DynamicData + + RoceV1Capable bool `xml:"roceV1Capable"` + RoceV2Capable bool `xml:"roceV2Capable"` + IWarpCapable bool `xml:"iWarpCapable"` +} + +func init() { + t["HostRdmaDeviceCapability"] = reflect.TypeOf((*HostRdmaDeviceCapability)(nil)).Elem() +} + +type HostRdmaDeviceConnectionInfo struct { + DynamicData + + State string `xml:"state"` + Mtu int32 `xml:"mtu"` + SpeedInMbps int32 `xml:"speedInMbps"` +} + +func init() { + t["HostRdmaDeviceConnectionInfo"] = reflect.TypeOf((*HostRdmaDeviceConnectionInfo)(nil)).Elem() +} + +type HostRdmaDevicePnicBacking struct { + HostRdmaDeviceBacking + + PairedUplink string `xml:"pairedUplink"` +} + +func init() { + t["HostRdmaDevicePnicBacking"] = reflect.TypeOf((*HostRdmaDevicePnicBacking)(nil)).Elem() +} + +type HostRdmaHba struct { + HostHostBusAdapter + + AssociatedRdmaDevice string `xml:"associatedRdmaDevice,omitempty"` +} + +func init() { + t["HostRdmaHba"] = reflect.TypeOf((*HostRdmaHba)(nil)).Elem() +} + +type HostRdmaTargetTransport struct { + HostTargetTransport +} + +func init() { + t["HostRdmaTargetTransport"] = reflect.TypeOf((*HostRdmaTargetTransport)(nil)).Elem() +} + type HostReconcileDatastoreInventoryRequestType struct { This ManagedObjectReference `xml:"_this"` Datastore ManagedObjectReference `xml:"datastore"` @@ -23949,6 +25891,50 @@ func init() { t["HostRetrieveVStorageObject"] = reflect.TypeOf((*HostRetrieveVStorageObject)(nil)).Elem() } +type HostRetrieveVStorageObjectMetadata HostRetrieveVStorageObjectMetadataRequestType + +func init() { + t["HostRetrieveVStorageObjectMetadata"] = reflect.TypeOf((*HostRetrieveVStorageObjectMetadata)(nil)).Elem() +} + +type HostRetrieveVStorageObjectMetadataRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` + SnapshotId *ID `xml:"snapshotId,omitempty"` + Prefix string `xml:"prefix,omitempty"` +} + +func init() { + t["HostRetrieveVStorageObjectMetadataRequestType"] = reflect.TypeOf((*HostRetrieveVStorageObjectMetadataRequestType)(nil)).Elem() +} + +type HostRetrieveVStorageObjectMetadataResponse struct { + Returnval []KeyValue `xml:"returnval,omitempty"` +} + +type HostRetrieveVStorageObjectMetadataValue HostRetrieveVStorageObjectMetadataValueRequestType + +func init() { + t["HostRetrieveVStorageObjectMetadataValue"] = reflect.TypeOf((*HostRetrieveVStorageObjectMetadataValue)(nil)).Elem() +} + +type HostRetrieveVStorageObjectMetadataValueRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` + SnapshotId *ID `xml:"snapshotId,omitempty"` + Key string `xml:"key"` +} + +func init() { + t["HostRetrieveVStorageObjectMetadataValueRequestType"] = 
reflect.TypeOf((*HostRetrieveVStorageObjectMetadataValueRequestType)(nil)).Elem() +} + +type HostRetrieveVStorageObjectMetadataValueResponse struct { + Returnval string `xml:"returnval"` +} + type HostRetrieveVStorageObjectRequestType struct { This ManagedObjectReference `xml:"_this"` Id ID `xml:"id"` @@ -23986,21 +25972,23 @@ type HostRetrieveVStorageObjectStateResponse struct { type HostRuntimeInfo struct { DynamicData - ConnectionState HostSystemConnectionState `xml:"connectionState"` - PowerState HostSystemPowerState `xml:"powerState"` - StandbyMode string `xml:"standbyMode,omitempty"` - InMaintenanceMode bool `xml:"inMaintenanceMode"` - InQuarantineMode *bool `xml:"inQuarantineMode"` - BootTime *time.Time `xml:"bootTime"` - HealthSystemRuntime *HealthSystemRuntime `xml:"healthSystemRuntime,omitempty"` - DasHostState *ClusterDasFdmHostState `xml:"dasHostState,omitempty"` - TpmPcrValues []HostTpmDigestInfo `xml:"tpmPcrValues,omitempty"` - VsanRuntimeInfo *VsanHostRuntimeInfo `xml:"vsanRuntimeInfo,omitempty"` - NetworkRuntimeInfo *HostRuntimeInfoNetworkRuntimeInfo `xml:"networkRuntimeInfo,omitempty"` - VFlashResourceRuntimeInfo *HostVFlashManagerVFlashResourceRunTimeInfo `xml:"vFlashResourceRuntimeInfo,omitempty"` - HostMaxVirtualDiskCapacity int64 `xml:"hostMaxVirtualDiskCapacity,omitempty"` - CryptoState string `xml:"cryptoState,omitempty"` - CryptoKeyId *CryptoKeyId `xml:"cryptoKeyId,omitempty"` + ConnectionState HostSystemConnectionState `xml:"connectionState"` + PowerState HostSystemPowerState `xml:"powerState"` + StandbyMode string `xml:"standbyMode,omitempty"` + InMaintenanceMode bool `xml:"inMaintenanceMode"` + InQuarantineMode *bool `xml:"inQuarantineMode"` + BootTime *time.Time `xml:"bootTime"` + HealthSystemRuntime *HealthSystemRuntime `xml:"healthSystemRuntime,omitempty"` + DasHostState *ClusterDasFdmHostState `xml:"dasHostState,omitempty"` + TpmPcrValues []HostTpmDigestInfo `xml:"tpmPcrValues,omitempty"` + VsanRuntimeInfo *VsanHostRuntimeInfo `xml:"vsanRuntimeInfo,omitempty"` + NetworkRuntimeInfo *HostRuntimeInfoNetworkRuntimeInfo `xml:"networkRuntimeInfo,omitempty"` + VFlashResourceRuntimeInfo *HostVFlashManagerVFlashResourceRunTimeInfo `xml:"vFlashResourceRuntimeInfo,omitempty"` + HostMaxVirtualDiskCapacity int64 `xml:"hostMaxVirtualDiskCapacity,omitempty"` + CryptoState string `xml:"cryptoState,omitempty"` + CryptoKeyId *CryptoKeyId `xml:"cryptoKeyId,omitempty"` + StatelessNvdsMigrationReady string `xml:"statelessNvdsMigrationReady,omitempty"` + StateEncryption *HostRuntimeInfoStateEncryptionInfo `xml:"stateEncryption,omitempty"` } func init() { @@ -24032,6 +26020,18 @@ func init() { t["HostRuntimeInfoNetworkRuntimeInfo"] = reflect.TypeOf((*HostRuntimeInfoNetworkRuntimeInfo)(nil)).Elem() } +type HostRuntimeInfoStateEncryptionInfo struct { + DynamicData + + ProtectionMode string `xml:"protectionMode"` + RequireSecureBoot *bool `xml:"requireSecureBoot"` + RequireExecInstalledOnly *bool `xml:"requireExecInstalledOnly"` +} + +func init() { + t["HostRuntimeInfoStateEncryptionInfo"] = reflect.TypeOf((*HostRuntimeInfoStateEncryptionInfo)(nil)).Elem() +} + type HostScheduleReconcileDatastoreInventory HostScheduleReconcileDatastoreInventoryRequestType func init() { @@ -24239,6 +26239,30 @@ func init() { type HostSetVStorageObjectControlFlagsResponse struct { } +type HostSevInfo struct { + DynamicData + + SevState string `xml:"sevState"` + MaxSevEsGuests int64 `xml:"maxSevEsGuests"` +} + +func init() { + t["HostSevInfo"] = reflect.TypeOf((*HostSevInfo)(nil)).Elem() +} + +type 
HostSgxInfo struct { + DynamicData + + SgxState string `xml:"sgxState"` + TotalEpcMemory int64 `xml:"totalEpcMemory"` + FlcMode string `xml:"flcMode"` + LePubKeyHash string `xml:"lePubKeyHash,omitempty"` +} + +func init() { + t["HostSgxInfo"] = reflect.TypeOf((*HostSgxInfo)(nil)).Elem() +} + type HostSharedGpuCapabilities struct { DynamicData @@ -24358,6 +26382,14 @@ func init() { t["HostSpecification"] = reflect.TypeOf((*HostSpecification)(nil)).Elem() } +type HostSpecificationChangedEvent struct { + HostEvent +} + +func init() { + t["HostSpecificationChangedEvent"] = reflect.TypeOf((*HostSpecificationChangedEvent)(nil)).Elem() +} + type HostSpecificationOperationFailed struct { VimFault @@ -24374,6 +26406,24 @@ func init() { t["HostSpecificationOperationFailedFault"] = reflect.TypeOf((*HostSpecificationOperationFailedFault)(nil)).Elem() } +type HostSpecificationRequireEvent struct { + HostEvent +} + +func init() { + t["HostSpecificationRequireEvent"] = reflect.TypeOf((*HostSpecificationRequireEvent)(nil)).Elem() +} + +type HostSpecificationUpdateEvent struct { + HostEvent + + HostSpec HostSpecification `xml:"hostSpec"` +} + +func init() { + t["HostSpecificationUpdateEvent"] = reflect.TypeOf((*HostSpecificationUpdateEvent)(nil)).Elem() +} + type HostSriovConfig struct { HostPciPassthruConfig @@ -24458,6 +26508,7 @@ type HostStorageDeviceInfo struct { HostBusAdapter []BaseHostHostBusAdapter `xml:"hostBusAdapter,omitempty,typeattr"` ScsiLun []BaseScsiLun `xml:"scsiLun,omitempty,typeattr"` ScsiTopology *HostScsiTopology `xml:"scsiTopology,omitempty"` + NvmeTopology *HostNvmeTopology `xml:"nvmeTopology,omitempty"` MultipathInfo *HostMultipathInfo `xml:"multipathInfo,omitempty"` PlugStoreTopology *HostPlugStoreTopology `xml:"plugStoreTopology,omitempty"` SoftwareInternetScsiEnabled bool `xml:"softwareInternetScsiEnabled"` @@ -24534,6 +26585,26 @@ func init() { t["HostSubSpecification"] = reflect.TypeOf((*HostSubSpecification)(nil)).Elem() } +type HostSubSpecificationDeleteEvent struct { + HostEvent + + SubSpecName string `xml:"subSpecName"` +} + +func init() { + t["HostSubSpecificationDeleteEvent"] = reflect.TypeOf((*HostSubSpecificationDeleteEvent)(nil)).Elem() +} + +type HostSubSpecificationUpdateEvent struct { + HostEvent + + HostSubSpec HostSubSpecification `xml:"hostSubSpec"` +} + +func init() { + t["HostSubSpecificationUpdateEvent"] = reflect.TypeOf((*HostSubSpecificationUpdateEvent)(nil)).Elem() +} + type HostSyncFailedEvent struct { HostEvent @@ -24584,6 +26655,7 @@ type HostSystemInfo struct { Uuid string `xml:"uuid"` OtherIdentifyingInfo []HostSystemIdentificationInfo `xml:"otherIdentifyingInfo,omitempty"` SerialNumber string `xml:"serialNumber,omitempty"` + QualifiedName []HostQualifiedName `xml:"qualifiedName,omitempty"` } func init() { @@ -24685,6 +26757,34 @@ func init() { t["HostTargetTransport"] = reflect.TypeOf((*HostTargetTransport)(nil)).Elem() } +type HostTcpHba struct { + HostHostBusAdapter + + AssociatedPnic string `xml:"associatedPnic,omitempty"` +} + +func init() { + t["HostTcpHba"] = reflect.TypeOf((*HostTcpHba)(nil)).Elem() +} + +type HostTcpHbaCreateSpec struct { + HostHbaCreateSpec + + Pnic string `xml:"pnic"` +} + +func init() { + t["HostTcpHbaCreateSpec"] = reflect.TypeOf((*HostTcpHbaCreateSpec)(nil)).Elem() +} + +type HostTcpTargetTransport struct { + HostTargetTransport +} + +func init() { + t["HostTcpTargetTransport"] = reflect.TypeOf((*HostTcpTargetTransport)(nil)).Elem() +} + type HostTpmAttestationInfo struct { DynamicData @@ -24761,6 +26861,14 @@ func 
init() { t["HostTpmEventLogEntry"] = reflect.TypeOf((*HostTpmEventLogEntry)(nil)).Elem() } +type HostTpmNvTagEventDetails struct { + HostTpmBootSecurityOptionEventDetails +} + +func init() { + t["HostTpmNvTagEventDetails"] = reflect.TypeOf((*HostTpmNvTagEventDetails)(nil)).Elem() +} + type HostTpmOptionEventDetails struct { HostTpmEventDetails @@ -24785,6 +26893,20 @@ func init() { t["HostTpmSoftwareComponentEventDetails"] = reflect.TypeOf((*HostTpmSoftwareComponentEventDetails)(nil)).Elem() } +type HostTrustAuthorityAttestationInfo struct { + DynamicData + + AttestationStatus string `xml:"attestationStatus"` + ServiceId string `xml:"serviceId,omitempty"` + AttestedAt *time.Time `xml:"attestedAt"` + AttestedUntil *time.Time `xml:"attestedUntil"` + Messages []LocalizableMessage `xml:"messages,omitempty"` +} + +func init() { + t["HostTrustAuthorityAttestationInfo"] = reflect.TypeOf((*HostTrustAuthorityAttestationInfo)(nil)).Elem() +} + type HostUnresolvedVmfsExtent struct { DynamicData @@ -24861,6 +26983,50 @@ func init() { t["HostUnresolvedVmfsVolumeResolveStatus"] = reflect.TypeOf((*HostUnresolvedVmfsVolumeResolveStatus)(nil)).Elem() } +type HostUpdateVStorageObjectMetadataExRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` + Metadata []KeyValue `xml:"metadata,omitempty"` + DeleteKeys []string `xml:"deleteKeys,omitempty"` +} + +func init() { + t["HostUpdateVStorageObjectMetadataExRequestType"] = reflect.TypeOf((*HostUpdateVStorageObjectMetadataExRequestType)(nil)).Elem() +} + +type HostUpdateVStorageObjectMetadataEx_Task HostUpdateVStorageObjectMetadataExRequestType + +func init() { + t["HostUpdateVStorageObjectMetadataEx_Task"] = reflect.TypeOf((*HostUpdateVStorageObjectMetadataEx_Task)(nil)).Elem() +} + +type HostUpdateVStorageObjectMetadataEx_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type HostUpdateVStorageObjectMetadataRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` + Metadata []KeyValue `xml:"metadata,omitempty"` + DeleteKeys []string `xml:"deleteKeys,omitempty"` +} + +func init() { + t["HostUpdateVStorageObjectMetadataRequestType"] = reflect.TypeOf((*HostUpdateVStorageObjectMetadataRequestType)(nil)).Elem() +} + +type HostUpdateVStorageObjectMetadata_Task HostUpdateVStorageObjectMetadataRequestType + +func init() { + t["HostUpdateVStorageObjectMetadata_Task"] = reflect.TypeOf((*HostUpdateVStorageObjectMetadata_Task)(nil)).Elem() +} + +type HostUpdateVStorageObjectMetadata_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + type HostUpgradeFailedEvent struct { HostEvent } @@ -25008,6 +27174,39 @@ func init() { t["HostVMotionInfo"] = reflect.TypeOf((*HostVMotionInfo)(nil)).Elem() } +type HostVMotionManagerDstInstantCloneResult struct { + DynamicData + + DstVmId int32 `xml:"dstVmId,omitempty"` + StartTime int64 `xml:"startTime,omitempty"` + CptLoadTime int64 `xml:"cptLoadTime,omitempty"` + CptLoadDoneTime int64 `xml:"cptLoadDoneTime,omitempty"` + ReplicateMemDoneTime int64 `xml:"replicateMemDoneTime,omitempty"` + EndTime int64 `xml:"endTime,omitempty"` + CptXferTime int64 `xml:"cptXferTime,omitempty"` + CptCacheUsed int64 `xml:"cptCacheUsed,omitempty"` + DevCptStreamSize int64 `xml:"devCptStreamSize,omitempty"` + DevCptStreamTime int64 `xml:"devCptStreamTime,omitempty"` +} + +func init() { + t["HostVMotionManagerDstInstantCloneResult"] = 
reflect.TypeOf((*HostVMotionManagerDstInstantCloneResult)(nil)).Elem() +} + +type HostVMotionManagerSrcInstantCloneResult struct { + DynamicData + + StartTime int64 `xml:"startTime,omitempty"` + QuiesceTime int64 `xml:"quiesceTime,omitempty"` + QuiesceDoneTime int64 `xml:"quiesceDoneTime,omitempty"` + ResumeDoneTime int64 `xml:"resumeDoneTime,omitempty"` + EndTime int64 `xml:"endTime,omitempty"` +} + +func init() { + t["HostVMotionManagerSrcInstantCloneResult"] = reflect.TypeOf((*HostVMotionManagerSrcInstantCloneResult)(nil)).Elem() +} + type HostVMotionNetConfig struct { DynamicData @@ -25256,6 +27455,7 @@ type HostVirtualNicSpec struct { ExternalId string `xml:"externalId,omitempty"` PinnedPnic string `xml:"pinnedPnic,omitempty"` IpRouteSpec *HostVirtualNicIpRouteSpec `xml:"ipRouteSpec,omitempty"` + SystemOwned *bool `xml:"systemOwned"` } func init() { @@ -25704,6 +27904,36 @@ func init() { t["HttpNfcLeaseManifestEntry"] = reflect.TypeOf((*HttpNfcLeaseManifestEntry)(nil)).Elem() } +type HttpNfcLeaseProbeResult struct { + DynamicData + + ServerAccessible bool `xml:"serverAccessible"` +} + +func init() { + t["HttpNfcLeaseProbeResult"] = reflect.TypeOf((*HttpNfcLeaseProbeResult)(nil)).Elem() +} + +type HttpNfcLeaseProbeUrls HttpNfcLeaseProbeUrlsRequestType + +func init() { + t["HttpNfcLeaseProbeUrls"] = reflect.TypeOf((*HttpNfcLeaseProbeUrls)(nil)).Elem() +} + +type HttpNfcLeaseProbeUrlsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Files []HttpNfcLeaseSourceFile `xml:"files,omitempty"` + Timeout int32 `xml:"timeout,omitempty"` +} + +func init() { + t["HttpNfcLeaseProbeUrlsRequestType"] = reflect.TypeOf((*HttpNfcLeaseProbeUrlsRequestType)(nil)).Elem() +} + +type HttpNfcLeaseProbeUrlsResponse struct { + Returnval []HttpNfcLeaseProbeResult `xml:"returnval,omitempty"` +} + type HttpNfcLeaseProgress HttpNfcLeaseProgressRequestType func init() { @@ -27319,7 +29549,7 @@ func init() { type InvalidOperationOnSecondaryVm struct { VmFaultToleranceIssue - InstanceUuid string `xml:"instanceUuid,omitempty"` + InstanceUuid string `xml:"instanceUuid"` } func init() { @@ -27716,6 +29946,25 @@ func init() { t["IpRouteProfile"] = reflect.TypeOf((*IpRouteProfile)(nil)).Elem() } +type IsKmsClusterActive IsKmsClusterActiveRequestType + +func init() { + t["IsKmsClusterActive"] = reflect.TypeOf((*IsKmsClusterActive)(nil)).Elem() +} + +type IsKmsClusterActiveRequestType struct { + This ManagedObjectReference `xml:"_this"` + Cluster *KeyProviderId `xml:"cluster,omitempty"` +} + +func init() { + t["IsKmsClusterActiveRequestType"] = reflect.TypeOf((*IsKmsClusterActiveRequestType)(nil)).Elem() +} + +type IsKmsClusterActiveResponse struct { + Returnval bool `xml:"returnval"` +} + type IsSharedGraphicsActive IsSharedGraphicsActiveRequestType func init() { @@ -28083,6 +30332,22 @@ func init() { t["KeyAnyValue"] = reflect.TypeOf((*KeyAnyValue)(nil)).Elem() } +type KeyNotFound struct { + VimFault + + Key string `xml:"key"` +} + +func init() { + t["KeyNotFound"] = reflect.TypeOf((*KeyNotFound)(nil)).Elem() +} + +type KeyNotFoundFault KeyNotFound + +func init() { + t["KeyNotFoundFault"] = reflect.TypeOf((*KeyNotFoundFault)(nil)).Elem() +} + type KeyProviderId struct { DynamicData @@ -28107,9 +30372,14 @@ func init() { type KmipClusterInfo struct { DynamicData - ClusterId KeyProviderId `xml:"clusterId"` - Servers []KmipServerInfo `xml:"servers,omitempty"` - UseAsDefault bool `xml:"useAsDefault"` + ClusterId KeyProviderId `xml:"clusterId"` + Servers []KmipServerInfo `xml:"servers,omitempty"` + UseAsDefault 
bool `xml:"useAsDefault"` + ManagementType string `xml:"managementType,omitempty"` + UseAsEntityDefault []ManagedObjectReference `xml:"useAsEntityDefault,omitempty"` + HasBackup *bool `xml:"hasBackup"` + TpmRequired *bool `xml:"tpmRequired"` + KeyId string `xml:"keyId,omitempty"` } func init() { @@ -28754,6 +31024,27 @@ type ListKmipServersResponse struct { Returnval []KmipClusterInfo `xml:"returnval,omitempty"` } +type ListKmsClusters ListKmsClustersRequestType + +func init() { + t["ListKmsClusters"] = reflect.TypeOf((*ListKmsClusters)(nil)).Elem() +} + +type ListKmsClustersRequestType struct { + This ManagedObjectReference `xml:"_this"` + IncludeKmsServers *bool `xml:"includeKmsServers"` + ManagementTypeFilter int32 `xml:"managementTypeFilter,omitempty"` + StatusFilter int32 `xml:"statusFilter,omitempty"` +} + +func init() { + t["ListKmsClustersRequestType"] = reflect.TypeOf((*ListKmsClustersRequestType)(nil)).Elem() +} + +type ListKmsClustersResponse struct { + Returnval []KmipClusterInfo `xml:"returnval,omitempty"` +} + type ListProcessesInGuest ListProcessesInGuestRequestType func init() { @@ -29495,6 +31786,63 @@ func init() { type MarkForRemovalResponse struct { } +type MarkPerenniallyReserved MarkPerenniallyReservedRequestType + +func init() { + t["MarkPerenniallyReserved"] = reflect.TypeOf((*MarkPerenniallyReserved)(nil)).Elem() +} + +type MarkPerenniallyReservedExRequestType struct { + This ManagedObjectReference `xml:"_this"` + LunUuid []string `xml:"lunUuid,omitempty"` + State bool `xml:"state"` +} + +func init() { + t["MarkPerenniallyReservedExRequestType"] = reflect.TypeOf((*MarkPerenniallyReservedExRequestType)(nil)).Elem() +} + +type MarkPerenniallyReservedEx_Task MarkPerenniallyReservedExRequestType + +func init() { + t["MarkPerenniallyReservedEx_Task"] = reflect.TypeOf((*MarkPerenniallyReservedEx_Task)(nil)).Elem() +} + +type MarkPerenniallyReservedEx_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + +type MarkPerenniallyReservedRequestType struct { + This ManagedObjectReference `xml:"_this"` + LunUuid string `xml:"lunUuid"` + State bool `xml:"state"` +} + +func init() { + t["MarkPerenniallyReservedRequestType"] = reflect.TypeOf((*MarkPerenniallyReservedRequestType)(nil)).Elem() +} + +type MarkPerenniallyReservedResponse struct { +} + +type MarkServiceProviderEntities MarkServiceProviderEntitiesRequestType + +func init() { + t["MarkServiceProviderEntities"] = reflect.TypeOf((*MarkServiceProviderEntities)(nil)).Elem() +} + +type MarkServiceProviderEntitiesRequestType struct { + This ManagedObjectReference `xml:"_this"` + Entity []ManagedObjectReference `xml:"entity,omitempty"` +} + +func init() { + t["MarkServiceProviderEntitiesRequestType"] = reflect.TypeOf((*MarkServiceProviderEntitiesRequestType)(nil)).Elem() +} + +type MarkServiceProviderEntitiesResponse struct { +} + type MemoryFileFormatNotSupportedByDatastore struct { UnsupportedDatastore @@ -31006,6 +33354,7 @@ type NetworkProfile struct { DvsHostNic []DvsHostVNicProfile `xml:"dvsHostNic,omitempty"` NsxHostNic []NsxHostVNicProfile `xml:"nsxHostNic,omitempty"` NetStackInstance []NetStackInstanceProfile `xml:"netStackInstance,omitempty"` + OpaqueSwitch *OpaqueSwitchProfile `xml:"opaqueSwitch,omitempty"` } func init() { @@ -31037,8 +33386,8 @@ type NetworkSummary struct { Network *ManagedObjectReference `xml:"network,omitempty"` Name string `xml:"name"` Accessible bool `xml:"accessible"` - IpPoolName string `xml:"ipPoolName,omitempty"` - IpPoolId int32 `xml:"ipPoolId,omitempty"` + 
IpPoolName string `xml:"ipPoolName"` + IpPoolId *int32 `xml:"ipPoolId"` } func init() { @@ -32101,6 +34450,22 @@ func init() { t["NvdimmNamespaceDeleteSpec"] = reflect.TypeOf((*NvdimmNamespaceDeleteSpec)(nil)).Elem() } +type NvdimmNamespaceDetails struct { + DynamicData + + Uuid string `xml:"uuid"` + FriendlyName string `xml:"friendlyName"` + Size int64 `xml:"size"` + Type string `xml:"type"` + NamespaceHealthStatus string `xml:"namespaceHealthStatus"` + InterleavesetID int32 `xml:"interleavesetID"` + State string `xml:"state"` +} + +func init() { + t["NvdimmNamespaceDetails"] = reflect.TypeOf((*NvdimmNamespaceDetails)(nil)).Elem() +} + type NvdimmNamespaceInfo struct { DynamicData @@ -32118,6 +34483,18 @@ func init() { t["NvdimmNamespaceInfo"] = reflect.TypeOf((*NvdimmNamespaceInfo)(nil)).Elem() } +type NvdimmPMemNamespaceCreateSpec struct { + DynamicData + + FriendlyName string `xml:"friendlyName,omitempty"` + Size int64 `xml:"size"` + InterleavesetID int32 `xml:"interleavesetID"` +} + +func init() { + t["NvdimmPMemNamespaceCreateSpec"] = reflect.TypeOf((*NvdimmPMemNamespaceCreateSpec)(nil)).Elem() +} + type NvdimmRegionInfo struct { DynamicData @@ -32160,6 +34537,7 @@ type NvdimmSystemInfo struct { ISetInfo []NvdimmInterleaveSetInfo `xml:"iSetInfo,omitempty"` Namespace []NvdimmGuid `xml:"namespace,omitempty"` NsInfo []NvdimmNamespaceInfo `xml:"nsInfo,omitempty"` + NsDetails []NvdimmNamespaceDetails `xml:"nsDetails,omitempty"` } func init() { @@ -32245,6 +34623,14 @@ func init() { t["OpaqueNetworkTargetInfo"] = reflect.TypeOf((*OpaqueNetworkTargetInfo)(nil)).Elem() } +type OpaqueSwitchProfile struct { + ApplyProfile +} + +func init() { + t["OpaqueSwitchProfile"] = reflect.TypeOf((*OpaqueSwitchProfile)(nil)).Elem() +} + type OpenInventoryViewFolder OpenInventoryViewFolderRequestType func init() { @@ -33972,6 +36358,20 @@ func init() { t["PassiveNodeNetworkSpec"] = reflect.TypeOf((*PassiveNodeNetworkSpec)(nil)).Elem() } +type PasswordExpired struct { + InvalidLogin +} + +func init() { + t["PasswordExpired"] = reflect.TypeOf((*PasswordExpired)(nil)).Elem() +} + +type PasswordExpiredFault PasswordExpired + +func init() { + t["PasswordExpiredFault"] = reflect.TypeOf((*PasswordExpiredFault)(nil)).Elem() +} + type PasswordField struct { DynamicData @@ -34491,6 +36891,8 @@ type PhysicalNic struct { ResourcePoolSchedulerDisallowedReason []string `xml:"resourcePoolSchedulerDisallowedReason,omitempty"` AutoNegotiateSupported *bool `xml:"autoNegotiateSupported"` EnhancedNetworkingStackSupported *bool `xml:"enhancedNetworkingStackSupported"` + EnsInterruptSupported *bool `xml:"ensInterruptSupported"` + RdmaDevice string `xml:"rdmaDevice,omitempty"` } func init() { @@ -34623,6 +37025,7 @@ type PhysicalNicSpec struct { Ip *HostIpConfig `xml:"ip,omitempty"` LinkSpeed *PhysicalNicLinkInfo `xml:"linkSpeed,omitempty"` EnableEnhancedNetworkingStack *bool `xml:"enableEnhancedNetworkingStack"` + EnsInterruptEnabled *bool `xml:"ensInterruptEnabled"` } func init() { @@ -36066,6 +38469,44 @@ type QueryConnectionInfoViaSpecResponse struct { Returnval HostConnectInfo `xml:"returnval"` } +type QueryConnections QueryConnectionsRequestType + +func init() { + t["QueryConnections"] = reflect.TypeOf((*QueryConnections)(nil)).Elem() +} + +type QueryConnectionsRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["QueryConnectionsRequestType"] = reflect.TypeOf((*QueryConnectionsRequestType)(nil)).Elem() +} + +type QueryConnectionsResponse struct { + Returnval 
[]BaseVirtualMachineConnection `xml:"returnval,omitempty,typeattr"` +} + +type QueryCryptoKeyStatus QueryCryptoKeyStatusRequestType + +func init() { + t["QueryCryptoKeyStatus"] = reflect.TypeOf((*QueryCryptoKeyStatus)(nil)).Elem() +} + +type QueryCryptoKeyStatusRequestType struct { + This ManagedObjectReference `xml:"_this"` + KeyIds []CryptoKeyId `xml:"keyIds,omitempty"` + CheckKeyBitMap int32 `xml:"checkKeyBitMap"` +} + +func init() { + t["QueryCryptoKeyStatusRequestType"] = reflect.TypeOf((*QueryCryptoKeyStatusRequestType)(nil)).Elem() +} + +type QueryCryptoKeyStatusResponse struct { + Returnval []CryptoManagerKmipCryptoKeyStatus `xml:"returnval,omitempty"` +} + type QueryDatastorePerformanceSummary QueryDatastorePerformanceSummaryRequestType func init() { @@ -36561,6 +39002,25 @@ type QueryHostStatusResponse struct { Returnval VsanHostClusterStatus `xml:"returnval"` } +type QueryHostsWithAttachedLun QueryHostsWithAttachedLunRequestType + +func init() { + t["QueryHostsWithAttachedLun"] = reflect.TypeOf((*QueryHostsWithAttachedLun)(nil)).Elem() +} + +type QueryHostsWithAttachedLunRequestType struct { + This ManagedObjectReference `xml:"_this"` + LunUuid string `xml:"lunUuid"` +} + +func init() { + t["QueryHostsWithAttachedLunRequestType"] = reflect.TypeOf((*QueryHostsWithAttachedLunRequestType)(nil)).Elem() +} + +type QueryHostsWithAttachedLunResponse struct { + Returnval []ManagedObjectReference `xml:"returnval,omitempty"` +} + type QueryIORMConfigOption QueryIORMConfigOptionRequestType func init() { @@ -37135,6 +39595,24 @@ type QueryPolicyMetadataResponse struct { Returnval []ProfilePolicyMetadata `xml:"returnval,omitempty"` } +type QueryProductLockerLocation QueryProductLockerLocationRequestType + +func init() { + t["QueryProductLockerLocation"] = reflect.TypeOf((*QueryProductLockerLocation)(nil)).Elem() +} + +type QueryProductLockerLocationRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["QueryProductLockerLocationRequestType"] = reflect.TypeOf((*QueryProductLockerLocationRequestType)(nil)).Elem() +} + +type QueryProductLockerLocationResponse struct { + Returnval string `xml:"returnval"` +} + type QueryProfileStructure QueryProfileStructureRequestType func init() { @@ -38860,6 +41338,25 @@ func init() { type RegisterKmipServerResponse struct { } +type RegisterKmsCluster RegisterKmsClusterRequestType + +func init() { + t["RegisterKmsCluster"] = reflect.TypeOf((*RegisterKmsCluster)(nil)).Elem() +} + +type RegisterKmsClusterRequestType struct { + This ManagedObjectReference `xml:"_this"` + ClusterId KeyProviderId `xml:"clusterId"` + ManagementType string `xml:"managementType,omitempty"` +} + +func init() { + t["RegisterKmsClusterRequestType"] = reflect.TypeOf((*RegisterKmsClusterRequestType)(nil)).Elem() +} + +type RegisterKmsClusterResponse struct { +} + type RegisterVMRequestType struct { This ManagedObjectReference `xml:"_this"` Path string `xml:"path"` @@ -39345,6 +41842,7 @@ type RemoveInternetScsiSendTargetsRequestType struct { This ManagedObjectReference `xml:"_this"` IScsiHbaDevice string `xml:"iScsiHbaDevice"` Targets []HostInternetScsiHbaSendTarget `xml:"targets"` + Force *bool `xml:"force"` } func init() { @@ -39505,6 +42003,24 @@ func init() { type RemoveNetworkResourcePoolResponse struct { } +type RemoveNvmeOverRdmaAdapter RemoveNvmeOverRdmaAdapterRequestType + +func init() { + t["RemoveNvmeOverRdmaAdapter"] = reflect.TypeOf((*RemoveNvmeOverRdmaAdapter)(nil)).Elem() +} + +type RemoveNvmeOverRdmaAdapterRequestType struct { + This 
ManagedObjectReference `xml:"_this"` + HbaDeviceName string `xml:"hbaDeviceName"` +} + +func init() { + t["RemoveNvmeOverRdmaAdapterRequestType"] = reflect.TypeOf((*RemoveNvmeOverRdmaAdapterRequestType)(nil)).Elem() +} + +type RemoveNvmeOverRdmaAdapterResponse struct { +} + type RemovePerfInterval RemovePerfIntervalRequestType func init() { @@ -39634,6 +42150,24 @@ type RemoveSnapshot_TaskResponse struct { Returnval ManagedObjectReference `xml:"returnval"` } +type RemoveSoftwareAdapter RemoveSoftwareAdapterRequestType + +func init() { + t["RemoveSoftwareAdapter"] = reflect.TypeOf((*RemoveSoftwareAdapter)(nil)).Elem() +} + +type RemoveSoftwareAdapterRequestType struct { + This ManagedObjectReference `xml:"_this"` + HbaDeviceName string `xml:"hbaDeviceName"` +} + +func init() { + t["RemoveSoftwareAdapterRequestType"] = reflect.TypeOf((*RemoveSoftwareAdapterRequestType)(nil)).Elem() +} + +type RemoveSoftwareAdapterResponse struct { +} + type RemoveUser RemoveUserRequestType func init() { @@ -40005,10 +42539,10 @@ func init() { type ReplicationVmFault struct { ReplicationFault - Reason string `xml:"reason,omitempty"` - State string `xml:"state,omitempty"` - InstanceId string `xml:"instanceId,omitempty"` - Vm *ManagedObjectReference `xml:"vm,omitempty"` + Reason string `xml:"reason"` + State string `xml:"state,omitempty"` + InstanceId string `xml:"instanceId,omitempty"` + Vm ManagedObjectReference `xml:"vm"` } func init() { @@ -40430,11 +42964,12 @@ func init() { type ResourceConfigSpec struct { DynamicData - Entity *ManagedObjectReference `xml:"entity,omitempty"` - ChangeVersion string `xml:"changeVersion,omitempty"` - LastModified *time.Time `xml:"lastModified"` - CpuAllocation ResourceAllocationInfo `xml:"cpuAllocation"` - MemoryAllocation ResourceAllocationInfo `xml:"memoryAllocation"` + Entity *ManagedObjectReference `xml:"entity,omitempty"` + ChangeVersion string `xml:"changeVersion,omitempty"` + LastModified *time.Time `xml:"lastModified"` + CpuAllocation ResourceAllocationInfo `xml:"cpuAllocation"` + MemoryAllocation ResourceAllocationInfo `xml:"memoryAllocation"` + ScaleDescendantsShares string `xml:"scaleDescendantsShares,omitempty"` } func init() { @@ -40577,9 +43112,10 @@ func init() { type ResourcePoolRuntimeInfo struct { DynamicData - Memory ResourcePoolResourceUsage `xml:"memory"` - Cpu ResourcePoolResourceUsage `xml:"cpu"` - OverallStatus ManagedEntityStatus `xml:"overallStatus"` + Memory ResourcePoolResourceUsage `xml:"memory"` + Cpu ResourcePoolResourceUsage `xml:"cpu"` + OverallStatus ManagedEntityStatus `xml:"overallStatus"` + SharesScalable string `xml:"sharesScalable,omitempty"` } func init() { @@ -40861,6 +43397,24 @@ type RetrieveDiskPartitionInfoResponse struct { Returnval []HostDiskPartitionInfo `xml:"returnval,omitempty"` } +type RetrieveDynamicPassthroughInfo RetrieveDynamicPassthroughInfoRequestType + +func init() { + t["RetrieveDynamicPassthroughInfo"] = reflect.TypeOf((*RetrieveDynamicPassthroughInfo)(nil)).Elem() +} + +type RetrieveDynamicPassthroughInfoRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RetrieveDynamicPassthroughInfoRequestType"] = reflect.TypeOf((*RetrieveDynamicPassthroughInfoRequestType)(nil)).Elem() +} + +type RetrieveDynamicPassthroughInfoResponse struct { + Returnval []VirtualMachineDynamicPassthroughInfo `xml:"returnval,omitempty"` +} + type RetrieveEntityPermissions RetrieveEntityPermissionsRequestType func init() { @@ -40900,6 +43454,24 @@ type RetrieveEntityScheduledTaskResponse struct { Returnval 
[]ManagedObjectReference `xml:"returnval,omitempty"` } +type RetrieveFreeEpcMemory RetrieveFreeEpcMemoryRequestType + +func init() { + t["RetrieveFreeEpcMemory"] = reflect.TypeOf((*RetrieveFreeEpcMemory)(nil)).Elem() +} + +type RetrieveFreeEpcMemoryRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RetrieveFreeEpcMemoryRequestType"] = reflect.TypeOf((*RetrieveFreeEpcMemoryRequestType)(nil)).Elem() +} + +type RetrieveFreeEpcMemoryResponse struct { + Returnval int64 `xml:"returnval"` +} + type RetrieveHardwareUptime RetrieveHardwareUptimeRequestType func init() { @@ -41187,6 +43759,45 @@ type RetrieveServiceContentResponse struct { Returnval ServiceContent `xml:"returnval"` } +type RetrieveServiceProviderEntities RetrieveServiceProviderEntitiesRequestType + +func init() { + t["RetrieveServiceProviderEntities"] = reflect.TypeOf((*RetrieveServiceProviderEntities)(nil)).Elem() +} + +type RetrieveServiceProviderEntitiesRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["RetrieveServiceProviderEntitiesRequestType"] = reflect.TypeOf((*RetrieveServiceProviderEntitiesRequestType)(nil)).Elem() +} + +type RetrieveServiceProviderEntitiesResponse struct { + Returnval []ManagedObjectReference `xml:"returnval,omitempty"` +} + +type RetrieveSnapshotDetails RetrieveSnapshotDetailsRequestType + +func init() { + t["RetrieveSnapshotDetails"] = reflect.TypeOf((*RetrieveSnapshotDetails)(nil)).Elem() +} + +type RetrieveSnapshotDetailsRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` + SnapshotId ID `xml:"snapshotId"` +} + +func init() { + t["RetrieveSnapshotDetailsRequestType"] = reflect.TypeOf((*RetrieveSnapshotDetailsRequestType)(nil)).Elem() +} + +type RetrieveSnapshotDetailsResponse struct { + Returnval VStorageObjectSnapshotDetails `xml:"returnval"` +} + type RetrieveSnapshotInfo RetrieveSnapshotInfoRequestType func init() { @@ -41861,25 +44472,27 @@ func init() { type ScsiLun struct { HostDevice - Key string `xml:"key,omitempty"` - Uuid string `xml:"uuid"` - Descriptor []ScsiLunDescriptor `xml:"descriptor,omitempty"` - CanonicalName string `xml:"canonicalName,omitempty"` - DisplayName string `xml:"displayName,omitempty"` - LunType string `xml:"lunType"` - Vendor string `xml:"vendor,omitempty"` - Model string `xml:"model,omitempty"` - Revision string `xml:"revision,omitempty"` - ScsiLevel int32 `xml:"scsiLevel,omitempty"` - SerialNumber string `xml:"serialNumber,omitempty"` - DurableName *ScsiLunDurableName `xml:"durableName,omitempty"` - AlternateName []ScsiLunDurableName `xml:"alternateName,omitempty"` - StandardInquiry []byte `xml:"standardInquiry,omitempty"` - QueueDepth int32 `xml:"queueDepth,omitempty"` - OperationalState []string `xml:"operationalState"` - Capabilities *ScsiLunCapabilities `xml:"capabilities,omitempty"` - VStorageSupport string `xml:"vStorageSupport,omitempty"` - ProtocolEndpoint *bool `xml:"protocolEndpoint"` + Key string `xml:"key,omitempty"` + Uuid string `xml:"uuid"` + Descriptor []ScsiLunDescriptor `xml:"descriptor,omitempty"` + CanonicalName string `xml:"canonicalName,omitempty"` + DisplayName string `xml:"displayName,omitempty"` + LunType string `xml:"lunType"` + Vendor string `xml:"vendor,omitempty"` + Model string `xml:"model,omitempty"` + Revision string `xml:"revision,omitempty"` + ScsiLevel int32 `xml:"scsiLevel,omitempty"` + SerialNumber string `xml:"serialNumber,omitempty"` + DurableName *ScsiLunDurableName 
`xml:"durableName,omitempty"` + AlternateName []ScsiLunDurableName `xml:"alternateName,omitempty"` + StandardInquiry []byte `xml:"standardInquiry,omitempty"` + QueueDepth int32 `xml:"queueDepth,omitempty"` + OperationalState []string `xml:"operationalState"` + Capabilities *ScsiLunCapabilities `xml:"capabilities,omitempty"` + VStorageSupport string `xml:"vStorageSupport,omitempty"` + ProtocolEndpoint *bool `xml:"protocolEndpoint"` + PerenniallyReserved *bool `xml:"perenniallyReserved"` + ClusteredVmdkSupported *bool `xml:"clusteredVmdkSupported"` } func init() { @@ -42004,7 +44617,7 @@ func init() { type SecondaryVmAlreadyRegistered struct { VmFaultToleranceIssue - InstanceUuid string `xml:"instanceUuid,omitempty"` + InstanceUuid string `xml:"instanceUuid"` } func init() { @@ -42020,7 +44633,7 @@ func init() { type SecondaryVmNotRegistered struct { VmFaultToleranceIssue - InstanceUuid string `xml:"instanceUuid,omitempty"` + InstanceUuid string `xml:"instanceUuid"` } func init() { @@ -42244,6 +44857,7 @@ type ServiceContent struct { TaskManager *ManagedObjectReference `xml:"taskManager,omitempty"` ExtensionManager *ManagedObjectReference `xml:"extensionManager,omitempty"` CustomizationSpecManager *ManagedObjectReference `xml:"customizationSpecManager,omitempty"` + GuestCustomizationManager *ManagedObjectReference `xml:"guestCustomizationManager,omitempty"` CustomFieldsManager *ManagedObjectReference `xml:"customFieldsManager,omitempty"` AccountManager *ManagedObjectReference `xml:"accountManager,omitempty"` DiagnosticManager *ManagedObjectReference `xml:"diagnosticManager,omitempty"` @@ -42274,6 +44888,9 @@ type ServiceContent struct { HealthUpdateManager *ManagedObjectReference `xml:"healthUpdateManager,omitempty"` FailoverClusterConfigurator *ManagedObjectReference `xml:"failoverClusterConfigurator,omitempty"` FailoverClusterManager *ManagedObjectReference `xml:"failoverClusterManager,omitempty"` + TenantManager *ManagedObjectReference `xml:"tenantManager,omitempty"` + SiteInfoManager *ManagedObjectReference `xml:"siteInfoManager,omitempty"` + StorageQueryManager *ManagedObjectReference `xml:"storageQueryManager,omitempty"` } func init() { @@ -42379,6 +44996,7 @@ type SessionManagerGenericServiceTicket struct { Id string `xml:"id"` HostName string `xml:"hostName,omitempty"` SslThumbprint string `xml:"sslThumbprint,omitempty"` + TicketType string `xml:"ticketType,omitempty"` } func init() { @@ -42454,6 +45072,43 @@ func init() { type SetCollectorPageSizeResponse struct { } +type SetCryptoMode SetCryptoModeRequestType + +func init() { + t["SetCryptoMode"] = reflect.TypeOf((*SetCryptoMode)(nil)).Elem() +} + +type SetCryptoModeRequestType struct { + This ManagedObjectReference `xml:"_this"` + CryptoMode string `xml:"cryptoMode"` +} + +func init() { + t["SetCryptoModeRequestType"] = reflect.TypeOf((*SetCryptoModeRequestType)(nil)).Elem() +} + +type SetCryptoModeResponse struct { +} + +type SetDefaultKmsCluster SetDefaultKmsClusterRequestType + +func init() { + t["SetDefaultKmsCluster"] = reflect.TypeOf((*SetDefaultKmsCluster)(nil)).Elem() +} + +type SetDefaultKmsClusterRequestType struct { + This ManagedObjectReference `xml:"_this"` + Entity *ManagedObjectReference `xml:"entity,omitempty"` + ClusterId *KeyProviderId `xml:"clusterId,omitempty"` +} + +func init() { + t["SetDefaultKmsClusterRequestType"] = reflect.TypeOf((*SetDefaultKmsClusterRequestType)(nil)).Elem() +} + +type SetDefaultKmsClusterResponse struct { +} + type SetDisplayTopology SetDisplayTopologyRequestType func init() { @@ 
-42870,6 +45525,14 @@ func init() { t["SingleMac"] = reflect.TypeOf((*SingleMac)(nil)).Elem() } +type SiteInfo struct { + DynamicData +} + +func init() { + t["SiteInfo"] = reflect.TypeOf((*SiteInfo)(nil)).Elem() +} + type SnapshotCloneNotSupported struct { SnapshotCopyNotSupported } @@ -43102,6 +45765,20 @@ func init() { t["SoftwarePackageCapability"] = reflect.TypeOf((*SoftwarePackageCapability)(nil)).Elem() } +type SolutionUserRequired struct { + SecurityError +} + +func init() { + t["SolutionUserRequired"] = reflect.TypeOf((*SolutionUserRequired)(nil)).Elem() +} + +type SolutionUserRequiredFault SolutionUserRequired + +func init() { + t["SolutionUserRequiredFault"] = reflect.TypeOf((*SolutionUserRequiredFault)(nil)).Elem() +} + type SourceNodeSpec struct { DynamicData @@ -43186,6 +45863,26 @@ func init() { type StandbyGuestResponse struct { } +type StartGuestNetworkRequestType struct { + This ManagedObjectReference `xml:"_this"` + Vm ManagedObjectReference `xml:"vm"` + Auth BaseGuestAuthentication `xml:"auth,typeattr"` +} + +func init() { + t["StartGuestNetworkRequestType"] = reflect.TypeOf((*StartGuestNetworkRequestType)(nil)).Elem() +} + +type StartGuestNetwork_Task StartGuestNetworkRequestType + +func init() { + t["StartGuestNetwork_Task"] = reflect.TypeOf((*StartGuestNetwork_Task)(nil)).Elem() +} + +type StartGuestNetwork_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + type StartProgramInGuest StartProgramInGuestRequestType func init() { @@ -44538,6 +47235,24 @@ func init() { type TerminateVMResponse struct { } +type TestTimeService TestTimeServiceRequestType + +func init() { + t["TestTimeService"] = reflect.TypeOf((*TestTimeService)(nil)).Elem() +} + +type TestTimeServiceRequestType struct { + This ManagedObjectReference `xml:"_this"` +} + +func init() { + t["TestTimeServiceRequestType"] = reflect.TypeOf((*TestTimeServiceRequestType)(nil)).Elem() +} + +type TestTimeServiceResponse struct { + Returnval *HostDateTimeSystemServiceTestResult `xml:"returnval,omitempty"` +} + type ThirdPartyLicenseAssignmentFailed struct { RuntimeFault @@ -44748,18 +47463,19 @@ func init() { type ToolsConfigInfo struct { DynamicData - ToolsVersion int32 `xml:"toolsVersion,omitempty"` - ToolsInstallType string `xml:"toolsInstallType,omitempty"` - AfterPowerOn *bool `xml:"afterPowerOn"` - AfterResume *bool `xml:"afterResume"` - BeforeGuestStandby *bool `xml:"beforeGuestStandby"` - BeforeGuestShutdown *bool `xml:"beforeGuestShutdown"` - BeforeGuestReboot *bool `xml:"beforeGuestReboot"` - ToolsUpgradePolicy string `xml:"toolsUpgradePolicy,omitempty"` - PendingCustomization string `xml:"pendingCustomization,omitempty"` - CustomizationKeyId *CryptoKeyId `xml:"customizationKeyId,omitempty"` - SyncTimeWithHost *bool `xml:"syncTimeWithHost"` - LastInstallInfo *ToolsConfigInfoToolsLastInstallInfo `xml:"lastInstallInfo,omitempty"` + ToolsVersion int32 `xml:"toolsVersion,omitempty"` + ToolsInstallType string `xml:"toolsInstallType,omitempty"` + AfterPowerOn *bool `xml:"afterPowerOn"` + AfterResume *bool `xml:"afterResume"` + BeforeGuestStandby *bool `xml:"beforeGuestStandby"` + BeforeGuestShutdown *bool `xml:"beforeGuestShutdown"` + BeforeGuestReboot *bool `xml:"beforeGuestReboot"` + ToolsUpgradePolicy string `xml:"toolsUpgradePolicy,omitempty"` + PendingCustomization string `xml:"pendingCustomization,omitempty"` + CustomizationKeyId *CryptoKeyId `xml:"customizationKeyId,omitempty"` + SyncTimeWithHostAllowed *bool `xml:"syncTimeWithHostAllowed"` + SyncTimeWithHost *bool 
`xml:"syncTimeWithHost"` + LastInstallInfo *ToolsConfigInfoToolsLastInstallInfo `xml:"lastInstallInfo,omitempty"` } func init() { @@ -45169,6 +47885,24 @@ type UnmapVmfsVolumeEx_TaskResponse struct { Returnval ManagedObjectReference `xml:"returnval"` } +type UnmarkServiceProviderEntities UnmarkServiceProviderEntitiesRequestType + +func init() { + t["UnmarkServiceProviderEntities"] = reflect.TypeOf((*UnmarkServiceProviderEntities)(nil)).Elem() +} + +type UnmarkServiceProviderEntitiesRequestType struct { + This ManagedObjectReference `xml:"_this"` + Entity []ManagedObjectReference `xml:"entity,omitempty"` +} + +func init() { + t["UnmarkServiceProviderEntitiesRequestType"] = reflect.TypeOf((*UnmarkServiceProviderEntitiesRequestType)(nil)).Elem() +} + +type UnmarkServiceProviderEntitiesResponse struct { +} + type UnmountDiskMappingRequestType struct { This ManagedObjectReference `xml:"_this"` Mapping []VsanHostDiskMapping `xml:"mapping"` @@ -45348,6 +48082,24 @@ func init() { type UnregisterHealthUpdateProviderResponse struct { } +type UnregisterKmsCluster UnregisterKmsClusterRequestType + +func init() { + t["UnregisterKmsCluster"] = reflect.TypeOf((*UnregisterKmsCluster)(nil)).Elem() +} + +type UnregisterKmsClusterRequestType struct { + This ManagedObjectReference `xml:"_this"` + ClusterId KeyProviderId `xml:"clusterId"` +} + +func init() { + t["UnregisterKmsClusterRequestType"] = reflect.TypeOf((*UnregisterKmsClusterRequestType)(nil)).Elem() +} + +type UnregisterKmsClusterResponse struct { +} + type UnregisterVM UnregisterVMRequestType func init() { @@ -45475,6 +48227,24 @@ type UpdateAnswerFile_TaskResponse struct { Returnval ManagedObjectReference `xml:"returnval"` } +type UpdateAssignableHardwareConfig UpdateAssignableHardwareConfigRequestType + +func init() { + t["UpdateAssignableHardwareConfig"] = reflect.TypeOf((*UpdateAssignableHardwareConfig)(nil)).Elem() +} + +type UpdateAssignableHardwareConfigRequestType struct { + This ManagedObjectReference `xml:"_this"` + Config HostAssignableHardwareConfig `xml:"config"` +} + +func init() { + t["UpdateAssignableHardwareConfigRequestType"] = reflect.TypeOf((*UpdateAssignableHardwareConfigRequestType)(nil)).Elem() +} + +type UpdateAssignableHardwareConfigResponse struct { +} + type UpdateAssignedLicense UpdateAssignedLicenseRequestType func init() { @@ -45826,25 +48596,6 @@ func init() { type UpdateGraphicsConfigResponse struct { } -type UpdateHostCustomizationsRequestType struct { - This ManagedObjectReference `xml:"_this"` - HostToConfigSpecMap []HostProfileManagerHostToConfigSpecMap `xml:"hostToConfigSpecMap,omitempty"` -} - -func init() { - t["UpdateHostCustomizationsRequestType"] = reflect.TypeOf((*UpdateHostCustomizationsRequestType)(nil)).Elem() -} - -type UpdateHostCustomizations_Task UpdateHostCustomizationsRequestType - -func init() { - t["UpdateHostCustomizations_Task"] = reflect.TypeOf((*UpdateHostCustomizations_Task)(nil)).Elem() -} - -type UpdateHostCustomizations_TaskResponse struct { - Returnval ManagedObjectReference `xml:"returnval"` -} - type UpdateHostImageAcceptanceLevel UpdateHostImageAcceptanceLevelRequestType func init() { @@ -45919,6 +48670,25 @@ func init() { type UpdateHostSubSpecificationResponse struct { } +type UpdateHppMultipathLunPolicy UpdateHppMultipathLunPolicyRequestType + +func init() { + t["UpdateHppMultipathLunPolicy"] = reflect.TypeOf((*UpdateHppMultipathLunPolicy)(nil)).Elem() +} + +type UpdateHppMultipathLunPolicyRequestType struct { + This ManagedObjectReference `xml:"_this"` + LunId string 
`xml:"lunId"` + Policy HostMultipathInfoHppLogicalUnitPolicy `xml:"policy"` +} + +func init() { + t["UpdateHppMultipathLunPolicyRequestType"] = reflect.TypeOf((*UpdateHppMultipathLunPolicyRequestType)(nil)).Elem() +} + +type UpdateHppMultipathLunPolicyResponse struct { +} + type UpdateInternetScsiAdvancedOptions UpdateInternetScsiAdvancedOptionsRequestType func init() { @@ -46427,6 +49197,25 @@ func init() { type UpdatePortGroupResponse struct { } +type UpdateProductLockerLocationRequestType struct { + This ManagedObjectReference `xml:"_this"` + Path string `xml:"path"` +} + +func init() { + t["UpdateProductLockerLocationRequestType"] = reflect.TypeOf((*UpdateProductLockerLocationRequestType)(nil)).Elem() +} + +type UpdateProductLockerLocation_Task UpdateProductLockerLocationRequestType + +func init() { + t["UpdateProductLockerLocation_Task"] = reflect.TypeOf((*UpdateProductLockerLocation_Task)(nil)).Elem() +} + +type UpdateProductLockerLocation_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + type UpdateProgress UpdateProgressRequestType func init() { @@ -46715,6 +49504,28 @@ type UpdateVStorageInfrastructureObjectPolicy_TaskResponse struct { Returnval ManagedObjectReference `xml:"returnval"` } +type UpdateVStorageObjectCryptoRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` + Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr"` + DisksCrypto *DiskCryptoSpec `xml:"disksCrypto,omitempty"` +} + +func init() { + t["UpdateVStorageObjectCryptoRequestType"] = reflect.TypeOf((*UpdateVStorageObjectCryptoRequestType)(nil)).Elem() +} + +type UpdateVStorageObjectCrypto_Task UpdateVStorageObjectCryptoRequestType + +func init() { + t["UpdateVStorageObjectCrypto_Task"] = reflect.TypeOf((*UpdateVStorageObjectCrypto_Task)(nil)).Elem() +} + +type UpdateVStorageObjectCrypto_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + type UpdateVStorageObjectPolicyRequestType struct { This ManagedObjectReference `xml:"_this"` Id ID `xml:"id"` @@ -47559,6 +50370,28 @@ func init() { t["VAppTaskInProgressFault"] = reflect.TypeOf((*VAppTaskInProgressFault)(nil)).Elem() } +type VCenterUpdateVStorageObjectMetadataExRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` + Metadata []KeyValue `xml:"metadata,omitempty"` + DeleteKeys []string `xml:"deleteKeys,omitempty"` +} + +func init() { + t["VCenterUpdateVStorageObjectMetadataExRequestType"] = reflect.TypeOf((*VCenterUpdateVStorageObjectMetadataExRequestType)(nil)).Elem() +} + +type VCenterUpdateVStorageObjectMetadataEx_Task VCenterUpdateVStorageObjectMetadataExRequestType + +func init() { + t["VCenterUpdateVStorageObjectMetadataEx_Task"] = reflect.TypeOf((*VCenterUpdateVStorageObjectMetadataEx_Task)(nil)).Elem() +} + +type VCenterUpdateVStorageObjectMetadataEx_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval"` +} + type VFlashCacheHotConfigNotSupported struct { VmConfigFault } @@ -47855,6 +50688,8 @@ type VMwareDVSFeatureCapability struct { MulticastSnoopingSupported *bool `xml:"multicastSnoopingSupported"` VspanCapability *VMwareDVSVspanCapability `xml:"vspanCapability,omitempty"` LacpCapability *VMwareDvsLacpCapability `xml:"lacpCapability,omitempty"` + NsxSupported *bool `xml:"nsxSupported"` + MtuCapability *VMwareDvsMtuCapability `xml:"mtuCapability,omitempty"` } func init() { @@ -47903,6 +50738,7 
@@ type VMwareDVSPortSetting struct { TxUplink *BoolPolicy `xml:"txUplink,omitempty"` LacpPolicy *VMwareUplinkLacpPolicy `xml:"lacpPolicy,omitempty"` MacManagementPolicy *DVSMacManagementPolicy `xml:"macManagementPolicy,omitempty"` + VNI *IntPolicy `xml:"VNI,omitempty"` } func init() { @@ -47916,6 +50752,7 @@ type VMwareDVSPortgroupPolicy struct { UplinkTeamingOverrideAllowed bool `xml:"uplinkTeamingOverrideAllowed"` SecurityPolicyOverrideAllowed bool `xml:"securityPolicyOverrideAllowed"` IpfixOverrideAllowed *bool `xml:"ipfixOverrideAllowed"` + MacManagementOverrideAllowed *bool `xml:"macManagementOverrideAllowed"` } func init() { @@ -48026,6 +50863,7 @@ type VMwareDvsLacpCapability struct { LacpSupported *bool `xml:"lacpSupported"` MultiLacpGroupSupported *bool `xml:"multiLacpGroupSupported"` + LacpFastModeSupported *bool `xml:"lacpFastModeSupported"` } func init() { @@ -48044,6 +50882,7 @@ type VMwareDvsLacpGroupConfig struct { Ipfix *VMwareDvsLagIpfixConfig `xml:"ipfix,omitempty"` UplinkName []string `xml:"uplinkName,omitempty"` UplinkPortKey []string `xml:"uplinkPortKey,omitempty"` + TimeoutMode string `xml:"timeoutMode,omitempty"` } func init() { @@ -48081,6 +50920,17 @@ func init() { t["VMwareDvsLagVlanConfig"] = reflect.TypeOf((*VMwareDvsLagVlanConfig)(nil)).Elem() } +type VMwareDvsMtuCapability struct { + DynamicData + + MinMtuSupported int32 `xml:"minMtuSupported"` + MaxMtuSupported int32 `xml:"maxMtuSupported"` +} + +func init() { + t["VMwareDvsMtuCapability"] = reflect.TypeOf((*VMwareDvsMtuCapability)(nil)).Elem() +} + type VMwareIpfixConfig struct { DynamicData @@ -48226,6 +51076,17 @@ type VStorageObjectCreateSnapshot_TaskResponse struct { Returnval ManagedObjectReference `xml:"returnval"` } +type VStorageObjectSnapshotDetails struct { + DynamicData + + Path string `xml:"path,omitempty"` + ChangedBlockTrackingId string `xml:"changedBlockTrackingId,omitempty"` +} + +func init() { + t["VStorageObjectSnapshotDetails"] = reflect.TypeOf((*VStorageObjectSnapshotDetails)(nil)).Elem() +} + type VStorageObjectSnapshotInfo struct { DynamicData @@ -48285,6 +51146,7 @@ type VVolVmConfigFileUpdateResultFailedVmConfigFileInfo struct { DynamicData TargetConfigVVolId string `xml:"targetConfigVVolId"` + DsPath string `xml:"dsPath,omitempty"` Fault LocalizedMethodFault `xml:"fault"` } @@ -48311,6 +51173,26 @@ func init() { type ValidateCredentialsInGuestResponse struct { } +type ValidateHCIConfiguration ValidateHCIConfigurationRequestType + +func init() { + t["ValidateHCIConfiguration"] = reflect.TypeOf((*ValidateHCIConfiguration)(nil)).Elem() +} + +type ValidateHCIConfigurationRequestType struct { + This ManagedObjectReference `xml:"_this"` + HciConfigSpec *ClusterComputeResourceHCIConfigSpec `xml:"hciConfigSpec,omitempty"` + Hosts []ManagedObjectReference `xml:"hosts,omitempty"` +} + +func init() { + t["ValidateHCIConfigurationRequestType"] = reflect.TypeOf((*ValidateHCIConfigurationRequestType)(nil)).Elem() +} + +type ValidateHCIConfigurationResponse struct { + Returnval []BaseClusterComputeResourceValidationResultBase `xml:"returnval,omitempty,typeattr"` +} + type ValidateHost ValidateHostRequestType func init() { @@ -48841,11 +51723,12 @@ func init() { type VirtualDeviceConfigSpec struct { DynamicData - Operation VirtualDeviceConfigSpecOperation `xml:"operation,omitempty"` - FileOperation VirtualDeviceConfigSpecFileOperation `xml:"fileOperation,omitempty"` - Device BaseVirtualDevice `xml:"device,typeattr"` - Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr"` - 
Backing *VirtualDeviceConfigSpecBackingSpec `xml:"backing,omitempty"` + Operation VirtualDeviceConfigSpecOperation `xml:"operation,omitempty"` + FileOperation VirtualDeviceConfigSpecFileOperation `xml:"fileOperation,omitempty"` + Device BaseVirtualDevice `xml:"device,typeattr"` + Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr"` + Backing *VirtualDeviceConfigSpecBackingSpec `xml:"backing,omitempty"` + FilterSpec []BaseVirtualMachineBaseIndependentFilterSpec `xml:"filterSpec,omitempty,typeattr"` } func init() { @@ -49025,15 +51908,16 @@ func init() { type VirtualDisk struct { VirtualDevice - CapacityInKB int64 `xml:"capacityInKB"` - CapacityInBytes int64 `xml:"capacityInBytes,omitempty"` - Shares *SharesInfo `xml:"shares,omitempty"` - StorageIOAllocation *StorageIOAllocationInfo `xml:"storageIOAllocation,omitempty"` - DiskObjectId string `xml:"diskObjectId,omitempty"` - VFlashCacheConfigInfo *VirtualDiskVFlashCacheConfigInfo `xml:"vFlashCacheConfigInfo,omitempty"` - Iofilter []string `xml:"iofilter,omitempty"` - VDiskId *ID `xml:"vDiskId,omitempty"` - NativeUnmanagedLinkedClone *bool `xml:"nativeUnmanagedLinkedClone"` + CapacityInKB int64 `xml:"capacityInKB"` + CapacityInBytes int64 `xml:"capacityInBytes,omitempty"` + Shares *SharesInfo `xml:"shares,omitempty"` + StorageIOAllocation *StorageIOAllocationInfo `xml:"storageIOAllocation,omitempty"` + DiskObjectId string `xml:"diskObjectId,omitempty"` + VFlashCacheConfigInfo *VirtualDiskVFlashCacheConfigInfo `xml:"vFlashCacheConfigInfo,omitempty"` + Iofilter []string `xml:"iofilter,omitempty"` + VDiskId *ID `xml:"vDiskId,omitempty"` + NativeUnmanagedLinkedClone *bool `xml:"nativeUnmanagedLinkedClone"` + IndependentFilters []BaseVirtualMachineBaseIndependentFilterSpec `xml:"independentFilters,omitempty,typeattr"` } func init() { @@ -49711,25 +52595,28 @@ func init() { type VirtualHardwareOption struct { DynamicData - HwVersion int32 `xml:"hwVersion"` - VirtualDeviceOption []BaseVirtualDeviceOption `xml:"virtualDeviceOption,typeattr"` - DeviceListReadonly bool `xml:"deviceListReadonly"` - NumCPU []int32 `xml:"numCPU"` - NumCoresPerSocket *IntOption `xml:"numCoresPerSocket,omitempty"` - NumCpuReadonly bool `xml:"numCpuReadonly"` - MemoryMB LongOption `xml:"memoryMB"` - NumPCIControllers IntOption `xml:"numPCIControllers"` - NumIDEControllers IntOption `xml:"numIDEControllers"` - NumUSBControllers IntOption `xml:"numUSBControllers"` - NumUSBXHCIControllers *IntOption `xml:"numUSBXHCIControllers,omitempty"` - NumSIOControllers IntOption `xml:"numSIOControllers"` - NumPS2Controllers IntOption `xml:"numPS2Controllers"` - LicensingLimit []string `xml:"licensingLimit,omitempty"` - NumSupportedWwnPorts *IntOption `xml:"numSupportedWwnPorts,omitempty"` - NumSupportedWwnNodes *IntOption `xml:"numSupportedWwnNodes,omitempty"` - ResourceConfigOption *ResourceConfigOption `xml:"resourceConfigOption,omitempty"` - NumNVDIMMControllers *IntOption `xml:"numNVDIMMControllers,omitempty"` - NumTPMDevices *IntOption `xml:"numTPMDevices,omitempty"` + HwVersion int32 `xml:"hwVersion"` + VirtualDeviceOption []BaseVirtualDeviceOption `xml:"virtualDeviceOption,typeattr"` + DeviceListReadonly bool `xml:"deviceListReadonly"` + NumCPU []int32 `xml:"numCPU"` + NumCoresPerSocket *IntOption `xml:"numCoresPerSocket,omitempty"` + NumCpuReadonly bool `xml:"numCpuReadonly"` + MemoryMB LongOption `xml:"memoryMB"` + NumPCIControllers IntOption `xml:"numPCIControllers"` + NumIDEControllers IntOption `xml:"numIDEControllers"` + NumUSBControllers IntOption 
`xml:"numUSBControllers"` + NumUSBXHCIControllers *IntOption `xml:"numUSBXHCIControllers,omitempty"` + NumSIOControllers IntOption `xml:"numSIOControllers"` + NumPS2Controllers IntOption `xml:"numPS2Controllers"` + LicensingLimit []string `xml:"licensingLimit,omitempty"` + NumSupportedWwnPorts *IntOption `xml:"numSupportedWwnPorts,omitempty"` + NumSupportedWwnNodes *IntOption `xml:"numSupportedWwnNodes,omitempty"` + ResourceConfigOption *ResourceConfigOption `xml:"resourceConfigOption,omitempty"` + NumNVDIMMControllers *IntOption `xml:"numNVDIMMControllers,omitempty"` + NumTPMDevices *IntOption `xml:"numTPMDevices,omitempty"` + NumWDTDevices *IntOption `xml:"numWDTDevices,omitempty"` + NumPrecisionClockDevices *IntOption `xml:"numPrecisionClockDevices,omitempty"` + EpcMemoryMB *LongOption `xml:"epcMemoryMB,omitempty"` } func init() { @@ -49846,6 +52733,14 @@ func init() { t["VirtualMachineAffinityInfo"] = reflect.TypeOf((*VirtualMachineAffinityInfo)(nil)).Elem() } +type VirtualMachineBaseIndependentFilterSpec struct { + DynamicData +} + +func init() { + t["VirtualMachineBaseIndependentFilterSpec"] = reflect.TypeOf((*VirtualMachineBaseIndependentFilterSpec)(nil)).Elem() +} + type VirtualMachineBootOptions struct { DynamicData @@ -49951,6 +52846,10 @@ type VirtualMachineCapability struct { VirtualMmuUsageIgnored *bool `xml:"virtualMmuUsageIgnored"` VirtualExecUsageIgnored *bool `xml:"virtualExecUsageIgnored"` DiskOnlySnapshotOnSuspendedVMSupported *bool `xml:"diskOnlySnapshotOnSuspendedVMSupported"` + SuspendToMemorySupported *bool `xml:"suspendToMemorySupported"` + ToolsSyncTimeAllowSupported *bool `xml:"toolsSyncTimeAllowSupported"` + SevSupported *bool `xml:"sevSupported"` + PmemFailoverSupported *bool `xml:"pmemFailoverSupported"` } func init() { @@ -50012,6 +52911,7 @@ type VirtualMachineConfigInfo struct { ConsolePreferences *VirtualMachineConsolePreferences `xml:"consolePreferences,omitempty"` DefaultPowerOps VirtualMachineDefaultPowerOpInfo `xml:"defaultPowerOps"` Hardware VirtualHardware `xml:"hardware"` + VcpuConfig []VirtualMachineVcpuConfig `xml:"vcpuConfig,omitempty"` CpuAllocation *ResourceAllocationInfo `xml:"cpuAllocation,omitempty"` MemoryAllocation *ResourceAllocationInfo `xml:"memoryAllocation,omitempty"` LatencySensitivity *LatencySensitivity `xml:"latencySensitivity,omitempty"` @@ -50051,6 +52951,13 @@ type VirtualMachineConfigInfo struct { KeyId *CryptoKeyId `xml:"keyId,omitempty"` GuestIntegrityInfo *VirtualMachineGuestIntegrityInfo `xml:"guestIntegrityInfo,omitempty"` MigrateEncryption string `xml:"migrateEncryption,omitempty"` + SgxInfo *VirtualMachineSgxInfo `xml:"sgxInfo,omitempty"` + ContentLibItemInfo *VirtualMachineContentLibraryItemInfo `xml:"contentLibItemInfo,omitempty"` + FtEncryptionMode string `xml:"ftEncryptionMode,omitempty"` + GuestMonitoringModeInfo *VirtualMachineGuestMonitoringModeInfo `xml:"guestMonitoringModeInfo,omitempty"` + SevEnabled *bool `xml:"sevEnabled"` + PmemFailoverEnabled *bool `xml:"pmemFailoverEnabled"` + Pmem *VirtualMachineVirtualPMem `xml:"pmem,omitempty"` } func init() { @@ -50119,66 +53026,73 @@ func init() { type VirtualMachineConfigSpec struct { DynamicData - ChangeVersion string `xml:"changeVersion,omitempty"` - Name string `xml:"name,omitempty"` - Version string `xml:"version,omitempty"` - CreateDate *time.Time `xml:"createDate"` - Uuid string `xml:"uuid,omitempty"` - InstanceUuid string `xml:"instanceUuid,omitempty"` - NpivNodeWorldWideName []int64 `xml:"npivNodeWorldWideName,omitempty"` - NpivPortWorldWideName []int64 
`xml:"npivPortWorldWideName,omitempty"` - NpivWorldWideNameType string `xml:"npivWorldWideNameType,omitempty"` - NpivDesiredNodeWwns int16 `xml:"npivDesiredNodeWwns,omitempty"` - NpivDesiredPortWwns int16 `xml:"npivDesiredPortWwns,omitempty"` - NpivTemporaryDisabled *bool `xml:"npivTemporaryDisabled"` - NpivOnNonRdmDisks *bool `xml:"npivOnNonRdmDisks"` - NpivWorldWideNameOp string `xml:"npivWorldWideNameOp,omitempty"` - LocationId string `xml:"locationId,omitempty"` - GuestId string `xml:"guestId,omitempty"` - AlternateGuestName string `xml:"alternateGuestName,omitempty"` - Annotation string `xml:"annotation,omitempty"` - Files *VirtualMachineFileInfo `xml:"files,omitempty"` - Tools *ToolsConfigInfo `xml:"tools,omitempty"` - Flags *VirtualMachineFlagInfo `xml:"flags,omitempty"` - ConsolePreferences *VirtualMachineConsolePreferences `xml:"consolePreferences,omitempty"` - PowerOpInfo *VirtualMachineDefaultPowerOpInfo `xml:"powerOpInfo,omitempty"` - NumCPUs int32 `xml:"numCPUs,omitempty"` - NumCoresPerSocket int32 `xml:"numCoresPerSocket,omitempty"` - MemoryMB int64 `xml:"memoryMB,omitempty"` - MemoryHotAddEnabled *bool `xml:"memoryHotAddEnabled"` - CpuHotAddEnabled *bool `xml:"cpuHotAddEnabled"` - CpuHotRemoveEnabled *bool `xml:"cpuHotRemoveEnabled"` - VirtualICH7MPresent *bool `xml:"virtualICH7MPresent"` - VirtualSMCPresent *bool `xml:"virtualSMCPresent"` - DeviceChange []BaseVirtualDeviceConfigSpec `xml:"deviceChange,omitempty,typeattr"` - CpuAllocation *ResourceAllocationInfo `xml:"cpuAllocation,omitempty"` - MemoryAllocation *ResourceAllocationInfo `xml:"memoryAllocation,omitempty"` - LatencySensitivity *LatencySensitivity `xml:"latencySensitivity,omitempty"` - CpuAffinity *VirtualMachineAffinityInfo `xml:"cpuAffinity,omitempty"` - MemoryAffinity *VirtualMachineAffinityInfo `xml:"memoryAffinity,omitempty"` - NetworkShaper *VirtualMachineNetworkShaperInfo `xml:"networkShaper,omitempty"` - CpuFeatureMask []VirtualMachineCpuIdInfoSpec `xml:"cpuFeatureMask,omitempty"` - ExtraConfig []BaseOptionValue `xml:"extraConfig,omitempty,typeattr"` - SwapPlacement string `xml:"swapPlacement,omitempty"` - BootOptions *VirtualMachineBootOptions `xml:"bootOptions,omitempty"` - VAppConfig BaseVmConfigSpec `xml:"vAppConfig,omitempty,typeattr"` - FtInfo BaseFaultToleranceConfigInfo `xml:"ftInfo,omitempty,typeattr"` - RepConfig *ReplicationConfigSpec `xml:"repConfig,omitempty"` - VAppConfigRemoved *bool `xml:"vAppConfigRemoved"` - VAssertsEnabled *bool `xml:"vAssertsEnabled"` - ChangeTrackingEnabled *bool `xml:"changeTrackingEnabled"` - Firmware string `xml:"firmware,omitempty"` - MaxMksConnections int32 `xml:"maxMksConnections,omitempty"` - GuestAutoLockEnabled *bool `xml:"guestAutoLockEnabled"` - ManagedBy *ManagedByInfo `xml:"managedBy,omitempty"` - MemoryReservationLockedToMax *bool `xml:"memoryReservationLockedToMax"` - NestedHVEnabled *bool `xml:"nestedHVEnabled"` - VPMCEnabled *bool `xml:"vPMCEnabled"` - ScheduledHardwareUpgradeInfo *ScheduledHardwareUpgradeInfo `xml:"scheduledHardwareUpgradeInfo,omitempty"` - VmProfile []BaseVirtualMachineProfileSpec `xml:"vmProfile,omitempty,typeattr"` - MessageBusTunnelEnabled *bool `xml:"messageBusTunnelEnabled"` - Crypto BaseCryptoSpec `xml:"crypto,omitempty,typeattr"` - MigrateEncryption string `xml:"migrateEncryption,omitempty"` + ChangeVersion string `xml:"changeVersion,omitempty"` + Name string `xml:"name,omitempty"` + Version string `xml:"version,omitempty"` + CreateDate *time.Time `xml:"createDate"` + Uuid string `xml:"uuid,omitempty"` + InstanceUuid string 
`xml:"instanceUuid,omitempty"` + NpivNodeWorldWideName []int64 `xml:"npivNodeWorldWideName,omitempty"` + NpivPortWorldWideName []int64 `xml:"npivPortWorldWideName,omitempty"` + NpivWorldWideNameType string `xml:"npivWorldWideNameType,omitempty"` + NpivDesiredNodeWwns int16 `xml:"npivDesiredNodeWwns,omitempty"` + NpivDesiredPortWwns int16 `xml:"npivDesiredPortWwns,omitempty"` + NpivTemporaryDisabled *bool `xml:"npivTemporaryDisabled"` + NpivOnNonRdmDisks *bool `xml:"npivOnNonRdmDisks"` + NpivWorldWideNameOp string `xml:"npivWorldWideNameOp,omitempty"` + LocationId string `xml:"locationId,omitempty"` + GuestId string `xml:"guestId,omitempty"` + AlternateGuestName string `xml:"alternateGuestName,omitempty"` + Annotation string `xml:"annotation,omitempty"` + Files *VirtualMachineFileInfo `xml:"files,omitempty"` + Tools *ToolsConfigInfo `xml:"tools,omitempty"` + Flags *VirtualMachineFlagInfo `xml:"flags,omitempty"` + ConsolePreferences *VirtualMachineConsolePreferences `xml:"consolePreferences,omitempty"` + PowerOpInfo *VirtualMachineDefaultPowerOpInfo `xml:"powerOpInfo,omitempty"` + NumCPUs int32 `xml:"numCPUs,omitempty"` + VcpuConfig []VirtualMachineVcpuConfig `xml:"vcpuConfig,omitempty"` + NumCoresPerSocket int32 `xml:"numCoresPerSocket,omitempty"` + MemoryMB int64 `xml:"memoryMB,omitempty"` + MemoryHotAddEnabled *bool `xml:"memoryHotAddEnabled"` + CpuHotAddEnabled *bool `xml:"cpuHotAddEnabled"` + CpuHotRemoveEnabled *bool `xml:"cpuHotRemoveEnabled"` + VirtualICH7MPresent *bool `xml:"virtualICH7MPresent"` + VirtualSMCPresent *bool `xml:"virtualSMCPresent"` + DeviceChange []BaseVirtualDeviceConfigSpec `xml:"deviceChange,omitempty,typeattr"` + CpuAllocation *ResourceAllocationInfo `xml:"cpuAllocation,omitempty"` + MemoryAllocation *ResourceAllocationInfo `xml:"memoryAllocation,omitempty"` + LatencySensitivity *LatencySensitivity `xml:"latencySensitivity,omitempty"` + CpuAffinity *VirtualMachineAffinityInfo `xml:"cpuAffinity,omitempty"` + MemoryAffinity *VirtualMachineAffinityInfo `xml:"memoryAffinity,omitempty"` + NetworkShaper *VirtualMachineNetworkShaperInfo `xml:"networkShaper,omitempty"` + CpuFeatureMask []VirtualMachineCpuIdInfoSpec `xml:"cpuFeatureMask,omitempty"` + ExtraConfig []BaseOptionValue `xml:"extraConfig,omitempty,typeattr"` + SwapPlacement string `xml:"swapPlacement,omitempty"` + BootOptions *VirtualMachineBootOptions `xml:"bootOptions,omitempty"` + VAppConfig BaseVmConfigSpec `xml:"vAppConfig,omitempty,typeattr"` + FtInfo BaseFaultToleranceConfigInfo `xml:"ftInfo,omitempty,typeattr"` + RepConfig *ReplicationConfigSpec `xml:"repConfig,omitempty"` + VAppConfigRemoved *bool `xml:"vAppConfigRemoved"` + VAssertsEnabled *bool `xml:"vAssertsEnabled"` + ChangeTrackingEnabled *bool `xml:"changeTrackingEnabled"` + Firmware string `xml:"firmware,omitempty"` + MaxMksConnections int32 `xml:"maxMksConnections,omitempty"` + GuestAutoLockEnabled *bool `xml:"guestAutoLockEnabled"` + ManagedBy *ManagedByInfo `xml:"managedBy,omitempty"` + MemoryReservationLockedToMax *bool `xml:"memoryReservationLockedToMax"` + NestedHVEnabled *bool `xml:"nestedHVEnabled"` + VPMCEnabled *bool `xml:"vPMCEnabled"` + ScheduledHardwareUpgradeInfo *ScheduledHardwareUpgradeInfo `xml:"scheduledHardwareUpgradeInfo,omitempty"` + VmProfile []BaseVirtualMachineProfileSpec `xml:"vmProfile,omitempty,typeattr"` + MessageBusTunnelEnabled *bool `xml:"messageBusTunnelEnabled"` + Crypto BaseCryptoSpec `xml:"crypto,omitempty,typeattr"` + MigrateEncryption string `xml:"migrateEncryption,omitempty"` + SgxInfo *VirtualMachineSgxInfo 
`xml:"sgxInfo,omitempty"` + FtEncryptionMode string `xml:"ftEncryptionMode,omitempty"` + GuestMonitoringModeInfo *VirtualMachineGuestMonitoringModeInfo `xml:"guestMonitoringModeInfo,omitempty"` + SevEnabled *bool `xml:"sevEnabled"` + PmemFailoverEnabled *bool `xml:"pmemFailoverEnabled"` + Pmem *VirtualMachineVirtualPMem `xml:"pmem,omitempty"` } func init() { @@ -50208,12 +53122,25 @@ type VirtualMachineConfigSummary struct { ManagedBy *ManagedByInfo `xml:"managedBy,omitempty"` TpmPresent *bool `xml:"tpmPresent"` NumVmiopBackings int32 `xml:"numVmiopBackings,omitempty"` + HwVersion string `xml:"hwVersion,omitempty"` } func init() { t["VirtualMachineConfigSummary"] = reflect.TypeOf((*VirtualMachineConfigSummary)(nil)).Elem() } +type VirtualMachineConnection struct { + DynamicData + + Label string `xml:"label"` + Client string `xml:"client"` + UserName string `xml:"userName"` +} + +func init() { + t["VirtualMachineConnection"] = reflect.TypeOf((*VirtualMachineConnection)(nil)).Elem() +} + type VirtualMachineConsolePreferences struct { DynamicData @@ -50226,6 +53153,17 @@ func init() { t["VirtualMachineConsolePreferences"] = reflect.TypeOf((*VirtualMachineConsolePreferences)(nil)).Elem() } +type VirtualMachineContentLibraryItemInfo struct { + DynamicData + + ContentLibraryItemUuid string `xml:"contentLibraryItemUuid"` + ContentLibraryItemVersion string `xml:"contentLibraryItemVersion,omitempty"` +} + +func init() { + t["VirtualMachineContentLibraryItemInfo"] = reflect.TypeOf((*VirtualMachineContentLibraryItemInfo)(nil)).Elem() +} + type VirtualMachineCpuIdInfoSpec struct { ArrayUpdateSpec @@ -50360,6 +53298,28 @@ func init() { t["VirtualMachineDisplayTopology"] = reflect.TypeOf((*VirtualMachineDisplayTopology)(nil)).Elem() } +type VirtualMachineDynamicPassthroughInfo struct { + VirtualMachineTargetInfo + + VendorName string `xml:"vendorName"` + DeviceName string `xml:"deviceName"` + CustomLabel string `xml:"customLabel,omitempty"` + VendorId int32 `xml:"vendorId"` + DeviceId int32 `xml:"deviceId"` +} + +func init() { + t["VirtualMachineDynamicPassthroughInfo"] = reflect.TypeOf((*VirtualMachineDynamicPassthroughInfo)(nil)).Elem() +} + +type VirtualMachineEmptyIndependentFilterSpec struct { + VirtualMachineBaseIndependentFilterSpec +} + +func init() { + t["VirtualMachineEmptyIndependentFilterSpec"] = reflect.TypeOf((*VirtualMachineEmptyIndependentFilterSpec)(nil)).Elem() +} + type VirtualMachineEmptyProfileSpec struct { VirtualMachineProfileSpec } @@ -50550,6 +53510,17 @@ func init() { t["VirtualMachineGuestIntegrityInfo"] = reflect.TypeOf((*VirtualMachineGuestIntegrityInfo)(nil)).Elem() } +type VirtualMachineGuestMonitoringModeInfo struct { + DynamicData + + GmmFile string `xml:"gmmFile,omitempty"` + GmmAppliance string `xml:"gmmAppliance,omitempty"` +} + +func init() { + t["VirtualMachineGuestMonitoringModeInfo"] = reflect.TypeOf((*VirtualMachineGuestMonitoringModeInfo)(nil)).Elem() +} + type VirtualMachineGuestQuiesceSpec struct { DynamicData @@ -50571,6 +53542,7 @@ type VirtualMachineGuestSummary struct { ToolsRunningStatus string `xml:"toolsRunningStatus,omitempty"` HostName string `xml:"hostName,omitempty"` IpAddress string `xml:"ipAddress,omitempty"` + HwVersion string `xml:"hwVersion,omitempty"` } func init() { @@ -50609,6 +53581,18 @@ func init() { t["VirtualMachineImportSpec"] = reflect.TypeOf((*VirtualMachineImportSpec)(nil)).Elem() } +type VirtualMachineIndependentFilterSpec struct { + VirtualMachineBaseIndependentFilterSpec + + FilterName string `xml:"filterName"` + FilterClass 
string `xml:"filterClass,omitempty"` + FilterCapabilities []KeyValue `xml:"filterCapabilities,omitempty"` +} + +func init() { + t["VirtualMachineIndependentFilterSpec"] = reflect.TypeOf((*VirtualMachineIndependentFilterSpec)(nil)).Elem() +} + type VirtualMachineInstantCloneSpec struct { DynamicData @@ -50711,6 +53695,14 @@ func init() { t["VirtualMachineMetadataManagerVmMetadataResult"] = reflect.TypeOf((*VirtualMachineMetadataManagerVmMetadataResult)(nil)).Elem() } +type VirtualMachineMksConnection struct { + VirtualMachineConnection +} + +func init() { + t["VirtualMachineMksConnection"] = reflect.TypeOf((*VirtualMachineMksConnection)(nil)).Elem() +} + type VirtualMachineMksTicket struct { DynamicData @@ -50778,6 +53770,16 @@ func init() { t["VirtualMachinePciSharedGpuPassthroughInfo"] = reflect.TypeOf((*VirtualMachinePciSharedGpuPassthroughInfo)(nil)).Elem() } +type VirtualMachinePrecisionClockInfo struct { + VirtualMachineTargetInfo + + SystemClockProtocol string `xml:"systemClockProtocol,omitempty"` +} + +func init() { + t["VirtualMachinePrecisionClockInfo"] = reflect.TypeOf((*VirtualMachinePrecisionClockInfo)(nil)).Elem() +} + type VirtualMachineProfileDetails struct { DynamicData @@ -50846,32 +53848,47 @@ func init() { type VirtualMachineQuickStats struct { DynamicData - OverallCpuUsage int32 `xml:"overallCpuUsage,omitempty"` - OverallCpuDemand int32 `xml:"overallCpuDemand,omitempty"` - GuestMemoryUsage int32 `xml:"guestMemoryUsage,omitempty"` - HostMemoryUsage int32 `xml:"hostMemoryUsage,omitempty"` - GuestHeartbeatStatus ManagedEntityStatus `xml:"guestHeartbeatStatus"` - DistributedCpuEntitlement int32 `xml:"distributedCpuEntitlement,omitempty"` - DistributedMemoryEntitlement int32 `xml:"distributedMemoryEntitlement,omitempty"` - StaticCpuEntitlement int32 `xml:"staticCpuEntitlement,omitempty"` - StaticMemoryEntitlement int32 `xml:"staticMemoryEntitlement,omitempty"` - PrivateMemory int32 `xml:"privateMemory,omitempty"` - SharedMemory int32 `xml:"sharedMemory,omitempty"` - SwappedMemory int32 `xml:"swappedMemory,omitempty"` - BalloonedMemory int32 `xml:"balloonedMemory,omitempty"` - ConsumedOverheadMemory int32 `xml:"consumedOverheadMemory,omitempty"` - FtLogBandwidth int32 `xml:"ftLogBandwidth,omitempty"` - FtSecondaryLatency int32 `xml:"ftSecondaryLatency,omitempty"` - FtLatencyStatus ManagedEntityStatus `xml:"ftLatencyStatus,omitempty"` - CompressedMemory int64 `xml:"compressedMemory,omitempty"` - UptimeSeconds int32 `xml:"uptimeSeconds,omitempty"` - SsdSwappedMemory int64 `xml:"ssdSwappedMemory,omitempty"` + OverallCpuUsage int32 `xml:"overallCpuUsage,omitempty"` + OverallCpuDemand int32 `xml:"overallCpuDemand,omitempty"` + OverallCpuReadiness int32 `xml:"overallCpuReadiness,omitempty"` + GuestMemoryUsage int32 `xml:"guestMemoryUsage,omitempty"` + HostMemoryUsage int32 `xml:"hostMemoryUsage,omitempty"` + GuestHeartbeatStatus ManagedEntityStatus `xml:"guestHeartbeatStatus"` + DistributedCpuEntitlement int32 `xml:"distributedCpuEntitlement,omitempty"` + DistributedMemoryEntitlement int32 `xml:"distributedMemoryEntitlement,omitempty"` + StaticCpuEntitlement int32 `xml:"staticCpuEntitlement,omitempty"` + StaticMemoryEntitlement int32 `xml:"staticMemoryEntitlement,omitempty"` + GrantedMemory int32 `xml:"grantedMemory,omitempty"` + PrivateMemory int32 `xml:"privateMemory,omitempty"` + SharedMemory int32 `xml:"sharedMemory,omitempty"` + SwappedMemory int32 `xml:"swappedMemory,omitempty"` + BalloonedMemory int32 `xml:"balloonedMemory,omitempty"` + ConsumedOverheadMemory int32 
`xml:"consumedOverheadMemory,omitempty"` + FtLogBandwidth int32 `xml:"ftLogBandwidth,omitempty"` + FtSecondaryLatency int32 `xml:"ftSecondaryLatency,omitempty"` + FtLatencyStatus ManagedEntityStatus `xml:"ftLatencyStatus,omitempty"` + CompressedMemory int64 `xml:"compressedMemory,omitempty"` + UptimeSeconds int32 `xml:"uptimeSeconds,omitempty"` + SsdSwappedMemory int64 `xml:"ssdSwappedMemory,omitempty"` + ActiveMemory int32 `xml:"activeMemory,omitempty"` + MemoryTierStats []VirtualMachineQuickStatsMemoryTierStats `xml:"memoryTierStats,omitempty"` } func init() { t["VirtualMachineQuickStats"] = reflect.TypeOf((*VirtualMachineQuickStats)(nil)).Elem() } +type VirtualMachineQuickStatsMemoryTierStats struct { + DynamicData + + MemoryTierType string `xml:"memoryTierType"` + ReadBandwidth int64 `xml:"readBandwidth"` +} + +func init() { + t["VirtualMachineQuickStatsMemoryTierStats"] = reflect.TypeOf((*VirtualMachineQuickStatsMemoryTierStats)(nil)).Elem() +} + type VirtualMachineRelocateSpec struct { DynamicData @@ -50885,6 +53902,7 @@ type VirtualMachineRelocateSpec struct { Transform VirtualMachineRelocateTransformation `xml:"transform,omitempty"` DeviceChange []BaseVirtualDeviceConfigSpec `xml:"deviceChange,omitempty,typeattr"` Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr"` + CryptoSpec BaseCryptoSpec `xml:"cryptoSpec,omitempty,typeattr"` } func init() { @@ -50894,17 +53912,30 @@ func init() { type VirtualMachineRelocateSpecDiskLocator struct { DynamicData - DiskId int32 `xml:"diskId"` - Datastore ManagedObjectReference `xml:"datastore"` - DiskMoveType string `xml:"diskMoveType,omitempty"` - DiskBackingInfo BaseVirtualDeviceBackingInfo `xml:"diskBackingInfo,omitempty,typeattr"` - Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr"` + DiskId int32 `xml:"diskId"` + Datastore ManagedObjectReference `xml:"datastore"` + DiskMoveType string `xml:"diskMoveType,omitempty"` + DiskBackingInfo BaseVirtualDeviceBackingInfo `xml:"diskBackingInfo,omitempty,typeattr"` + Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr"` + Backing *VirtualMachineRelocateSpecDiskLocatorBackingSpec `xml:"backing,omitempty"` + FilterSpec []BaseVirtualMachineBaseIndependentFilterSpec `xml:"filterSpec,omitempty,typeattr"` } func init() { t["VirtualMachineRelocateSpecDiskLocator"] = reflect.TypeOf((*VirtualMachineRelocateSpecDiskLocator)(nil)).Elem() } +type VirtualMachineRelocateSpecDiskLocatorBackingSpec struct { + DynamicData + + Parent *VirtualMachineRelocateSpecDiskLocatorBackingSpec `xml:"parent,omitempty"` + Crypto BaseCryptoSpec `xml:"crypto,omitempty,typeattr"` +} + +func init() { + t["VirtualMachineRelocateSpecDiskLocatorBackingSpec"] = reflect.TypeOf((*VirtualMachineRelocateSpecDiskLocatorBackingSpec)(nil)).Elem() +} + type VirtualMachineRuntimeInfo struct { DynamicData @@ -50912,6 +53943,7 @@ type VirtualMachineRuntimeInfo struct { Host *ManagedObjectReference `xml:"host,omitempty"` ConnectionState VirtualMachineConnectionState `xml:"connectionState"` PowerState VirtualMachinePowerState `xml:"powerState"` + VmFailoverInProgress *bool `xml:"vmFailoverInProgress"` FaultToleranceState VirtualMachineFaultToleranceState `xml:"faultToleranceState,omitempty"` DasVmProtection *VirtualMachineRuntimeInfoDasProtectionState `xml:"dasVmProtection,omitempty"` ToolsInstallerMounted bool `xml:"toolsInstallerMounted"` @@ -50938,6 +53970,7 @@ type VirtualMachineRuntimeInfo struct { QuiescedForkParent *bool `xml:"quiescedForkParent"` InstantCloneFrozen *bool 
`xml:"instantCloneFrozen"` CryptoState string `xml:"cryptoState,omitempty"` + SuspendedToMemory *bool `xml:"suspendedToMemory"` } func init() { @@ -50986,6 +54019,30 @@ func init() { t["VirtualMachineSerialInfo"] = reflect.TypeOf((*VirtualMachineSerialInfo)(nil)).Elem() } +type VirtualMachineSgxInfo struct { + DynamicData + + EpcSize int64 `xml:"epcSize"` + FlcMode string `xml:"flcMode,omitempty"` + LePubKeyHash string `xml:"lePubKeyHash,omitempty"` +} + +func init() { + t["VirtualMachineSgxInfo"] = reflect.TypeOf((*VirtualMachineSgxInfo)(nil)).Elem() +} + +type VirtualMachineSgxTargetInfo struct { + VirtualMachineTargetInfo + + MaxEpcSize int64 `xml:"maxEpcSize"` + FlcModes []string `xml:"flcModes,omitempty"` + LePubKeyHashes []string `xml:"lePubKeyHashes,omitempty"` +} + +func init() { + t["VirtualMachineSgxTargetInfo"] = reflect.TypeOf((*VirtualMachineSgxTargetInfo)(nil)).Elem() +} + type VirtualMachineSnapshotInfo struct { DynamicData @@ -51118,6 +54175,7 @@ type VirtualMachineTicket struct { Host string `xml:"host,omitempty"` Port int32 `xml:"port,omitempty"` SslThumbprint string `xml:"sslThumbprint,omitempty"` + Url string `xml:"url,omitempty"` } func init() { @@ -51235,6 +54293,16 @@ func init() { t["VirtualMachineVMIROM"] = reflect.TypeOf((*VirtualMachineVMIROM)(nil)).Elem() } +type VirtualMachineVcpuConfig struct { + DynamicData + + LatencySensitivity *LatencySensitivity `xml:"latencySensitivity,omitempty"` +} + +func init() { + t["VirtualMachineVcpuConfig"] = reflect.TypeOf((*VirtualMachineVcpuConfig)(nil)).Elem() +} + type VirtualMachineVideoCard struct { VirtualDevice @@ -51250,6 +54318,16 @@ func init() { t["VirtualMachineVideoCard"] = reflect.TypeOf((*VirtualMachineVideoCard)(nil)).Elem() } +type VirtualMachineVirtualPMem struct { + DynamicData + + SnapshotMode string `xml:"snapshotMode,omitempty"` +} + +func init() { + t["VirtualMachineVirtualPMem"] = reflect.TypeOf((*VirtualMachineVirtualPMem)(nil)).Elem() +} + type VirtualMachineWindowsQuiesceSpec struct { VirtualMachineGuestQuiesceSpec @@ -51277,7 +54355,8 @@ func init() { type VirtualNVDIMM struct { VirtualDevice - CapacityInMB int64 `xml:"capacityInMB"` + CapacityInMB int64 `xml:"capacityInMB"` + ConfiguredCapacityInMB int64 `xml:"configuredCapacityInMB,omitempty"` } func init() { @@ -51395,6 +54474,20 @@ func init() { t["VirtualPCIPassthrough"] = reflect.TypeOf((*VirtualPCIPassthrough)(nil)).Elem() } +type VirtualPCIPassthroughAllowedDevice struct { + DynamicData + + VendorId int32 `xml:"vendorId"` + DeviceId int32 `xml:"deviceId"` + SubVendorId int32 `xml:"subVendorId,omitempty"` + SubDeviceId int32 `xml:"subDeviceId,omitempty"` + RevisionId int16 `xml:"revisionId,omitempty"` +} + +func init() { + t["VirtualPCIPassthroughAllowedDevice"] = reflect.TypeOf((*VirtualPCIPassthroughAllowedDevice)(nil)).Elem() +} + type VirtualPCIPassthroughDeviceBackingInfo struct { VirtualDeviceDeviceBackingInfo @@ -51416,6 +54509,26 @@ func init() { t["VirtualPCIPassthroughDeviceBackingOption"] = reflect.TypeOf((*VirtualPCIPassthroughDeviceBackingOption)(nil)).Elem() } +type VirtualPCIPassthroughDynamicBackingInfo struct { + VirtualDeviceDeviceBackingInfo + + AllowedDevice []VirtualPCIPassthroughAllowedDevice `xml:"allowedDevice,omitempty"` + CustomLabel string `xml:"customLabel,omitempty"` + AssignedId string `xml:"assignedId,omitempty"` +} + +func init() { + t["VirtualPCIPassthroughDynamicBackingInfo"] = reflect.TypeOf((*VirtualPCIPassthroughDynamicBackingInfo)(nil)).Elem() +} + +type VirtualPCIPassthroughDynamicBackingOption struct { + 
VirtualDeviceDeviceBackingOption +} + +func init() { + t["VirtualPCIPassthroughDynamicBackingOption"] = reflect.TypeOf((*VirtualPCIPassthroughDynamicBackingOption)(nil)).Elem() +} + type VirtualPCIPassthroughOption struct { VirtualDeviceOption } @@ -51443,7 +54556,8 @@ func init() { type VirtualPCIPassthroughVmiopBackingInfo struct { VirtualPCIPassthroughPluginBackingInfo - Vgpu string `xml:"vgpu,omitempty"` + Vgpu string `xml:"vgpu,omitempty"` + MigrateSupported *bool `xml:"migrateSupported"` } func init() { @@ -51582,6 +54696,42 @@ func init() { t["VirtualPointingDeviceOption"] = reflect.TypeOf((*VirtualPointingDeviceOption)(nil)).Elem() } +type VirtualPrecisionClock struct { + VirtualDevice +} + +func init() { + t["VirtualPrecisionClock"] = reflect.TypeOf((*VirtualPrecisionClock)(nil)).Elem() +} + +type VirtualPrecisionClockOption struct { + VirtualDeviceOption +} + +func init() { + t["VirtualPrecisionClockOption"] = reflect.TypeOf((*VirtualPrecisionClockOption)(nil)).Elem() +} + +type VirtualPrecisionClockSystemClockBackingInfo struct { + VirtualDeviceBackingInfo + + Protocol string `xml:"protocol,omitempty"` +} + +func init() { + t["VirtualPrecisionClockSystemClockBackingInfo"] = reflect.TypeOf((*VirtualPrecisionClockSystemClockBackingInfo)(nil)).Elem() +} + +type VirtualPrecisionClockSystemClockBackingOption struct { + VirtualDeviceBackingOption + + Protocol ChoiceOption `xml:"protocol"` +} + +func init() { + t["VirtualPrecisionClockSystemClockBackingOption"] = reflect.TypeOf((*VirtualPrecisionClockSystemClockBackingOption)(nil)).Elem() +} + type VirtualSATAController struct { VirtualController } @@ -52137,6 +55287,27 @@ func init() { t["VirtualVmxnetOption"] = reflect.TypeOf((*VirtualVmxnetOption)(nil)).Elem() } +type VirtualWDT struct { + VirtualDevice + + RunOnBoot bool `xml:"runOnBoot"` + Running bool `xml:"running"` +} + +func init() { + t["VirtualWDT"] = reflect.TypeOf((*VirtualWDT)(nil)).Elem() +} + +type VirtualWDTOption struct { + VirtualDeviceOption + + RunOnBoot BoolOption `xml:"runOnBoot"` +} + +func init() { + t["VirtualWDTOption"] = reflect.TypeOf((*VirtualWDTOption)(nil)).Elem() +} + type VlanProfile struct { ApplyProfile } @@ -54163,6 +57334,17 @@ func init() { t["VsanClusterUuidMismatchFault"] = reflect.TypeOf((*VsanClusterUuidMismatchFault)(nil)).Elem() } +type VsanDatastoreInfo struct { + DatastoreInfo + + MembershipUuid string `xml:"membershipUuid,omitempty"` + AccessGenNo int32 `xml:"accessGenNo,omitempty"` +} + +func init() { + t["VsanDatastoreInfo"] = reflect.TypeOf((*VsanDatastoreInfo)(nil)).Elem() +} + type VsanDiskFault struct { VsanFault @@ -54651,8 +57833,9 @@ func init() { type VslmCloneSpec struct { VslmMigrateSpec - Name string `xml:"name"` - KeepAfterDeleteVm *bool `xml:"keepAfterDeleteVm"` + Name string `xml:"name"` + KeepAfterDeleteVm *bool `xml:"keepAfterDeleteVm"` + Metadata []KeyValue `xml:"metadata,omitempty"` } func init() { @@ -54667,6 +57850,8 @@ type VslmCreateSpec struct { BackingSpec BaseVslmCreateSpecBackingSpec `xml:"backingSpec,typeattr"` CapacityInMB int64 `xml:"capacityInMB"` Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr"` + Crypto BaseCryptoSpec `xml:"crypto,omitempty,typeattr"` + Metadata []KeyValue `xml:"metadata,omitempty"` } func init() { @@ -54711,6 +57896,7 @@ type VslmMigrateSpec struct { BackingSpec BaseVslmCreateSpecBackingSpec `xml:"backingSpec,typeattr"` Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr"` Consolidate *bool `xml:"consolidate"` + DisksCrypto *DiskCryptoSpec 
`xml:"disksCrypto,omitempty"` } func init() { @@ -54872,6 +58058,29 @@ func init() { t["VspanSameSessionPortConflictFault"] = reflect.TypeOf((*VspanSameSessionPortConflictFault)(nil)).Elem() } +type VstorageObjectVCenterQueryChangedDiskAreas VstorageObjectVCenterQueryChangedDiskAreasRequestType + +func init() { + t["VstorageObjectVCenterQueryChangedDiskAreas"] = reflect.TypeOf((*VstorageObjectVCenterQueryChangedDiskAreas)(nil)).Elem() +} + +type VstorageObjectVCenterQueryChangedDiskAreasRequestType struct { + This ManagedObjectReference `xml:"_this"` + Id ID `xml:"id"` + Datastore ManagedObjectReference `xml:"datastore"` + SnapshotId ID `xml:"snapshotId"` + StartOffset int64 `xml:"startOffset"` + ChangeId string `xml:"changeId"` +} + +func init() { + t["VstorageObjectVCenterQueryChangedDiskAreasRequestType"] = reflect.TypeOf((*VstorageObjectVCenterQueryChangedDiskAreasRequestType)(nil)).Elem() +} + +type VstorageObjectVCenterQueryChangedDiskAreasResponse struct { + Returnval DiskChangeInfo `xml:"returnval"` +} + type VvolDatastoreInfo struct { DatastoreInfo @@ -55431,3 +58640,13 @@ type VslmInfrastructureObjectPolicySpec struct { func init() { t["vslmInfrastructureObjectPolicySpec"] = reflect.TypeOf((*VslmInfrastructureObjectPolicySpec)(nil)).Elem() } + +type VslmVClockInfo struct { + DynamicData + + VClockTime int64 `xml:"vClockTime"` +} + +func init() { + t["vslmVClockInfo"] = reflect.TypeOf((*VslmVClockInfo)(nil)).Elem() +} diff --git a/vendor/github.com/vmware/govmomi/vim25/types/unreleased.go b/vendor/github.com/vmware/govmomi/vim25/types/unreleased.go new file mode 100644 index 000000000..167184e8d --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/types/unreleased.go @@ -0,0 +1,117 @@ +/* + Copyright (c) 2022 VMware, Inc. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package types + +import "reflect" + +type ArrayOfPlaceVmsXClusterResultPlacementFaults struct { + PlaceVmsXClusterResultPlacementFaults []PlaceVmsXClusterResultPlacementFaults `xml:"PlaceVmsXClusterResultPlacementFaults,omitempty"` +} + +func init() { + t["ArrayOfPlaceVmsXClusterResultPlacementFaults"] = reflect.TypeOf((*ArrayOfPlaceVmsXClusterResultPlacementFaults)(nil)).Elem() +} + +type ArrayOfPlaceVmsXClusterResultPlacementInfo struct { + PlaceVmsXClusterResultPlacementInfo []PlaceVmsXClusterResultPlacementInfo `xml:"PlaceVmsXClusterResultPlacementInfo,omitempty"` +} + +func init() { + t["ArrayOfPlaceVmsXClusterResultPlacementInfo"] = reflect.TypeOf((*ArrayOfPlaceVmsXClusterResultPlacementInfo)(nil)).Elem() +} + +type ArrayOfPlaceVmsXClusterSpecVmPlacementSpec struct { + PlaceVmsXClusterSpecVmPlacementSpec []PlaceVmsXClusterSpecVmPlacementSpec `xml:"PlaceVmsXClusterSpecVmPlacementSpec,omitempty"` +} + +func init() { + t["ArrayOfPlaceVmsXClusterSpecVmPlacementSpec"] = reflect.TypeOf((*ArrayOfPlaceVmsXClusterSpecVmPlacementSpec)(nil)).Elem() +} + +type PlaceVmsXCluster PlaceVmsXClusterRequestType + +func init() { + t["PlaceVmsXCluster"] = reflect.TypeOf((*PlaceVmsXCluster)(nil)).Elem() +} + +type PlaceVmsXClusterRequestType struct { + This ManagedObjectReference `xml:"_this"` + PlacementSpec PlaceVmsXClusterSpec `xml:"placementSpec"` +} + +func init() { + t["PlaceVmsXClusterRequestType"] = reflect.TypeOf((*PlaceVmsXClusterRequestType)(nil)).Elem() +} + +type PlaceVmsXClusterResponse struct { + Returnval PlaceVmsXClusterResult `xml:"returnval"` +} + +type PlaceVmsXClusterResult struct { + DynamicData + + PlacementInfos []PlaceVmsXClusterResultPlacementInfo `xml:"placementInfos,omitempty"` + Faults []PlaceVmsXClusterResultPlacementFaults `xml:"faults,omitempty"` +} + +func init() { + t["PlaceVmsXClusterResult"] = reflect.TypeOf((*PlaceVmsXClusterResult)(nil)).Elem() +} + +type PlaceVmsXClusterResultPlacementFaults struct { + DynamicData + + ResourcePool ManagedObjectReference `xml:"resourcePool"` + VmName string `xml:"vmName"` + Faults []LocalizedMethodFault `xml:"faults,omitempty"` +} + +func init() { + t["PlaceVmsXClusterResultPlacementFaults"] = reflect.TypeOf((*PlaceVmsXClusterResultPlacementFaults)(nil)).Elem() +} + +type PlaceVmsXClusterResultPlacementInfo struct { + DynamicData + + VmName string `xml:"vmName"` + Recommendation ClusterRecommendation `xml:"recommendation"` +} + +func init() { + t["PlaceVmsXClusterResultPlacementInfo"] = reflect.TypeOf((*PlaceVmsXClusterResultPlacementInfo)(nil)).Elem() +} + +type PlaceVmsXClusterSpec struct { + DynamicData + + ResourcePools []ManagedObjectReference `xml:"resourcePools,omitempty"` + VmPlacementSpecs []PlaceVmsXClusterSpecVmPlacementSpec `xml:"vmPlacementSpecs,omitempty"` +} + +func init() { + t["PlaceVmsXClusterSpec"] = reflect.TypeOf((*PlaceVmsXClusterSpec)(nil)).Elem() +} + +type PlaceVmsXClusterSpecVmPlacementSpec struct { + DynamicData + + ConfigSpec VirtualMachineConfigSpec `xml:"configSpec"` +} + +func init() { + t["PlaceVmsXClusterSpecVmPlacementSpec"] = reflect.TypeOf((*PlaceVmsXClusterSpecVmPlacementSpec)(nil)).Elem() +} diff --git a/vendor/github.com/vmware/govmomi/vim25/xml/LICENSE b/vendor/github.com/vmware/govmomi/vim25/xml/LICENSE index 744875676..6a66aea5e 100644 --- a/vendor/github.com/vmware/govmomi/vim25/xml/LICENSE +++ b/vendor/github.com/vmware/govmomi/vim25/xml/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2009 The Go Authors. All rights reserved. 
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/vendor/github.com/vmware/govmomi/vim25/xml/marshal.go b/vendor/github.com/vmware/govmomi/vim25/xml/marshal.go index 39bbac1d1..c0c0a588b 100644 --- a/vendor/github.com/vmware/govmomi/vim25/xml/marshal.go +++ b/vendor/github.com/vmware/govmomi/vim25/xml/marshal.go @@ -16,7 +16,7 @@ import ( ) const ( - // A generic XML header suitable for use with the output of Marshal. + // Header is a generic XML header suitable for use with the output of Marshal. // This is not automatically added to any output of this package, // it is provided as a convenience. Header = `` + "\n" @@ -24,21 +24,21 @@ const ( // Marshal returns the XML encoding of v. // -// Marshal handles an array or slice by marshalling each of the elements. -// Marshal handles a pointer by marshalling the value it points at or, if the -// pointer is nil, by writing nothing. Marshal handles an interface value by -// marshalling the value it contains or, if the interface value is nil, by -// writing nothing. Marshal handles all other data by writing one or more XML +// Marshal handles an array or slice by marshaling each of the elements. +// Marshal handles a pointer by marshaling the value it points at or, if the +// pointer is nil, by writing nothing. Marshal handles an interface value by +// marshaling the value it contains or, if the interface value is nil, by +// writing nothing. Marshal handles all other data by writing one or more XML // elements containing the data. // // The name for the XML elements is taken from, in order of preference: // - the tag on the XMLName field, if the data is a struct -// - the value of the XMLName field of type xml.Name +// - the value of the XMLName field of type Name // - the tag of the struct field used to obtain the data // - the name of the struct field used to obtain the data -// - the name of the marshalled type +// - the name of the marshaled type // -// The XML element for a struct contains marshalled elements for each of the +// The XML element for a struct contains marshaled elements for each of the // exported fields of the struct, with these exceptions: // - the XMLName field, described above, is omitted. // - a field with tag "-" is omitted. @@ -48,10 +48,12 @@ const ( // field name in the XML element. // - a field with tag ",chardata" is written as character data, // not as an XML element. +// - a field with tag ",cdata" is written as character data +// wrapped in one or more tags, not as an XML element. // - a field with tag ",innerxml" is written verbatim, not subject -// to the usual marshalling procedure. +// to the usual marshaling procedure. // - a field with tag ",comment" is written as an XML comment, not -// subject to the usual marshalling procedure. It must not contain +// subject to the usual marshaling procedure. It must not contain // the "--" string within it. // - a field with a tag including the "omitempty" option is omitted // if the field value is empty. The empty values are false, 0, any @@ -59,11 +61,18 @@ const ( // string of length zero. // - an anonymous struct field is handled as if the fields of its // value were part of the outer struct. +// - a field implementing Marshaler is written by calling its MarshalXML +// method. +// - a field implementing encoding.TextMarshaler is written by encoding the +// result of its MarshalText method as text. 
// // If a field uses a tag "a>b>c", then the element c will be nested inside -// parent elements a and b. Fields that appear next to each other that name +// parent elements a and b. Fields that appear next to each other that name // the same parent will be enclosed in one XML element. // +// If the XML name for a struct field is defined by both the field tag and the +// struct's XMLName field, the names must match. +// // See MarshalIndent for an example. // // Marshal will return an error if asked to marshal a channel, function, or map. @@ -173,9 +182,9 @@ func (enc *Encoder) EncodeElement(v interface{}, start StartElement) error { } var ( - endComment = []byte("-->") - endProcInst = []byte("?>") - endDirective = []byte(">") + begComment = []byte("") + endProcInst = []byte("?>") ) // EncodeToken writes the given XML token to the stream. @@ -191,6 +200,7 @@ var ( // EncodeToken allows writing a ProcInst with Target set to "xml" only as the first token // in the stream. func (enc *Encoder) EncodeToken(t Token) error { + p := &enc.p switch t := t.(type) { case StartElement: @@ -202,7 +212,7 @@ func (enc *Encoder) EncodeToken(t Token) error { return err } case CharData: - EscapeText(p, t) + escapeText(p, t, false) case Comment: if bytes.Contains(t, endComment) { return fmt.Errorf("xml: EncodeToken of Comment containing --> marker") @@ -213,7 +223,7 @@ func (enc *Encoder) EncodeToken(t Token) error { return p.cachedWriteError() case ProcInst: // First token to be encoded which is also a ProcInst with target of xml - // is the xml declaration. The only ProcInst where target of xml is allowed. + // is the xml declaration. The only ProcInst where target of xml is allowed. if t.Target == "xml" && p.Buffered() != 0 { return fmt.Errorf("xml: EncodeToken of ProcInst xml target only valid for xml declaration, first token encoded") } @@ -231,16 +241,59 @@ func (enc *Encoder) EncodeToken(t Token) error { } p.WriteString("?>") case Directive: - if bytes.Contains(t, endDirective) { - return fmt.Errorf("xml: EncodeToken of Directive containing > marker") + if !isValidDirective(t) { + return fmt.Errorf("xml: EncodeToken of Directive containing wrong < or > markers") } p.WriteString("") + default: + return fmt.Errorf("xml: EncodeToken of invalid token type") + } return p.cachedWriteError() } +// isValidDirective reports whether dir is a valid directive text, +// meaning angle brackets are matched, ignoring comments and strings. +func isValidDirective(dir Directive) bool { + var ( + depth int + inquote uint8 + incomment bool + ) + for i, c := range dir { + switch { + case incomment: + if c == '>' { + if n := 1 + i - len(endComment); n >= 0 && bytes.Equal(dir[n:i+1], endComment) { + incomment = false + } + } + // Just ignore anything in comment + case inquote != 0: + if c == inquote { + inquote = 0 + } + // Just ignore anything within quotes + case c == '\'' || c == '"': + inquote = c + case c == '<': + if i+len(begComment) < len(dir) && bytes.Equal(dir[i:i+len(begComment)], begComment) { + incomment = true + } else { + depth++ + } + case c == '>': + if depth == 0 { + return false + } + depth-- + } + } + return depth == 0 && inquote == 0 && !incomment +} + // Flush flushes any buffered XML to the underlying writer. // See the EncodeToken documentation for details about when it is necessary. 
func (enc *Encoder) Flush() error { @@ -274,7 +327,7 @@ func (p *printer) createAttrPrefix(url string) string { // (The "http://www.w3.org/2000/xmlns/" name space is also predefined as "xmlns", // but users should not be trying to use that one directly - that's our job.) if url == xmlURL { - return "xml" + return xmlPrefix } // Need to define a new name space. @@ -426,8 +479,11 @@ func (p *printer) marshalValue(val reflect.Value, finfo *fieldInfo, startTemplat xmlname := tinfo.xmlname if xmlname.name != "" { start.Name.Space, start.Name.Local = xmlname.xmlns, xmlname.name - } else if v, ok := xmlname.value(val).Interface().(Name); ok && v.Local != "" { - start.Name = v + } else { + fv := xmlname.value(val, dontInitNilPointers) + if v, ok := fv.Interface().(Name); ok && v.Local != "" { + start.Name = v + } } } if start.Name.Local == "" && finfo != nil { @@ -452,8 +508,7 @@ func (p *printer) marshalValue(val reflect.Value, finfo *fieldInfo, startTemplat if finfo.flags&fAttr == 0 { continue } - fv := finfo.value(val) - name := Name{Space: finfo.xmlns, Local: finfo.name} + fv := finfo.value(val, dontInitNilPointers) if finfo.flags&fOmitEmpty != 0 && isEmptyValue(fv) { continue @@ -463,69 +518,10 @@ func (p *printer) marshalValue(val reflect.Value, finfo *fieldInfo, startTemplat continue } - if fv.CanInterface() && fv.Type().Implements(marshalerAttrType) { - attr, err := fv.Interface().(MarshalerAttr).MarshalXMLAttr(name) - if err != nil { - return err - } - if attr.Name.Local != "" { - start.Attr = append(start.Attr, attr) - } - continue - } - - if fv.CanAddr() { - pv := fv.Addr() - if pv.CanInterface() && pv.Type().Implements(marshalerAttrType) { - attr, err := pv.Interface().(MarshalerAttr).MarshalXMLAttr(name) - if err != nil { - return err - } - if attr.Name.Local != "" { - start.Attr = append(start.Attr, attr) - } - continue - } - } - - if fv.CanInterface() && fv.Type().Implements(textMarshalerType) { - text, err := fv.Interface().(encoding.TextMarshaler).MarshalText() - if err != nil { - return err - } - start.Attr = append(start.Attr, Attr{name, string(text)}) - continue - } - - if fv.CanAddr() { - pv := fv.Addr() - if pv.CanInterface() && pv.Type().Implements(textMarshalerType) { - text, err := pv.Interface().(encoding.TextMarshaler).MarshalText() - if err != nil { - return err - } - start.Attr = append(start.Attr, Attr{name, string(text)}) - continue - } - } - - // Dereference or skip nil pointer, interface values. - switch fv.Kind() { - case reflect.Ptr, reflect.Interface: - if fv.IsNil() { - continue - } - fv = fv.Elem() - } - - s, b, err := p.marshalSimple(fv.Type(), fv) - if err != nil { + name := Name{Space: finfo.xmlns, Local: finfo.name} + if err := p.marshalAttr(&start, name, fv); err != nil { return err } - if b != nil { - s = string(b) - } - start.Attr = append(start.Attr, Attr{name, s}) } if err := p.writeStart(&start); err != nil { @@ -555,6 +551,90 @@ func (p *printer) marshalValue(val reflect.Value, finfo *fieldInfo, startTemplat return p.cachedWriteError() } +// marshalAttr marshals an attribute with the given name and value, adding to start.Attr. 
+func (p *printer) marshalAttr(start *StartElement, name Name, val reflect.Value) error { + if val.CanInterface() && val.Type().Implements(marshalerAttrType) { + attr, err := val.Interface().(MarshalerAttr).MarshalXMLAttr(name) + if err != nil { + return err + } + if attr.Name.Local != "" { + start.Attr = append(start.Attr, attr) + } + return nil + } + + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(marshalerAttrType) { + attr, err := pv.Interface().(MarshalerAttr).MarshalXMLAttr(name) + if err != nil { + return err + } + if attr.Name.Local != "" { + start.Attr = append(start.Attr, attr) + } + return nil + } + } + + if val.CanInterface() && val.Type().Implements(textMarshalerType) { + text, err := val.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return err + } + start.Attr = append(start.Attr, Attr{name, string(text)}) + return nil + } + + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(textMarshalerType) { + text, err := pv.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return err + } + start.Attr = append(start.Attr, Attr{name, string(text)}) + return nil + } + } + + // Dereference or skip nil pointer, interface values. + switch val.Kind() { + case reflect.Ptr, reflect.Interface: + if val.IsNil() { + return nil + } + val = val.Elem() + } + + // Walk slices. + if val.Kind() == reflect.Slice && val.Type().Elem().Kind() != reflect.Uint8 { + n := val.Len() + for i := 0; i < n; i++ { + if err := p.marshalAttr(start, name, val.Index(i)); err != nil { + return err + } + } + return nil + } + + if val.Type() == attrType { + start.Attr = append(start.Attr, val.Interface().(Attr)) + return nil + } + + s, b, err := p.marshalSimple(val.Type(), val) + if err != nil { + return err + } + if b != nil { + s = string(b) + } + start.Attr = append(start.Attr, Attr{name, s}) + return nil +} + // defaultStart returns the default start element to use, // given the reflect type, field info, and start template. func defaultStart(typ reflect.Type, finfo *fieldInfo, startTemplate *StartElement) StartElement { @@ -716,6 +796,20 @@ func (p *printer) marshalSimple(typ reflect.Type, val reflect.Value) (string, [] var ddBytes = []byte("--") +// indirect drills into interfaces and pointers, returning the pointed-at value. +// If it encounters a nil interface or pointer, indirect returns that nil value. +// This can turn into an infinite loop given a cyclic chain, +// but it matches the Go 1 behavior. +func indirect(vf reflect.Value) reflect.Value { + for vf.Kind() == reflect.Interface || vf.Kind() == reflect.Ptr { + if vf.IsNil() { + return vf + } + vf = vf.Elem() + } + return vf +} + func (p *printer) marshalStruct(tinfo *typeInfo, val reflect.Value) error { s := parentStack{p: p} for i := range tinfo.fields { @@ -723,24 +817,30 @@ func (p *printer) marshalStruct(tinfo *typeInfo, val reflect.Value) error { if finfo.flags&fAttr != 0 { continue } - vf := finfo.value(val) - - // Dereference or skip nil pointer, interface values. - switch vf.Kind() { - case reflect.Ptr, reflect.Interface: - if !vf.IsNil() { - vf = vf.Elem() - } + vf := finfo.value(val, dontInitNilPointers) + if !vf.IsValid() { + // The field is behind an anonymous struct field that's + // nil. Skip it. 
+ continue } switch finfo.flags & fMode { - case fCharData: + case fCDATA, fCharData: + emit := EscapeText + if finfo.flags&fMode == fCDATA { + emit = emitCDATA + } + if err := s.trim(finfo.parents); err != nil { + return err + } if vf.CanInterface() && vf.Type().Implements(textMarshalerType) { data, err := vf.Interface().(encoding.TextMarshaler).MarshalText() if err != nil { return err } - Escape(p, data) + if err := emit(p, data); err != nil { + return err + } continue } if vf.CanAddr() { @@ -750,27 +850,39 @@ func (p *printer) marshalStruct(tinfo *typeInfo, val reflect.Value) error { if err != nil { return err } - Escape(p, data) + if err := emit(p, data); err != nil { + return err + } continue } } + var scratch [64]byte + vf = indirect(vf) switch vf.Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - Escape(p, strconv.AppendInt(scratch[:0], vf.Int(), 10)) + if err := emit(p, strconv.AppendInt(scratch[:0], vf.Int(), 10)); err != nil { + return err + } case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - Escape(p, strconv.AppendUint(scratch[:0], vf.Uint(), 10)) + if err := emit(p, strconv.AppendUint(scratch[:0], vf.Uint(), 10)); err != nil { + return err + } case reflect.Float32, reflect.Float64: - Escape(p, strconv.AppendFloat(scratch[:0], vf.Float(), 'g', -1, vf.Type().Bits())) + if err := emit(p, strconv.AppendFloat(scratch[:0], vf.Float(), 'g', -1, vf.Type().Bits())); err != nil { + return err + } case reflect.Bool: - Escape(p, strconv.AppendBool(scratch[:0], vf.Bool())) + if err := emit(p, strconv.AppendBool(scratch[:0], vf.Bool())); err != nil { + return err + } case reflect.String: - if err := EscapeText(p, []byte(vf.String())); err != nil { + if err := emit(p, []byte(vf.String())); err != nil { return err } case reflect.Slice: if elem, ok := vf.Interface().([]byte); ok { - if err := EscapeText(p, elem); err != nil { + if err := emit(p, elem); err != nil { return err } } @@ -778,6 +890,10 @@ func (p *printer) marshalStruct(tinfo *typeInfo, val reflect.Value) error { continue case fComment: + if err := s.trim(finfo.parents); err != nil { + return err + } + vf = indirect(vf) k := vf.Kind() if !(k == reflect.String || k == reflect.Slice && vf.Type().Elem().Kind() == reflect.Uint8) { return fmt.Errorf("xml: bad type for comment field of %s", val.Type()) @@ -792,14 +908,14 @@ func (p *printer) marshalStruct(tinfo *typeInfo, val reflect.Value) error { switch k { case reflect.String: s := vf.String() - dashDash = strings.Index(s, "--") >= 0 + dashDash = strings.Contains(s, "--") dashLast = s[len(s)-1] == '-' if !dashDash { p.WriteString(s) } case reflect.Slice: b := vf.Bytes() - dashDash = bytes.Index(b, ddBytes) >= 0 + dashDash = bytes.Contains(b, ddBytes) dashLast = b[len(b)-1] == '-' if !dashDash { p.Write(b) @@ -817,7 +933,8 @@ func (p *printer) marshalStruct(tinfo *typeInfo, val reflect.Value) error { p.WriteString("-->") continue - case fInnerXml: + case fInnerXML: + vf = indirect(vf) iface := vf.Interface() switch raw := iface.(type) { case []byte: @@ -891,8 +1008,8 @@ type parentStack struct { } // trim updates the XML context to match the longest common prefix of the stack -// and the given parents. A closing tag will be written for every parent -// popped. Passing a zero slice or nil will close all the elements. +// and the given parents. A closing tag will be written for every parent +// popped. Passing a zero slice or nil will close all the elements. 
func (s *parentStack) trim(parents []string) error { split := 0 for ; split < len(parents) && split < len(s.stack); split++ { @@ -905,7 +1022,7 @@ func (s *parentStack) trim(parents []string) error { return err } } - s.stack = parents[:split] + s.stack = s.stack[:split] return nil } @@ -920,7 +1037,7 @@ func (s *parentStack) push(parents []string) error { return nil } -// A MarshalXMLError is returned when Marshal encounters a type +// UnsupportedTypeError is returned when Marshal encounters a type // that cannot be converted into XML. type UnsupportedTypeError struct { Type reflect.Type diff --git a/vendor/github.com/vmware/govmomi/vim25/xml/read.go b/vendor/github.com/vmware/govmomi/vim25/xml/read.go index fe35fce6c..ea61352f6 100644 --- a/vendor/github.com/vmware/govmomi/vim25/xml/read.go +++ b/vendor/github.com/vmware/govmomi/vim25/xml/read.go @@ -1,4 +1,4 @@ -// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -27,7 +27,7 @@ import ( // discarded. // // Because Unmarshal uses the reflect package, it can only assign -// to exported (upper case) fields. Unmarshal uses a case-sensitive +// to exported (upper case) fields. Unmarshal uses a case-sensitive // comparison to match XML element names to tag values and struct // field names. // @@ -37,9 +37,9 @@ import ( // // * If the struct has a field of type []byte or string with tag // ",innerxml", Unmarshal accumulates the raw XML nested inside the -// element in that field. The rest of the rules still apply. +// element in that field. The rest of the rules still apply. // -// * If the struct has a field named XMLName of type xml.Name, +// * If the struct has a field named XMLName of type Name, // Unmarshal records the element name in that field. // // * If the XMLName field has an associated tag of the form @@ -52,6 +52,11 @@ import ( // the explicit name in a struct field tag of the form "name,attr", // Unmarshal records the attribute value in that field. // +// * If the XML element has an attribute not handled by the previous +// rule and the struct has a field with an associated tag containing +// ",any,attr", Unmarshal records the attribute value in the first +// such field. +// // * If the XML element contains character data, that data is // accumulated in the first struct field that has tag ",chardata". // The struct field may have type []byte or string. @@ -59,7 +64,7 @@ import ( // // * If the XML element contains comments, they are accumulated in // the first struct field that has tag ",comment". The struct -// field may have type []byte or string. If there is no such +// field may have type []byte or string. If there is no such // field, the comments are discarded. // // * If the XML element contains a sub-element whose name matches @@ -85,7 +90,12 @@ import ( // * An anonymous struct field is handled as if the fields of its // value were part of the outer struct. // -// * A struct field with tag "-" is never unmarshalled into. +// * A struct field with tag "-" is never unmarshaled into. +// +// If Unmarshal encounters a field type that implements the Unmarshaler +// interface, Unmarshal calls its UnmarshalXML method to produce the value from +// the XML element. Otherwise, if the value implements +// encoding.TextUnmarshaler, Unmarshal calls that value's UnmarshalText method. 
// // Unmarshal maps an XML element to a string or []byte by saving the // concatenation of that element's character data in the string or @@ -94,34 +104,42 @@ import ( // Unmarshal maps an attribute value to a string or []byte by saving // the value in the string or slice. // -// Unmarshal maps an XML element to a slice by extending the length of -// the slice and mapping the element to the newly created value. +// Unmarshal maps an attribute value to an Attr by saving the attribute, +// including its name, in the Attr. +// +// Unmarshal maps an XML element or attribute value to a slice by +// extending the length of the slice and mapping the element or attribute +// to the newly created value. // // Unmarshal maps an XML element or attribute value to a bool by -// setting it to the boolean value represented by the string. +// setting it to the boolean value represented by the string. Whitespace +// is trimmed and ignored. // // Unmarshal maps an XML element or attribute value to an integer or // floating-point field by setting the field to the result of -// interpreting the string value in decimal. There is no check for -// overflow. +// interpreting the string value in decimal. There is no check for +// overflow. Whitespace is trimmed and ignored. // -// Unmarshal maps an XML element to an xml.Name by recording the -// element name. +// Unmarshal maps an XML element to a Name by recording the element +// name. // // Unmarshal maps an XML element to a pointer by setting the pointer // to a freshly allocated value and then mapping the element to that value. // +// A missing element or empty attribute value will be unmarshaled as a zero value. +// If the field is a slice, a zero value will be appended to the field. Otherwise, the +// field will be set to its zero value. func Unmarshal(data []byte, v interface{}) error { return NewDecoder(bytes.NewReader(data)).Decode(v) } -// Decode works like xml.Unmarshal, except it reads the decoder +// Decode works like Unmarshal, except it reads the decoder // stream to find the start element. func (d *Decoder) Decode(v interface{}) error { return d.DecodeElement(v, nil) } -// DecodeElement works like xml.Unmarshal except that it takes +// DecodeElement works like Unmarshal except that it takes // a pointer to the start XML element to decode into v. // It is useful when a client reads some raw XML tokens itself // but also wants to defer to Unmarshal for some elements. @@ -133,7 +151,7 @@ func (d *Decoder) DecodeElement(v interface{}, start *StartElement) error { return d.unmarshal(val.Elem(), start) } -// An UnmarshalError represents an error in the unmarshalling process. +// An UnmarshalError represents an error in the unmarshaling process. type UnmarshalError string func (e UnmarshalError) Error() string { return string(e) } @@ -148,7 +166,7 @@ func (e UnmarshalError) Error() string { return string(e) } // UnmarshalXML must consume exactly one XML element. // One common implementation strategy is to unmarshal into // a separate value with a layout matching the expected XML -// using d.DecodeElement, and then to copy the data from +// using d.DecodeElement, and then to copy the data from // that value into the receiver. // Another common strategy is to use d.Token to process the // XML object one token at a time. @@ -180,19 +198,19 @@ func receiverType(val interface{}) string { // unmarshalInterface unmarshals a single XML element into val. // start is the opening tag of the element. 
-func (p *Decoder) unmarshalInterface(val Unmarshaler, start *StartElement) error { +func (d *Decoder) unmarshalInterface(val Unmarshaler, start *StartElement) error { // Record that decoder must stop at end tag corresponding to start. - p.pushEOF() + d.pushEOF() - p.unmarshalDepth++ - err := val.UnmarshalXML(p, *start) - p.unmarshalDepth-- + d.unmarshalDepth++ + err := val.UnmarshalXML(d, *start) + d.unmarshalDepth-- if err != nil { - p.popEOF() + d.popEOF() return err } - if !p.popEOF() { + if !d.popEOF() { return fmt.Errorf("xml: %s.UnmarshalXML did not consume entire <%s> element", receiverType(val), start.Name.Local) } @@ -202,11 +220,11 @@ func (p *Decoder) unmarshalInterface(val Unmarshaler, start *StartElement) error // unmarshalTextInterface unmarshals a single XML element into val. // The chardata contained in the element (but not its children) // is passed to the text unmarshaler. -func (p *Decoder) unmarshalTextInterface(val encoding.TextUnmarshaler, start *StartElement) error { +func (d *Decoder) unmarshalTextInterface(val encoding.TextUnmarshaler) error { var buf []byte depth := 1 for depth > 0 { - t, err := p.Token() + t, err := d.Token() if err != nil { return err } @@ -225,14 +243,13 @@ func (p *Decoder) unmarshalTextInterface(val encoding.TextUnmarshaler, start *St } // unmarshalAttr unmarshals a single XML attribute into val. -func (p *Decoder) unmarshalAttr(val reflect.Value, attr Attr) error { +func (d *Decoder) unmarshalAttr(val reflect.Value, attr Attr) error { if val.Kind() == reflect.Ptr { if val.IsNil() { val.Set(reflect.New(val.Type().Elem())) } val = val.Elem() } - if val.CanInterface() && val.Type().Implements(unmarshalerAttrType) { // This is an unmarshaler with a non-pointer receiver, // so it's likely to be incorrect, but we do what we're told. @@ -258,11 +275,30 @@ func (p *Decoder) unmarshalAttr(val reflect.Value, attr Attr) error { } } - copyValue(val, []byte(attr.Value)) - return nil + if val.Type().Kind() == reflect.Slice && val.Type().Elem().Kind() != reflect.Uint8 { + // Slice of element values. + // Grow slice. + n := val.Len() + val.Set(reflect.Append(val, reflect.Zero(val.Type().Elem()))) + + // Recur to read element into slice. + if err := d.unmarshalAttr(val.Index(n), attr); err != nil { + val.SetLen(n) + return err + } + return nil + } + + if val.Type() == attrType { + val.Set(reflect.ValueOf(attr)) + return nil + } + + return copyValue(val, []byte(attr.Value)) } var ( + attrType = reflect.TypeOf(Attr{}) unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() unmarshalerAttrType = reflect.TypeOf((*UnmarshalerAttr)(nil)).Elem() textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() @@ -271,21 +307,9 @@ var ( // Find reflect.Type for an element's type attribute. func (p *Decoder) typeForElement(val reflect.Value, start *StartElement) reflect.Type { t := "" - for i, a := range start.Attr { + for _, a := range start.Attr { if a.Name == xmlSchemaInstance || a.Name == xsiType { t = a.Value - // HACK: ensure xsi:type is last in the list to avoid using that value for - // a "type" attribute, such as ManagedObjectReference.Type for example. - // Note that xsi:type is already the last attribute in VC/ESX responses. - // This is only an issue with govmomi simulator generated responses. - // Proper fix will require finding a few needles in this xml package haystack. - // Note: govmomi uses xmlSchemaInstance, other clients (e.g. rbvmomi) use xsiType. - // They are the same thing to XML parsers, but not to this hack here. 
- x := len(start.Attr) - 1 - if i != x { - start.Attr[i] = start.Attr[x] - start.Attr[x] = a - } break } } @@ -312,11 +336,11 @@ func (p *Decoder) typeForElement(val reflect.Value, start *StartElement) reflect } // Unmarshal a single XML element into val. -func (p *Decoder) unmarshal(val reflect.Value, start *StartElement) error { +func (d *Decoder) unmarshal(val reflect.Value, start *StartElement) error { // Find start element if we need it. if start == nil { for { - tok, err := p.Token() + tok, err := d.Token() if err != nil { return err } @@ -329,10 +353,10 @@ func (p *Decoder) unmarshal(val reflect.Value, start *StartElement) error { // Try to figure out type for empty interface values. if val.Kind() == reflect.Interface && val.IsNil() { - typ := p.typeForElement(val, start) + typ := d.typeForElement(val, start) if typ != nil { pval := reflect.New(typ).Elem() - err := p.unmarshal(pval, start) + err := d.unmarshal(pval, start) if err != nil { return err } @@ -371,24 +395,24 @@ func (p *Decoder) unmarshal(val reflect.Value, start *StartElement) error { if val.CanInterface() && val.Type().Implements(unmarshalerType) { // This is an unmarshaler with a non-pointer receiver, // so it's likely to be incorrect, but we do what we're told. - return p.unmarshalInterface(val.Interface().(Unmarshaler), start) + return d.unmarshalInterface(val.Interface().(Unmarshaler), start) } if val.CanAddr() { pv := val.Addr() if pv.CanInterface() && pv.Type().Implements(unmarshalerType) { - return p.unmarshalInterface(pv.Interface().(Unmarshaler), start) + return d.unmarshalInterface(pv.Interface().(Unmarshaler), start) } } if val.CanInterface() && val.Type().Implements(textUnmarshalerType) { - return p.unmarshalTextInterface(val.Interface().(encoding.TextUnmarshaler), start) + return d.unmarshalTextInterface(val.Interface().(encoding.TextUnmarshaler)) } if val.CanAddr() { pv := val.Addr() if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { - return p.unmarshalTextInterface(pv.Interface().(encoding.TextUnmarshaler), start) + return d.unmarshalTextInterface(pv.Interface().(encoding.TextUnmarshaler)) } } @@ -414,7 +438,7 @@ func (p *Decoder) unmarshal(val reflect.Value, start *StartElement) error { // TODO: For now, simply ignore the field. In the near // future we may choose to unmarshal the start // element on it, if not nil. - return p.Skip() + return d.Skip() case reflect.Slice: typ := v.Type() @@ -427,19 +451,10 @@ func (p *Decoder) unmarshal(val reflect.Value, start *StartElement) error { // Slice of element values. // Grow slice. n := v.Len() - if n >= v.Cap() { - ncap := 2 * n - if ncap < 4 { - ncap = 4 - } - new := reflect.MakeSlice(typ, n, ncap) - reflect.Copy(new, v) - v.Set(new) - } - v.SetLen(n + 1) + v.Set(reflect.Append(val, reflect.Zero(v.Type().Elem()))) // Recur to read element into slice. - if err := p.unmarshal(v.Index(n), start); err != nil { + if err := d.unmarshal(v.Index(n), start); err != nil { v.SetLen(n) return err } @@ -476,52 +491,74 @@ func (p *Decoder) unmarshal(val reflect.Value, start *StartElement) error { } return UnmarshalError(e) } - fv := finfo.value(sv) + fv := finfo.value(sv, initNilPointers) if _, ok := fv.Interface().(Name); ok { fv.Set(reflect.ValueOf(start.Name)) } } // Assign attributes. - // Also, determine whether we need to save character data or comments. - for i := range tinfo.fields { - finfo := &tinfo.fields[i] - switch finfo.flags & fMode { - case fAttr: - strv := finfo.value(sv) - // Look for attribute. 
- for _, a := range start.Attr { + for _, a := range start.Attr { + handled := false + any := -1 + for i := range tinfo.fields { + finfo := &tinfo.fields[i] + switch finfo.flags & fMode { + case fAttr: + strv := finfo.value(sv, initNilPointers) if a.Name.Local == finfo.name && (finfo.xmlns == "" || finfo.xmlns == a.Name.Space) { - if err := p.unmarshalAttr(strv, a); err != nil { - return err + needTypeAttr := (finfo.flags & fTypeAttr) != 0 + // HACK: avoid using xsi:type value for a "type" attribute, such as ManagedObjectReference.Type for example. + if needTypeAttr || (a.Name != xmlSchemaInstance && a.Name != xsiType) { + if err := d.unmarshalAttr(strv, a); err != nil { + return err + } } - break + handled = true } + + case fAny | fAttr: + if any == -1 { + any = i + } + } + } + if !handled && any >= 0 { + finfo := &tinfo.fields[any] + strv := finfo.value(sv, initNilPointers) + if err := d.unmarshalAttr(strv, a); err != nil { + return err } + } + } - case fCharData: + // Determine whether we need to save character data or comments. + for i := range tinfo.fields { + finfo := &tinfo.fields[i] + switch finfo.flags & fMode { + case fCDATA, fCharData: if !saveData.IsValid() { - saveData = finfo.value(sv) + saveData = finfo.value(sv, initNilPointers) } case fComment: if !saveComment.IsValid() { - saveComment = finfo.value(sv) + saveComment = finfo.value(sv, initNilPointers) } case fAny, fAny | fElement: if !saveAny.IsValid() { - saveAny = finfo.value(sv) + saveAny = finfo.value(sv, initNilPointers) } - case fInnerXml: + case fInnerXML: if !saveXML.IsValid() { - saveXML = finfo.value(sv) - if p.saved == nil { + saveXML = finfo.value(sv, initNilPointers) + if d.saved == nil { saveXMLIndex = 0 - p.saved = new(bytes.Buffer) + d.saved = new(bytes.Buffer) } else { - saveXMLIndex = p.savedOffset() + saveXMLIndex = d.savedOffset() } } } @@ -534,9 +571,9 @@ Loop: for { var savedOffset int if saveXML.IsValid() { - savedOffset = p.savedOffset() + savedOffset = d.savedOffset() } - tok, err := p.Token() + tok, err := d.Token() if err != nil { return err } @@ -544,28 +581,28 @@ Loop: case StartElement: consumed := false if sv.IsValid() { - consumed, err = p.unmarshalPath(tinfo, sv, nil, &t) + consumed, err = d.unmarshalPath(tinfo, sv, nil, &t) if err != nil { return err } if !consumed && saveAny.IsValid() { consumed = true - if err := p.unmarshal(saveAny, &t); err != nil { + if err := d.unmarshal(saveAny, &t); err != nil { return err } } } if !consumed { - if err := p.Skip(); err != nil { + if err := d.Skip(); err != nil { return err } } case EndElement: if saveXML.IsValid() { - saveXMLData = p.saved.Bytes()[saveXMLIndex:savedOffset] + saveXMLData = d.saved.Bytes()[saveXMLIndex:savedOffset] if saveXMLIndex == 0 { - p.saved = nil + d.saved = nil } } break Loop @@ -614,7 +651,9 @@ Loop: case reflect.String: t.SetString(string(saveXMLData)) case reflect.Slice: - t.Set(reflect.ValueOf(saveXMLData)) + if t.Type().Elem().Kind() == reflect.Uint8 { + t.Set(reflect.ValueOf(saveXMLData)) + } } return nil @@ -637,7 +676,11 @@ func copyValue(dst reflect.Value, src []byte) (err error) { default: return errors.New("cannot unmarshal into " + dst0.Type().String()) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - itmp, err := strconv.ParseInt(string(src), 10, dst.Type().Bits()) + if len(src) == 0 { + dst.SetInt(0) + return nil + } + itmp, err := strconv.ParseInt(strings.TrimSpace(string(src)), 10, dst.Type().Bits()) if err != nil { return err } @@ -663,19 +706,32 @@ func copyValue(dst 
reflect.Value, src []byte) (err error) { utmp = uint64(uint64(itmp)) } } else { - utmp, err = strconv.ParseUint(string(src), 10, dst.Type().Bits()) + if len(src) == 0 { + dst.SetUint(0) + return nil + } + + utmp, err = strconv.ParseUint(strings.TrimSpace(string(src)), 10, dst.Type().Bits()) if err != nil { return err } } dst.SetUint(utmp) case reflect.Float32, reflect.Float64: - ftmp, err := strconv.ParseFloat(string(src), dst.Type().Bits()) + if len(src) == 0 { + dst.SetFloat(0) + return nil + } + ftmp, err := strconv.ParseFloat(strings.TrimSpace(string(src)), dst.Type().Bits()) if err != nil { return err } dst.SetFloat(ftmp) case reflect.Bool: + if len(src) == 0 { + dst.SetBool(false) + return nil + } value, err := strconv.ParseBool(strings.TrimSpace(string(src))) if err != nil { return err @@ -698,7 +754,7 @@ func copyValue(dst reflect.Value, src []byte) (err error) { // The consumed result tells whether XML elements have been consumed // from the Decoder until start's matching end element, or if it's // still untouched because start is uninteresting for sv's fields. -func (p *Decoder) unmarshalPath(tinfo *typeInfo, sv reflect.Value, parents []string, start *StartElement) (consumed bool, err error) { +func (d *Decoder) unmarshalPath(tinfo *typeInfo, sv reflect.Value, parents []string, start *StartElement) (consumed bool, err error) { recurse := false Loop: for i := range tinfo.fields { @@ -713,7 +769,7 @@ Loop: } if len(finfo.parents) == len(parents) && finfo.name == start.Name.Local { // It's a perfect match, unmarshal the field. - return true, p.unmarshal(finfo.value(sv), start) + return true, d.unmarshal(finfo.value(sv, initNilPointers), start) } if len(finfo.parents) > len(parents) && finfo.parents[len(parents)] == start.Name.Local { // It's a prefix for the field. Break and recurse @@ -736,18 +792,18 @@ Loop: // prefix. Recurse and attempt to match these. for { var tok Token - tok, err = p.Token() + tok, err = d.Token() if err != nil { return true, err } switch t := tok.(type) { case StartElement: - consumed2, err := p.unmarshalPath(tinfo, sv, parents, &t) + consumed2, err := d.unmarshalPath(tinfo, sv, parents, &t) if err != nil { return true, err } if !consumed2 { - if err := p.Skip(); err != nil { + if err := d.Skip(); err != nil { return true, err } } diff --git a/vendor/github.com/vmware/govmomi/vim25/xml/typeinfo.go b/vendor/github.com/vmware/govmomi/vim25/xml/typeinfo.go index 086e83b69..5eb0b4e5f 100644 --- a/vendor/github.com/vmware/govmomi/vim25/xml/typeinfo.go +++ b/vendor/github.com/vmware/govmomi/vim25/xml/typeinfo.go @@ -1,4 +1,4 @@ -// Copyright 2011 The Go Authors. All rights reserved. +// Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -31,37 +31,37 @@ type fieldFlags int const ( fElement fieldFlags = 1 << iota fAttr + fCDATA fCharData - fInnerXml + fInnerXML fComment fAny fOmitEmpty fTypeAttr - fMode = fElement | fAttr | fCharData | fInnerXml | fComment | fAny + fMode = fElement | fAttr | fCDATA | fCharData | fInnerXML | fComment | fAny + + xmlName = "XMLName" ) -var tinfoMap = make(map[reflect.Type]*typeInfo) -var tinfoLock sync.RWMutex +var tinfoMap sync.Map // map[reflect.Type]*typeInfo var nameType = reflect.TypeOf(Name{}) // getTypeInfo returns the typeInfo structure with details necessary -// for marshalling and unmarshalling typ. +// for marshaling and unmarshaling typ. 
func getTypeInfo(typ reflect.Type) (*typeInfo, error) { - tinfoLock.RLock() - tinfo, ok := tinfoMap[typ] - tinfoLock.RUnlock() - if ok { - return tinfo, nil + if ti, ok := tinfoMap.Load(typ); ok { + return ti.(*typeInfo), nil } - tinfo = &typeInfo{} + + tinfo := &typeInfo{} if typ.Kind() == reflect.Struct && typ != nameType { n := typ.NumField() for i := 0; i < n; i++ { f := typ.Field(i) - if f.PkgPath != "" || f.Tag.Get("xml") == "-" { + if (f.PkgPath != "" && !f.Anonymous) || f.Tag.Get("xml") == "-" { continue // Private field } @@ -94,7 +94,7 @@ func getTypeInfo(typ reflect.Type) (*typeInfo, error) { return nil, err } - if f.Name == "XMLName" { + if f.Name == xmlName { tinfo.xmlname = finfo continue } @@ -105,10 +105,9 @@ func getTypeInfo(typ reflect.Type) (*typeInfo, error) { } } } - tinfoLock.Lock() - tinfoMap[typ] = tinfo - tinfoLock.Unlock() - return tinfo, nil + + ti, _ := tinfoMap.LoadOrStore(typ, tinfo) + return ti.(*typeInfo), nil } // structFieldInfo builds and returns a fieldInfo for f. @@ -131,10 +130,12 @@ func structFieldInfo(typ reflect.Type, f *reflect.StructField) (*fieldInfo, erro switch flag { case "attr": finfo.flags |= fAttr + case "cdata": + finfo.flags |= fCDATA case "chardata": finfo.flags |= fCharData case "innerxml": - finfo.flags |= fInnerXml + finfo.flags |= fInnerXML case "comment": finfo.flags |= fComment case "any": @@ -151,8 +152,8 @@ func structFieldInfo(typ reflect.Type, f *reflect.StructField) (*fieldInfo, erro switch mode := finfo.flags & fMode; mode { case 0: finfo.flags |= fElement - case fAttr, fCharData, fInnerXml, fComment, fAny: - if f.Name == "XMLName" || tag != "" && mode != fAttr { + case fAttr, fCDATA, fCharData, fInnerXML, fComment, fAny, fAny | fAttr: + if f.Name == xmlName || tag != "" && mode != fAttr { valid = false } default: @@ -177,7 +178,7 @@ func structFieldInfo(typ reflect.Type, f *reflect.StructField) (*fieldInfo, erro f.Name, typ, f.Tag.Get("xml")) } - if f.Name == "XMLName" { + if f.Name == xmlName { // The XMLName field records the XML element name. Don't // process it as usual because its name should default to // empty rather than to the field name. @@ -214,7 +215,7 @@ func structFieldInfo(typ reflect.Type, f *reflect.StructField) (*fieldInfo, erro } // If the field type has an XMLName field, the names must match - // so that the behavior of both marshalling and unmarshalling + // so that the behavior of both marshaling and unmarshaling // is straightforward and unambiguous. if finfo.flags&fElement != 0 { ftyp := f.Type @@ -239,11 +240,11 @@ func lookupXMLName(typ reflect.Type) (xmlname *fieldInfo) { } for i, n := 0, typ.NumField(); i < n; i++ { f := typ.Field(i) - if f.Name != "XMLName" { + if f.Name != xmlName { continue } finfo, err := structFieldInfo(typ, &f) - if finfo.name != "" && err == nil { + if err == nil && finfo.name != "" { return finfo } // Also consider errors as a non-existent field tag @@ -334,7 +335,7 @@ Loop: return nil } -// A TagPathError represents an error in the unmarshalling process +// A TagPathError represents an error in the unmarshaling process // caused by the use of field tags with conflicting paths. type TagPathError struct { Struct reflect.Type @@ -346,15 +347,25 @@ func (e *TagPathError) Error() string { return fmt.Sprintf("%s field %q with tag %q conflicts with field %q with tag %q", e.Struct, e.Field1, e.Tag1, e.Field2, e.Tag2) } +const ( + initNilPointers = true + dontInitNilPointers = false +) + // value returns v's field value corresponding to finfo. 
-// It's equivalent to v.FieldByIndex(finfo.idx), but initializes -// and dereferences pointers as necessary. -func (finfo *fieldInfo) value(v reflect.Value) reflect.Value { +// It's equivalent to v.FieldByIndex(finfo.idx), but when passed +// initNilPointers, it initializes and dereferences pointers as necessary. +// When passed dontInitNilPointers and a nil pointer is reached, the function +// returns a zero reflect.Value. +func (finfo *fieldInfo) value(v reflect.Value, shouldInitNilPointers bool) reflect.Value { for i, x := range finfo.idx { if i > 0 { t := v.Type() if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct { if v.IsNil() { + if !shouldInitNilPointers { + return reflect.Value{} + } v.Set(reflect.New(v.Type().Elem())) } v = v.Elem() diff --git a/vendor/github.com/vmware/govmomi/vim25/xml/xml.go b/vendor/github.com/vmware/govmomi/vim25/xml/xml.go index 6c6c5c821..28631bdfe 100644 --- a/vendor/github.com/vmware/govmomi/vim25/xml/xml.go +++ b/vendor/github.com/vmware/govmomi/vim25/xml/xml.go @@ -7,8 +7,8 @@ package xml // References: -// Annotated XML spec: http://www.xml.com/axml/testaxml.htm -// XML name spaces: http://www.w3.org/TR/REC-xml-names/ +// Annotated XML spec: https://www.xml.com/axml/testaxml.htm +// XML name spaces: https://www.w3.org/TR/REC-xml-names/ // TODO(rsc): // Test error handling. @@ -61,6 +61,7 @@ type StartElement struct { Attr []Attr } +// Copy creates a new copy of StartElement. func (e StartElement) Copy() StartElement { attrs := make([]Attr, len(e.Attr)) copy(attrs, e.Attr) @@ -89,12 +90,14 @@ func makeCopy(b []byte) []byte { return b1 } +// Copy creates a new copy of CharData. func (c CharData) Copy() CharData { return CharData(makeCopy(c)) } // A Comment represents an XML comment of the form . // The bytes do not include the comment markers. type Comment []byte +// Copy creates a new copy of Comment. func (c Comment) Copy() Comment { return Comment(makeCopy(c)) } // A ProcInst represents an XML processing instruction of the form @@ -103,6 +106,7 @@ type ProcInst struct { Inst []byte } +// Copy creates a new copy of ProcInst. func (p ProcInst) Copy() ProcInst { p.Inst = makeCopy(p.Inst) return p @@ -112,6 +116,7 @@ func (p ProcInst) Copy() ProcInst { // The bytes do not include the markers. type Directive []byte +// Copy creates a new copy of Directive. func (d Directive) Copy() Directive { return Directive(makeCopy(d)) } // CopyToken returns a copy of a Token. @@ -131,6 +136,23 @@ func CopyToken(t Token) Token { return t } +// A TokenReader is anything that can decode a stream of XML tokens, including a +// Decoder. +// +// When Token encounters an error or end-of-file condition after successfully +// reading a token, it returns the token. It may return the (non-nil) error from +// the same call or return the error (and a nil token) from a subsequent call. +// An instance of this general case is that a TokenReader returning a non-nil +// token at the end of the token stream may return either io.EOF or a nil error. +// The next Read should return nil, io.EOF. +// +// Implementations of Token are discouraged from returning a nil token with a +// nil error. Callers should treat a return of nil, nil as indicating that +// nothing happened; in particular it does not indicate EOF. +type TokenReader interface { + Token() (Token, error) +} + // A Decoder represents an XML parser reading a particular input stream. // The parser assumes that its input is encoded in UTF-8. 
type Decoder struct { @@ -146,9 +168,9 @@ type Decoder struct { // // Setting: // - // d.Strict = false; - // d.AutoClose = HTMLAutoClose; - // d.Entity = HTMLEntity + // d.Strict = false + // d.AutoClose = xml.HTMLAutoClose + // d.Entity = xml.HTMLEntity // // creates a parser that can handle typical HTML. // @@ -177,7 +199,7 @@ type Decoder struct { // charset-conversion readers, converting from the provided // non-UTF-8 charset into UTF-8. If CharsetReader is nil or // returns an error, parsing stops with an error. One of the - // the CharsetReader's result values must be non-nil. + // CharsetReader's result values must be non-nil. CharsetReader func(charset string, input io.Reader) (io.Reader, error) // DefaultSpace sets the default name space used for unadorned tags, @@ -189,6 +211,7 @@ type Decoder struct { TypeFunc func(string) (reflect.Type, bool) r io.ByteReader + t TokenReader buf bytes.Buffer saved *bytes.Buffer stk *stack @@ -200,6 +223,7 @@ type Decoder struct { ns map[string]string err error line int + offset int64 unmarshalDepth int } @@ -217,12 +241,28 @@ func NewDecoder(r io.Reader) *Decoder { return d } +// NewTokenDecoder creates a new XML parser using an underlying token stream. +func NewTokenDecoder(t TokenReader) *Decoder { + // Is it already a Decoder? + if d, ok := t.(*Decoder); ok { + return d + } + d := &Decoder{ + ns: make(map[string]string), + t: t, + nextByte: -1, + line: 1, + Strict: true, + } + return d +} + // Token returns the next XML token in the input stream. // At the end of the input stream, Token returns nil, io.EOF. // // Slices of bytes in the returned token data refer to the // parser's internal buffer and remain valid only until the next -// call to Token. To acquire a copy of the bytes, call CopyToken +// call to Token. To acquire a copy of the bytes, call CopyToken // or the token's Copy method. // // Token expands self-closing elements such as
@@ -230,25 +270,33 @@ func NewDecoder(r io.Reader) *Decoder { // // Token guarantees that the StartElement and EndElement // tokens it returns are properly nested and matched: -// if Token encounters an unexpected end element, +// if Token encounters an unexpected end element +// or EOF before all expected end elements, // it will return an error. // // Token implements XML name spaces as described by -// http://www.w3.org/TR/REC-xml-names/. Each of the +// https://www.w3.org/TR/REC-xml-names/. Each of the // Name structures contained in the Token has the Space // set to the URL identifying its name space when known. // If Token encounters an unrecognized name space prefix, // it uses the prefix as the Space rather than report an error. -func (d *Decoder) Token() (t Token, err error) { +func (d *Decoder) Token() (Token, error) { + var t Token + var err error if d.stk != nil && d.stk.kind == stkEOF { - err = io.EOF - return + return nil, io.EOF } if d.nextToken != nil { t = d.nextToken d.nextToken = nil } else if t, err = d.rawToken(); err != nil { - return + switch { + case err == io.EOF && d.t != nil: + err = nil + case err == io.EOF && d.stk != nil && d.stk.kind != stkEOF: + err = d.syntaxError("unexpected EOF") + } + return t, err } if !d.Strict { @@ -264,12 +312,12 @@ func (d *Decoder) Token() (t Token, err error) { // to the other attribute names, so process // the translations first. for _, a := range t1.Attr { - if a.Name.Space == "xmlns" { + if a.Name.Space == xmlnsPrefix { v, ok := d.ns[a.Name.Local] d.pushNs(a.Name.Local, v, ok) d.ns[a.Name.Local] = a.Value } - if a.Name.Space == "" && a.Name.Local == "xmlns" { + if a.Name.Space == "" && a.Name.Local == xmlnsPrefix { // Default space for untagged names v, ok := d.ns[""] d.pushNs("", v, ok) @@ -291,23 +339,27 @@ func (d *Decoder) Token() (t Token, err error) { } t = t1 } - return + return t, err } -const xmlURL = "http://www.w3.org/XML/1998/namespace" +const ( + xmlURL = "http://www.w3.org/XML/1998/namespace" + xmlnsPrefix = "xmlns" + xmlPrefix = "xml" +) // Apply name space translation to name n. // The default name space (for Space=="") // applies only to element names, not to attribute names. func (d *Decoder) translate(n *Name, isElementName bool) { switch { - case n.Space == "xmlns": + case n.Space == xmlnsPrefix: return case n.Space == "" && !isElementName: return - case n.Space == "xml": + case n.Space == xmlPrefix: n.Space = xmlURL - case n.Space == "" && n.Local == "xmlns": + case n.Space == "" && n.Local == xmlnsPrefix: return } if v, ok := d.ns[n.Space]; ok { @@ -330,7 +382,7 @@ func (d *Decoder) switchToReader(r io.Reader) { } // Parsing state - stack holds old name space translations -// and the current set of open elements. The translations to pop when +// and the current set of open elements. The translations to pop when // ending a given tag are *below* it on the stack, which is // more work but forced on us by XML. 
type stack struct { @@ -501,6 +553,9 @@ func (d *Decoder) RawToken() (Token, error) { } func (d *Decoder) rawToken() (Token, error) { + if d.t != nil { + return d.t.Token() + } if d.err != nil { return nil, d.err } @@ -552,7 +607,6 @@ func (d *Decoder) rawToken() (Token, error) { case '?': // if target == "xml" { - enc := procInstEncoding(string(data)) - if enc != "" && enc != "utf-8" && enc != "UTF-8" { + content := string(data) + ver := procInst("version", content) + if ver != "" && ver != "1.0" { + d.err = fmt.Errorf("xml: unsupported version %q; only version 1.0 is supported", ver) + return nil, d.err + } + enc := procInst("encoding", content) + if enc != "" && enc != "utf-8" && enc != "UTF-8" && !strings.EqualFold(enc, "utf-8") { if d.CharsetReader == nil { d.err = fmt.Errorf("xml: encoding %q declared but Decoder.CharsetReader is nil", enc) return nil, d.err @@ -619,7 +679,12 @@ func (d *Decoder) rawToken() (Token, error) { return nil, d.err } d.buf.WriteByte(b) - if b0 == '-' && b1 == '-' && b == '>' { + if b0 == '-' && b1 == '-' { + if b != '>' { + d.err = d.syntaxError( + `invalid sequence "--" not allowed in comments`) + return nil, d.err + } break } b0, b1 = b1, b @@ -726,7 +791,7 @@ func (d *Decoder) rawToken() (Token, error) { return nil, d.err } - attr = make([]Attr, 0, 4) + attr = []Attr{} for { d.space() if b, ok = d.mustgetc(); !ok { @@ -748,14 +813,7 @@ func (d *Decoder) rawToken() (Token, error) { } d.ungetc(b) - n := len(attr) - if n >= cap(attr) { - nattr := make([]Attr, n, 2*cap(attr)) - copy(nattr, attr) - attr = nattr - } - attr = attr[0 : n+1] - a := &attr[n] + a := Attr{} if a.Name, ok = d.nsname(); !ok { if d.err == nil { d.err = d.syntaxError("expected attribute name in element") @@ -770,10 +828,9 @@ func (d *Decoder) rawToken() (Token, error) { if d.Strict { d.err = d.syntaxError("attribute name without = in element") return nil, d.err - } else { - d.ungetc(b) - a.Value = a.Name.Local } + d.ungetc(b) + a.Value = a.Name.Local } else { d.space() data := d.attrval() @@ -782,6 +839,7 @@ func (d *Decoder) rawToken() (Token, error) { } a.Value = string(data) } + attr = append(attr, a) } if empty { d.needClose = true @@ -812,7 +870,7 @@ func (d *Decoder) attrval() []byte { if !ok { return nil } - // http://www.w3.org/TR/REC-html40/intro/sgmltut.html#h-3.2.2 + // https://www.w3.org/TR/REC-html40/intro/sgmltut.html#h-3.2.2 if 'a' <= b && b <= 'z' || 'A' <= b && b <= 'Z' || '0' <= b && b <= '9' || b == '_' || b == ':' || b == '-' { d.buf.WriteByte(b) @@ -863,9 +921,17 @@ func (d *Decoder) getc() (b byte, ok bool) { if b == '\n' { d.line++ } + d.offset++ return b, true } +// InputOffset returns the input stream byte offset of the current decoder position. +// The offset gives the location of the end of the most recently returned token +// and the beginning of the next token. +func (d *Decoder) InputOffset() int64 { + return d.offset +} + // Return saved offset. // If we did ungetc (nextByte >= 0), have to back up one. 
func (d *Decoder) savedOffset() int { @@ -895,9 +961,10 @@ func (d *Decoder) ungetc(b byte) { d.line-- } d.nextByte = int(b) + d.offset-- } -var entity = map[string]int{ +var entity = map[string]rune{ "lt": '<', "gt": '>', "amp": '&', @@ -992,7 +1059,7 @@ Input: d.buf.WriteByte(';') n, err := strconv.ParseUint(s, base, 64) if err == nil && n <= unicode.MaxRune { - text = string(n) + text = string(rune(n)) haveText = true } } @@ -1002,7 +1069,6 @@ Input: if d.err != nil { return nil } - ok = false } if b, ok = d.mustgetc(); !ok { return nil @@ -1075,13 +1141,13 @@ Input: } // Decide whether the given rune is in the XML Character Range, per -// the Char production of http://www.xml.com/axml/testaxml.htm, +// the Char production of https://www.xml.com/axml/testaxml.htm, // Section 2.2 Characters. func isInCharacterRange(r rune) (inrange bool) { return r == 0x09 || r == 0x0A || r == 0x0D || - r >= 0x20 && r <= 0xDF77 || + r >= 0x20 && r <= 0xD7FF || r >= 0xE000 && r <= 0xFFFD || r >= 0x10000 && r <= 0x10FFFF } @@ -1113,12 +1179,12 @@ func (d *Decoder) name() (s string, ok bool) { } // Now we check the characters. - s = d.buf.String() - if !isName([]byte(s)) { - d.err = d.syntaxError("invalid XML name: " + s) + b := d.buf.Bytes() + if !isName(b) { + d.err = d.syntaxError("invalid XML name: " + string(b)) return "", false } - return s, true + return string(b), true } // Read a name and append its bytes to d.buf. @@ -1204,8 +1270,8 @@ func isNameString(s string) bool { } // These tables were generated by cut and paste from Appendix B of -// the XML spec at http://www.xml.com/axml/testaxml.htm -// and then reformatting. First corresponds to (Letter | '_' | ':') +// the XML spec at https://www.xml.com/axml/testaxml.htm +// and then reformatting. First corresponds to (Letter | '_' | ':') // and second corresponds to NameChar. var first = &unicode.RangeTable{ @@ -1522,7 +1588,9 @@ var second = &unicode.RangeTable{ // HTMLEntity is an entity map containing translations for the // standard HTML entity characters. -var HTMLEntity = htmlEntity +// +// See the Decoder.Strict and Decoder.Entity fields' documentation. +var HTMLEntity map[string]string = htmlEntity var htmlEntity = map[string]string{ /* @@ -1789,7 +1857,9 @@ var htmlEntity = map[string]string{ // HTMLAutoClose is the set of HTML elements that // should be considered to close automatically. -var HTMLAutoClose = htmlAutoClose +// +// See the Decoder.Strict and Decoder.Entity fields' documentation. +var HTMLAutoClose []string = htmlAutoClose var htmlAutoClose = []string{ /* @@ -1812,20 +1882,27 @@ var htmlAutoClose = []string{ } var ( - esc_quot = []byte(""") // shorter than """ - esc_apos = []byte("'") // shorter than "'" - esc_amp = []byte("&") - esc_lt = []byte("<") - esc_gt = []byte(">") - esc_tab = []byte(" ") - esc_nl = []byte(" ") - esc_cr = []byte(" ") - esc_fffd = []byte("\uFFFD") // Unicode replacement character + escQuot = []byte(""") // shorter than """ + escApos = []byte("'") // shorter than "'" + escAmp = []byte("&") + escLT = []byte("<") + escGT = []byte(">") + escTab = []byte(" ") + escNL = []byte(" ") + escCR = []byte(" ") + escFFFD = []byte("\uFFFD") // Unicode replacement character ) // EscapeText writes to w the properly escaped XML equivalent // of the plain text data s. func EscapeText(w io.Writer, s []byte) error { + return escapeText(w, s, true) +} + +// escapeText writes to w the properly escaped XML equivalent +// of the plain text data s. If escapeNewline is true, newline +// characters will be escaped. 
+func escapeText(w io.Writer, s []byte, escapeNewline bool) error { var esc []byte last := 0 for i := 0; i < len(s); { @@ -1833,24 +1910,27 @@ func EscapeText(w io.Writer, s []byte) error { i += width switch r { case '"': - esc = esc_quot + esc = escQuot case '\'': - esc = esc_apos + esc = escApos case '&': - esc = esc_amp + esc = escAmp case '<': - esc = esc_lt + esc = escLT case '>': - esc = esc_gt + esc = escGT case '\t': - esc = esc_tab + esc = escTab case '\n': - esc = esc_nl + if !escapeNewline { + continue + } + esc = escNL case '\r': - esc = esc_cr + esc = escCR default: if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) { - esc = esc_fffd + esc = escFFFD break } continue @@ -1863,10 +1943,8 @@ func EscapeText(w io.Writer, s []byte) error { } last = i } - if _, err := w.Write(s[last:]); err != nil { - return err - } - return nil + _, err := w.Write(s[last:]) + return err } // EscapeString writes to p the properly escaped XML equivalent @@ -1879,24 +1957,24 @@ func (p *printer) EscapeString(s string) { i += width switch r { case '"': - esc = esc_quot + esc = escQuot case '\'': - esc = esc_apos + esc = escApos case '&': - esc = esc_amp + esc = escAmp case '<': - esc = esc_lt + esc = escLT case '>': - esc = esc_gt + esc = escGT case '\t': - esc = esc_tab + esc = escTab case '\n': - esc = esc_nl + esc = escNL case '\r': - esc = esc_cr + esc = escCR default: if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) { - esc = esc_fffd + esc = escFFFD break } continue @@ -1915,16 +1993,55 @@ func Escape(w io.Writer, s []byte) { EscapeText(w, s) } -// procInstEncoding parses the `encoding="..."` or `encoding='...'` +var ( + cdataStart = []byte("<![CDATA[") + cdataEnd = []byte("]]>") + cdataEscape = []byte("]]]]><![CDATA[") +) + +// emitCDATA writes to w the CDATA-wrapped plain text data s. +// It escapes CDATA directives nested in s. +func emitCDATA(w io.Writer, s []byte) error { + if len(s) == 0 { + return nil + } + if _, err := w.Write(cdataStart); err != nil { + return err + } + for { + i := bytes.Index(s, cdataEnd) + if i >= 0 && i+len(cdataEnd) <= len(s) { + // Found a nested CDATA directive end. + if _, err := w.Write(s[:i]); err != nil { + return err + } + if _, err := w.Write(cdataEscape); err != nil { + return err + } + i += len(cdataEnd) + } else { + if _, err := w.Write(s); err != nil { + return err + } + break + } + s = s[i:] + } + _, err := w.Write(cdataEnd) + return err +} + +// procInst parses the `param="..."` or `param='...'` // value out of the provided string, returning "" if not found. -func procInstEncoding(s string) string { +func procInst(param, s string) string { // TODO: this parsing is somewhat lame and not exact. // It works for all actual cases, though.
- idx := strings.Index(s, "encoding=") + param = param + "=" + idx := strings.Index(s, param) if idx == -1 { return "" } - v := s[idx+len("encoding="):] + v := s[idx+len(param):] if v == "" { return "" } diff --git a/vendor/go.uber.org/atomic/.gitignore b/vendor/go.uber.org/atomic/.gitignore index c3fa25389..2e337a0ed 100644 --- a/vendor/go.uber.org/atomic/.gitignore +++ b/vendor/go.uber.org/atomic/.gitignore @@ -10,3 +10,6 @@ lint.log # Profiling output *.prof + +# Output of fossa analyzer +/fossa diff --git a/vendor/go.uber.org/atomic/.travis.yml b/vendor/go.uber.org/atomic/.travis.yml deleted file mode 100644 index 13d0a4f25..000000000 --- a/vendor/go.uber.org/atomic/.travis.yml +++ /dev/null @@ -1,27 +0,0 @@ -sudo: false -language: go -go_import_path: go.uber.org/atomic - -env: - global: - - GO111MODULE=on - -matrix: - include: - - go: oldstable - - go: stable - env: LINT=1 - -cache: - directories: - - vendor - -before_install: - - go version - -script: - - test -z "$LINT" || make lint - - make cover - -after_success: - - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/go.uber.org/atomic/CHANGELOG.md b/vendor/go.uber.org/atomic/CHANGELOG.md index 24c0274dc..3c3f2380c 100644 --- a/vendor/go.uber.org/atomic/CHANGELOG.md +++ b/vendor/go.uber.org/atomic/CHANGELOG.md @@ -4,6 +4,11 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [1.8.0] - 2021-06-09 +### Added +- Add `atomic.Uintptr` type for atomic operations on `uintptr` values. +- Add `atomic.UnsafePointer` type for atomic operations on `unsafe.Pointer` values. + ## [1.7.0] - 2020-09-14 ### Added - Support JSON serialization and deserialization of primitive atomic types. @@ -63,6 +68,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Initial release. +[1.8.0]: https://github.com/uber-go/atomic/compare/v1.7.0...v1.8.0 [1.7.0]: https://github.com/uber-go/atomic/compare/v1.6.0...v1.7.0 [1.6.0]: https://github.com/uber-go/atomic/compare/v1.5.1...v1.6.0 [1.5.1]: https://github.com/uber-go/atomic/compare/v1.5.0...v1.5.1 diff --git a/vendor/go.uber.org/atomic/Makefile b/vendor/go.uber.org/atomic/Makefile index 1b1376d42..46c945b32 100644 --- a/vendor/go.uber.org/atomic/Makefile +++ b/vendor/go.uber.org/atomic/Makefile @@ -69,6 +69,7 @@ generate: $(GEN_ATOMICINT) $(GEN_ATOMICWRAPPER) generatenodirty: @[ -z "$$(git status --porcelain)" ] || ( \ echo "Working tree is dirty. Commit your changes first."; \ + git status; \ exit 1 ) @make generate @status=$$(git status --porcelain); \ diff --git a/vendor/go.uber.org/atomic/README.md b/vendor/go.uber.org/atomic/README.md index ade0c20f1..96b47a1f1 100644 --- a/vendor/go.uber.org/atomic/README.md +++ b/vendor/go.uber.org/atomic/README.md @@ -55,8 +55,8 @@ Released under the [MIT License](LICENSE.txt). 
[doc-img]: https://godoc.org/github.com/uber-go/atomic?status.svg [doc]: https://godoc.org/go.uber.org/atomic -[ci-img]: https://travis-ci.com/uber-go/atomic.svg?branch=master -[ci]: https://travis-ci.com/uber-go/atomic +[ci-img]: https://github.com/uber-go/atomic/actions/workflows/go.yml/badge.svg +[ci]: https://github.com/uber-go/atomic/actions/workflows/go.yml [cov-img]: https://codecov.io/gh/uber-go/atomic/branch/master/graph/badge.svg [cov]: https://codecov.io/gh/uber-go/atomic [reportcard-img]: https://goreportcard.com/badge/go.uber.org/atomic diff --git a/vendor/go.uber.org/atomic/bool.go b/vendor/go.uber.org/atomic/bool.go index 9cf1914b1..de5f72ad2 100644 --- a/vendor/go.uber.org/atomic/bool.go +++ b/vendor/go.uber.org/atomic/bool.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicwrapper. -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2020-2021 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/go.uber.org/atomic/duration.go b/vendor/go.uber.org/atomic/duration.go index 027cfcb20..2bad9c010 100644 --- a/vendor/go.uber.org/atomic/duration.go +++ b/vendor/go.uber.org/atomic/duration.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicwrapper. -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2020-2021 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/go.uber.org/atomic/error.go b/vendor/go.uber.org/atomic/error.go index a6166fbea..fc529756c 100644 --- a/vendor/go.uber.org/atomic/error.go +++ b/vendor/go.uber.org/atomic/error.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicwrapper. -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2020-2021 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/go.uber.org/atomic/float64.go b/vendor/go.uber.org/atomic/float64.go index 071906020..f833fd373 100644 --- a/vendor/go.uber.org/atomic/float64.go +++ b/vendor/go.uber.org/atomic/float64.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicwrapper. -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2020-2021 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/go.uber.org/atomic/gen.go b/vendor/go.uber.org/atomic/gen.go index 50d6b2485..1e9ef4f87 100644 --- a/vendor/go.uber.org/atomic/gen.go +++ b/vendor/go.uber.org/atomic/gen.go @@ -24,3 +24,4 @@ package atomic //go:generate bin/gen-atomicint -name=Int64 -wrapped=int64 -file=int64.go //go:generate bin/gen-atomicint -name=Uint32 -wrapped=uint32 -unsigned -file=uint32.go //go:generate bin/gen-atomicint -name=Uint64 -wrapped=uint64 -unsigned -file=uint64.go +//go:generate bin/gen-atomicint -name=Uintptr -wrapped=uintptr -unsigned -file=uintptr.go diff --git a/vendor/go.uber.org/atomic/int32.go b/vendor/go.uber.org/atomic/int32.go index 18ae56493..a20fc998e 100644 --- a/vendor/go.uber.org/atomic/int32.go +++ b/vendor/go.uber.org/atomic/int32.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicint. 
-// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2020-2021 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/go.uber.org/atomic/int64.go b/vendor/go.uber.org/atomic/int64.go index 2bcbbfaa9..f03ee0090 100644 --- a/vendor/go.uber.org/atomic/int64.go +++ b/vendor/go.uber.org/atomic/int64.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicint. -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2020-2021 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/go.uber.org/atomic/string.go b/vendor/go.uber.org/atomic/string.go index 225b7a2be..f8a6f339a 100644 --- a/vendor/go.uber.org/atomic/string.go +++ b/vendor/go.uber.org/atomic/string.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicwrapper. -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2020-2021 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/go.uber.org/atomic/uint32.go b/vendor/go.uber.org/atomic/uint32.go index a973aba1a..0b5173230 100644 --- a/vendor/go.uber.org/atomic/uint32.go +++ b/vendor/go.uber.org/atomic/uint32.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicint. -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2020-2021 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/go.uber.org/atomic/uint64.go b/vendor/go.uber.org/atomic/uint64.go index 3b6c71fd5..71e3f3247 100644 --- a/vendor/go.uber.org/atomic/uint64.go +++ b/vendor/go.uber.org/atomic/uint64.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicint. -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2020-2021 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/go.uber.org/atomic/uintptr.go b/vendor/go.uber.org/atomic/uintptr.go new file mode 100644 index 000000000..6b363ada3 --- /dev/null +++ b/vendor/go.uber.org/atomic/uintptr.go @@ -0,0 +1,102 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020-2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "strconv" + "sync/atomic" +) + +// Uintptr is an atomic wrapper around uintptr. +type Uintptr struct { + _ nocmp // disallow non-atomic comparison + + v uintptr +} + +// NewUintptr creates a new Uintptr. +func NewUintptr(i uintptr) *Uintptr { + return &Uintptr{v: i} +} + +// Load atomically loads the wrapped value. +func (i *Uintptr) Load() uintptr { + return atomic.LoadUintptr(&i.v) +} + +// Add atomically adds to the wrapped uintptr and returns the new value. +func (i *Uintptr) Add(n uintptr) uintptr { + return atomic.AddUintptr(&i.v, n) +} + +// Sub atomically subtracts from the wrapped uintptr and returns the new value. +func (i *Uintptr) Sub(n uintptr) uintptr { + return atomic.AddUintptr(&i.v, ^(n - 1)) +} + +// Inc atomically increments the wrapped uintptr and returns the new value. +func (i *Uintptr) Inc() uintptr { + return i.Add(1) +} + +// Dec atomically decrements the wrapped uintptr and returns the new value. +func (i *Uintptr) Dec() uintptr { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Uintptr) CAS(old, new uintptr) bool { + return atomic.CompareAndSwapUintptr(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Uintptr) Store(n uintptr) { + atomic.StoreUintptr(&i.v, n) +} + +// Swap atomically swaps the wrapped uintptr and returns the old value. +func (i *Uintptr) Swap(n uintptr) uintptr { + return atomic.SwapUintptr(&i.v, n) +} + +// MarshalJSON encodes the wrapped uintptr into JSON. +func (i *Uintptr) MarshalJSON() ([]byte, error) { + return json.Marshal(i.Load()) +} + +// UnmarshalJSON decodes JSON into the wrapped uintptr. +func (i *Uintptr) UnmarshalJSON(b []byte) error { + var v uintptr + if err := json.Unmarshal(b, &v); err != nil { + return err + } + i.Store(v) + return nil +} + +// String encodes the wrapped value as a string. +func (i *Uintptr) String() string { + v := i.Load() + return strconv.FormatUint(uint64(v), 10) +} diff --git a/vendor/go.uber.org/atomic/unsafe_pointer.go b/vendor/go.uber.org/atomic/unsafe_pointer.go new file mode 100644 index 000000000..a3830c665 --- /dev/null +++ b/vendor/go.uber.org/atomic/unsafe_pointer.go @@ -0,0 +1,58 @@ +// Copyright (c) 2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "sync/atomic" + "unsafe" +) + +// UnsafePointer is an atomic wrapper around unsafe.Pointer. +type UnsafePointer struct { + _ nocmp // disallow non-atomic comparison + + v unsafe.Pointer +} + +// NewUnsafePointer creates a new UnsafePointer. +func NewUnsafePointer(p unsafe.Pointer) *UnsafePointer { + return &UnsafePointer{v: p} +} + +// Load atomically loads the wrapped value. +func (p *UnsafePointer) Load() unsafe.Pointer { + return atomic.LoadPointer(&p.v) +} + +// Store atomically stores the passed value. +func (p *UnsafePointer) Store(q unsafe.Pointer) { + atomic.StorePointer(&p.v, q) +} + +// Swap atomically swaps the wrapped unsafe.Pointer and returns the old value. +func (p *UnsafePointer) Swap(q unsafe.Pointer) unsafe.Pointer { + return atomic.SwapPointer(&p.v, q) +} + +// CAS is an atomic compare-and-swap. +func (p *UnsafePointer) CAS(old, new unsafe.Pointer) bool { + return atomic.CompareAndSwapPointer(&p.v, old, new) +} diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md index fdfef8808..794ee303e 100644 --- a/vendor/go.uber.org/zap/CHANGELOG.md +++ b/vendor/go.uber.org/zap/CHANGELOG.md @@ -1,4 +1,16 @@ # Changelog +All notable changes to this project will be documented in this file. + +This project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). + +## 1.19.1 (8 Sep 2021) + +### Fixed +* [#1001][]: JSON: Fix complex number encoding with negative imaginary part. Thanks to @hemantjadon. +* [#1003][]: JSON: Fix inaccurate precision when encoding float32. + +[#1001]: https://github.com/uber-go/zap/pull/1001 +[#1003]: https://github.com/uber-go/zap/pull/1003 ## 1.19.0 (9 Aug 2021) diff --git a/vendor/go.uber.org/zap/zapcore/json_encoder.go b/vendor/go.uber.org/zap/zapcore/json_encoder.go index 5cf7d917e..af220d9b4 100644 --- a/vendor/go.uber.org/zap/zapcore/json_encoder.go +++ b/vendor/go.uber.org/zap/zapcore/json_encoder.go @@ -128,6 +128,11 @@ func (enc *jsonEncoder) AddFloat64(key string, val float64) { enc.AppendFloat64(val) } +func (enc *jsonEncoder) AddFloat32(key string, val float32) { + enc.addKey(key) + enc.AppendFloat32(val) +} + func (enc *jsonEncoder) AddInt64(key string, val int64) { enc.addKey(key) enc.AppendInt64(val) @@ -228,7 +233,11 @@ func (enc *jsonEncoder) AppendComplex128(val complex128) { // Because we're always in a quoted string, we can use strconv without // special-casing NaN and +/-Inf. enc.buf.AppendFloat(r, 64) - enc.buf.AppendByte('+') + // If imaginary part is less than 0, minus (-) sign is added by default + // by AppendFloat. 
+ if i >= 0 { + enc.buf.AppendByte('+') + } enc.buf.AppendFloat(i, 64) enc.buf.AppendByte('i') enc.buf.AppendByte('"') @@ -293,7 +302,6 @@ func (enc *jsonEncoder) AppendUint64(val uint64) { } func (enc *jsonEncoder) AddComplex64(k string, v complex64) { enc.AddComplex128(k, complex128(v)) } -func (enc *jsonEncoder) AddFloat32(k string, v float32) { enc.AddFloat64(k, float64(v)) } func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) } func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) } func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) } diff --git a/vendor/golang.org/x/xerrors/LICENSE b/vendor/golang.org/x/xerrors/LICENSE deleted file mode 100644 index e4a47e17f..000000000 --- a/vendor/golang.org/x/xerrors/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2019 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/xerrors/PATENTS b/vendor/golang.org/x/xerrors/PATENTS deleted file mode 100644 index 733099041..000000000 --- a/vendor/golang.org/x/xerrors/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. 
If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/xerrors/README b/vendor/golang.org/x/xerrors/README deleted file mode 100644 index aac7867a5..000000000 --- a/vendor/golang.org/x/xerrors/README +++ /dev/null @@ -1,2 +0,0 @@ -This repository holds the transition packages for the new Go 1.13 error values. -See golang.org/design/29934-error-values. diff --git a/vendor/golang.org/x/xerrors/adaptor.go b/vendor/golang.org/x/xerrors/adaptor.go deleted file mode 100644 index 4317f2483..000000000 --- a/vendor/golang.org/x/xerrors/adaptor.go +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xerrors - -import ( - "bytes" - "fmt" - "io" - "reflect" - "strconv" -) - -// FormatError calls the FormatError method of f with an errors.Printer -// configured according to s and verb, and writes the result to s. -func FormatError(f Formatter, s fmt.State, verb rune) { - // Assuming this function is only called from the Format method, and given - // that FormatError takes precedence over Format, it cannot be called from - // any package that supports errors.Formatter. It is therefore safe to - // disregard that State may be a specific printer implementation and use one - // of our choice instead. - - // limitations: does not support printing error as Go struct. - - var ( - sep = " " // separator before next error - p = &state{State: s} - direct = true - ) - - var err error = f - - switch verb { - // Note that this switch must match the preference order - // for ordinary string printing (%#v before %+v, and so on). - - case 'v': - if s.Flag('#') { - if stringer, ok := err.(fmt.GoStringer); ok { - io.WriteString(&p.buf, stringer.GoString()) - goto exit - } - // proceed as if it were %v - } else if s.Flag('+') { - p.printDetail = true - sep = "\n - " - } - case 's': - case 'q', 'x', 'X': - // Use an intermediate buffer in the rare cases that precision, - // truncation, or one of the alternative verbs (q, x, and X) are - // specified. - direct = false - - default: - p.buf.WriteString("%!") - p.buf.WriteRune(verb) - p.buf.WriteByte('(') - switch { - case err != nil: - p.buf.WriteString(reflect.TypeOf(f).String()) - default: - p.buf.WriteString("") - } - p.buf.WriteByte(')') - io.Copy(s, &p.buf) - return - } - -loop: - for { - switch v := err.(type) { - case Formatter: - err = v.FormatError((*printer)(p)) - case fmt.Formatter: - v.Format(p, 'v') - break loop - default: - io.WriteString(&p.buf, v.Error()) - break loop - } - if err == nil { - break - } - if p.needColon || !p.printDetail { - p.buf.WriteByte(':') - p.needColon = false - } - p.buf.WriteString(sep) - p.inDetail = false - p.needNewline = false - } - -exit: - width, okW := s.Width() - prec, okP := s.Precision() - - if !direct || (okW && width > 0) || okP { - // Construct format string from State s. 
- format := []byte{'%'} - if s.Flag('-') { - format = append(format, '-') - } - if s.Flag('+') { - format = append(format, '+') - } - if s.Flag(' ') { - format = append(format, ' ') - } - if okW { - format = strconv.AppendInt(format, int64(width), 10) - } - if okP { - format = append(format, '.') - format = strconv.AppendInt(format, int64(prec), 10) - } - format = append(format, string(verb)...) - fmt.Fprintf(s, string(format), p.buf.String()) - } else { - io.Copy(s, &p.buf) - } -} - -var detailSep = []byte("\n ") - -// state tracks error printing state. It implements fmt.State. -type state struct { - fmt.State - buf bytes.Buffer - - printDetail bool - inDetail bool - needColon bool - needNewline bool -} - -func (s *state) Write(b []byte) (n int, err error) { - if s.printDetail { - if len(b) == 0 { - return 0, nil - } - if s.inDetail && s.needColon { - s.needNewline = true - if b[0] == '\n' { - b = b[1:] - } - } - k := 0 - for i, c := range b { - if s.needNewline { - if s.inDetail && s.needColon { - s.buf.WriteByte(':') - s.needColon = false - } - s.buf.Write(detailSep) - s.needNewline = false - } - if c == '\n' { - s.buf.Write(b[k:i]) - k = i + 1 - s.needNewline = true - } - } - s.buf.Write(b[k:]) - if !s.inDetail { - s.needColon = true - } - } else if !s.inDetail { - s.buf.Write(b) - } - return len(b), nil -} - -// printer wraps a state to implement an xerrors.Printer. -type printer state - -func (s *printer) Print(args ...interface{}) { - if !s.inDetail || s.printDetail { - fmt.Fprint((*state)(s), args...) - } -} - -func (s *printer) Printf(format string, args ...interface{}) { - if !s.inDetail || s.printDetail { - fmt.Fprintf((*state)(s), format, args...) - } -} - -func (s *printer) Detail() bool { - s.inDetail = true - return s.printDetail -} diff --git a/vendor/golang.org/x/xerrors/codereview.cfg b/vendor/golang.org/x/xerrors/codereview.cfg deleted file mode 100644 index 3f8b14b64..000000000 --- a/vendor/golang.org/x/xerrors/codereview.cfg +++ /dev/null @@ -1 +0,0 @@ -issuerepo: golang/go diff --git a/vendor/golang.org/x/xerrors/doc.go b/vendor/golang.org/x/xerrors/doc.go deleted file mode 100644 index eef99d9d5..000000000 --- a/vendor/golang.org/x/xerrors/doc.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package xerrors implements functions to manipulate errors. -// -// This package is based on the Go 2 proposal for error values: -// https://golang.org/design/29934-error-values -// -// These functions were incorporated into the standard library's errors package -// in Go 1.13: -// - Is -// - As -// - Unwrap -// -// Also, Errorf's %w verb was incorporated into fmt.Errorf. -// -// Use this package to get equivalent behavior in all supported Go versions. -// -// No other features of this package were included in Go 1.13, and at present -// there are no plans to include any of them. -package xerrors // import "golang.org/x/xerrors" diff --git a/vendor/golang.org/x/xerrors/errors.go b/vendor/golang.org/x/xerrors/errors.go deleted file mode 100644 index e88d3772d..000000000 --- a/vendor/golang.org/x/xerrors/errors.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xerrors - -import "fmt" - -// errorString is a trivial implementation of error. 
-type errorString struct { - s string - frame Frame -} - -// New returns an error that formats as the given text. -// -// The returned error contains a Frame set to the caller's location and -// implements Formatter to show this information when printed with details. -func New(text string) error { - return &errorString{text, Caller(1)} -} - -func (e *errorString) Error() string { - return e.s -} - -func (e *errorString) Format(s fmt.State, v rune) { FormatError(e, s, v) } - -func (e *errorString) FormatError(p Printer) (next error) { - p.Print(e.s) - e.frame.Format(p) - return nil -} diff --git a/vendor/golang.org/x/xerrors/fmt.go b/vendor/golang.org/x/xerrors/fmt.go deleted file mode 100644 index 829862ddf..000000000 --- a/vendor/golang.org/x/xerrors/fmt.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xerrors - -import ( - "fmt" - "strings" - "unicode" - "unicode/utf8" - - "golang.org/x/xerrors/internal" -) - -const percentBangString = "%!" - -// Errorf formats according to a format specifier and returns the string as a -// value that satisfies error. -// -// The returned error includes the file and line number of the caller when -// formatted with additional detail enabled. If the last argument is an error -// the returned error's Format method will return it if the format string ends -// with ": %s", ": %v", or ": %w". If the last argument is an error and the -// format string ends with ": %w", the returned error implements an Unwrap -// method returning it. -// -// If the format specifier includes a %w verb with an error operand in a -// position other than at the end, the returned error will still implement an -// Unwrap method returning the operand, but the error's Format method will not -// return the wrapped error. -// -// It is invalid to include more than one %w verb or to supply it with an -// operand that does not implement the error interface. The %w verb is otherwise -// a synonym for %v. -func Errorf(format string, a ...interface{}) error { - format = formatPlusW(format) - // Support a ": %[wsv]" suffix, which works well with xerrors.Formatter. - wrap := strings.HasSuffix(format, ": %w") - idx, format2, ok := parsePercentW(format) - percentWElsewhere := !wrap && idx >= 0 - if !percentWElsewhere && (wrap || strings.HasSuffix(format, ": %s") || strings.HasSuffix(format, ": %v")) { - err := errorAt(a, len(a)-1) - if err == nil { - return &noWrapError{fmt.Sprintf(format, a...), nil, Caller(1)} - } - // TODO: this is not entirely correct. The error value could be - // printed elsewhere in format if it mixes numbered with unnumbered - // substitutions. With relatively small changes to doPrintf we can - // have it optionally ignore extra arguments and pass the argument - // list in its entirety. - msg := fmt.Sprintf(format[:len(format)-len(": %s")], a[:len(a)-1]...) - frame := Frame{} - if internal.EnableTrace { - frame = Caller(1) - } - if wrap { - return &wrapError{msg, err, frame} - } - return &noWrapError{msg, err, frame} - } - // Support %w anywhere. - // TODO: don't repeat the wrapped error's message when %w occurs in the middle. - msg := fmt.Sprintf(format2, a...) - if idx < 0 { - return &noWrapError{msg, nil, Caller(1)} - } - err := errorAt(a, idx) - if !ok || err == nil { - // Too many %ws or argument of %w is not an error. Approximate the Go - // 1.13 fmt.Errorf message. 
- return &noWrapError{fmt.Sprintf("%sw(%s)", percentBangString, msg), nil, Caller(1)} - } - frame := Frame{} - if internal.EnableTrace { - frame = Caller(1) - } - return &wrapError{msg, err, frame} -} - -func errorAt(args []interface{}, i int) error { - if i < 0 || i >= len(args) { - return nil - } - err, ok := args[i].(error) - if !ok { - return nil - } - return err -} - -// formatPlusW is used to avoid the vet check that will barf at %w. -func formatPlusW(s string) string { - return s -} - -// Return the index of the only %w in format, or -1 if none. -// Also return a rewritten format string with %w replaced by %v, and -// false if there is more than one %w. -// TODO: handle "%[N]w". -func parsePercentW(format string) (idx int, newFormat string, ok bool) { - // Loosely copied from golang.org/x/tools/go/analysis/passes/printf/printf.go. - idx = -1 - ok = true - n := 0 - sz := 0 - var isW bool - for i := 0; i < len(format); i += sz { - if format[i] != '%' { - sz = 1 - continue - } - // "%%" is not a format directive. - if i+1 < len(format) && format[i+1] == '%' { - sz = 2 - continue - } - sz, isW = parsePrintfVerb(format[i:]) - if isW { - if idx >= 0 { - ok = false - } else { - idx = n - } - // "Replace" the last character, the 'w', with a 'v'. - p := i + sz - 1 - format = format[:p] + "v" + format[p+1:] - } - n++ - } - return idx, format, ok -} - -// Parse the printf verb starting with a % at s[0]. -// Return how many bytes it occupies and whether the verb is 'w'. -func parsePrintfVerb(s string) (int, bool) { - // Assume only that the directive is a sequence of non-letters followed by a single letter. - sz := 0 - var r rune - for i := 1; i < len(s); i += sz { - r, sz = utf8.DecodeRuneInString(s[i:]) - if unicode.IsLetter(r) { - return i + sz, r == 'w' - } - } - return len(s), false -} - -type noWrapError struct { - msg string - err error - frame Frame -} - -func (e *noWrapError) Error() string { - return fmt.Sprint(e) -} - -func (e *noWrapError) Format(s fmt.State, v rune) { FormatError(e, s, v) } - -func (e *noWrapError) FormatError(p Printer) (next error) { - p.Print(e.msg) - e.frame.Format(p) - return e.err -} - -type wrapError struct { - msg string - err error - frame Frame -} - -func (e *wrapError) Error() string { - return fmt.Sprint(e) -} - -func (e *wrapError) Format(s fmt.State, v rune) { FormatError(e, s, v) } - -func (e *wrapError) FormatError(p Printer) (next error) { - p.Print(e.msg) - e.frame.Format(p) - return e.err -} - -func (e *wrapError) Unwrap() error { - return e.err -} diff --git a/vendor/golang.org/x/xerrors/format.go b/vendor/golang.org/x/xerrors/format.go deleted file mode 100644 index 1bc9c26b9..000000000 --- a/vendor/golang.org/x/xerrors/format.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xerrors - -// A Formatter formats error messages. -type Formatter interface { - error - - // FormatError prints the receiver's first error and returns the next error in - // the error chain, if any. - FormatError(p Printer) (next error) -} - -// A Printer formats error messages. -// -// The most common implementation of Printer is the one provided by package fmt -// during Printf (as of Go 1.13). Localization packages such as golang.org/x/text/message -// typically provide their own implementations. -type Printer interface { - // Print appends args to the message output. 
- Print(args ...interface{}) - - // Printf writes a formatted string. - Printf(format string, args ...interface{}) - - // Detail reports whether error detail is requested. - // After the first call to Detail, all text written to the Printer - // is formatted as additional detail, or ignored when - // detail has not been requested. - // If Detail returns false, the caller can avoid printing the detail at all. - Detail() bool -} diff --git a/vendor/golang.org/x/xerrors/frame.go b/vendor/golang.org/x/xerrors/frame.go deleted file mode 100644 index 0de628ec5..000000000 --- a/vendor/golang.org/x/xerrors/frame.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xerrors - -import ( - "runtime" -) - -// A Frame contains part of a call stack. -type Frame struct { - // Make room for three PCs: the one we were asked for, what it called, - // and possibly a PC for skipPleaseUseCallersFrames. See: - // https://go.googlesource.com/go/+/032678e0fb/src/runtime/extern.go#169 - frames [3]uintptr -} - -// Caller returns a Frame that describes a frame on the caller's stack. -// The argument skip is the number of frames to skip over. -// Caller(0) returns the frame for the caller of Caller. -func Caller(skip int) Frame { - var s Frame - runtime.Callers(skip+1, s.frames[:]) - return s -} - -// location reports the file, line, and function of a frame. -// -// The returned function may be "" even if file and line are not. -func (f Frame) location() (function, file string, line int) { - frames := runtime.CallersFrames(f.frames[:]) - if _, ok := frames.Next(); !ok { - return "", "", 0 - } - fr, ok := frames.Next() - if !ok { - return "", "", 0 - } - return fr.Function, fr.File, fr.Line -} - -// Format prints the stack as error detail. -// It should be called from an error's Format implementation -// after printing any other error detail. -func (f Frame) Format(p Printer) { - if p.Detail() { - function, file, line := f.location() - if function != "" { - p.Printf("%s\n ", function) - } - if file != "" { - p.Printf("%s:%d\n", file, line) - } - } -} diff --git a/vendor/golang.org/x/xerrors/internal/internal.go b/vendor/golang.org/x/xerrors/internal/internal.go deleted file mode 100644 index 89f4eca5d..000000000 --- a/vendor/golang.org/x/xerrors/internal/internal.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package internal - -// EnableTrace indicates whether stack information should be recorded in errors. -var EnableTrace = true diff --git a/vendor/golang.org/x/xerrors/wrap.go b/vendor/golang.org/x/xerrors/wrap.go deleted file mode 100644 index 9a3b51037..000000000 --- a/vendor/golang.org/x/xerrors/wrap.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xerrors - -import ( - "reflect" -) - -// A Wrapper provides context around another error. -type Wrapper interface { - // Unwrap returns the next error in the error chain. - // If there is no next error, Unwrap returns nil. - Unwrap() error -} - -// Opaque returns an error with the same error formatting as err -// but that does not match err and cannot be unwrapped. 
-func Opaque(err error) error { - return noWrapper{err} -} - -type noWrapper struct { - error -} - -func (e noWrapper) FormatError(p Printer) (next error) { - if f, ok := e.error.(Formatter); ok { - return f.FormatError(p) - } - p.Print(e.error) - return nil -} - -// Unwrap returns the result of calling the Unwrap method on err, if err implements -// Unwrap. Otherwise, Unwrap returns nil. -func Unwrap(err error) error { - u, ok := err.(Wrapper) - if !ok { - return nil - } - return u.Unwrap() -} - -// Is reports whether any error in err's chain matches target. -// -// An error is considered to match a target if it is equal to that target or if -// it implements a method Is(error) bool such that Is(target) returns true. -func Is(err, target error) bool { - if target == nil { - return err == target - } - - isComparable := reflect.TypeOf(target).Comparable() - for { - if isComparable && err == target { - return true - } - if x, ok := err.(interface{ Is(error) bool }); ok && x.Is(target) { - return true - } - // TODO: consider supporing target.Is(err). This would allow - // user-definable predicates, but also may allow for coping with sloppy - // APIs, thereby making it easier to get away with them. - if err = Unwrap(err); err == nil { - return false - } - } -} - -// As finds the first error in err's chain that matches the type to which target -// points, and if so, sets the target to its value and returns true. An error -// matches a type if it is assignable to the target type, or if it has a method -// As(interface{}) bool such that As(target) returns true. As will panic if target -// is not a non-nil pointer to a type which implements error or is of interface type. -// -// The As method should set the target to its value and return true if err -// matches the type to which target points. -func As(err error, target interface{}) bool { - if target == nil { - panic("errors: target cannot be nil") - } - val := reflect.ValueOf(target) - typ := val.Type() - if typ.Kind() != reflect.Ptr || val.IsNil() { - panic("errors: target must be a non-nil pointer") - } - if e := typ.Elem(); e.Kind() != reflect.Interface && !e.Implements(errorType) { - panic("errors: *target must be interface or implement error") - } - targetType := typ.Elem() - for err != nil { - if reflect.TypeOf(err).AssignableTo(targetType) { - val.Elem().Set(reflect.ValueOf(err)) - return true - } - if x, ok := err.(interface{ As(interface{}) bool }); ok && x.As(target) { - return true - } - err = Unwrap(err) - } - return false -} - -var errorType = reflect.TypeOf((*error)(nil)).Elem() diff --git a/vendor/gomodules.xyz/jsonpatch/v2/LICENSE b/vendor/gomodules.xyz/jsonpatch/v2/LICENSE new file mode 100644 index 000000000..8f71f43fe --- /dev/null +++ b/vendor/gomodules.xyz/jsonpatch/v2/LICENSE @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
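The jsonpatch.go source that follows exposes CreatePatch, which diffs two JSON documents into RFC 6902 operations; a short usage sketch (the documents and field names here are made up for illustration):

package main

import (
	"fmt"

	jsonpatch "gomodules.xyz/jsonpatch/v2"
)

func main() {
	original := []byte(`{"name":"alpha","replicas":1}`)
	modified := []byte(`{"name":"alpha","replicas":3}`)

	// CreatePatch returns one Operation per difference between the documents.
	ops, err := jsonpatch.CreatePatch(original, modified)
	if err != nil {
		panic(err)
	}
	for _, op := range ops {
		// Each Operation marshals to an RFC 6902 entry, e.g.
		// {"op":"replace","path":"/replicas","value":3}
		fmt.Println(op.Json())
	}
}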
+ diff --git a/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go b/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go new file mode 100644 index 000000000..0ffb31560 --- /dev/null +++ b/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go @@ -0,0 +1,334 @@ +package jsonpatch + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "strings" +) + +var errBadJSONDoc = fmt.Errorf("invalid JSON Document") + +type JsonPatchOperation = Operation + +type Operation struct { + Operation string `json:"op"` + Path string `json:"path"` + Value interface{} `json:"value,omitempty"` +} + +func (j *Operation) Json() string { + b, _ := json.Marshal(j) + return string(b) +} + +func (j *Operation) MarshalJSON() ([]byte, error) { + var b bytes.Buffer + b.WriteString("{") + b.WriteString(fmt.Sprintf(`"op":"%s"`, j.Operation)) + b.WriteString(fmt.Sprintf(`,"path":"%s"`, j.Path)) + // Consider omitting Value for non-nullable operations. + if j.Value != nil || j.Operation == "replace" || j.Operation == "add" { + v, err := json.Marshal(j.Value) + if err != nil { + return nil, err + } + b.WriteString(`,"value":`) + b.Write(v) + } + b.WriteString("}") + return b.Bytes(), nil +} + +type ByPath []Operation + +func (a ByPath) Len() int { return len(a) } +func (a ByPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a ByPath) Less(i, j int) bool { return a[i].Path < a[j].Path } + +func NewOperation(op, path string, value interface{}) Operation { + return Operation{Operation: op, Path: path, Value: value} +} + +// CreatePatch creates a patch as specified in http://jsonpatch.com/ +// +// 'a' is original, 'b' is the modified document. Both are to be given as json encoded content. +// The function will return an array of JsonPatchOperations +// +// An error will be returned if any of the two documents are invalid. +func CreatePatch(a, b []byte) ([]Operation, error) { + var aI interface{} + var bI interface{} + err := json.Unmarshal(a, &aI) + if err != nil { + return nil, errBadJSONDoc + } + err = json.Unmarshal(b, &bI) + if err != nil { + return nil, errBadJSONDoc + } + return handleValues(aI, bI, "", []Operation{}) +} + +// Returns true if the values matches (must be json types) +// The types of the values must match, otherwise it will always return false +// If two map[string]interface{} are given, all elements must match. +func matchesValue(av, bv interface{}) bool { + if reflect.TypeOf(av) != reflect.TypeOf(bv) { + return false + } + switch at := av.(type) { + case string: + bt, ok := bv.(string) + if ok && bt == at { + return true + } + case float64: + bt, ok := bv.(float64) + if ok && bt == at { + return true + } + case bool: + bt, ok := bv.(bool) + if ok && bt == at { + return true + } + case map[string]interface{}: + bt, ok := bv.(map[string]interface{}) + if !ok { + return false + } + for key := range at { + if !matchesValue(at[key], bt[key]) { + return false + } + } + for key := range bt { + if !matchesValue(at[key], bt[key]) { + return false + } + } + return true + case []interface{}: + bt, ok := bv.([]interface{}) + if !ok { + return false + } + if len(bt) != len(at) { + return false + } + for key := range at { + if !matchesValue(at[key], bt[key]) { + return false + } + } + for key := range bt { + if !matchesValue(at[key], bt[key]) { + return false + } + } + return true + } + return false +} + +// From http://tools.ietf.org/html/rfc6901#section-4 : +// +// Evaluation of each reference token begins by decoding any escaped +// character sequence. 
This is performed by first transforming any +// occurrence of the sequence '~1' to '/', and then transforming any +// occurrence of the sequence '~0' to '~'. +// TODO decode support: +// var rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~") + +var rfc6901Encoder = strings.NewReplacer("~", "~0", "/", "~1") + +func makePath(path string, newPart interface{}) string { + key := rfc6901Encoder.Replace(fmt.Sprintf("%v", newPart)) + if path == "" { + return "/" + key + } + if strings.HasSuffix(path, "/") { + return path + key + } + return path + "/" + key +} + +// diff returns the (recursive) difference between a and b as an array of JsonPatchOperations. +func diff(a, b map[string]interface{}, path string, patch []Operation) ([]Operation, error) { + for key, bv := range b { + p := makePath(path, key) + av, ok := a[key] + // value was added + if !ok { + patch = append(patch, NewOperation("add", p, bv)) + continue + } + // Types are the same, compare values + var err error + patch, err = handleValues(av, bv, p, patch) + if err != nil { + return nil, err + } + } + // Now add all deleted values as nil + for key := range a { + _, found := b[key] + if !found { + p := makePath(path, key) + + patch = append(patch, NewOperation("remove", p, nil)) + } + } + return patch, nil +} + +func handleValues(av, bv interface{}, p string, patch []Operation) ([]Operation, error) { + { + at := reflect.TypeOf(av) + bt := reflect.TypeOf(bv) + if at == nil && bt == nil { + // do nothing + return patch, nil + } else if at != bt { + // If types have changed, replace completely (preserves null in destination) + return append(patch, NewOperation("replace", p, bv)), nil + } + } + + var err error + switch at := av.(type) { + case map[string]interface{}: + bt := bv.(map[string]interface{}) + patch, err = diff(at, bt, p, patch) + if err != nil { + return nil, err + } + case string, float64, bool: + if !matchesValue(av, bv) { + patch = append(patch, NewOperation("replace", p, bv)) + } + case []interface{}: + bt := bv.([]interface{}) + if isSimpleArray(at) && isSimpleArray(bt) { + patch = append(patch, compareEditDistance(at, bt, p)...) 
+ } else { + n := min(len(at), len(bt)) + for i := len(at) - 1; i >= n; i-- { + patch = append(patch, NewOperation("remove", makePath(p, i), nil)) + } + for i := n; i < len(bt); i++ { + patch = append(patch, NewOperation("add", makePath(p, i), bt[i])) + } + for i := 0; i < n; i++ { + var err error + patch, err = handleValues(at[i], bt[i], makePath(p, i), patch) + if err != nil { + return nil, err + } + } + } + default: + panic(fmt.Sprintf("Unknown type:%T ", av)) + } + return patch, nil +} + +func isBasicType(a interface{}) bool { + switch a.(type) { + case string, float64, bool: + default: + return false + } + return true +} + +func isSimpleArray(a []interface{}) bool { + for i := range a { + switch a[i].(type) { + case string, float64, bool: + default: + val := reflect.ValueOf(a[i]) + if val.Kind() == reflect.Map { + for _, k := range val.MapKeys() { + av := val.MapIndex(k) + if av.Kind() == reflect.Ptr || av.Kind() == reflect.Interface { + if av.IsNil() { + continue + } + av = av.Elem() + } + if av.Kind() != reflect.String && av.Kind() != reflect.Float64 && av.Kind() != reflect.Bool { + return false + } + } + return true + } + return false + } + } + return true +} + +// https://en.wikipedia.org/wiki/Wagner%E2%80%93Fischer_algorithm +// Adapted from https://github.com/texttheater/golang-levenshtein +func compareEditDistance(s, t []interface{}, p string) []Operation { + m := len(s) + n := len(t) + + d := make([][]int, m+1) + for i := 0; i <= m; i++ { + d[i] = make([]int, n+1) + d[i][0] = i + } + for j := 0; j <= n; j++ { + d[0][j] = j + } + + for j := 1; j <= n; j++ { + for i := 1; i <= m; i++ { + if reflect.DeepEqual(s[i-1], t[j-1]) { + d[i][j] = d[i-1][j-1] // no op required + } else { + del := d[i-1][j] + 1 + add := d[i][j-1] + 1 + rep := d[i-1][j-1] + 1 + d[i][j] = min(rep, min(add, del)) + } + } + } + + return backtrace(s, t, p, m, n, d) +} + +func min(x int, y int) int { + if y < x { + return y + } + return x +} + +func backtrace(s, t []interface{}, p string, i int, j int, matrix [][]int) []Operation { + if i > 0 && matrix[i-1][j]+1 == matrix[i][j] { + op := NewOperation("remove", makePath(p, i-1), nil) + return append([]Operation{op}, backtrace(s, t, p, i-1, j, matrix)...) + } + if j > 0 && matrix[i][j-1]+1 == matrix[i][j] { + op := NewOperation("add", makePath(p, i), t[j-1]) + return append([]Operation{op}, backtrace(s, t, p, i, j-1, matrix)...) + } + if i > 0 && j > 0 && matrix[i-1][j-1]+1 == matrix[i][j] { + if isBasicType(s[0]) { + op := NewOperation("replace", makePath(p, i-1), t[j-1]) + return append([]Operation{op}, backtrace(s, t, p, i-1, j-1, matrix)...) + } + + p2, _ := handleValues(s[i-1], t[j-1], makePath(p, i-1), []Operation{}) + return append(p2, backtrace(s, t, p, i-1, j-1, matrix)...) + } + if i > 0 && j > 0 && matrix[i-1][j-1] == matrix[i][j] { + return backtrace(s, t, p, i-1, j-1, matrix) + } + return []Operation{} +} diff --git a/vendor/gopkg.in/gcfg.v1/errors.go b/vendor/gopkg.in/gcfg.v1/errors.go index 853c76021..83a591dac 100644 --- a/vendor/gopkg.in/gcfg.v1/errors.go +++ b/vendor/gopkg.in/gcfg.v1/errors.go @@ -1,8 +1,6 @@ package gcfg -import ( - "gopkg.in/warnings.v0" -) +import warnings "gopkg.in/warnings.v0" // FatalOnly filters the results of a Read*Into invocation and returns only // fatal errors. 
That is, errors (warnings) indicating data for unknown @@ -21,21 +19,39 @@ func isFatal(err error) bool { return !ok } -type extraData struct { +type loc struct { section string subsection *string variable *string } -func (e extraData) Error() string { - s := "can't store data at section \"" + e.section + "\"" - if e.subsection != nil { - s += ", subsection \"" + *e.subsection + "\"" +type extraData struct { + loc +} + +type locErr struct { + msg string + loc +} + +func (l loc) String() string { + s := "section \"" + l.section + "\"" + if l.subsection != nil { + s += ", subsection \"" + *l.subsection + "\"" } - if e.variable != nil { - s += ", variable \"" + *e.variable + "\"" + if l.variable != nil { + s += ", variable \"" + *l.variable + "\"" } return s } +func (e extraData) Error() string { + return "can't store data at " + e.loc.String() +} + +func (e locErr) Error() string { + return e.msg + " at " + e.loc.String() +} + var _ error = extraData{} +var _ error = locErr{} diff --git a/vendor/gopkg.in/gcfg.v1/go1_0.go b/vendor/gopkg.in/gcfg.v1/go1_0.go deleted file mode 100644 index 667021079..000000000 --- a/vendor/gopkg.in/gcfg.v1/go1_0.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !go1.2 - -package gcfg - -type textUnmarshaler interface { - UnmarshalText(text []byte) error -} diff --git a/vendor/gopkg.in/gcfg.v1/go1_2.go b/vendor/gopkg.in/gcfg.v1/go1_2.go deleted file mode 100644 index 6f5843bc7..000000000 --- a/vendor/gopkg.in/gcfg.v1/go1_2.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build go1.2 - -package gcfg - -import ( - "encoding" -) - -type textUnmarshaler encoding.TextUnmarshaler diff --git a/vendor/gopkg.in/gcfg.v1/read.go b/vendor/gopkg.in/gcfg.v1/read.go index 8400cf124..06796653c 100644 --- a/vendor/gopkg.in/gcfg.v1/read.go +++ b/vendor/gopkg.in/gcfg.v1/read.go @@ -1,6 +1,7 @@ package gcfg import ( + "bytes" "fmt" "io" "io/ioutil" @@ -13,6 +14,7 @@ import ( ) var unescape = map[rune]rune{'\\': '\\', '"': '"', 'n': '\n', 't': '\t'} +var utf8Bom = []byte("\ufeff") // no error: invalid literals should be caught by scanner func unquote(s string) string { @@ -183,7 +185,6 @@ func readIntoPass(c *warnings.Collector, config interface{}, fset *token.FileSet } } } - panic("never reached") } func readInto(config interface{}, fset *token.FileSet, file *token.File, @@ -222,6 +223,9 @@ func ReadStringInto(config interface{}, str string) error { // ReadFileInto reads gcfg formatted data from the file filename and sets the // values into the corresponding fields in config. +// +// For compatibility with files created on Windows, the ReadFileInto skips a +// single leading UTF8 BOM sequence if it exists. func ReadFileInto(config interface{}, filename string) error { f, err := os.Open(filename) if err != nil { @@ -232,7 +236,22 @@ func ReadFileInto(config interface{}, filename string) error { if err != nil { return err } + + // Skips a single leading UTF8 BOM sequence if it exists. 
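+ // The UTF-8 BOM is the three-byte sequence 0xEF, 0xBB, 0xBF that some Windows
+ // editors prepend to text files.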
+ src = skipLeadingUtf8Bom(src) + fset := token.NewFileSet() file := fset.AddFile(filename, fset.Base(), len(src)) return readInto(config, fset, file, src) } + +func skipLeadingUtf8Bom(src []byte) []byte { + lengthUtf8Bom := len(utf8Bom) + + if len(src) >= lengthUtf8Bom { + if bytes.Equal(src[:lengthUtf8Bom], utf8Bom) { + return src[lengthUtf8Bom:] + } + } + return src +} diff --git a/vendor/gopkg.in/gcfg.v1/scanner/scanner.go b/vendor/gopkg.in/gcfg.v1/scanner/scanner.go index bbbdbf535..6d0eee916 100644 --- a/vendor/gopkg.in/gcfg.v1/scanner/scanner.go +++ b/vendor/gopkg.in/gcfg.v1/scanner/scanner.go @@ -231,8 +231,8 @@ loop: hasCR = true s.next() } - if s.ch != '\n' { - s.error(offs, "unquoted '\\' must be followed by new line") + if s.ch != '\n' && s.ch != '"' { + s.error(offs, "unquoted '\\' must be followed by new line or double quote") break loop } s.next() diff --git a/vendor/gopkg.in/gcfg.v1/set.go b/vendor/gopkg.in/gcfg.v1/set.go index e85ec155d..73aee5003 100644 --- a/vendor/gopkg.in/gcfg.v1/set.go +++ b/vendor/gopkg.in/gcfg.v1/set.go @@ -2,6 +2,7 @@ package gcfg import ( "bytes" + "encoding" "encoding/gob" "fmt" "math/big" @@ -65,7 +66,7 @@ var setters = []setter{ } func textUnmarshalerSetter(d interface{}, blank bool, val string, t tag) error { - dtu, ok := d.(textUnmarshaler) + dtu, ok := d.(encoding.TextUnmarshaler) if !ok { return errUnsupportedType } @@ -192,7 +193,9 @@ func scanSetter(d interface{}, blank bool, val string, tt tag) error { return types.ScanFully(d, val, 'v') } -func newValue(sect string, vCfg reflect.Value, vType reflect.Type) (reflect.Value, error) { +func newValue(c *warnings.Collector, sect string, vCfg reflect.Value, + vType reflect.Type) (reflect.Value, error) { + // pv := reflect.New(vType) dfltName := "default-" + sect dfltField, _ := fieldFold(vCfg, dfltName) @@ -200,13 +203,11 @@ func newValue(sect string, vCfg reflect.Value, vType reflect.Type) (reflect.Valu if dfltField.IsValid() { b := bytes.NewBuffer(nil) ge := gob.NewEncoder(b) - err = ge.EncodeValue(dfltField) - if err != nil { + if err = c.Collect(ge.EncodeValue(dfltField)); err != nil { return pv, err } gd := gob.NewDecoder(bytes.NewReader(b.Bytes())) - err = gd.DecodeValue(pv.Elem()) - if err != nil { + if err = c.Collect(gd.DecodeValue(pv.Elem())); err != nil { return pv, err } } @@ -222,8 +223,9 @@ func set(c *warnings.Collector, cfg interface{}, sect, sub, name string, } vCfg := vPCfg.Elem() vSect, _ := fieldFold(vCfg, sect) + l := loc{section: sect} if !vSect.IsValid() { - err := extraData{section: sect} + err := extraData{loc: l} return c.Collect(err) } isSubsect := vSect.Kind() == reflect.Map @@ -231,6 +233,7 @@ func set(c *warnings.Collector, cfg interface{}, sect, sub, name string, return nil } if isSubsect { + l.subsection = &sub vst := vSect.Type() if vst.Key().Kind() != reflect.String || vst.Elem().Kind() != reflect.Ptr || @@ -246,8 +249,7 @@ func set(c *warnings.Collector, cfg interface{}, sect, sub, name string, if !pv.IsValid() { vType := vSect.Type().Elem().Elem() var err error - pv, err = newValue(sect, vCfg, vType) - if err != nil { + if pv, err = newValue(c, sect, vCfg, vType); err != nil { return err } vSect.SetMapIndex(k, pv) @@ -257,8 +259,7 @@ func set(c *warnings.Collector, cfg interface{}, sect, sub, name string, panic(fmt.Errorf("field for section must be a map or a struct: "+ "section %q", sect)) } else if sub != "" { - err := extraData{section: sect, subsection: &sub} - return c.Collect(err) + return c.Collect(extraData{loc: l}) } // Empty name is a special value, 
meaning that only the // section/subsection object is to be created, with no values set. @@ -266,14 +267,9 @@ func set(c *warnings.Collector, cfg interface{}, sect, sub, name string, return nil } vVar, t := fieldFold(vSect, name) + l.variable = &name if !vVar.IsValid() { - var err error - if isSubsect { - err = extraData{section: sect, subsection: &sub, variable: &name} - } else { - err = extraData{section: sect, variable: &name} - } - return c.Collect(err) + return c.Collect(extraData{loc: l}) } // vVal is either single-valued var, or newly allocated value within multi-valued var var vVal reflect.Value @@ -316,12 +312,12 @@ func set(c *warnings.Collector, cfg interface{}, sect, sub, name string, break } if err != errUnsupportedType { - return err + return locErr{msg: err.Error(), loc: l} } } if !ok { // in case all setters returned errUnsupportedType - return err + return locErr{msg: err.Error(), loc: l} } if isNew { // set reference if it was dereferenced and newly allocated vVal.Set(vAddr) diff --git a/vendor/k8s.io/client-go/metadata/interface.go b/vendor/k8s.io/client-go/metadata/interface.go new file mode 100644 index 000000000..127c39501 --- /dev/null +++ b/vendor/k8s.io/client-go/metadata/interface.go @@ -0,0 +1,49 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metadata + +import ( + "context" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" +) + +// Interface allows a caller to get the metadata (in the form of PartialObjectMetadata objects) +// from any Kubernetes compatible resource API. +type Interface interface { + Resource(resource schema.GroupVersionResource) Getter +} + +// ResourceInterface contains the set of methods that may be invoked on objects by their metadata. +// Update is not supported by the server, but Patch can be used for the actions Update would handle. +type ResourceInterface interface { + Delete(ctx context.Context, name string, options metav1.DeleteOptions, subresources ...string) error + DeleteCollection(ctx context.Context, options metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(ctx context.Context, name string, options metav1.GetOptions, subresources ...string) (*metav1.PartialObjectMetadata, error) + List(ctx context.Context, opts metav1.ListOptions) (*metav1.PartialObjectMetadataList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, options metav1.PatchOptions, subresources ...string) (*metav1.PartialObjectMetadata, error) +} + +// Getter handles both namespaced and non-namespaced resource types consistently. 
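+// A typical call chain is Resource(gvr).Namespace(ns).Get(ctx, name, opts) for
+// namespaced resources, or Resource(gvr).Get(ctx, name, opts) for cluster-scoped ones.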
+type Getter interface { + Namespace(string) ResourceInterface + ResourceInterface +} diff --git a/vendor/k8s.io/client-go/metadata/metadata.go b/vendor/k8s.io/client-go/metadata/metadata.go new file mode 100644 index 000000000..8152aa124 --- /dev/null +++ b/vendor/k8s.io/client-go/metadata/metadata.go @@ -0,0 +1,331 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metadata + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "time" + + "k8s.io/klog/v2" + + metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/rest" +) + +var deleteScheme = runtime.NewScheme() +var parameterScheme = runtime.NewScheme() +var deleteOptionsCodec = serializer.NewCodecFactory(deleteScheme) +var dynamicParameterCodec = runtime.NewParameterCodec(parameterScheme) + +var versionV1 = schema.GroupVersion{Version: "v1"} + +func init() { + metav1.AddToGroupVersion(parameterScheme, versionV1) + metav1.AddToGroupVersion(deleteScheme, versionV1) +} + +// Client allows callers to retrieve the object metadata for any +// Kubernetes-compatible API endpoint. The client uses the +// meta.k8s.io/v1 PartialObjectMetadata resource to more efficiently +// retrieve just the necessary metadata, but on older servers +// (Kubernetes 1.14 and before) will retrieve the object and then +// convert the metadata. +type Client struct { + client *rest.RESTClient +} + +var _ Interface = &Client{} + +// ConfigFor returns a copy of the provided config with the +// appropriate metadata client defaults set. +func ConfigFor(inConfig *rest.Config) *rest.Config { + config := rest.CopyConfig(inConfig) + config.AcceptContentTypes = "application/vnd.kubernetes.protobuf,application/json" + config.ContentType = "application/vnd.kubernetes.protobuf" + config.NegotiatedSerializer = metainternalversionscheme.Codecs.WithoutConversion() + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + return config +} + +// NewForConfigOrDie creates a new metadata client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) Interface { + ret, err := NewForConfig(c) + if err != nil { + panic(err) + } + return ret +} + +// NewForConfig creates a new metadata client that can retrieve object +// metadata details about any Kubernetes object (core, aggregated, or custom +// resource based) in the form of PartialObjectMetadata objects, or returns +// an error. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). 
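+//
+// A minimal usage sketch (cfg, gvr and ctx are assumed to be provided by the
+// caller; error handling omitted):
+//
+//	md, _ := metadata.NewForConfig(cfg)
+//	pom, _ := md.Resource(gvr).Namespace("default").Get(ctx, "my-object", metav1.GetOptions{})
+//	fmt.Println(pom.Name, pom.UID)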
+func NewForConfig(inConfig *rest.Config) (Interface, error) { + config := ConfigFor(inConfig) + + httpClient, err := rest.HTTPClientFor(config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(config, httpClient) +} + +// NewForConfigAndClient creates a new metadata client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(inConfig *rest.Config, h *http.Client) (Interface, error) { + config := ConfigFor(inConfig) + // for serializing the options + config.GroupVersion = &schema.GroupVersion{} + config.APIPath = "/this-value-should-never-be-sent" + + restClient, err := rest.RESTClientForConfigAndClient(config, h) + if err != nil { + return nil, err + } + + return &Client{client: restClient}, nil +} + +type client struct { + client *Client + namespace string + resource schema.GroupVersionResource +} + +// Resource returns an interface that can access cluster or namespace +// scoped instances of resource. +func (c *Client) Resource(resource schema.GroupVersionResource) Getter { + return &client{client: c, resource: resource} +} + +// Namespace returns an interface that can access namespace-scoped instances of the +// provided resource. +func (c *client) Namespace(ns string) ResourceInterface { + ret := *c + ret.namespace = ns + return &ret +} + +// Delete removes the provided resource from the server. +func (c *client) Delete(ctx context.Context, name string, opts metav1.DeleteOptions, subresources ...string) error { + if len(name) == 0 { + return fmt.Errorf("name is required") + } + // if DeleteOptions are delivered to Negotiator for serialization, + // HTTP-Request header will bring "Content-Type: application/vnd.kubernetes.protobuf" + // apiextensions-apiserver uses unstructuredNegotiatedSerializer to decode the input, + // server-side will reply with 406 errors. + // The special treatment here is to be compatible with CRD Handler + // see: https://github.com/kubernetes/kubernetes/blob/1a845ccd076bbf1b03420fe694c85a5cd3bd6bed/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go#L843 + deleteOptionsByte, err := runtime.Encode(deleteOptionsCodec.LegacyCodec(schema.GroupVersion{Version: "v1"}), &opts) + if err != nil { + return err + } + + result := c.client.client. + Delete(). + AbsPath(append(c.makeURLSegments(name), subresources...)...). + SetHeader("Content-Type", runtime.ContentTypeJSON). + Body(deleteOptionsByte). + Do(ctx) + return result.Error() +} + +// DeleteCollection triggers deletion of all resources in the specified scope (namespace or cluster). +func (c *client) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOptions metav1.ListOptions) error { + // See comment on Delete + deleteOptionsByte, err := runtime.Encode(deleteOptionsCodec.LegacyCodec(schema.GroupVersion{Version: "v1"}), &opts) + if err != nil { + return err + } + + result := c.client.client. + Delete(). + AbsPath(c.makeURLSegments("")...). + SetHeader("Content-Type", runtime.ContentTypeJSON). + Body(deleteOptionsByte). + SpecificallyVersionedParams(&listOptions, dynamicParameterCodec, versionV1). + Do(ctx) + return result.Error() +} + +// Get returns the resource with name from the specified scope (namespace or cluster). 
+func (c *client) Get(ctx context.Context, name string, opts metav1.GetOptions, subresources ...string) (*metav1.PartialObjectMetadata, error) { + if len(name) == 0 { + return nil, fmt.Errorf("name is required") + } + result := c.client.client.Get().AbsPath(append(c.makeURLSegments(name), subresources...)...). + SetHeader("Accept", "application/vnd.kubernetes.protobuf;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json"). + SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). + Do(ctx) + if err := result.Error(); err != nil { + return nil, err + } + obj, err := result.Get() + if runtime.IsNotRegisteredError(err) { + klog.V(5).Infof("Unable to retrieve PartialObjectMetadata: %#v", err) + rawBytes, err := result.Raw() + if err != nil { + return nil, err + } + var partial metav1.PartialObjectMetadata + if err := json.Unmarshal(rawBytes, &partial); err != nil { + return nil, fmt.Errorf("unable to decode returned object as PartialObjectMetadata: %v", err) + } + if !isLikelyObjectMetadata(&partial) { + return nil, fmt.Errorf("object does not appear to match the ObjectMeta schema: %#v", partial) + } + partial.TypeMeta = metav1.TypeMeta{} + return &partial, nil + } + if err != nil { + return nil, err + } + partial, ok := obj.(*metav1.PartialObjectMetadata) + if !ok { + return nil, fmt.Errorf("unexpected object, expected PartialObjectMetadata but got %T", obj) + } + return partial, nil +} + +// List returns all resources within the specified scope (namespace or cluster). +func (c *client) List(ctx context.Context, opts metav1.ListOptions) (*metav1.PartialObjectMetadataList, error) { + result := c.client.client.Get().AbsPath(c.makeURLSegments("")...). + SetHeader("Accept", "application/vnd.kubernetes.protobuf;as=PartialObjectMetadataList;g=meta.k8s.io;v=v1,application/json;as=PartialObjectMetadataList;g=meta.k8s.io;v=v1,application/json"). + SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). + Do(ctx) + if err := result.Error(); err != nil { + return nil, err + } + obj, err := result.Get() + if runtime.IsNotRegisteredError(err) { + klog.V(5).Infof("Unable to retrieve PartialObjectMetadataList: %#v", err) + rawBytes, err := result.Raw() + if err != nil { + return nil, err + } + var partial metav1.PartialObjectMetadataList + if err := json.Unmarshal(rawBytes, &partial); err != nil { + return nil, fmt.Errorf("unable to decode returned object as PartialObjectMetadataList: %v", err) + } + partial.TypeMeta = metav1.TypeMeta{} + return &partial, nil + } + if err != nil { + return nil, err + } + partial, ok := obj.(*metav1.PartialObjectMetadataList) + if !ok { + return nil, fmt.Errorf("unexpected object, expected PartialObjectMetadata but got %T", obj) + } + return partial, nil +} + +// Watch finds all changes to the resources in the specified scope (namespace or cluster). +func (c *client) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.client.Get(). + AbsPath(c.makeURLSegments("")...). + SetHeader("Accept", "application/vnd.kubernetes.protobuf;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json"). + SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). + Timeout(timeout). 
+ Watch(ctx) +} + +// Patch modifies the named resource in the specified scope (namespace or cluster). +func (c *client) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (*metav1.PartialObjectMetadata, error) { + if len(name) == 0 { + return nil, fmt.Errorf("name is required") + } + result := c.client.client. + Patch(pt). + AbsPath(append(c.makeURLSegments(name), subresources...)...). + Body(data). + SetHeader("Accept", "application/vnd.kubernetes.protobuf;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json"). + SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). + Do(ctx) + if err := result.Error(); err != nil { + return nil, err + } + obj, err := result.Get() + if runtime.IsNotRegisteredError(err) { + rawBytes, err := result.Raw() + if err != nil { + return nil, err + } + var partial metav1.PartialObjectMetadata + if err := json.Unmarshal(rawBytes, &partial); err != nil { + return nil, fmt.Errorf("unable to decode returned object as PartialObjectMetadata: %v", err) + } + if !isLikelyObjectMetadata(&partial) { + return nil, fmt.Errorf("object does not appear to match the ObjectMeta schema") + } + partial.TypeMeta = metav1.TypeMeta{} + return &partial, nil + } + if err != nil { + return nil, err + } + partial, ok := obj.(*metav1.PartialObjectMetadata) + if !ok { + return nil, fmt.Errorf("unexpected object, expected PartialObjectMetadata but got %T", obj) + } + return partial, nil +} + +func (c *client) makeURLSegments(name string) []string { + url := []string{} + if len(c.resource.Group) == 0 { + url = append(url, "api") + } else { + url = append(url, "apis", c.resource.Group) + } + url = append(url, c.resource.Version) + + if len(c.namespace) > 0 { + url = append(url, "namespaces", c.namespace) + } + url = append(url, c.resource.Resource) + + if len(name) > 0 { + url = append(url, name) + } + + return url +} + +func isLikelyObjectMetadata(meta *metav1.PartialObjectMetadata) bool { + return len(meta.UID) > 0 || !meta.CreationTimestamp.IsZero() || len(meta.Name) > 0 || len(meta.GenerateName) > 0 +} diff --git a/vendor/k8s.io/klog/.travis.yml b/vendor/k8s.io/klog/.travis.yml new file mode 100644 index 000000000..5677664c2 --- /dev/null +++ b/vendor/k8s.io/klog/.travis.yml @@ -0,0 +1,16 @@ +language: go +go_import_path: k8s.io/klog +dist: xenial +go: + - 1.9.x + - 1.10.x + - 1.11.x + - 1.12.x +script: + - go get -t -v ./... + - diff -u <(echo -n) <(gofmt -d .) + - diff -u <(echo -n) <(golint $(go list -e ./...)) + - go tool vet . || go vet . + - go test -v -race ./... +install: + - go get golang.org/x/lint/golint diff --git a/vendor/k8s.io/klog/CONTRIBUTING.md b/vendor/k8s.io/klog/CONTRIBUTING.md new file mode 100644 index 000000000..574a56abb --- /dev/null +++ b/vendor/k8s.io/klog/CONTRIBUTING.md @@ -0,0 +1,22 @@ +# Contributing Guidelines + +Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://github.com/kubernetes/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). 
Here is an excerpt: + +_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._ + +## Getting Started + +We have full documentation on how to get started contributing here: + +- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests +- [Kubernetes Contributor Guide](http://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](http://git.k8s.io/community/contributors/guide#contributing) +- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet.md) - Common resources for existing developers + +## Mentorship + +- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers! + +## Contact Information + +- [Slack](https://kubernetes.slack.com/messages/sig-architecture) +- [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-architecture) diff --git a/vendor/k8s.io/klog/LICENSE b/vendor/k8s.io/klog/LICENSE new file mode 100644 index 000000000..37ec93a14 --- /dev/null +++ b/vendor/k8s.io/klog/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. 
For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
+ +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. 
+ +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/k8s.io/klog/OWNERS b/vendor/k8s.io/klog/OWNERS new file mode 100644 index 000000000..380e514f2 --- /dev/null +++ b/vendor/k8s.io/klog/OWNERS @@ -0,0 +1,19 @@ +# See the OWNERS docs at https://go.k8s.io/owners +reviewers: + - jayunit100 + - hoegaarden + - andyxning + - neolit123 + - pohly + - yagonobre + - vincepri + - detiber +approvers: + - dims + - thockin + - justinsb + - tallclair + - piosz + - brancz + - DirectXMan12 + - lavalamp diff --git a/vendor/k8s.io/klog/README.md b/vendor/k8s.io/klog/README.md new file mode 100644 index 000000000..841468b4b --- /dev/null +++ b/vendor/k8s.io/klog/README.md @@ -0,0 +1,97 @@ +klog +==== + +klog is a permanent fork of https://github.com/golang/glog. + +## Why was klog created? + +The decision to create klog was one that wasn't made lightly, but it was necessary due to some +drawbacks that are present in [glog](https://github.com/golang/glog). 
Ultimately, the fork was created due to glog not being under active development; this can be seen in the glog README: + +> The code in this repo [...] is not itself under development + +This makes us unable to solve many use cases without a fork. The factors that contributed to needing feature development are listed below: + + * `glog` [presents a lot "gotchas"](https://github.com/kubernetes/kubernetes/issues/61006) and introduces challenges in containerized environments, all of which aren't well documented. + * `glog` doesn't provide an easy way to test logs, which detracts from the stability of software using it + * A long term goal is to implement a logging interface that allows us to add context, change output format, etc. + +Historical context is available here: + + * https://github.com/kubernetes/kubernetes/issues/61006 + * https://github.com/kubernetes/kubernetes/issues/70264 + * https://groups.google.com/forum/#!msg/kubernetes-sig-architecture/wCWiWf3Juzs/hXRVBH90CgAJ + * https://groups.google.com/forum/#!msg/kubernetes-dev/7vnijOMhLS0/1oRiNtigBgAJ + +---- + +How to use klog +=============== +- Replace imports for `github.com/golang/glog` with `k8s.io/klog` +- Use `klog.InitFlags(nil)` explicitly for initializing global flags as we no longer use `init()` method to register the flags +- You can now use `log-file` instead of `log-dir` for logging to a single file (See `examples/log_file/usage_log_file.go`) +- If you want to redirect everything logged using klog somewhere else (say syslog!), you can use `klog.SetOutput()` method and supply a `io.Writer`. (See `examples/set_output/usage_set_output.go`) +- For more logging conventions (See [Logging Conventions](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md)) + +### Coexisting with glog +This package can be used side by side with glog. [This example](examples/coexist_glog/coexist_glog.go) shows how to initialize and syncronize flags from the global `flag.CommandLine` FlagSet. In addition, the example makes use of stderr as combined output by setting `alsologtostderr` (or `logtostderr`) to `true`. + +## Community, discussion, contribution, and support + +Learn how to engage with the Kubernetes community on the [community page](http://kubernetes.io/community/). + +You can reach the maintainers of this project at: + +- [Slack](https://kubernetes.slack.com/messages/sig-architecture) +- [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-architecture) + +### Code of conduct + +Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md). + +---- + +glog +==== + +Leveled execution logs for Go. + +This is an efficient pure Go implementation of leveled logs in the +manner of the open source C++ package + https://github.com/google/glog + +By binding methods to booleans it is possible to use the log package +without paying the expense of evaluating the arguments to the log. +Through the -vmodule flag, the package also provides fine-grained +control over logging at the file level. + +The comment from glog.go introduces the ideas: + + Package glog implements logging analogous to the Google-internal + C++ INFO/ERROR/V setup. It provides functions Info, Warning, + Error, Fatal, plus formatting variants such as Infof. It + also provides V-style logging controlled by the -v and + -vmodule=file=2 flags. 
+ + Basic examples: + + glog.Info("Prepare to repel boarders") + + glog.Fatalf("Initialization failed: %s", err) + + See the documentation for the V function for an explanation + of these examples: + + if glog.V(2) { + glog.Info("Starting transaction...") + } + + glog.V(2).Infoln("Processed", nItems, "elements") + + +The repository contains an open source version of the log package +used inside Google. The master copy of the source lives inside +Google, not here. The code in this repo is for export only and is not itself +under development. Feature requests will be ignored. + +Send bug reports to golang-nuts@googlegroups.com. diff --git a/vendor/k8s.io/klog/RELEASE.md b/vendor/k8s.io/klog/RELEASE.md new file mode 100644 index 000000000..b53eb960c --- /dev/null +++ b/vendor/k8s.io/klog/RELEASE.md @@ -0,0 +1,9 @@ +# Release Process + +The `klog` is released on an as-needed basis. The process is as follows: + +1. An issue is proposing a new release with a changelog since the last release +1. All [OWNERS](OWNERS) must LGTM this release +1. An OWNER runs `git tag -s $VERSION` and inserts the changelog and pushes the tag with `git push $VERSION` +1. The release issue is closed +1. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] kubernetes-template-project $VERSION is released` diff --git a/vendor/k8s.io/klog/SECURITY_CONTACTS b/vendor/k8s.io/klog/SECURITY_CONTACTS new file mode 100644 index 000000000..6128a5869 --- /dev/null +++ b/vendor/k8s.io/klog/SECURITY_CONTACTS @@ -0,0 +1,20 @@ +# Defined below are the security contacts for this repo. +# +# They are the contact point for the Product Security Committee to reach out +# to for triaging and handling of incoming issues. +# +# The below names agree to abide by the +# [Embargo Policy](https://git.k8s.io/security/private-distributors-list.md#embargo-policy) +# and will be removed and replaced if they violate that agreement. +# +# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE +# INSTRUCTIONS AT https://kubernetes.io/security/ + +dims +thockin +justinsb +tallclair +piosz +brancz +DirectXMan12 +lavalamp diff --git a/vendor/k8s.io/klog/code-of-conduct.md b/vendor/k8s.io/klog/code-of-conduct.md new file mode 100644 index 000000000..0d15c00cf --- /dev/null +++ b/vendor/k8s.io/klog/code-of-conduct.md @@ -0,0 +1,3 @@ +# Kubernetes Community Code of Conduct + +Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git a/vendor/k8s.io/klog/klog.go b/vendor/k8s.io/klog/klog.go new file mode 100644 index 000000000..2712ce0af --- /dev/null +++ b/vendor/k8s.io/klog/klog.go @@ -0,0 +1,1308 @@ +// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ +// +// Copyright 2013 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package klog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup. 
+// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as +// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags. +// +// Basic examples: +// +// klog.Info("Prepare to repel boarders") +// +// klog.Fatalf("Initialization failed: %s", err) +// +// See the documentation for the V function for an explanation of these examples: +// +// if klog.V(2) { +// klog.Info("Starting transaction...") +// } +// +// klog.V(2).Infoln("Processed", nItems, "elements") +// +// Log output is buffered and written periodically using Flush. Programs +// should call Flush before exiting to guarantee all log output is written. +// +// By default, all log statements write to standard error. +// This package provides several flags that modify this behavior. +// As a result, flag.Parse must be called before any logging is done. +// +// -logtostderr=true +// Logs are written to standard error instead of to files. +// -alsologtostderr=false +// Logs are written to standard error as well as to files. +// -stderrthreshold=ERROR +// Log events at or above this severity are logged to standard +// error as well as to files. +// -log_dir="" +// Log files will be written to this directory instead of the +// default temporary directory. +// +// Other flags provide aids to debugging. +// +// -log_backtrace_at="" +// When set to a file and line number holding a logging statement, +// such as +// -log_backtrace_at=gopherflakes.go:234 +// a stack trace will be written to the Info log whenever execution +// hits that statement. (Unlike with -vmodule, the ".go" must be +// present.) +// -v=0 +// Enable V-leveled logging at the specified level. +// -vmodule="" +// The syntax of the argument is a comma-separated list of pattern=N, +// where pattern is a literal file name (minus the ".go" suffix) or +// "glob" pattern and N is a V level. For instance, +// -vmodule=gopher*=3 +// sets the V level to 3 in all Go files whose names begin "gopher". +// +package klog + +import ( + "bufio" + "bytes" + "errors" + "flag" + "fmt" + "io" + stdLog "log" + "math" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" +) + +// severity identifies the sort of log: info, warning etc. It also implements +// the flag.Value interface. The -stderrthreshold flag is of type severity and +// should be modified only through the flag.Value interface. The values match +// the corresponding constants in C++. +type severity int32 // sync/atomic int32 + +// These constants identify the log levels in order of increasing severity. +// A message written to a high-severity log file is also written to each +// lower-severity log file. +const ( + infoLog severity = iota + warningLog + errorLog + fatalLog + numSeverity = 4 +) + +const severityChar = "IWEF" + +var severityName = []string{ + infoLog: "INFO", + warningLog: "WARNING", + errorLog: "ERROR", + fatalLog: "FATAL", +} + +// get returns the value of the severity. +func (s *severity) get() severity { + return severity(atomic.LoadInt32((*int32)(s))) +} + +// set sets the value of the severity. +func (s *severity) set(val severity) { + atomic.StoreInt32((*int32)(s), int32(val)) +} + +// String is part of the flag.Value interface. +func (s *severity) String() string { + return strconv.FormatInt(int64(*s), 10) +} + +// Get is part of the flag.Value interface. +func (s *severity) Get() interface{} { + return *s +} + +// Set is part of the flag.Value interface. 
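+// It accepts either a symbolic severity name such as "INFO" or "ERROR", or a
+// numeric value.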
+func (s *severity) Set(value string) error { + var threshold severity + // Is it a known name? + if v, ok := severityByName(value); ok { + threshold = v + } else { + v, err := strconv.ParseInt(value, 10, 32) + if err != nil { + return err + } + threshold = severity(v) + } + logging.stderrThreshold.set(threshold) + return nil +} + +func severityByName(s string) (severity, bool) { + s = strings.ToUpper(s) + for i, name := range severityName { + if name == s { + return severity(i), true + } + } + return 0, false +} + +// OutputStats tracks the number of output lines and bytes written. +type OutputStats struct { + lines int64 + bytes int64 +} + +// Lines returns the number of lines written. +func (s *OutputStats) Lines() int64 { + return atomic.LoadInt64(&s.lines) +} + +// Bytes returns the number of bytes written. +func (s *OutputStats) Bytes() int64 { + return atomic.LoadInt64(&s.bytes) +} + +// Stats tracks the number of lines of output and number of bytes +// per severity level. Values must be read with atomic.LoadInt64. +var Stats struct { + Info, Warning, Error OutputStats +} + +var severityStats = [numSeverity]*OutputStats{ + infoLog: &Stats.Info, + warningLog: &Stats.Warning, + errorLog: &Stats.Error, +} + +// Level is exported because it appears in the arguments to V and is +// the type of the v flag, which can be set programmatically. +// It's a distinct type because we want to discriminate it from logType. +// Variables of type level are only changed under logging.mu. +// The -v flag is read only with atomic ops, so the state of the logging +// module is consistent. + +// Level is treated as a sync/atomic int32. + +// Level specifies a level of verbosity for V logs. *Level implements +// flag.Value; the -v flag is of type Level and should be modified +// only through the flag.Value interface. +type Level int32 + +// get returns the value of the Level. +func (l *Level) get() Level { + return Level(atomic.LoadInt32((*int32)(l))) +} + +// set sets the value of the Level. +func (l *Level) set(val Level) { + atomic.StoreInt32((*int32)(l), int32(val)) +} + +// String is part of the flag.Value interface. +func (l *Level) String() string { + return strconv.FormatInt(int64(*l), 10) +} + +// Get is part of the flag.Value interface. +func (l *Level) Get() interface{} { + return *l +} + +// Set is part of the flag.Value interface. +func (l *Level) Set(value string) error { + v, err := strconv.ParseInt(value, 10, 32) + if err != nil { + return err + } + logging.mu.Lock() + defer logging.mu.Unlock() + logging.setVState(Level(v), logging.vmodule.filter, false) + return nil +} + +// moduleSpec represents the setting of the -vmodule flag. +type moduleSpec struct { + filter []modulePat +} + +// modulePat contains a filter for the -vmodule flag. +// It holds a verbosity level and a file pattern to match. +type modulePat struct { + pattern string + literal bool // The pattern is a literal string + level Level +} + +// match reports whether the file matches the pattern. It uses a string +// comparison if the pattern contains no metacharacters. +func (m *modulePat) match(file string) bool { + if m.literal { + return file == m.pattern + } + match, _ := filepath.Match(m.pattern, file) + return match +} + +func (m *moduleSpec) String() string { + // Lock because the type is not atomic. TODO: clean this up. 
+ logging.mu.Lock() + defer logging.mu.Unlock() + var b bytes.Buffer + for i, f := range m.filter { + if i > 0 { + b.WriteRune(',') + } + fmt.Fprintf(&b, "%s=%d", f.pattern, f.level) + } + return b.String() +} + +// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the +// struct is not exported. +func (m *moduleSpec) Get() interface{} { + return nil +} + +var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N") + +// Syntax: -vmodule=recordio=2,file=1,gfs*=3 +func (m *moduleSpec) Set(value string) error { + var filter []modulePat + for _, pat := range strings.Split(value, ",") { + if len(pat) == 0 { + // Empty strings such as from a trailing comma can be ignored. + continue + } + patLev := strings.Split(pat, "=") + if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 { + return errVmoduleSyntax + } + pattern := patLev[0] + v, err := strconv.ParseInt(patLev[1], 10, 32) + if err != nil { + return errors.New("syntax error: expect comma-separated list of filename=N") + } + if v < 0 { + return errors.New("negative value for vmodule level") + } + if v == 0 { + continue // Ignore. It's harmless but no point in paying the overhead. + } + // TODO: check syntax of filter? + filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)}) + } + logging.mu.Lock() + defer logging.mu.Unlock() + logging.setVState(logging.verbosity, filter, true) + return nil +} + +// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters +// that require filepath.Match to be called to match the pattern. +func isLiteral(pattern string) bool { + return !strings.ContainsAny(pattern, `\*?[]`) +} + +// traceLocation represents the setting of the -log_backtrace_at flag. +type traceLocation struct { + file string + line int +} + +// isSet reports whether the trace location has been specified. +// logging.mu is held. +func (t *traceLocation) isSet() bool { + return t.line > 0 +} + +// match reports whether the specified file and line matches the trace location. +// The argument file name is the full path, not the basename specified in the flag. +// logging.mu is held. +func (t *traceLocation) match(file string, line int) bool { + if t.line != line { + return false + } + if i := strings.LastIndex(file, "/"); i >= 0 { + file = file[i+1:] + } + return t.file == file +} + +func (t *traceLocation) String() string { + // Lock because the type is not atomic. TODO: clean this up. + logging.mu.Lock() + defer logging.mu.Unlock() + return fmt.Sprintf("%s:%d", t.file, t.line) +} + +// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the +// struct is not exported +func (t *traceLocation) Get() interface{} { + return nil +} + +var errTraceSyntax = errors.New("syntax error: expect file.go:234") + +// Syntax: -log_backtrace_at=gopherflakes.go:234 +// Note that unlike vmodule the file extension is included here. +func (t *traceLocation) Set(value string) error { + if value == "" { + // Unset. 
+ t.line = 0 + t.file = "" + } + fields := strings.Split(value, ":") + if len(fields) != 2 { + return errTraceSyntax + } + file, line := fields[0], fields[1] + if !strings.Contains(file, ".") { + return errTraceSyntax + } + v, err := strconv.Atoi(line) + if err != nil { + return errTraceSyntax + } + if v <= 0 { + return errors.New("negative or zero value for level") + } + logging.mu.Lock() + defer logging.mu.Unlock() + t.line = v + t.file = file + return nil +} + +// flushSyncWriter is the interface satisfied by logging destinations. +type flushSyncWriter interface { + Flush() error + Sync() error + io.Writer +} + +// init sets up the defaults and runs flushDaemon. +func init() { + logging.stderrThreshold = errorLog // Default stderrThreshold is ERROR. + logging.setVState(0, nil, false) + logging.logDir = "" + logging.logFile = "" + logging.logFileMaxSizeMB = 1800 + logging.toStderr = true + logging.alsoToStderr = false + logging.skipHeaders = false + logging.addDirHeader = false + logging.skipLogHeaders = false + go logging.flushDaemon() +} + +// InitFlags is for explicitly initializing the flags. +func InitFlags(flagset *flag.FlagSet) { + if flagset == nil { + flagset = flag.CommandLine + } + + flagset.StringVar(&logging.logDir, "log_dir", logging.logDir, "If non-empty, write log files in this directory") + flagset.StringVar(&logging.logFile, "log_file", logging.logFile, "If non-empty, use this log file") + flagset.Uint64Var(&logging.logFileMaxSizeMB, "log_file_max_size", logging.logFileMaxSizeMB, + "Defines the maximum size a log file can grow to. Unit is megabytes. "+ + "If the value is 0, the maximum file size is unlimited.") + flagset.BoolVar(&logging.toStderr, "logtostderr", logging.toStderr, "log to standard error instead of files") + flagset.BoolVar(&logging.alsoToStderr, "alsologtostderr", logging.alsoToStderr, "log to standard error as well as files") + flagset.Var(&logging.verbosity, "v", "number for the log level verbosity") + flagset.BoolVar(&logging.skipHeaders, "add_dir_header", logging.addDirHeader, "If true, adds the file directory to the header") + flagset.BoolVar(&logging.skipHeaders, "skip_headers", logging.skipHeaders, "If true, avoid header prefixes in the log messages") + flagset.BoolVar(&logging.skipLogHeaders, "skip_log_headers", logging.skipLogHeaders, "If true, avoid headers when opening log files") + flagset.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") + flagset.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") + flagset.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") +} + +// Flush flushes all pending log I/O. +func Flush() { + logging.lockAndFlushAll() +} + +// loggingT collects all the global state of the logging setup. +type loggingT struct { + // Boolean flags. Not handled atomically because the flag.Value interface + // does not let us avoid the =true, and that shorthand is necessary for + // compatibility. TODO: does this matter enough to fix? Seems unlikely. + toStderr bool // The -logtostderr flag. + alsoToStderr bool // The -alsologtostderr flag. + + // Level flag. Handled atomically. + stderrThreshold severity // The -stderrthreshold flag. + + // freeList is a list of byte buffers, maintained under freeListMu. + freeList *buffer + // freeListMu maintains the free list. 
It is separate from the main mutex + // so buffers can be grabbed and printed to without holding the main lock, + // for better parallelization. + freeListMu sync.Mutex + + // mu protects the remaining elements of this structure and is + // used to synchronize logging. + mu sync.Mutex + // file holds writer for each of the log types. + file [numSeverity]flushSyncWriter + // pcs is used in V to avoid an allocation when computing the caller's PC. + pcs [1]uintptr + // vmap is a cache of the V Level for each V() call site, identified by PC. + // It is wiped whenever the vmodule flag changes state. + vmap map[uintptr]Level + // filterLength stores the length of the vmodule filter chain. If greater + // than zero, it means vmodule is enabled. It may be read safely + // using sync.LoadInt32, but is only modified under mu. + filterLength int32 + // traceLocation is the state of the -log_backtrace_at flag. + traceLocation traceLocation + // These flags are modified only under lock, although verbosity may be fetched + // safely using atomic.LoadInt32. + vmodule moduleSpec // The state of the -vmodule flag. + verbosity Level // V logging level, the value of the -v flag/ + + // If non-empty, overrides the choice of directory in which to write logs. + // See createLogDirs for the full list of possible destinations. + logDir string + + // If non-empty, specifies the path of the file to write logs. mutually exclusive + // with the log-dir option. + logFile string + + // When logFile is specified, this limiter makes sure the logFile won't exceeds a certain size. When exceeds, the + // logFile will be cleaned up. If this value is 0, no size limitation will be applied to logFile. + logFileMaxSizeMB uint64 + + // If true, do not add the prefix headers, useful when used with SetOutput + skipHeaders bool + + // If true, do not add the headers to log files + skipLogHeaders bool + + // If true, add the file directory to the header + addDirHeader bool +} + +// buffer holds a byte Buffer for reuse. The zero value is ready for use. +type buffer struct { + bytes.Buffer + tmp [64]byte // temporary byte array for creating headers. + next *buffer +} + +var logging loggingT + +// setVState sets a consistent state for V logging. +// l.mu is held. +func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) { + // Turn verbosity off so V will not fire while we are in transition. + logging.verbosity.set(0) + // Ditto for filter length. + atomic.StoreInt32(&logging.filterLength, 0) + + // Set the new filters and wipe the pc->Level map if the filter has changed. + if setFilter { + logging.vmodule.filter = filter + logging.vmap = make(map[uintptr]Level) + } + + // Things are consistent now, so enable filtering and verbosity. + // They are enabled in order opposite to that in V. + atomic.StoreInt32(&logging.filterLength, int32(len(filter))) + logging.verbosity.set(verbosity) +} + +// getBuffer returns a new, ready-to-use buffer. +func (l *loggingT) getBuffer() *buffer { + l.freeListMu.Lock() + b := l.freeList + if b != nil { + l.freeList = b.next + } + l.freeListMu.Unlock() + if b == nil { + b = new(buffer) + } else { + b.next = nil + b.Reset() + } + return b +} + +// putBuffer returns a buffer to the free list. +func (l *loggingT) putBuffer(b *buffer) { + if b.Len() >= 256 { + // Let big buffers die a natural death. + return + } + l.freeListMu.Lock() + b.next = l.freeList + l.freeList = b + l.freeListMu.Unlock() +} + +var timeNow = time.Now // Stubbed out for testing. 
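
For context, a minimal sketch (not part of the vendored file) of how the flag surface registered by InitFlags and the V/Info helpers defined in this file are typically driven from a consuming binary. It assumes the legacy k8s.io/klog v1 import path that this change vendors; computeDetail is a hypothetical placeholder for work you only want to perform when the verbosity check passes.

```go
package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	// Register klog's flags (-v, -vmodule, -logtostderr, -log_file, ...) on the default FlagSet.
	klog.InitFlags(nil)
	// Equivalent to passing -v=3 on the command line; set/parse flags before logging.
	_ = flag.Set("v", "3")
	flag.Parse()
	defer klog.Flush() // flush buffered log I/O on exit

	klog.Info("starting up")
	if klog.V(3) { // Verbose is a bool in klog v1, so it can be tested directly.
		klog.Infof("expensive detail: %v", computeDetail())
	}
}

// computeDetail stands in for work that should only run when -v >= 3.
func computeDetail() int { return 42 }
```

The `if klog.V(3)` guard is the cheaper of the two forms documented on V below: the arguments are only evaluated when verbosity (or a matching -vmodule pattern) enables the call site.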
+ +/* +header formats a log header as defined by the C++ implementation. +It returns a buffer containing the formatted header and the user's file and line number. +The depth specifies how many stack frames above lives the source line to be identified in the log message. + +Log lines have this form: + Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg... +where the fields are defined as follows: + L A single character, representing the log level (eg 'I' for INFO) + mm The month (zero padded; ie May is '05') + dd The day (zero padded) + hh:mm:ss.uuuuuu Time in hours, minutes and fractional seconds + threadid The space-padded thread ID as returned by GetTID() + file The file name + line The line number + msg The user-supplied message +*/ +func (l *loggingT) header(s severity, depth int) (*buffer, string, int) { + _, file, line, ok := runtime.Caller(3 + depth) + if !ok { + file = "???" + line = 1 + } else { + if slash := strings.LastIndex(file, "/"); slash >= 0 { + path := file + file = path[slash+1:] + if l.addDirHeader { + if dirsep := strings.LastIndex(path[:slash], "/"); dirsep >= 0 { + file = path[dirsep+1:] + } + } + } + } + return l.formatHeader(s, file, line), file, line +} + +// formatHeader formats a log header using the provided file name and line number. +func (l *loggingT) formatHeader(s severity, file string, line int) *buffer { + now := timeNow() + if line < 0 { + line = 0 // not a real line number, but acceptable to someDigits + } + if s > fatalLog { + s = infoLog // for safety. + } + buf := l.getBuffer() + if l.skipHeaders { + return buf + } + + // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. + // It's worth about 3X. Fprintf is hard. + _, month, day := now.Date() + hour, minute, second := now.Clock() + // Lmmdd hh:mm:ss.uuuuuu threadid file:line] + buf.tmp[0] = severityChar[s] + buf.twoDigits(1, int(month)) + buf.twoDigits(3, day) + buf.tmp[5] = ' ' + buf.twoDigits(6, hour) + buf.tmp[8] = ':' + buf.twoDigits(9, minute) + buf.tmp[11] = ':' + buf.twoDigits(12, second) + buf.tmp[14] = '.' + buf.nDigits(6, 15, now.Nanosecond()/1000, '0') + buf.tmp[21] = ' ' + buf.nDigits(7, 22, pid, ' ') // TODO: should be TID + buf.tmp[29] = ' ' + buf.Write(buf.tmp[:30]) + buf.WriteString(file) + buf.tmp[0] = ':' + n := buf.someDigits(1, line) + buf.tmp[n+1] = ']' + buf.tmp[n+2] = ' ' + buf.Write(buf.tmp[:n+3]) + return buf +} + +// Some custom tiny helper functions to print the log header efficiently. + +const digits = "0123456789" + +// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i]. +func (buf *buffer) twoDigits(i, d int) { + buf.tmp[i+1] = digits[d%10] + d /= 10 + buf.tmp[i] = digits[d%10] +} + +// nDigits formats an n-digit integer at buf.tmp[i], +// padding with pad on the left. +// It assumes d >= 0. +func (buf *buffer) nDigits(n, i, d int, pad byte) { + j := n - 1 + for ; j >= 0 && d > 0; j-- { + buf.tmp[i+j] = digits[d%10] + d /= 10 + } + for ; j >= 0; j-- { + buf.tmp[i+j] = pad + } +} + +// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i]. +func (buf *buffer) someDigits(i, d int) int { + // Print into the top, then copy down. We know there's space for at least + // a 10-digit number. + j := len(buf.tmp) + for { + j-- + buf.tmp[j] = digits[d%10] + d /= 10 + if d == 0 { + break + } + } + return copy(buf.tmp[i:], buf.tmp[j:]) +} + +func (l *loggingT) println(s severity, args ...interface{}) { + buf, file, line := l.header(s, 0) + fmt.Fprintln(buf, args...) 
+ l.output(s, buf, file, line, false) +} + +func (l *loggingT) print(s severity, args ...interface{}) { + l.printDepth(s, 1, args...) +} + +func (l *loggingT) printDepth(s severity, depth int, args ...interface{}) { + buf, file, line := l.header(s, depth) + fmt.Fprint(buf, args...) + if buf.Bytes()[buf.Len()-1] != '\n' { + buf.WriteByte('\n') + } + l.output(s, buf, file, line, false) +} + +func (l *loggingT) printf(s severity, format string, args ...interface{}) { + buf, file, line := l.header(s, 0) + fmt.Fprintf(buf, format, args...) + if buf.Bytes()[buf.Len()-1] != '\n' { + buf.WriteByte('\n') + } + l.output(s, buf, file, line, false) +} + +// printWithFileLine behaves like print but uses the provided file and line number. If +// alsoLogToStderr is true, the log message always appears on standard error; it +// will also appear in the log file unless --logtostderr is set. +func (l *loggingT) printWithFileLine(s severity, file string, line int, alsoToStderr bool, args ...interface{}) { + buf := l.formatHeader(s, file, line) + fmt.Fprint(buf, args...) + if buf.Bytes()[buf.Len()-1] != '\n' { + buf.WriteByte('\n') + } + l.output(s, buf, file, line, alsoToStderr) +} + +// redirectBuffer is used to set an alternate destination for the logs +type redirectBuffer struct { + w io.Writer +} + +func (rb *redirectBuffer) Sync() error { + return nil +} + +func (rb *redirectBuffer) Flush() error { + return nil +} + +func (rb *redirectBuffer) Write(bytes []byte) (n int, err error) { + return rb.w.Write(bytes) +} + +// SetOutput sets the output destination for all severities +func SetOutput(w io.Writer) { + logging.mu.Lock() + defer logging.mu.Unlock() + for s := fatalLog; s >= infoLog; s-- { + rb := &redirectBuffer{ + w: w, + } + logging.file[s] = rb + } +} + +// SetOutputBySeverity sets the output destination for specific severity +func SetOutputBySeverity(name string, w io.Writer) { + logging.mu.Lock() + defer logging.mu.Unlock() + sev, ok := severityByName(name) + if !ok { + panic(fmt.Sprintf("SetOutputBySeverity(%q): unrecognized severity name", name)) + } + rb := &redirectBuffer{ + w: w, + } + logging.file[sev] = rb +} + +// output writes the data to the log files and releases the buffer. +func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoToStderr bool) { + l.mu.Lock() + if l.traceLocation.isSet() { + if l.traceLocation.match(file, line) { + buf.Write(stacks(false)) + } + } + data := buf.Bytes() + if l.toStderr { + os.Stderr.Write(data) + } else { + if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() { + os.Stderr.Write(data) + } + + if logging.logFile != "" { + // Since we are using a single log file, all of the items in l.file array + // will point to the same file, so just use one of them to write data. + if l.file[infoLog] == nil { + if err := l.createFiles(infoLog); err != nil { + os.Stderr.Write(data) // Make sure the message appears somewhere. + l.exit(err) + } + } + l.file[infoLog].Write(data) + } else { + if l.file[s] == nil { + if err := l.createFiles(s); err != nil { + os.Stderr.Write(data) // Make sure the message appears somewhere. + l.exit(err) + } + } + + switch s { + case fatalLog: + l.file[fatalLog].Write(data) + fallthrough + case errorLog: + l.file[errorLog].Write(data) + fallthrough + case warningLog: + l.file[warningLog].Write(data) + fallthrough + case infoLog: + l.file[infoLog].Write(data) + } + } + } + if s == fatalLog { + // If we got here via Exit rather than Fatal, print no stacks. 
+ if atomic.LoadUint32(&fatalNoStacks) > 0 { + l.mu.Unlock() + timeoutFlush(10 * time.Second) + os.Exit(1) + } + // Dump all goroutine stacks before exiting. + // First, make sure we see the trace for the current goroutine on standard error. + // If -logtostderr has been specified, the loop below will do that anyway + // as the first stack in the full dump. + if !l.toStderr { + os.Stderr.Write(stacks(false)) + } + // Write the stack trace for all goroutines to the files. + trace := stacks(true) + logExitFunc = func(error) {} // If we get a write error, we'll still exit below. + for log := fatalLog; log >= infoLog; log-- { + if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set. + f.Write(trace) + } + } + l.mu.Unlock() + timeoutFlush(10 * time.Second) + os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway. + } + l.putBuffer(buf) + l.mu.Unlock() + if stats := severityStats[s]; stats != nil { + atomic.AddInt64(&stats.lines, 1) + atomic.AddInt64(&stats.bytes, int64(len(data))) + } +} + +// timeoutFlush calls Flush and returns when it completes or after timeout +// elapses, whichever happens first. This is needed because the hooks invoked +// by Flush may deadlock when klog.Fatal is called from a hook that holds +// a lock. +func timeoutFlush(timeout time.Duration) { + done := make(chan bool, 1) + go func() { + Flush() // calls logging.lockAndFlushAll() + done <- true + }() + select { + case <-done: + case <-time.After(timeout): + fmt.Fprintln(os.Stderr, "klog: Flush took longer than", timeout) + } +} + +// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines. +func stacks(all bool) []byte { + // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though. + n := 10000 + if all { + n = 100000 + } + var trace []byte + for i := 0; i < 5; i++ { + trace = make([]byte, n) + nbytes := runtime.Stack(trace, all) + if nbytes < len(trace) { + return trace[:nbytes] + } + n *= 2 + } + return trace +} + +// logExitFunc provides a simple mechanism to override the default behavior +// of exiting on error. Used in testing and to guarantee we reach a required exit +// for fatal logs. Instead, exit could be a function rather than a method but that +// would make its use clumsier. +var logExitFunc func(error) + +// exit is called if there is trouble creating or writing log files. +// It flushes the logs and exits the program; there's no point in hanging around. +// l.mu is held. +func (l *loggingT) exit(err error) { + fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err) + // If logExitFunc is set, we do that instead of exiting. + if logExitFunc != nil { + logExitFunc(err) + return + } + l.flushAll() + os.Exit(2) +} + +// syncBuffer joins a bufio.Writer to its underlying file, providing access to the +// file's Sync method and providing a wrapper for the Write method that provides log +// file rotation. There are conflicting methods, so the file cannot be embedded. +// l.mu is held for all its methods. +type syncBuffer struct { + logger *loggingT + *bufio.Writer + file *os.File + sev severity + nbytes uint64 // The number of bytes written to this file + maxbytes uint64 // The max number of bytes this syncBuffer.file can hold before cleaning up. +} + +func (sb *syncBuffer) Sync() error { + return sb.file.Sync() +} + +// CalculateMaxSize returns the real max size in bytes after considering the default max size and the flag options. 
+func CalculateMaxSize() uint64 { + if logging.logFile != "" { + if logging.logFileMaxSizeMB == 0 { + // If logFileMaxSizeMB is zero, we don't have limitations on the log size. + return math.MaxUint64 + } + // Flag logFileMaxSizeMB is in MB for user convenience. + return logging.logFileMaxSizeMB * 1024 * 1024 + } + // If "log_file" flag is not specified, the target file (sb.file) will be cleaned up when reaches a fixed size. + return MaxSize +} + +func (sb *syncBuffer) Write(p []byte) (n int, err error) { + if sb.nbytes+uint64(len(p)) >= sb.maxbytes { + if err := sb.rotateFile(time.Now(), false); err != nil { + sb.logger.exit(err) + } + } + n, err = sb.Writer.Write(p) + sb.nbytes += uint64(n) + if err != nil { + sb.logger.exit(err) + } + return +} + +// rotateFile closes the syncBuffer's file and starts a new one. +// The startup argument indicates whether this is the initial startup of klog. +// If startup is true, existing files are opened for appending instead of truncated. +func (sb *syncBuffer) rotateFile(now time.Time, startup bool) error { + if sb.file != nil { + sb.Flush() + sb.file.Close() + } + var err error + sb.file, _, err = create(severityName[sb.sev], now, startup) + sb.nbytes = 0 + if err != nil { + return err + } + + sb.Writer = bufio.NewWriterSize(sb.file, bufferSize) + + if sb.logger.skipLogHeaders { + return nil + } + + // Write header. + var buf bytes.Buffer + fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05")) + fmt.Fprintf(&buf, "Running on machine: %s\n", host) + fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH) + fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n") + n, err := sb.file.Write(buf.Bytes()) + sb.nbytes += uint64(n) + return err +} + +// bufferSize sizes the buffer associated with each log file. It's large +// so that log records can accumulate without the logging thread blocking +// on disk I/O. The flushDaemon will block instead. +const bufferSize = 256 * 1024 + +// createFiles creates all the log files for severity from sev down to infoLog. +// l.mu is held. +func (l *loggingT) createFiles(sev severity) error { + now := time.Now() + // Files are created in decreasing severity order, so as soon as we find one + // has already been created, we can stop. + for s := sev; s >= infoLog && l.file[s] == nil; s-- { + sb := &syncBuffer{ + logger: l, + sev: s, + maxbytes: CalculateMaxSize(), + } + if err := sb.rotateFile(now, true); err != nil { + return err + } + l.file[s] = sb + } + return nil +} + +const flushInterval = 5 * time.Second + +// flushDaemon periodically flushes the log file buffers. +func (l *loggingT) flushDaemon() { + for range time.NewTicker(flushInterval).C { + l.lockAndFlushAll() + } +} + +// lockAndFlushAll is like flushAll but locks l.mu first. +func (l *loggingT) lockAndFlushAll() { + l.mu.Lock() + l.flushAll() + l.mu.Unlock() +} + +// flushAll flushes all the logs and attempts to "sync" their data to disk. +// l.mu is held. +func (l *loggingT) flushAll() { + // Flush from fatal down, in case there's trouble flushing. + for s := fatalLog; s >= infoLog; s-- { + file := l.file[s] + if file != nil { + file.Flush() // ignore error + file.Sync() // ignore error + } + } +} + +// CopyStandardLogTo arranges for messages written to the Go "log" package's +// default logs to also appear in the Google logs for the named and lower +// severities. 
Subsequent changes to the standard log's default output location +// or format may break this behavior. +// +// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not +// recognized, CopyStandardLogTo panics. +func CopyStandardLogTo(name string) { + sev, ok := severityByName(name) + if !ok { + panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name)) + } + // Set a log format that captures the user's file and line: + // d.go:23: message + stdLog.SetFlags(stdLog.Lshortfile) + stdLog.SetOutput(logBridge(sev)) +} + +// logBridge provides the Write method that enables CopyStandardLogTo to connect +// Go's standard logs to the logs provided by this package. +type logBridge severity + +// Write parses the standard logging line and passes its components to the +// logger for severity(lb). +func (lb logBridge) Write(b []byte) (n int, err error) { + var ( + file = "???" + line = 1 + text string + ) + // Split "d.go:23: message" into "d.go", "23", and "message". + if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 { + text = fmt.Sprintf("bad log format: %s", b) + } else { + file = string(parts[0]) + text = string(parts[2][1:]) // skip leading space + line, err = strconv.Atoi(string(parts[1])) + if err != nil { + text = fmt.Sprintf("bad line number: %s", b) + line = 1 + } + } + // printWithFileLine with alsoToStderr=true, so standard log messages + // always appear on standard error. + logging.printWithFileLine(severity(lb), file, line, true, text) + return len(b), nil +} + +// setV computes and remembers the V level for a given PC +// when vmodule is enabled. +// File pattern matching takes the basename of the file, stripped +// of its .go suffix, and uses filepath.Match, which is a little more +// general than the *? matching used in C++. +// l.mu is held. +func (l *loggingT) setV(pc uintptr) Level { + fn := runtime.FuncForPC(pc) + file, _ := fn.FileLine(pc) + // The file is something like /a/b/c/d.go. We want just the d. + if strings.HasSuffix(file, ".go") { + file = file[:len(file)-3] + } + if slash := strings.LastIndex(file, "/"); slash >= 0 { + file = file[slash+1:] + } + for _, filter := range l.vmodule.filter { + if filter.match(file) { + l.vmap[pc] = filter.level + return filter.level + } + } + l.vmap[pc] = 0 + return 0 +} + +// Verbose is a boolean type that implements Infof (like Printf) etc. +// See the documentation of V for more information. +type Verbose bool + +// V reports whether verbosity at the call site is at least the requested level. +// The returned value is a boolean of type Verbose, which implements Info, Infoln +// and Infof. These methods will write to the Info log if called. +// Thus, one may write either +// if klog.V(2) { klog.Info("log this") } +// or +// klog.V(2).Info("log this") +// The second form is shorter but the first is cheaper if logging is off because it does +// not evaluate its arguments. +// +// Whether an individual call to V generates a log record depends on the setting of +// the -v and --vmodule flags; both are off by default. If the level in the call to +// V is at least the value of -v, or of -vmodule for the source file containing the +// call, the V call will log. +func V(level Level) Verbose { + // This function tries hard to be cheap unless there's work to do. + // The fast path is two atomic loads and compares. + + // Here is a cheap but safe test to see if V logging is enabled globally. 
+ if logging.verbosity.get() >= level { + return Verbose(true) + } + + // It's off globally but it vmodule may still be set. + // Here is another cheap but safe test to see if vmodule is enabled. + if atomic.LoadInt32(&logging.filterLength) > 0 { + // Now we need a proper lock to use the logging structure. The pcs field + // is shared so we must lock before accessing it. This is fairly expensive, + // but if V logging is enabled we're slow anyway. + logging.mu.Lock() + defer logging.mu.Unlock() + if runtime.Callers(2, logging.pcs[:]) == 0 { + return Verbose(false) + } + v, ok := logging.vmap[logging.pcs[0]] + if !ok { + v = logging.setV(logging.pcs[0]) + } + return Verbose(v >= level) + } + return Verbose(false) +} + +// Info is equivalent to the global Info function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) Info(args ...interface{}) { + if v { + logging.print(infoLog, args...) + } +} + +// Infoln is equivalent to the global Infoln function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) Infoln(args ...interface{}) { + if v { + logging.println(infoLog, args...) + } +} + +// Infof is equivalent to the global Infof function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) Infof(format string, args ...interface{}) { + if v { + logging.printf(infoLog, format, args...) + } +} + +// Info logs to the INFO log. +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Info(args ...interface{}) { + logging.print(infoLog, args...) +} + +// InfoDepth acts as Info but uses depth to determine which call frame to log. +// InfoDepth(0, "msg") is the same as Info("msg"). +func InfoDepth(depth int, args ...interface{}) { + logging.printDepth(infoLog, depth, args...) +} + +// Infoln logs to the INFO log. +// Arguments are handled in the manner of fmt.Println; a newline is always appended. +func Infoln(args ...interface{}) { + logging.println(infoLog, args...) +} + +// Infof logs to the INFO log. +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Infof(format string, args ...interface{}) { + logging.printf(infoLog, format, args...) +} + +// Warning logs to the WARNING and INFO logs. +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Warning(args ...interface{}) { + logging.print(warningLog, args...) +} + +// WarningDepth acts as Warning but uses depth to determine which call frame to log. +// WarningDepth(0, "msg") is the same as Warning("msg"). +func WarningDepth(depth int, args ...interface{}) { + logging.printDepth(warningLog, depth, args...) +} + +// Warningln logs to the WARNING and INFO logs. +// Arguments are handled in the manner of fmt.Println; a newline is always appended. +func Warningln(args ...interface{}) { + logging.println(warningLog, args...) +} + +// Warningf logs to the WARNING and INFO logs. +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Warningf(format string, args ...interface{}) { + logging.printf(warningLog, format, args...) +} + +// Error logs to the ERROR, WARNING, and INFO logs. +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Error(args ...interface{}) { + logging.print(errorLog, args...) +} + +// ErrorDepth acts as Error but uses depth to determine which call frame to log. +// ErrorDepth(0, "msg") is the same as Error("msg"). 
+func ErrorDepth(depth int, args ...interface{}) { + logging.printDepth(errorLog, depth, args...) +} + +// Errorln logs to the ERROR, WARNING, and INFO logs. +// Arguments are handled in the manner of fmt.Println; a newline is always appended. +func Errorln(args ...interface{}) { + logging.println(errorLog, args...) +} + +// Errorf logs to the ERROR, WARNING, and INFO logs. +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Errorf(format string, args ...interface{}) { + logging.printf(errorLog, format, args...) +} + +// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, +// including a stack trace of all running goroutines, then calls os.Exit(255). +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Fatal(args ...interface{}) { + logging.print(fatalLog, args...) +} + +// FatalDepth acts as Fatal but uses depth to determine which call frame to log. +// FatalDepth(0, "msg") is the same as Fatal("msg"). +func FatalDepth(depth int, args ...interface{}) { + logging.printDepth(fatalLog, depth, args...) +} + +// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, +// including a stack trace of all running goroutines, then calls os.Exit(255). +// Arguments are handled in the manner of fmt.Println; a newline is always appended. +func Fatalln(args ...interface{}) { + logging.println(fatalLog, args...) +} + +// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs, +// including a stack trace of all running goroutines, then calls os.Exit(255). +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Fatalf(format string, args ...interface{}) { + logging.printf(fatalLog, format, args...) +} + +// fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks. +// It allows Exit and relatives to use the Fatal logs. +var fatalNoStacks uint32 + +// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Exit(args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.print(fatalLog, args...) +} + +// ExitDepth acts as Exit but uses depth to determine which call frame to log. +// ExitDepth(0, "msg") is the same as Exit("msg"). +func ExitDepth(depth int, args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.printDepth(fatalLog, depth, args...) +} + +// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). +func Exitln(args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.println(fatalLog, args...) +} + +// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Exitf(format string, args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.printf(fatalLog, format, args...) +} diff --git a/vendor/k8s.io/klog/klog_file.go b/vendor/k8s.io/klog/klog_file.go new file mode 100644 index 000000000..e4010ad4d --- /dev/null +++ b/vendor/k8s.io/klog/klog_file.go @@ -0,0 +1,139 @@ +// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ +// +// Copyright 2013 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// File I/O for logs. + +package klog + +import ( + "errors" + "fmt" + "os" + "os/user" + "path/filepath" + "strings" + "sync" + "time" +) + +// MaxSize is the maximum size of a log file in bytes. +var MaxSize uint64 = 1024 * 1024 * 1800 + +// logDirs lists the candidate directories for new log files. +var logDirs []string + +func createLogDirs() { + if logging.logDir != "" { + logDirs = append(logDirs, logging.logDir) + } + logDirs = append(logDirs, os.TempDir()) +} + +var ( + pid = os.Getpid() + program = filepath.Base(os.Args[0]) + host = "unknownhost" + userName = "unknownuser" +) + +func init() { + h, err := os.Hostname() + if err == nil { + host = shortHostname(h) + } + + current, err := user.Current() + if err == nil { + userName = current.Username + } + + // Sanitize userName since it may contain filepath separators on Windows. + userName = strings.Replace(userName, `\`, "_", -1) +} + +// shortHostname returns its argument, truncating at the first period. +// For instance, given "www.google.com" it returns "www". +func shortHostname(hostname string) string { + if i := strings.Index(hostname, "."); i >= 0 { + return hostname[:i] + } + return hostname +} + +// logName returns a new log file name containing tag, with start time t, and +// the name for the symlink for tag. +func logName(tag string, t time.Time) (name, link string) { + name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d", + program, + host, + userName, + tag, + t.Year(), + t.Month(), + t.Day(), + t.Hour(), + t.Minute(), + t.Second(), + pid) + return name, program + "." + tag +} + +var onceLogDirs sync.Once + +// create creates a new log file and returns the file and its filename, which +// contains tag ("INFO", "FATAL", etc.) and t. If the file is created +// successfully, create also attempts to update the symlink for that tag, ignoring +// errors. +// The startup argument indicates whether this is the initial startup of klog. +// If startup is true, existing files are opened for appending instead of truncated. +func create(tag string, t time.Time, startup bool) (f *os.File, filename string, err error) { + if logging.logFile != "" { + f, err := openOrCreate(logging.logFile, startup) + if err == nil { + return f, logging.logFile, nil + } + return nil, "", fmt.Errorf("log: unable to create log: %v", err) + } + onceLogDirs.Do(createLogDirs) + if len(logDirs) == 0 { + return nil, "", errors.New("log: no log dirs") + } + name, link := logName(tag, t) + var lastErr error + for _, dir := range logDirs { + fname := filepath.Join(dir, name) + f, err := openOrCreate(fname, startup) + if err == nil { + symlink := filepath.Join(dir, link) + os.Remove(symlink) // ignore err + os.Symlink(name, symlink) // ignore err + return f, fname, nil + } + lastErr = err + } + return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr) +} + +// The startup argument indicates whether this is the initial startup of klog. +// If startup is true, existing files are opened for appending instead of truncated. 
+func openOrCreate(name string, startup bool) (*os.File, error) { + if startup { + f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666) + return f, err + } + f, err := os.Create(name) + return f, err +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 2de4c7a76..cf4f6f5d2 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -52,8 +52,8 @@ github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/mock # github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab ## explicit github.com/JeffAshton/win_pdh -# github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd -## explicit +# github.com/MakeNowJust/heredoc v1.0.0 +## explicit; go 1.12 github.com/MakeNowJust/heredoc # github.com/Microsoft/go-winio v0.4.17 ## explicit; go 1.12 @@ -275,7 +275,7 @@ github.com/emirpasic/gods/utils # github.com/euank/go-kmsg-parser v2.0.0+incompatible ## explicit github.com/euank/go-kmsg-parser/kmsgparser -# github.com/evanphx/json-patch v4.12.0+incompatible +# github.com/evanphx/json-patch v5.6.0+incompatible ## explicit github.com/evanphx/json-patch # github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d @@ -290,7 +290,7 @@ github.com/felixge/httpsnoop # github.com/form3tech-oss/jwt-go v3.2.3+incompatible ## explicit github.com/form3tech-oss/jwt-go -# github.com/fsnotify/fsnotify v1.4.9 +# github.com/fsnotify/fsnotify v1.5.1 ## explicit; go 1.13 github.com/fsnotify/fsnotify # github.com/fsouza/go-dockerclient v1.7.1 @@ -439,7 +439,7 @@ github.com/google/gofuzz/bytesource # github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 ## explicit; go 1.13 github.com/google/shlex -# github.com/google/uuid v1.1.2 +# github.com/google/uuid v1.2.0 ## explicit github.com/google/uuid # github.com/googleapis/gax-go/v2 v2.0.5 @@ -509,8 +509,8 @@ github.com/hpcloud/tail/ratelimiter github.com/hpcloud/tail/util github.com/hpcloud/tail/watch github.com/hpcloud/tail/winfile -# github.com/imdario/mergo v0.3.7 -## explicit +# github.com/imdario/mergo v0.3.12 +## explicit; go 1.13 github.com/imdario/mergo # github.com/inconshreveable/mousetrap v1.0.0 ## explicit @@ -633,16 +633,14 @@ github.com/onsi/ginkgo/reporters/stenographer github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty github.com/onsi/ginkgo/types -# github.com/onsi/gomega v1.10.1 -## explicit +# github.com/onsi/gomega v1.18.1 +## explicit; go 1.16 github.com/onsi/gomega github.com/onsi/gomega/format github.com/onsi/gomega/gstruct github.com/onsi/gomega/gstruct/errors -github.com/onsi/gomega/internal/assertion -github.com/onsi/gomega/internal/asyncassertion -github.com/onsi/gomega/internal/oraclematcher -github.com/onsi/gomega/internal/testingtsupport +github.com/onsi/gomega/internal +github.com/onsi/gomega/internal/gutil github.com/onsi/gomega/matchers github.com/onsi/gomega/matchers/support/goraph/bipartitegraph github.com/onsi/gomega/matchers/support/goraph/edge @@ -692,7 +690,7 @@ github.com/opencontainers/selinux/go-selinux github.com/opencontainers/selinux/go-selinux/label github.com/opencontainers/selinux/pkg/pwalk github.com/opencontainers/selinux/pkg/pwalkdir -# github.com/openshift/api v0.0.0-20220525145417-ee5b62754c68 +# github.com/openshift/api v0.0.0-20220531073726-6c4f186339a7 ## explicit; go 1.16 github.com/openshift/api/apps/v1 github.com/openshift/api/authorization/v1 @@ -702,6 +700,8 @@ github.com/openshift/api/config/v1 github.com/openshift/api/image/docker10 github.com/openshift/api/image/dockerpre012 
github.com/openshift/api/image/v1 +github.com/openshift/api/machine/v1 +github.com/openshift/api/machine/v1beta1 github.com/openshift/api/network/v1 github.com/openshift/api/oauth/v1 github.com/openshift/api/operator/v1 @@ -763,6 +763,12 @@ github.com/openshift/client-go/template/clientset/versioned/typed/template/v1 github.com/openshift/client-go/user/clientset/versioned github.com/openshift/client-go/user/clientset/versioned/scheme github.com/openshift/client-go/user/clientset/versioned/typed/user/v1 +# github.com/openshift/cluster-api-actuator-pkg v0.0.0-20220726100012-d4179aa1372d +## explicit; go 1.18 +github.com/openshift/cluster-api-actuator-pkg/pkg/framework +# github.com/openshift/cluster-autoscaler-operator v0.0.1-0.20220327153331-e850c080b4f5 +## explicit; go 1.17 +github.com/openshift/cluster-autoscaler-operator/pkg/apis/autoscaling/v1 # github.com/openshift/library-go v0.0.0-20220525173854-9b950a41acdc ## explicit; go 1.17 github.com/openshift/library-go/pkg/apps/appsserialization @@ -792,6 +798,13 @@ github.com/openshift/library-go/pkg/operator/resource/resourceread github.com/openshift/library-go/pkg/operator/v1helpers github.com/openshift/library-go/pkg/serviceability github.com/openshift/library-go/test/library/metrics +# github.com/openshift/machine-api-operator v0.2.1-0.20220608065814-f76a8f3ab734 +## explicit; go 1.18 +github.com/openshift/machine-api-operator/pkg/util/lifecyclehooks +github.com/openshift/machine-api-operator/pkg/webhooks +# github.com/openshift/machine-api-provider-openstack v0.0.0-20220707045855-23e8c4523dac +## explicit; go 1.18 +github.com/openshift/machine-api-provider-openstack/pkg/apis/openstackproviderconfig/v1alpha1 # github.com/openshift/origin v1.5.0-alpha.3.0.20220815155142-e6728e5e1d97 ## explicit; go 1.18 github.com/openshift/origin/pkg/monitor @@ -863,6 +876,7 @@ github.com/pmezard/go-difflib/difflib github.com/prometheus/client_golang/api github.com/prometheus/client_golang/api/prometheus/v1 github.com/prometheus/client_golang/prometheus +github.com/prometheus/client_golang/prometheus/collectors github.com/prometheus/client_golang/prometheus/internal github.com/prometheus/client_golang/prometheus/promhttp github.com/prometheus/client_golang/prometheus/testutil @@ -940,10 +954,12 @@ github.com/vishvananda/netlink/nl # github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df ## explicit; go 1.12 github.com/vishvananda/netns -# github.com/vmware/govmomi v0.20.3 -## explicit +# github.com/vmware/govmomi v0.27.4 +## explicit; go 1.14 github.com/vmware/govmomi github.com/vmware/govmomi/find +github.com/vmware/govmomi/history +github.com/vmware/govmomi/internal github.com/vmware/govmomi/list github.com/vmware/govmomi/lookup github.com/vmware/govmomi/lookup/methods @@ -955,12 +971,14 @@ github.com/vmware/govmomi/pbm/methods github.com/vmware/govmomi/pbm/types github.com/vmware/govmomi/property github.com/vmware/govmomi/session +github.com/vmware/govmomi/session/keepalive github.com/vmware/govmomi/sts github.com/vmware/govmomi/sts/internal github.com/vmware/govmomi/task github.com/vmware/govmomi/vapi/internal github.com/vmware/govmomi/vapi/rest github.com/vmware/govmomi/vapi/tags +github.com/vmware/govmomi/view github.com/vmware/govmomi/vim25 github.com/vmware/govmomi/vim25/debug github.com/vmware/govmomi/vim25/methods @@ -1092,13 +1110,13 @@ go.starlark.net/resolve go.starlark.net/starlark go.starlark.net/starlarkstruct go.starlark.net/syntax -# go.uber.org/atomic v1.7.0 +# go.uber.org/atomic v1.8.0 ## explicit; go 1.13 go.uber.org/atomic 
# go.uber.org/multierr v1.6.0 ## explicit; go 1.12 go.uber.org/multierr -# go.uber.org/zap v1.19.0 +# go.uber.org/zap v1.19.1 ## explicit; go 1.13 go.uber.org/zap go.uber.org/zap/buffer @@ -1202,10 +1220,9 @@ golang.org/x/text/width # golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 ## explicit golang.org/x/time/rate -# golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 -## explicit; go 1.11 -golang.org/x/xerrors -golang.org/x/xerrors/internal +# gomodules.xyz/jsonpatch/v2 v2.2.0 +## explicit; go 1.12 +gomodules.xyz/jsonpatch/v2 # google.golang.org/api v0.46.0 ## explicit; go 1.11 google.golang.org/api/compute/v0.alpha @@ -1336,7 +1353,7 @@ google.golang.org/protobuf/types/known/wrapperspb # gopkg.in/fsnotify.v1 v1.4.7 ## explicit gopkg.in/fsnotify.v1 -# gopkg.in/gcfg.v1 v1.2.0 +# gopkg.in/gcfg.v1 v1.2.3 ## explicit gopkg.in/gcfg.v1 gopkg.in/gcfg.v1/scanner @@ -1419,7 +1436,7 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b ## explicit gopkg.in/yaml.v3 -# k8s.io/api v0.24.0 => github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20220601165048-899fd9fb835c +# k8s.io/api v0.24.2 => github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20220601165048-899fd9fb835c ## explicit; go 1.16 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -1469,7 +1486,7 @@ k8s.io/api/scheduling/v1beta1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apiextensions-apiserver v0.24.0 => github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20220601165048-899fd9fb835c +# k8s.io/apiextensions-apiserver v0.24.2 => github.com/openshift/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20220601165048-899fd9fb835c ## explicit; go 1.16 k8s.io/apiextensions-apiserver/pkg/apihelpers k8s.io/apiextensions-apiserver/pkg/apis/apiextensions @@ -1517,7 +1534,7 @@ k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition k8s.io/apiextensions-apiserver/test/integration k8s.io/apiextensions-apiserver/test/integration/fixtures k8s.io/apiextensions-apiserver/third_party/forked/celopenapi/model -# k8s.io/apimachinery v0.24.0 => github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20220601165048-899fd9fb835c +# k8s.io/apimachinery v0.24.2 => github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20220601165048-899fd9fb835c ## explicit; go 1.16 k8s.io/apimachinery/pkg/api/apitesting k8s.io/apimachinery/pkg/api/equality @@ -1581,7 +1598,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.24.0 => github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20220601165048-899fd9fb835c +# k8s.io/apiserver v0.24.1 => github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20220601165048-899fd9fb835c ## explicit; go 1.16 k8s.io/apiserver/pkg/admission k8s.io/apiserver/pkg/admission/configuration @@ -1711,12 +1728,12 @@ k8s.io/apiserver/plugin/pkg/audit/truncate k8s.io/apiserver/plugin/pkg/audit/webhook k8s.io/apiserver/plugin/pkg/authenticator/token/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook -# k8s.io/cli-runtime v0.24.0 => github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20220601165048-899fd9fb835c +# k8s.io/cli-runtime v0.24.1 => github.com/openshift/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20220601165048-899fd9fb835c ## explicit; go 1.16 
k8s.io/cli-runtime/pkg/genericclioptions k8s.io/cli-runtime/pkg/printers k8s.io/cli-runtime/pkg/resource -# k8s.io/client-go v0.24.0 => github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20220601165048-899fd9fb835c +# k8s.io/client-go v12.0.0+incompatible => github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20220601165048-899fd9fb835c ## explicit; go 1.16 k8s.io/client-go/applyconfigurations/admissionregistration/v1 k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1 @@ -1964,6 +1981,7 @@ k8s.io/client-go/listers/scheduling/v1beta1 k8s.io/client-go/listers/storage/v1 k8s.io/client-go/listers/storage/v1alpha1 k8s.io/client-go/listers/storage/v1beta1 +k8s.io/client-go/metadata k8s.io/client-go/openapi k8s.io/client-go/openapi/cached k8s.io/client-go/pkg/apis/clientauthentication @@ -2031,7 +2049,7 @@ k8s.io/cloud-provider/service/helpers k8s.io/cloud-provider/volume k8s.io/cloud-provider/volume/errors k8s.io/cloud-provider/volume/helpers -# k8s.io/component-base v0.24.0 => github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20220601165048-899fd9fb835c +# k8s.io/component-base v0.24.2 => github.com/openshift/kubernetes/staging/src/k8s.io/component-base v0.0.0-20220601165048-899fd9fb835c ## explicit; go 1.16 k8s.io/component-base/cli/flag k8s.io/component-base/codec @@ -2076,6 +2094,9 @@ k8s.io/cri-api/pkg/errors ## explicit; go 1.16 k8s.io/csi-translation-lib k8s.io/csi-translation-lib/plugins +# k8s.io/klog v1.0.0 +## explicit; go 1.12 +k8s.io/klog # k8s.io/klog/v2 v2.60.1 ## explicit; go 1.13 k8s.io/klog/v2 @@ -2083,7 +2104,7 @@ k8s.io/klog/v2/internal/buffer k8s.io/klog/v2/internal/clock k8s.io/klog/v2/internal/serialize k8s.io/klog/v2/internal/severity -# k8s.io/kube-aggregator v0.24.0 => github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20220601165048-899fd9fb835c +# k8s.io/kube-aggregator v0.24.1 => github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20220601165048-899fd9fb835c ## explicit; go 1.16 k8s.io/kube-aggregator/pkg/apis/apiregistration k8s.io/kube-aggregator/pkg/apis/apiregistration/v1 @@ -2124,7 +2145,7 @@ k8s.io/kube-proxy/config/v1alpha1 k8s.io/kube-scheduler/config/v1beta2 k8s.io/kube-scheduler/config/v1beta3 k8s.io/kube-scheduler/extender/v1 -# k8s.io/kubectl v0.24.0 => github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20220601165048-899fd9fb835c +# k8s.io/kubectl v0.24.1 => github.com/openshift/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20220601165048-899fd9fb835c ## explicit; go 1.16 k8s.io/kubectl/pkg/apps k8s.io/kubectl/pkg/cmd/util @@ -2634,6 +2655,49 @@ k8s.io/utils/trace ## explicit; go 1.17 sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client +# sigs.k8s.io/controller-runtime v0.12.3 +## explicit; go 1.17 +sigs.k8s.io/controller-runtime +sigs.k8s.io/controller-runtime/pkg/builder +sigs.k8s.io/controller-runtime/pkg/cache +sigs.k8s.io/controller-runtime/pkg/cache/internal +sigs.k8s.io/controller-runtime/pkg/certwatcher +sigs.k8s.io/controller-runtime/pkg/certwatcher/metrics +sigs.k8s.io/controller-runtime/pkg/client +sigs.k8s.io/controller-runtime/pkg/client/apiutil +sigs.k8s.io/controller-runtime/pkg/client/config +sigs.k8s.io/controller-runtime/pkg/cluster +sigs.k8s.io/controller-runtime/pkg/config +sigs.k8s.io/controller-runtime/pkg/config/v1alpha1 +sigs.k8s.io/controller-runtime/pkg/controller 
+sigs.k8s.io/controller-runtime/pkg/controller/controllerutil +sigs.k8s.io/controller-runtime/pkg/conversion +sigs.k8s.io/controller-runtime/pkg/event +sigs.k8s.io/controller-runtime/pkg/handler +sigs.k8s.io/controller-runtime/pkg/healthz +sigs.k8s.io/controller-runtime/pkg/internal/controller +sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics +sigs.k8s.io/controller-runtime/pkg/internal/httpserver +sigs.k8s.io/controller-runtime/pkg/internal/log +sigs.k8s.io/controller-runtime/pkg/internal/objectutil +sigs.k8s.io/controller-runtime/pkg/internal/recorder +sigs.k8s.io/controller-runtime/pkg/leaderelection +sigs.k8s.io/controller-runtime/pkg/log +sigs.k8s.io/controller-runtime/pkg/manager +sigs.k8s.io/controller-runtime/pkg/manager/signals +sigs.k8s.io/controller-runtime/pkg/metrics +sigs.k8s.io/controller-runtime/pkg/predicate +sigs.k8s.io/controller-runtime/pkg/ratelimiter +sigs.k8s.io/controller-runtime/pkg/reconcile +sigs.k8s.io/controller-runtime/pkg/recorder +sigs.k8s.io/controller-runtime/pkg/runtime/inject +sigs.k8s.io/controller-runtime/pkg/scheme +sigs.k8s.io/controller-runtime/pkg/source +sigs.k8s.io/controller-runtime/pkg/source/internal +sigs.k8s.io/controller-runtime/pkg/webhook +sigs.k8s.io/controller-runtime/pkg/webhook/admission +sigs.k8s.io/controller-runtime/pkg/webhook/conversion +sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics # sigs.k8s.io/json v0.0.0-20220525155127-227cbc7cc124 ## explicit; go 1.17 sigs.k8s.io/json @@ -2730,7 +2794,7 @@ sigs.k8s.io/structured-merge-diff/v4/merge sigs.k8s.io/structured-merge-diff/v4/schema sigs.k8s.io/structured-merge-diff/v4/typed sigs.k8s.io/structured-merge-diff/v4/value -# sigs.k8s.io/yaml v1.2.0 +# sigs.k8s.io/yaml v1.3.0 ## explicit; go 1.12 sigs.k8s.io/yaml # github.com/google/cadvisor => github.com/google/cadvisor v0.44.1 diff --git a/vendor/sigs.k8s.io/controller-runtime/.gitignore b/vendor/sigs.k8s.io/controller-runtime/.gitignore new file mode 100644 index 000000000..c2c72faf3 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/.gitignore @@ -0,0 +1,24 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# editor and IDE paraphernalia +.idea +*.swp +*.swo +*~ + +# Vscode files +.vscode + +# Tools binaries. +hack/tools/bin diff --git a/vendor/sigs.k8s.io/controller-runtime/.golangci.yml b/vendor/sigs.k8s.io/controller-runtime/.golangci.yml new file mode 100644 index 000000000..77f528ff0 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/.golangci.yml @@ -0,0 +1,141 @@ +linters: + disable-all: true + enable: + - asciicheck + - bodyclose + - deadcode + - depguard + - dogsled + - errcheck + - errorlint + - exportloopref + - goconst + - gocritic + - gocyclo + - godot + - gofmt + - goimports + - goprintffuncname + - gosec + - gosimple + - govet + - ifshort + - importas + - ineffassign + - misspell + - nakedret + - nilerr + - nolintlint + - prealloc + - revive + - rowserrcheck + - staticcheck + - structcheck + - stylecheck + - typecheck + - unconvert + - unparam + - unused + - varcheck + - whitespace + +linters-settings: + ifshort: + # Maximum length of variable declaration measured in number of characters, after which linter won't suggest using short syntax. 
+ max-decl-chars: 50 + importas: + no-unaliased: true + alias: + # Kubernetes + - pkg: k8s.io/api/core/v1 + alias: corev1 + - pkg: k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 + alias: apiextensionsv1 + - pkg: k8s.io/apimachinery/pkg/apis/meta/v1 + alias: metav1 + - pkg: k8s.io/apimachinery/pkg/api/errors + alias: apierrors + - pkg: k8s.io/apimachinery/pkg/util/errors + alias: kerrors + # Controller Runtime + - pkg: sigs.k8s.io/controller-runtime + alias: ctrl + staticcheck: + go: "1.18" + stylecheck: + go: "1.18" + depguard: + include-go-root: true + packages: + - io/ioutil # https://go.dev/doc/go1.16#ioutil + +issues: + max-same-issues: 0 + max-issues-per-linter: 0 + # We are disabling default golangci exclusions because we want to help reviewers to focus on reviewing the most relevant + # changes in PRs and avoid nitpicking. + exclude-use-default: false + # List of regexps of issue texts to exclude, empty list by default. + exclude: + # The following are being worked on to remove their exclusion. This list should be reduced or go away all together over time. + # If it is decided they will not be addressed they should be moved above this comment. + - Subprocess launch(ed with variable|ing should be audited) + - (G204|G104|G307) + - "ST1000: at least one file in a package should have a package comment" + exclude-rules: + - linters: + - gosec + text: "G108: Profiling endpoint is automatically exposed on /debug/pprof" + - linters: + - revive + text: "exported: exported method .*\\.(Reconcile|SetupWithManager|SetupWebhookWithManager) should have comment or be unexported" + - linters: + - errcheck + text: Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*print(f|ln)?|os\.(Un)?Setenv). is not checked + # With Go 1.16, the new embed directive can be used with an un-named import, + # revive (previously, golint) only allows these to be imported in a main.go, which wouldn't work for us. + # This directive allows the embed package to be imported with an underscore everywhere. + - linters: + - revive + source: _ "embed" + # Exclude some packages or code to require comments, for example test code, or fake clients. + - linters: + - revive + text: exported (method|function|type|const) (.+) should have comment or be unexported + source: (func|type).*Fake.* + - linters: + - revive + text: exported (method|function|type|const) (.+) should have comment or be unexported + path: fake_\.go + # Disable unparam "always receives" which might not be really + # useful when building libraries. + - linters: + - unparam + text: always receives + # Dot imports for gomega or ginkgo are allowed + # within test files. + - path: _test\.go + text: should not use dot imports + - path: _test\.go + text: cyclomatic complexity + - path: _test\.go + text: "G107: Potential HTTP request made with variable url" + # Append should be able to assign to a different var/slice. + - linters: + - gocritic + text: "appendAssign: append result not assigned to the same slice" + - linters: + - gocritic + text: "singleCaseSwitch: should rewrite switch statement to if statement" + # It considers all file access to a filename that comes from a variable problematic, + # which is naiv at best. 
+ - linters: + - gosec + text: "G304: Potential file inclusion via variable" + +run: + timeout: 10m + skip-files: + - "zz_generated.*\\.go$" + - ".*conversion.*\\.go$" + allow-parallel-runners: true diff --git a/vendor/sigs.k8s.io/controller-runtime/CONTRIBUTING.md b/vendor/sigs.k8s.io/controller-runtime/CONTRIBUTING.md new file mode 100644 index 000000000..2c0ea1f66 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/CONTRIBUTING.md @@ -0,0 +1,19 @@ +# Contributing guidelines + +## Sign the CLA + +Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests. + +Please see https://git.k8s.io/community/CLA.md for more info + +## Contributing steps + +1. Submit an issue describing your proposed change to the repo in question. +1. The [repo owners](OWNERS) will respond to your issue promptly. +1. If your proposed change is accepted, and you haven't already done so, sign a Contributor License Agreement (see details above). +1. Fork the desired repo, develop and test your code changes. +1. Submit a pull request. + +## Test locally + +Run the command `make test` to test the changes locally. diff --git a/vendor/sigs.k8s.io/controller-runtime/FAQ.md b/vendor/sigs.k8s.io/controller-runtime/FAQ.md new file mode 100644 index 000000000..c21b29e28 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/FAQ.md @@ -0,0 +1,81 @@ +# FAQ + +### Q: How do I know which type of object a controller references? + +**A**: Each controller should only reconcile one object type. Other +affected objects should be mapped to a single type of root object, using +the `EnqueueRequestForOwner` or `EnqueueRequestsFromMapFunc` event +handlers, and potentially indices. Then, your Reconcile method should +attempt to reconcile *all* state for that given root objects. + +### Q: How do I have different logic in my reconciler for different types of events (e.g. create, update, delete)? + +**A**: You should not. Reconcile functions should be idempotent, and +should always reconcile state by reading all the state it needs, then +writing updates. This allows your reconciler to correctly respond to +generic events, adjust to skipped or coalesced events, and easily deal +with application startup. The controller will enqueue reconcile requests +for both old and new objects if a mapping changes, but it's your +responsibility to make sure you have enough information to be able clean +up state that's no longer referenced. + +### Q: My cache might be stale if I read from a cache! How should I deal with that? + +**A**: There are several different approaches that can be taken, depending +on your situation. + +- When you can, take advantage of optimistic locking: use deterministic + names for objects you create, so that the Kubernetes API server will + warn you if the object already exists. Many controllers in Kubernetes + take this approach: the StatefulSet controller appends a specific number + to each pod that it creates, while the Deployment controller hashes the + pod template spec and appends that. + +- In the few cases when you cannot take advantage of deterministic names + (e.g. when using generateName), it may be useful in to track which + actions you took, and assume that they need to be repeated if they don't + occur after a given time (e.g. using a requeue result). This is what + the ReplicaSet controller does. + +In general, write your controller with the assumption that information +will eventually be correct, but may be slightly out of date. 
Make sure +that your reconcile function enforces the entire state of the world each +time it runs. If none of this works for you, you can always construct +a client that reads directly from the API server, but this is generally +considered to be a last resort, and the two approaches above should +generally cover most circumstances. + +### Q: Where's the fake client? How do I use it? + +**A**: The fake client +[exists](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/client/fake), +but we generally recommend using +[envtest.Environment](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/envtest#Environment) +to test against a real API server. In our experience, tests using fake +clients gradually re-implement poorly-written impressions of a real API +server, which leads to hard-to-maintain, complex test code. + +### Q: How should I write tests? Any suggestions for getting started? + +- Use the aforementioned + [envtest.Environment](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/envtest#Environment) + to spin up a real API server instead of trying to mock one out. + +- Structure your tests to check that the state of the world is as you + expect it, *not* that a particular set of API calls were made, when + working with Kubernetes APIs. This will allow you to more easily + refactor and improve the internals of your controllers without changing + your tests. + +- Remember that any time you're interacting with the API server, changes + may have some delay between write time and reconcile time. + +### Q: What are these errors about no Kind being registered for a type? + +**A**: You're probably missing a fully-set-up Scheme. Schemes record the +mapping between Go types and group-version-kinds in Kubernetes. In +general, your application should have its own Scheme containing the types +from the API groups that it needs (be they Kubernetes types or your own). +See the [scheme builder +docs](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/scheme) for +more information. diff --git a/vendor/sigs.k8s.io/controller-runtime/LICENSE b/vendor/sigs.k8s.io/controller-runtime/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/sigs.k8s.io/controller-runtime/Makefile b/vendor/sigs.k8s.io/controller-runtime/Makefile new file mode 100644 index 000000000..36647c697 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/Makefile @@ -0,0 +1,123 @@ +#!/usr/bin/env bash + +# Copyright 2020 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# If you update this file, please follow +# https://suva.sh/posts/well-documented-makefiles + +## -------------------------------------- +## General +## -------------------------------------- + +SHELL:=/usr/bin/env bash +.DEFAULT_GOAL:=help + +# Use GOPROXY environment variable if set +GOPROXY := $(shell go env GOPROXY) +ifeq ($(GOPROXY),) +GOPROXY := https://proxy.golang.org +endif +export GOPROXY + +# Active module mode, as we use go modules to manage dependencies +export GO111MODULE=on + +# Tools. +TOOLS_DIR := hack/tools +TOOLS_BIN_DIR := $(TOOLS_DIR)/bin +GOLANGCI_LINT := $(abspath $(TOOLS_BIN_DIR)/golangci-lint) +GO_APIDIFF := $(TOOLS_BIN_DIR)/go-apidiff +CONTROLLER_GEN := $(TOOLS_BIN_DIR)/controller-gen +ENVTEST_DIR := $(abspath tools/setup-envtest) + +# The help will print out all targets with their descriptions organized bellow their categories. The categories are represented by `##@` and the target descriptions by `##`. +# The awk commands is responsible to read the entire set of makefiles included in this invocation, looking for lines of the file as xyz: ## something, and then pretty-format the target and help. Then, if there's a line with ##@ something, that gets pretty-printed as a category. +# More info over the usage of ANSI control characters for terminal formatting: https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters +# More info over awk command: http://linuxcommand.org/lc3_adv_awk.php +.PHONY: help +help: ## Display this help + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) + +## -------------------------------------- +## Testing +## -------------------------------------- + +.PHONY: test +test: test-tools ## Run the script check-everything.sh which will check all. + TRACE=1 ./hack/check-everything.sh + +.PHONY: test-tools +test-tools: ## tests the tools codebase (setup-envtest) + cd tools/setup-envtest && go test ./... + +## -------------------------------------- +## Binaries +## -------------------------------------- + +$(GO_APIDIFF): $(TOOLS_DIR)/go.mod # Build go-apidiff from tools folder. + cd $(TOOLS_DIR) && go build -tags=tools -o bin/go-apidiff github.com/joelanford/go-apidiff + +$(CONTROLLER_GEN): $(TOOLS_DIR)/go.mod # Build controller-gen from tools folder. + cd $(TOOLS_DIR) && go build -tags=tools -o bin/controller-gen sigs.k8s.io/controller-tools/cmd/controller-gen + +$(GOLANGCI_LINT): .github/workflows/golangci-lint.yml # Download golanci-lint using hack script into tools folder. + hack/ensure-golangci-lint.sh \ + -b $(TOOLS_BIN_DIR) \ + $(shell cat .github/workflows/golangci-lint.yml | grep version | sed 's/.*version: //') + +## -------------------------------------- +## Linting +## -------------------------------------- + +.PHONY: lint +lint: $(GOLANGCI_LINT) ## Lint codebase + $(GOLANGCI_LINT) run -v $(GOLANGCI_LINT_EXTRA_ARGS) + cd tools/setup-envtest; $(GOLANGCI_LINT) run -v $(GOLANGCI_LINT_EXTRA_ARGS) + +.PHONY: lint-fix +lint-fix: $(GOLANGCI_LINT) ## Lint the codebase and run auto-fixers if supported by the linter. + GOLANGCI_LINT_EXTRA_ARGS=--fix $(MAKE) lint + +## -------------------------------------- +## Generate +## -------------------------------------- + +.PHONY: modules +modules: ## Runs go mod to ensure modules are up to date. 
+ go mod tidy + cd $(TOOLS_DIR); go mod tidy + cd $(ENVTEST_DIR); go mod tidy + +.PHONY: generate +generate: $(CONTROLLER_GEN) ## Runs controller-gen for internal types for config file + $(CONTROLLER_GEN) object paths="./pkg/config/v1alpha1/...;./examples/configfile/custom/v1alpha1/..." + +## -------------------------------------- +## Cleanup / Verification +## -------------------------------------- + +.PHONY: clean +clean: ## Cleanup. + $(MAKE) clean-bin + +.PHONY: clean-bin +clean-bin: ## Remove all generated binaries. + rm -rf hack/tools/bin + +.PHONY: verify-modules +verify-modules: modules + @if !(git diff --quiet HEAD -- go.sum go.mod); then \ + echo "go module files are out of date, please run 'make modules'"; exit 1; \ + fi diff --git a/vendor/sigs.k8s.io/controller-runtime/OWNERS b/vendor/sigs.k8s.io/controller-runtime/OWNERS new file mode 100644 index 000000000..4b1fa044b --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/OWNERS @@ -0,0 +1,10 @@ +# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md + +approvers: + - controller-runtime-admins + - controller-runtime-maintainers + - controller-runtime-approvers +reviewers: + - controller-runtime-admins + - controller-runtime-reviewers + - controller-runtime-approvers diff --git a/vendor/sigs.k8s.io/controller-runtime/OWNERS_ALIASES b/vendor/sigs.k8s.io/controller-runtime/OWNERS_ALIASES new file mode 100644 index 000000000..82f7a2bef --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/OWNERS_ALIASES @@ -0,0 +1,41 @@ +# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md + +aliases: + # active folks who can be contacted to perform admin-related + # tasks on the repo, or otherwise approve any PRS. + controller-runtime-admins: + - droot + - mengqiy + - pwittrock + + # non-admin folks who have write-access and can approve any PRs in the repo + controller-runtime-maintainers: + - vincepri + - joelanford + + # non-admin folks who can approve any PRs in the repo + controller-runtime-approvers: + - gerred + - shawn-hurley + - alvaroaleman + + # folks who can review and LGTM any PRs in the repo (doesn't + # include approvers & admins -- those count too via the OWNERS + # file) + controller-runtime-reviewers: + - alenkacz + - vincepri + - alexeldeib + - varshaprasad96 + - fillzpp + + # folks to can approve things in the directly-ported + # testing_frameworks portions of the codebase + testing-integration-approvers: + - apelisse + - hoegaarden + + # folks who may have context on ancient history, + # but are no longer directly involved + controller-runtime-emeritus-maintainers: + - directxman12 diff --git a/vendor/sigs.k8s.io/controller-runtime/README.md b/vendor/sigs.k8s.io/controller-runtime/README.md new file mode 100644 index 000000000..cd358b94f --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/README.md @@ -0,0 +1,66 @@ +[![Go Report Card](https://goreportcard.com/badge/sigs.k8s.io/controller-runtime)](https://goreportcard.com/report/sigs.k8s.io/controller-runtime) +[![godoc](https://pkg.go.dev/badge/sigs.k8s.io/controller-runtime)](https://pkg.go.dev/sigs.k8s.io/controller-runtime) + +# Kubernetes controller-runtime Project + +The Kubernetes controller-runtime Project is a set of go libraries for building +Controllers. It is leveraged by [Kubebuilder](https://book.kubebuilder.io/) and +[Operator SDK](https://github.com/operator-framework/operator-sdk). Both are +a great place to start for new projects. 
See +[Kubebuilder's Quick Start](https://book.kubebuilder.io/quick-start.html) to +see how it can be used. + +Documentation: + +- [Package overview](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg) +- [Basic controller using builder](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/builder#example-Builder) +- [Creating a manager](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/manager#example-New) +- [Creating a controller](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/controller#example-New) +- [Examples](https://github.com/kubernetes-sigs/controller-runtime/blob/master/examples) +- [Designs](https://github.com/kubernetes-sigs/controller-runtime/blob/master/designs) + +# Versioning, Maintenance, and Compatibility + +The full documentation can be found at [VERSIONING.md](VERSIONING.md), but TL;DR: + +Users: + +- We follow [Semantic Versioning (semver)](https://semver.org) +- Use releases with your dependency management to ensure that you get compatible code +- The master branch contains all the latest code, some of which may break compatibility (so "normal" `go get` is not recommended) + +Contributors: + +- All code PR must be labeled with :bug: (patch fixes), :sparkles: (backwards-compatible features), or :warning: (breaking changes) +- Breaking changes will find their way into the next major release, other changes will go into an semi-immediate patch or minor release +- For a quick PR template suggesting the right information, use one of these PR templates: + * [Breaking Changes/Features](/.github/PULL_REQUEST_TEMPLATE/breaking_change.md) + * [Backwards-Compatible Features](/.github/PULL_REQUEST_TEMPLATE/compat_feature.md) + * [Bug fixes](/.github/PULL_REQUEST_TEMPLATE/bug_fix.md) + * [Documentation Changes](/.github/PULL_REQUEST_TEMPLATE/docs.md) + * [Test/Build/Other Changes](/.github/PULL_REQUEST_TEMPLATE/other.md) + +## FAQ + +See [FAQ.md](FAQ.md) + +## Community, discussion, contribution, and support + +Learn how to engage with the Kubernetes community on the [community page](http://kubernetes.io/community/). + +controller-runtime is a subproject of the [kubebuilder](https://github.com/kubernetes-sigs/kubebuilder) project +in sig apimachinery. + +You can reach the maintainers of this project at: + +- Slack channel: [#kubebuilder](http://slack.k8s.io/#kubebuilder) +- Google Group: [kubebuilder@googlegroups.com](https://groups.google.com/forum/#!forum/kubebuilder) + +## Contributing +Contributions are greatly appreciated. The maintainers actively manage the issues list, and try to highlight issues suitable for newcomers. +The project follows the typical GitHub pull request model. See [CONTRIBUTING.md](CONTRIBUTING.md) for more details. +Before starting any work, please either comment on an existing issue, or file a new one. + +## Code of conduct + +Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md). diff --git a/vendor/sigs.k8s.io/controller-runtime/SECURITY_CONTACTS b/vendor/sigs.k8s.io/controller-runtime/SECURITY_CONTACTS new file mode 100644 index 000000000..32e6a3b90 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/SECURITY_CONTACTS @@ -0,0 +1,14 @@ +# Defined below are the security contacts for this repo. +# +# They are the contact point for the Product Security Team to reach out +# to for triaging and handling of incoming issues. 
+# +# The below names agree to abide by the +# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy) +# and will be removed and replaced if they violate that agreement. +# +# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE +# INSTRUCTIONS AT https://kubernetes.io/security/ + +pwittrock +droot diff --git a/vendor/sigs.k8s.io/controller-runtime/TMP-LOGGING.md b/vendor/sigs.k8s.io/controller-runtime/TMP-LOGGING.md new file mode 100644 index 000000000..b3cfc6651 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/TMP-LOGGING.md @@ -0,0 +1,169 @@ +Logging Guidelines +================== + +controller-runtime uses a kind of logging called *structured logging*. If +you've used a library like Zap or logrus before, you'll be familiar with +the concepts we use. If you've only used a logging library like the "log" +package (in the Go standard library) or "glog" (in Kubernetes), you'll +need to adjust how you think about logging a bit. + +### Getting Started With Structured Logging + +With structured logging, we associate a *constant* log message with some +variable key-value pairs. For instance, suppose we wanted to log that we +were starting reconciliation on a pod. In the Go standard library logger, +we might write: + +```go +log.Printf("starting reconciliation for pod %s/%s", podNamespace, podName) +``` + +In controller-runtime, we'd instead write: + +```go +logger.Info("starting reconciliation", "pod", req.NamespacedNamed) +``` + +or even write + +```go +func (r *Reconciler) Reconcile(req reconcile.Request) (reconcile.Response, error) { + logger := logger.WithValues("pod", req.NamespacedName) + // do some stuff + logger.Info("starting reconciliation") +} +``` + +Notice how we've broken out the information that we want to convey into +a constant message (`"starting reconciliation"`) and some key-value pairs +that convey variable information (`"pod", req.NamespacedName`). We've +there-by added "structure" to our logs, which makes them easier to save +and search later, as well as correlate with metrics and events. + +All of controller-runtime's logging is done via +[logr](https://github.com/go-logr/logr), a generic interface for +structured logging. You can use whichever logging library you want to +implement the actual mechanics of the logging. controller-runtime +provides some helpers to make it easy to use +[Zap](https://go.uber.org/zap) as the implementation. + +You can configure the logging implementation using +`"sigs.k8s.io/controller-runtime/pkg/log".SetLogger`. That +package also contains the convenience functions for setting up Zap. + +You can get a handle to the the "root" logger using +`"sigs.k8s.io/controller-runtime/pkg/log".Log`, and can then call +`WithName` to create individual named loggers. You can call `WithName` +repeatedly to chain names together: + +```go +logger := log.Log.WithName("controller").WithName("replicaset") +// in reconcile... +logger = logger.WithValues("replicaset", req.NamespacedName) +// later on in reconcile... +logger.Info("doing things with pods", "pod", newPod) +``` + +As seen above, you can also call `WithValue` to create a new sub-logger +that always attaches some key-value pairs to a logger. 
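None of these loggers emit anything until a concrete logr implementation is installed. A minimal sketch wiring up the bundled Zap support at program start (the development-mode option is only an illustrative choice):

```go
package main

import (
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/log/zap"
)

func main() {
	// Install a concrete logr implementation once at startup; every logger
	// handed out by controller-runtime afterwards delegates to it.
	ctrl.SetLogger(zap.New(zap.UseDevMode(true)))

	setupLog := ctrl.Log.WithName("setup")
	setupLog.Info("logging configured", "mode", "development")
}
```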
+ +Finally, you can use `V(1)` to mark a particular log line as "debug" logs: + +```go +logger.V(1).Info("this is particularly verbose!", "state of the world", +allKubernetesObjectsEverywhere) +``` + +While it's possible to use higher log levels, it's recommended that you +stick with `V(1)` or `V(0)` (which is equivalent to not specifying `V`), +and then filter later based on key-value pairs or messages; different +numbers tend to lose meaning easily over time, and you'll be left +wondering why particular logs lines are at `V(5)` instead of `V(7)`. + +## Logging errors + +Errors should *always* be logged with `log.Error`, which allows logr +implementations to provide special handling of errors (for instance, +providing stack traces in debug mode). + +It's acceptable to log call `log.Error` with a nil error object. This +conveys that an error occurred in some capacity, but that no actual +`error` object was involved. + +Errors returned by the `Reconcile` implementation of the `Reconciler` interface are commonly logged as a `Reconciler error`. +It's a developer choice to create an additional error log in the `Reconcile` implementation so a more specific file name and line for the error are returned. + +## Logging messages + +- Don't put variable content in your messages -- use key-value pairs for + that. Never use `fmt.Sprintf` in your message. + +- Try to match the terminology in your messages with your key-value pairs + -- for instance, if you have a key-value pairs `api version`, use the + term `APIVersion` instead of `GroupVersion` in your message. + +## Logging Kubernetes Objects + +Kubernetes objects should be logged directly, like `log.Info("this is +a Kubernetes object", "pod", somePod)`. controller-runtime provides +a special encoder for Zap that will transform Kubernetes objects into +`name, namespace, apiVersion, kind` objects, when available and not in +development mode. Other logr implementations should implement similar +logic. + +## Logging Structured Values (Key-Value pairs) + +- Use lower-case, space separated keys. For example `object` for objects, + `api version` for `APIVersion` + +- Be consistent across your application, and with controller-runtime when + possible. + +- Try to be brief but descriptive. + +- Match terminology in keys with terminology in the message. + +- Be careful logging non-Kubernetes objects verbatim if they're very + large. + +### Groups, Versions, and Kinds + +- Kinds should not be logged alone (they're meaningless alone). Use + a `GroupKind` object to log them instead, or a `GroupVersionKind` when + version is relevant. + +- If you need to log an API version string, use `api version` as the key + (formatted as with a `GroupVersion`, or as received directly from API + discovery). + +### Objects and Types + +- If code works with a generic Kubernetes `runtime.Object`, use the + `object` key. For specific objects, prefer the resource name as the key + (e.g. `pod` for `v1.Pod` objects). + +- For non-Kubernetes objects, the `object` key may also be used, if you + accept a generic interface. + +- When logging a raw type, log it using the `type` key, with a value of + `fmt.Sprintf("%T", typ)` + +- If there's specific context around a type, the key may be more specific, + but should end with `type` -- for instance, `OwnerType` should be logged + as `owner` in the context of `log.Error(err, "Could not get ObjectKinds + for OwnerType", `owner type`, fmt.Sprintf("%T"))`. When possible, favor + communicating kind instead. 
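Putting several of these conventions together, logging inside a reconciler might look like the following sketch (the `PodReconciler` type and its fields are placeholders introduced for the example):

```go
package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/log"
)

// PodReconciler is a placeholder reconciler used to illustrate the logging
// conventions above.
type PodReconciler struct {
	client.Client
}

func (r *PodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	// Constant message, variable data as key-value pairs; the object is
	// logged under its resource name ("pod"), per the conventions above.
	logger := log.FromContext(ctx).WithValues("pod", req.NamespacedName)
	logger.Info("starting reconciliation")

	pod := &corev1.Pod{}
	if err := r.Get(ctx, req.NamespacedName, pod); err != nil {
		// Errors always go through Error, even when the error is returned.
		logger.Error(err, "unable to fetch object")
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}

	// Verbose detail goes to V(1) rather than a bespoke level number.
	logger.V(1).Info("observed pod", "phase", pod.Status.Phase)
	return ctrl.Result{}, nil
}
```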
+ +### Multiple things + +- When logging multiple things, simply pluralize the key. + +### controller-runtime Specifics + +- Reconcile requests should be logged as `request`, although normal code + should favor logging the key. + +- Reconcile keys should be logged as with the same key as if you were + logging the object directly (e.g. `log.Info("reconciling pod", "pod", + req.NamespacedName)`). This ends up having a similar effect to logging + the object directly. diff --git a/vendor/sigs.k8s.io/controller-runtime/VERSIONING.md b/vendor/sigs.k8s.io/controller-runtime/VERSIONING.md new file mode 100644 index 000000000..18779000e --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/VERSIONING.md @@ -0,0 +1,30 @@ +# Versioning and Branching in controller-runtime + +We follow the [common KubeBuilder versioning guidelines][guidelines], and +use the corresponding tooling. + +For the purposes of the aforementioned guidelines, controller-runtime +counts as a "library project", but otherwise follows the guidelines +exactly. + +[guidelines]: https://sigs.k8s.io/kubebuilder-release-tools/VERSIONING.md + +## Compatiblity and Release Support + +For release branches, we generally tend to support backporting one (1) +major release (`release-{X-1}` or `release-0.{Y-1}`), but may go back +further if the need arises and is very pressing (e.g. security updates). + +### Dependency Support + +Note the [guidelines on dependency versions][dep-versions]. Particularly: + +- We **DO** guarantee Kubernetes REST API compability -- if a given + version of controller-runtime stops working with what should be + a supported version of Kubernetes, this is almost certainly a bug. + +- We **DO NOT** guarantee any particular compability matrix between + kubernetes library dependencies (client-go, apimachinery, etc); Such + compability is infeasible due to the way those libraries are versioned. + +[dep-versions]: https://sigs.k8s.io/kubebuilder-release-tools/VERSIONING.md#kubernetes-version-compatibility diff --git a/vendor/sigs.k8s.io/controller-runtime/alias.go b/vendor/sigs.k8s.io/controller-runtime/alias.go new file mode 100644 index 000000000..29f964dcb --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/alias.go @@ -0,0 +1,150 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllerruntime + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client/config" + cfg "sigs.k8s.io/controller-runtime/pkg/config" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/manager/signals" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Builder builds an Application ControllerManagedBy (e.g. Operator) and returns a manager.Manager to start it. 
+type Builder = builder.Builder + +// Request contains the information necessary to reconcile a Kubernetes object. This includes the +// information to uniquely identify the object - its Name and Namespace. It does NOT contain information about +// any specific Event or the object contents itself. +type Request = reconcile.Request + +// Result contains the result of a Reconciler invocation. +type Result = reconcile.Result + +// Manager initializes shared dependencies such as Caches and Clients, and provides them to Runnables. +// A Manager is required to create Controllers. +type Manager = manager.Manager + +// Options are the arguments for creating a new Manager. +type Options = manager.Options + +// SchemeBuilder builds a new Scheme for mapping go types to Kubernetes GroupVersionKinds. +type SchemeBuilder = scheme.Builder + +// GroupVersion contains the "group" and the "version", which uniquely identifies the API. +type GroupVersion = schema.GroupVersion + +// GroupResource specifies a Group and a Resource, but does not force a version. This is useful for identifying +// concepts during lookup stages without having partially valid types. +type GroupResource = schema.GroupResource + +// TypeMeta describes an individual object in an API response or request +// with strings representing the type of the object and its API schema version. +// Structures that are versioned or persisted should inline TypeMeta. +// +// +k8s:deepcopy-gen=false +type TypeMeta = metav1.TypeMeta + +// ObjectMeta is metadata that all persisted resources must have, which includes all objects +// users must create. +type ObjectMeta = metav1.ObjectMeta + +var ( + // GetConfigOrDie creates a *rest.Config for talking to a Kubernetes apiserver. + // If --kubeconfig is set, will use the kubeconfig file at that location. Otherwise will assume running + // in cluster and use the cluster provided kubeconfig. + // + // Will log an error and exit if there is an error creating the rest.Config. + GetConfigOrDie = config.GetConfigOrDie + + // GetConfig creates a *rest.Config for talking to a Kubernetes apiserver. + // If --kubeconfig is set, will use the kubeconfig file at that location. Otherwise will assume running + // in cluster and use the cluster provided kubeconfig. + // + // Config precedence + // + // * --kubeconfig flag pointing at a file + // + // * KUBECONFIG environment variable pointing at a file + // + // * In-cluster config if running in cluster + // + // * $HOME/.kube/config if exists. + GetConfig = config.GetConfig + + // ConfigFile returns the cfg.File function for deferred config file loading, + // this is passed into Options{}.From() to populate the Options fields for + // the manager. + ConfigFile = cfg.File + + // NewControllerManagedBy returns a new controller builder that will be started by the provided Manager. + NewControllerManagedBy = builder.ControllerManagedBy + + // NewWebhookManagedBy returns a new webhook builder that will be started by the provided Manager. + NewWebhookManagedBy = builder.WebhookManagedBy + + // NewManager returns a new Manager for creating Controllers. + NewManager = manager.New + + // CreateOrUpdate creates or updates the given object obj in the Kubernetes + // cluster. The object's desired state should be reconciled with the existing + // state using the passed in ReconcileFn. obj must be a struct pointer so that + // obj can be updated with the content returned by the Server. + // + // It returns the executed operation and an error. 
+ CreateOrUpdate = controllerutil.CreateOrUpdate + + // SetControllerReference sets owner as a Controller OwnerReference on owned. + // This is used for garbage collection of the owned object and for + // reconciling the owner object on changes to owned (with a Watch + EnqueueRequestForOwner). + // Since only one OwnerReference can be a controller, it returns an error if + // there is another OwnerReference with Controller flag set. + SetControllerReference = controllerutil.SetControllerReference + + // SetupSignalHandler registered for SIGTERM and SIGINT. A stop channel is returned + // which is closed on one of these signals. If a second signal is caught, the program + // is terminated with exit code 1. + SetupSignalHandler = signals.SetupSignalHandler + + // Log is the base logger used by controller-runtime. It delegates + // to another logr.Logger. You *must* call SetLogger to + // get any actual logging. + Log = log.Log + + // LoggerFrom returns a logger with predefined values from a context.Context. + // The logger, when used with controllers, can be expected to contain basic information about the object + // that's being reconciled like: + // - `reconciler group` and `reconciler kind` coming from the For(...) object passed in when building a controller. + // - `name` and `namespace` injected from the reconciliation request. + // + // This is meant to be used with the context supplied in a struct that satisfies the Reconciler interface. + LoggerFrom = log.FromContext + + // LoggerInto takes a context and sets the logger as one of its keys. + // + // This is meant to be used in reconcilers to enrich the logger within a context with additional values. + LoggerInto = log.IntoContext + + // SetLogger sets a concrete logging implementation for all deferred Loggers. + SetLogger = log.SetLogger +) diff --git a/vendor/sigs.k8s.io/controller-runtime/code-of-conduct.md b/vendor/sigs.k8s.io/controller-runtime/code-of-conduct.md new file mode 100644 index 000000000..0d15c00cf --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/code-of-conduct.md @@ -0,0 +1,3 @@ +# Kubernetes Community Code of Conduct + +Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git a/vendor/sigs.k8s.io/controller-runtime/doc.go b/vendor/sigs.k8s.io/controller-runtime/doc.go new file mode 100644 index 000000000..9f260c4f7 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/doc.go @@ -0,0 +1,127 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package controllerruntime provides tools to construct Kubernetes-style +// controllers that manipulate both Kubernetes CRDs and aggregated/built-in +// Kubernetes APIs. +// +// It defines easy helpers for the common use cases when building CRDs, built +// on top of customizable layers of abstraction. Common cases should be easy, +// and uncommon cases should be possible. In general, controller-runtime tries +// to guide users towards Kubernetes controller best-practices. 
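As a small usage sketch for two of the helpers aliased above, `CreateOrUpdate` and `SetControllerReference`, a reconciler can enforce the desired state of an owned object like this (the helper name, the `-settings` suffix, and the owner parameter are assumptions made for the example):

```go
package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

// ensureOwnedConfigMap enforces the desired contents of a ConfigMap that is
// owned by (and garbage-collected with) the given owner object.
func ensureOwnedConfigMap(ctx context.Context, c client.Client, scheme *runtime.Scheme, owner client.Object, desired map[string]string) error {
	cm := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Namespace: owner.GetNamespace(), Name: owner.GetName() + "-settings"},
	}
	// CreateOrUpdate reads the current object, applies the mutate function,
	// and only issues a write when something actually changed.
	_, err := controllerutil.CreateOrUpdate(ctx, c, cm, func() error {
		cm.Data = desired
		// The controller reference lets Owns() route this ConfigMap's events
		// back to the owner's reconciler and enables garbage collection.
		return ctrl.SetControllerReference(owner, cm, scheme)
	})
	return err
}
```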
+// +// Getting Started +// +// The main entrypoint for controller-runtime is this root package, which +// contains all of the common types needed to get started building controllers: +// import ( +// ctrl "sigs.k8s.io/controller-runtime" +// ) +// +// The examples in this package walk through a basic controller setup. The +// kubebuilder book (https://book.kubebuilder.io) has some more in-depth +// walkthroughs. +// +// controller-runtime favors structs with sane defaults over constructors, so +// it's fairly common to see structs being used directly in controller-runtime. +// +// Organization +// +// A brief-ish walkthrough of the layout of this library can be found below. Each +// package contains more information about how to use it. +// +// Frequently asked questions about using controller-runtime and designing +// controllers can be found at +// https://github.com/kubernetes-sigs/controller-runtime/blob/master/FAQ.md. +// +// Managers +// +// Every controller and webhook is ultimately run by a Manager (pkg/manager). A +// manager is responsible for running controllers and webhooks, and setting up +// common dependencies (pkg/runtime/inject), like shared caches and clients, as +// well as managing leader election (pkg/leaderelection). Managers are +// generally configured to gracefully shut down controllers on pod termination +// by wiring up a signal handler (pkg/manager/signals). +// +// Controllers +// +// Controllers (pkg/controller) use events (pkg/event) to eventually trigger +// reconcile requests. They may be constructed manually, but are often +// constructed with a Builder (pkg/builder), which eases the wiring of event +// sources (pkg/source), like Kubernetes API object changes, to event handlers +// (pkg/handler), like "enqueue a reconcile request for the object owner". +// Predicates (pkg/predicate) can be used to filter which events actually +// trigger reconciles. There are pre-written utilities for the common cases, and +// interfaces and helpers for advanced cases. +// +// Reconcilers +// +// Controller logic is implemented in terms of Reconcilers (pkg/reconcile). A +// Reconciler implements a function which takes a reconcile Request containing +// the name and namespace of the object to reconcile, reconciles the object, +// and returns a Response or an error indicating whether to requeue for a +// second round of processing. +// +// Clients and Caches +// +// Reconcilers use Clients (pkg/client) to access API objects. The default +// client provided by the manager reads from a local shared cache (pkg/cache) +// and writes directly to the API server, but clients can be constructed that +// only talk to the API server, without a cache. The Cache will auto-populate +// with watched objects, as well as when other structured objects are +// requested. The default split client does not promise to invalidate the cache +// during writes (nor does it promise sequential create/get coherence), and code +// should not assume a get immediately following a create/update will return +// the updated resource. Caches may also have indexes, which can be created via +// a FieldIndexer (pkg/client) obtained from the manager. Indexes can used to +// quickly and easily look up all objects with certain fields set. Reconcilers +// may retrieve event recorders (pkg/recorder) to emit events using the +// manager. 
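A compact end-to-end sketch of the pieces described above, a manager, a builder-constructed controller, and a reconciler backed by the manager's cache-reading client, might look like the following (watching Pods is purely illustrative; real controllers usually reconcile their own CRD):

```go
package main

import (
	"context"
	"os"

	corev1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/log"
	"sigs.k8s.io/controller-runtime/pkg/log/zap"
)

// reconciler holds no state beyond the cache-backed client handed out by the manager.
type reconciler struct {
	client.Client
}

func (r *reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	var pod corev1.Pod
	if err := r.Get(ctx, req.NamespacedName, &pod); err != nil {
		// Reads go through the shared cache; NotFound usually means the
		// object was deleted after the event was enqueued.
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}
	log.FromContext(ctx).Info("observed pod", "phase", pod.Status.Phase)
	return ctrl.Result{}, nil
}

func main() {
	ctrl.SetLogger(zap.New())

	// The manager wires up the shared cache, clients, and leader election.
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{})
	if err != nil {
		ctrl.Log.Error(err, "unable to create manager")
		os.Exit(1)
	}

	// The builder turns "watch Pods, enqueue them, call Reconcile" into a controller.
	if err := ctrl.NewControllerManagedBy(mgr).
		For(&corev1.Pod{}).
		Complete(&reconciler{Client: mgr.GetClient()}); err != nil {
		ctrl.Log.Error(err, "unable to create controller")
		os.Exit(1)
	}

	// Blocks until the signal handler's context is cancelled (SIGINT/SIGTERM).
	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		ctrl.Log.Error(err, "manager exited")
		os.Exit(1)
	}
}
```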
+// +// Schemes +// +// Clients, Caches, and many other things in Kubernetes use Schemes +// (pkg/scheme) to associate Go types to Kubernetes API Kinds +// (Group-Version-Kinds, to be specific). +// +// Webhooks +// +// Similarly, webhooks (pkg/webhook/admission) may be implemented directly, but +// are often constructed using a builder (pkg/webhook/admission/builder). They +// are run via a server (pkg/webhook) which is managed by a Manager. +// +// Logging and Metrics +// +// Logging (pkg/log) in controller-runtime is done via structured logs, using a +// log set of interfaces called logr +// (https://pkg.go.dev/github.com/go-logr/logr). While controller-runtime +// provides easy setup for using Zap (https://go.uber.org/zap, pkg/log/zap), +// you can provide any implementation of logr as the base logger for +// controller-runtime. +// +// Metrics (pkg/metrics) provided by controller-runtime are registered into a +// controller-runtime-specific Prometheus metrics registry. The manager can +// serve these by an HTTP endpoint, and additional metrics may be registered to +// this Registry as normal. +// +// Testing +// +// You can easily build integration and unit tests for your controllers and +// webhooks using the test Environment (pkg/envtest). This will automatically +// stand up a copy of etcd and kube-apiserver, and provide the correct options +// to connect to the API server. It's designed to work well with the Ginkgo +// testing framework, but should work with any testing setup. +package controllerruntime diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/builder/controller.go b/vendor/sigs.k8s.io/controller-runtime/pkg/builder/controller.go new file mode 100644 index 000000000..eedda4932 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/builder/controller.go @@ -0,0 +1,335 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder + +import ( + "fmt" + "strings" + + "github.com/go-logr/logr" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/klog/v2" + + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +// Supporting mocking out functions for testing. +var newController = controller.New +var getGvk = apiutil.GVKForObject + +// project represents other forms that the we can use to +// send/receive a given resource (metadata-only, unstructured, etc). +type objectProjection int + +const ( + // projectAsNormal doesn't change the object from the form given. + projectAsNormal objectProjection = iota + // projectAsMetadata turns this into an metadata-only watch. + projectAsMetadata +) + +// Builder builds a Controller. 
+type Builder struct { + forInput ForInput + ownsInput []OwnsInput + watchesInput []WatchesInput + mgr manager.Manager + globalPredicates []predicate.Predicate + ctrl controller.Controller + ctrlOptions controller.Options + name string +} + +// ControllerManagedBy returns a new controller builder that will be started by the provided Manager. +func ControllerManagedBy(m manager.Manager) *Builder { + return &Builder{mgr: m} +} + +// ForInput represents the information set by For method. +type ForInput struct { + object client.Object + predicates []predicate.Predicate + objectProjection objectProjection + err error +} + +// For defines the type of Object being *reconciled*, and configures the ControllerManagedBy to respond to create / delete / +// update events by *reconciling the object*. +// This is the equivalent of calling +// Watches(&source.Kind{Type: apiType}, &handler.EnqueueRequestForObject{}). +func (blder *Builder) For(object client.Object, opts ...ForOption) *Builder { + if blder.forInput.object != nil { + blder.forInput.err = fmt.Errorf("For(...) should only be called once, could not assign multiple objects for reconciliation") + return blder + } + input := ForInput{object: object} + for _, opt := range opts { + opt.ApplyToFor(&input) + } + + blder.forInput = input + return blder +} + +// OwnsInput represents the information set by Owns method. +type OwnsInput struct { + object client.Object + predicates []predicate.Predicate + objectProjection objectProjection +} + +// Owns defines types of Objects being *generated* by the ControllerManagedBy, and configures the ControllerManagedBy to respond to +// create / delete / update events by *reconciling the owner object*. This is the equivalent of calling +// Watches(&source.Kind{Type: }, &handler.EnqueueRequestForOwner{OwnerType: apiType, IsController: true}). +func (blder *Builder) Owns(object client.Object, opts ...OwnsOption) *Builder { + input := OwnsInput{object: object} + for _, opt := range opts { + opt.ApplyToOwns(&input) + } + + blder.ownsInput = append(blder.ownsInput, input) + return blder +} + +// WatchesInput represents the information set by Watches method. +type WatchesInput struct { + src source.Source + eventhandler handler.EventHandler + predicates []predicate.Predicate + objectProjection objectProjection +} + +// Watches exposes the lower-level ControllerManagedBy Watches functions through the builder. Consider using +// Owns or For instead of Watches directly. +// Specified predicates are registered only for given source. +func (blder *Builder) Watches(src source.Source, eventhandler handler.EventHandler, opts ...WatchesOption) *Builder { + input := WatchesInput{src: src, eventhandler: eventhandler} + for _, opt := range opts { + opt.ApplyToWatches(&input) + } + + blder.watchesInput = append(blder.watchesInput, input) + return blder +} + +// WithEventFilter sets the event filters, to filter which create/update/delete/generic events eventually +// trigger reconciliations. For example, filtering on whether the resource version has changed. +// Given predicate is added for all watched objects. +// Defaults to the empty list. +func (blder *Builder) WithEventFilter(p predicate.Predicate) *Builder { + blder.globalPredicates = append(blder.globalPredicates, p) + return blder +} + +// WithOptions overrides the controller options use in doController. Defaults to empty. 
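Taken together, a typical builder invocation combining For, Owns, an event filter, and controller options could look like the sketch below (the helper name, the chosen types, and the option values are assumptions for illustration):

```go
package example

import (
	appsv1 "k8s.io/api/apps/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// setupController reconciles Deployments, re-enqueues the owning Deployment
// whenever one of its ReplicaSets changes, drops events whose resource
// version did not change, and runs two reconcile workers in parallel.
func setupController(mgr manager.Manager, r reconcile.Reconciler) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&appsv1.Deployment{}).
		Owns(&appsv1.ReplicaSet{}).
		WithEventFilter(predicate.ResourceVersionChangedPredicate{}).
		WithOptions(controller.Options{MaxConcurrentReconciles: 2}).
		Named("deployment_example").
		Complete(r)
}
```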
+func (blder *Builder) WithOptions(options controller.Options) *Builder { + blder.ctrlOptions = options + return blder +} + +// WithLogConstructor overrides the controller options's LogConstructor. +func (blder *Builder) WithLogConstructor(logConstructor func(*reconcile.Request) logr.Logger) *Builder { + blder.ctrlOptions.LogConstructor = logConstructor + return blder +} + +// Named sets the name of the controller to the given name. The name shows up +// in metrics, among other things, and thus should be a prometheus compatible name +// (underscores and alphanumeric characters only). +// +// By default, controllers are named using the lowercase version of their kind. +func (blder *Builder) Named(name string) *Builder { + blder.name = name + return blder +} + +// Complete builds the Application Controller. +func (blder *Builder) Complete(r reconcile.Reconciler) error { + _, err := blder.Build(r) + return err +} + +// Build builds the Application Controller and returns the Controller it created. +func (blder *Builder) Build(r reconcile.Reconciler) (controller.Controller, error) { + if r == nil { + return nil, fmt.Errorf("must provide a non-nil Reconciler") + } + if blder.mgr == nil { + return nil, fmt.Errorf("must provide a non-nil Manager") + } + if blder.forInput.err != nil { + return nil, blder.forInput.err + } + // Checking the reconcile type exist or not + if blder.forInput.object == nil { + return nil, fmt.Errorf("must provide an object for reconciliation") + } + + // Set the ControllerManagedBy + if err := blder.doController(r); err != nil { + return nil, err + } + + // Set the Watch + if err := blder.doWatch(); err != nil { + return nil, err + } + + return blder.ctrl, nil +} + +func (blder *Builder) project(obj client.Object, proj objectProjection) (client.Object, error) { + switch proj { + case projectAsNormal: + return obj, nil + case projectAsMetadata: + metaObj := &metav1.PartialObjectMetadata{} + gvk, err := getGvk(obj, blder.mgr.GetScheme()) + if err != nil { + return nil, fmt.Errorf("unable to determine GVK of %T for a metadata-only watch: %w", obj, err) + } + metaObj.SetGroupVersionKind(gvk) + return metaObj, nil + default: + panic(fmt.Sprintf("unexpected projection type %v on type %T, should not be possible since this is an internal field", proj, obj)) + } +} + +func (blder *Builder) doWatch() error { + // Reconcile type + typeForSrc, err := blder.project(blder.forInput.object, blder.forInput.objectProjection) + if err != nil { + return err + } + src := &source.Kind{Type: typeForSrc} + hdler := &handler.EnqueueRequestForObject{} + allPredicates := append(blder.globalPredicates, blder.forInput.predicates...) + if err := blder.ctrl.Watch(src, hdler, allPredicates...); err != nil { + return err + } + + // Watches the managed types + for _, own := range blder.ownsInput { + typeForSrc, err := blder.project(own.object, own.objectProjection) + if err != nil { + return err + } + src := &source.Kind{Type: typeForSrc} + hdler := &handler.EnqueueRequestForOwner{ + OwnerType: blder.forInput.object, + IsController: true, + } + allPredicates := append([]predicate.Predicate(nil), blder.globalPredicates...) + allPredicates = append(allPredicates, own.predicates...) + if err := blder.ctrl.Watch(src, hdler, allPredicates...); err != nil { + return err + } + } + + // Do the watch requests + for _, w := range blder.watchesInput { + allPredicates := append([]predicate.Predicate(nil), blder.globalPredicates...) + allPredicates = append(allPredicates, w.predicates...) 
+ + // If the source of this watch is of type *source.Kind, project it. + if srckind, ok := w.src.(*source.Kind); ok { + typeForSrc, err := blder.project(srckind.Type, w.objectProjection) + if err != nil { + return err + } + srckind.Type = typeForSrc + } + + if err := blder.ctrl.Watch(w.src, w.eventhandler, allPredicates...); err != nil { + return err + } + } + return nil +} + +func (blder *Builder) getControllerName(gvk schema.GroupVersionKind) string { + if blder.name != "" { + return blder.name + } + return strings.ToLower(gvk.Kind) +} + +func (blder *Builder) doController(r reconcile.Reconciler) error { + globalOpts := blder.mgr.GetControllerOptions() + + ctrlOptions := blder.ctrlOptions + if ctrlOptions.Reconciler == nil { + ctrlOptions.Reconciler = r + } + + // Retrieve the GVK from the object we're reconciling + // to prepopulate logger information, and to optionally generate a default name. + gvk, err := getGvk(blder.forInput.object, blder.mgr.GetScheme()) + if err != nil { + return err + } + + // Setup concurrency. + if ctrlOptions.MaxConcurrentReconciles == 0 { + groupKind := gvk.GroupKind().String() + + if concurrency, ok := globalOpts.GroupKindConcurrency[groupKind]; ok && concurrency > 0 { + ctrlOptions.MaxConcurrentReconciles = concurrency + } + } + + // Setup cache sync timeout. + if ctrlOptions.CacheSyncTimeout == 0 && globalOpts.CacheSyncTimeout != nil { + ctrlOptions.CacheSyncTimeout = *globalOpts.CacheSyncTimeout + } + + controllerName := blder.getControllerName(gvk) + + // Setup the logger. + if ctrlOptions.LogConstructor == nil { + log := blder.mgr.GetLogger().WithValues( + "controller", controllerName, + "controllerGroup", gvk.Group, + "controllerKind", gvk.Kind, + ) + + lowerCamelCaseKind := strings.ToLower(gvk.Kind[:1]) + gvk.Kind[1:] + + ctrlOptions.LogConstructor = func(req *reconcile.Request) logr.Logger { + log := log + if req != nil { + log = log.WithValues( + lowerCamelCaseKind, klog.KRef(req.Namespace, req.Name), + "namespace", req.Namespace, "name", req.Name, + ) + } + return log + } + } + + // Build the controller and return. + blder.ctrl, err = newController(controllerName, blder.mgr, ctrlOptions) + return err +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/builder/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/builder/doc.go new file mode 100644 index 000000000..e4df1b709 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/builder/doc.go @@ -0,0 +1,28 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package builder wraps other controller-runtime libraries and exposes simple +// patterns for building common Controllers. +// +// Projects built with the builder package can trivially be rebased on top of the underlying +// packages if the project requires more customized behavior in the future. 
+package builder + +import ( + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" +) + +var log = logf.RuntimeLog.WithName("builder") diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/builder/options.go b/vendor/sigs.k8s.io/controller-runtime/pkg/builder/options.go new file mode 100644 index 000000000..c738ba7d1 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/builder/options.go @@ -0,0 +1,140 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder + +import ( + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +// {{{ "Functional" Option Interfaces + +// ForOption is some configuration that modifies options for a For request. +type ForOption interface { + // ApplyToFor applies this configuration to the given for input. + ApplyToFor(*ForInput) +} + +// OwnsOption is some configuration that modifies options for a owns request. +type OwnsOption interface { + // ApplyToOwns applies this configuration to the given owns input. + ApplyToOwns(*OwnsInput) +} + +// WatchesOption is some configuration that modifies options for a watches request. +type WatchesOption interface { + // ApplyToWatches applies this configuration to the given watches options. + ApplyToWatches(*WatchesInput) +} + +// }}} + +// {{{ Multi-Type Options + +// WithPredicates sets the given predicates list. +func WithPredicates(predicates ...predicate.Predicate) Predicates { + return Predicates{ + predicates: predicates, + } +} + +// Predicates filters events before enqueuing the keys. +type Predicates struct { + predicates []predicate.Predicate +} + +// ApplyToFor applies this configuration to the given ForInput options. +func (w Predicates) ApplyToFor(opts *ForInput) { + opts.predicates = w.predicates +} + +// ApplyToOwns applies this configuration to the given OwnsInput options. +func (w Predicates) ApplyToOwns(opts *OwnsInput) { + opts.predicates = w.predicates +} + +// ApplyToWatches applies this configuration to the given WatchesInput options. +func (w Predicates) ApplyToWatches(opts *WatchesInput) { + opts.predicates = w.predicates +} + +var _ ForOption = &Predicates{} +var _ OwnsOption = &Predicates{} +var _ WatchesOption = &Predicates{} + +// }}} + +// {{{ For & Owns Dual-Type options + +// asProjection configures the projection (currently only metadata) on the input. +// Currently only metadata is supported. We might want to expand +// this to arbitrary non-special local projections in the future. +type projectAs objectProjection + +// ApplyToFor applies this configuration to the given ForInput options. +func (p projectAs) ApplyToFor(opts *ForInput) { + opts.objectProjection = objectProjection(p) +} + +// ApplyToOwns applies this configuration to the given OwnsInput options. +func (p projectAs) ApplyToOwns(opts *OwnsInput) { + opts.objectProjection = objectProjection(p) +} + +// ApplyToWatches applies this configuration to the given WatchesInput options. 
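For instance, per-input predicates are attached by passing the option alongside the object; a sketch using the stock `GenerationChangedPredicate` (the helper name and the Deployment type are placeholders):

```go
package example

import (
	appsv1 "k8s.io/api/apps/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/builder"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// setupWithPredicates only enqueues a Deployment when its generation (spec)
// changes; status-only updates are filtered out before they reach Reconcile.
func setupWithPredicates(mgr manager.Manager, r reconcile.Reconciler) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&appsv1.Deployment{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})).
		Complete(r)
}
```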
+func (p projectAs) ApplyToWatches(opts *WatchesInput) {
+	opts.objectProjection = objectProjection(p)
+}
+
+var (
+	// OnlyMetadata tells the controller to *only* cache metadata, and to watch
+	// the API server in metadata-only form. This is useful when watching
+	// lots of objects, really big objects, or objects for which you only know
+	// the GVK, but not the structure. You'll need to pass
+	// metav1.PartialObjectMetadata to the client when fetching objects in your
+	// reconciler, otherwise you'll end up with a duplicate structured or
+	// unstructured cache.
+	//
+	// When watching a resource with OnlyMetadata, for example the v1.Pod, you
+	// should not Get and List using the v1.Pod type. Instead, you should use
+	// the special metav1.PartialObjectMetadata type.
+	//
+	// ❌ Incorrect:
+	//
+	//   pod := &v1.Pod{}
+	//   mgr.GetClient().Get(ctx, nsAndName, pod)
+	//
+	// ✅ Correct:
+	//
+	//   pod := &metav1.PartialObjectMetadata{}
+	//   pod.SetGroupVersionKind(schema.GroupVersionKind{
+	//       Group:   "",
+	//       Version: "v1",
+	//       Kind:    "Pod",
+	//   })
+	//   mgr.GetClient().Get(ctx, nsAndName, pod)
+	//
+	// In the first case, controller-runtime will create another cache for the
+	// concrete type on top of the metadata cache; this increases memory
+	// consumption and leads to race conditions as caches are not in sync.
+	OnlyMetadata = projectAs(projectAsMetadata)
+
+	_ ForOption     = OnlyMetadata
+	_ OwnsOption    = OnlyMetadata
+	_ WatchesOption = OnlyMetadata
+)
+
+// }}}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/builder/webhook.go b/vendor/sigs.k8s.io/controller-runtime/pkg/builder/webhook.go
new file mode 100644
index 000000000..18feb1cd7
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/builder/webhook.go
@@ -0,0 +1,209 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package builder
+
+import (
+	"errors"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/rest"
+	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
+	"sigs.k8s.io/controller-runtime/pkg/manager"
+	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
+	"sigs.k8s.io/controller-runtime/pkg/webhook/conversion"
+)
+
+// WebhookBuilder builds a Webhook.
+type WebhookBuilder struct {
+	apiType       runtime.Object
+	withDefaulter admission.CustomDefaulter
+	withValidator admission.CustomValidator
+	gvk           schema.GroupVersionKind
+	mgr           manager.Manager
+	config        *rest.Config
+}
+
+// WebhookManagedBy returns a new WebhookBuilder managed by the given manager.Manager.
+func WebhookManagedBy(m manager.Manager) *WebhookBuilder {
+	return &WebhookBuilder{mgr: m}
+}
+
+// TODO(droot): update the GoDoc for conversion.
+
+// For takes a runtime.Object which should be a CR.
+// If the given object implements the admission.Defaulter interface, a MutatingWebhook will be wired for this type.
+// If the given object implements the admission.Validator interface, a ValidatingWebhook will be wired for this type.
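As a usage note for the webhook builder above, a minimal sketch (editorial aside; the CronJob type is an assumption, not part of this diff) of wiring admission webhooks through it:

// Assumes imports of manager and builder from sigs.k8s.io/controller-runtime/pkg/...,
// and that CronJob implements admission.Defaulter and/or admission.Validator.
func setupCronJobWebhook(mgr manager.Manager) error {
	// Complete registers /mutate-... and /validate-... handlers on the manager's
	// webhook server, plus /convert when the type is convertible.
	return builder.WebhookManagedBy(mgr).
		For(&CronJob{}).
		Complete()
}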
+func (blder *WebhookBuilder) For(apiType runtime.Object) *WebhookBuilder {
+	blder.apiType = apiType
+	return blder
+}
+
+// WithDefaulter takes an admission.CustomDefaulter interface; a MutatingWebhook will be wired for this type.
+func (blder *WebhookBuilder) WithDefaulter(defaulter admission.CustomDefaulter) *WebhookBuilder {
+	blder.withDefaulter = defaulter
+	return blder
+}
+
+// WithValidator takes an admission.CustomValidator interface; a ValidatingWebhook will be wired for this type.
+func (blder *WebhookBuilder) WithValidator(validator admission.CustomValidator) *WebhookBuilder {
+	blder.withValidator = validator
+	return blder
+}
+
+// Complete builds the webhook.
+func (blder *WebhookBuilder) Complete() error {
+	// Set the Config
+	blder.loadRestConfig()
+
+	// Set the Webhook if needed
+	return blder.registerWebhooks()
+}
+
+func (blder *WebhookBuilder) loadRestConfig() {
+	if blder.config == nil {
+		blder.config = blder.mgr.GetConfig()
+	}
+}
+
+func (blder *WebhookBuilder) registerWebhooks() error {
+	typ, err := blder.getType()
+	if err != nil {
+		return err
+	}
+
+	// Create webhook(s) for each type
+	blder.gvk, err = apiutil.GVKForObject(typ, blder.mgr.GetScheme())
+	if err != nil {
+		return err
+	}
+
+	blder.registerDefaultingWebhook()
+	blder.registerValidatingWebhook()
+
+	err = blder.registerConversionWebhook()
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// registerDefaultingWebhook registers a defaulting webhook if necessary.
+func (blder *WebhookBuilder) registerDefaultingWebhook() {
+	mwh := blder.getDefaultingWebhook()
+	if mwh != nil {
+		path := generateMutatePath(blder.gvk)
+
+		// Checking if the path is already registered.
+		// If so, just skip it.
+		if !blder.isAlreadyHandled(path) {
+			log.Info("Registering a mutating webhook",
+				"GVK", blder.gvk,
+				"path", path)
+			blder.mgr.GetWebhookServer().Register(path, mwh)
+		}
+	}
+}
+
+func (blder *WebhookBuilder) getDefaultingWebhook() *admission.Webhook {
+	if defaulter := blder.withDefaulter; defaulter != nil {
+		return admission.WithCustomDefaulter(blder.apiType, defaulter)
+	}
+	if defaulter, ok := blder.apiType.(admission.Defaulter); ok {
+		return admission.DefaultingWebhookFor(defaulter)
+	}
+	log.Info(
+		"skip registering a mutating webhook, object does not implement admission.Defaulter or WithDefaulter wasn't called",
+		"GVK", blder.gvk)
+	return nil
+}
+
+func (blder *WebhookBuilder) registerValidatingWebhook() {
+	vwh := blder.getValidatingWebhook()
+	if vwh != nil {
+		path := generateValidatePath(blder.gvk)
+
+		// Checking if the path is already registered.
+		// If so, just skip it.
+ if !blder.isAlreadyHandled(path) { + log.Info("Registering a validating webhook", + "GVK", blder.gvk, + "path", path) + blder.mgr.GetWebhookServer().Register(path, vwh) + } + } +} + +func (blder *WebhookBuilder) getValidatingWebhook() *admission.Webhook { + if validator := blder.withValidator; validator != nil { + return admission.WithCustomValidator(blder.apiType, validator) + } + if validator, ok := blder.apiType.(admission.Validator); ok { + return admission.ValidatingWebhookFor(validator) + } + log.Info( + "skip registering a validating webhook, object does not implement admission.Validator or WithValidator wasn't called", + "GVK", blder.gvk) + return nil +} + +func (blder *WebhookBuilder) registerConversionWebhook() error { + ok, err := conversion.IsConvertible(blder.mgr.GetScheme(), blder.apiType) + if err != nil { + log.Error(err, "conversion check failed", "GVK", blder.gvk) + return err + } + if ok { + if !blder.isAlreadyHandled("/convert") { + blder.mgr.GetWebhookServer().Register("/convert", &conversion.Webhook{}) + } + log.Info("Conversion webhook enabled", "GVK", blder.gvk) + } + + return nil +} + +func (blder *WebhookBuilder) getType() (runtime.Object, error) { + if blder.apiType != nil { + return blder.apiType, nil + } + return nil, errors.New("For() must be called with a valid object") +} + +func (blder *WebhookBuilder) isAlreadyHandled(path string) bool { + if blder.mgr.GetWebhookServer().WebhookMux == nil { + return false + } + h, p := blder.mgr.GetWebhookServer().WebhookMux.Handler(&http.Request{URL: &url.URL{Path: path}}) + if p == path && h != nil { + return true + } + return false +} + +func generateMutatePath(gvk schema.GroupVersionKind) string { + return "/mutate-" + strings.ReplaceAll(gvk.Group, ".", "-") + "-" + + gvk.Version + "-" + strings.ToLower(gvk.Kind) +} + +func generateValidatePath(gvk schema.GroupVersionKind) string { + return "/validate-" + strings.ReplaceAll(gvk.Group, ".", "-") + "-" + + gvk.Version + "-" + strings.ToLower(gvk.Kind) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go new file mode 100644 index 000000000..2a8f53347 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go @@ -0,0 +1,275 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cache + +import ( + "context" + "fmt" + "time" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + toolscache "k8s.io/client-go/tools/cache" + "sigs.k8s.io/controller-runtime/pkg/cache/internal" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" +) + +var log = logf.RuntimeLog.WithName("object-cache") + +// Cache knows how to load Kubernetes objects, fetch informers to request +// to receive events for Kubernetes objects (at a low-level), +// and add indices to fields on the objects stored in the cache. +type Cache interface { + // Cache acts as a client to objects stored in the cache. + client.Reader + + // Cache loads informers and adds field indices. + Informers +} + +// Informers knows how to create or fetch informers for different +// group-version-kinds, and add indices to those informers. It's safe to call +// GetInformer from multiple threads. +type Informers interface { + // GetInformer fetches or constructs an informer for the given object that corresponds to a single + // API kind and resource. + GetInformer(ctx context.Context, obj client.Object) (Informer, error) + + // GetInformerForKind is similar to GetInformer, except that it takes a group-version-kind, instead + // of the underlying object. + GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind) (Informer, error) + + // Start runs all the informers known to this cache until the context is closed. + // It blocks. + Start(ctx context.Context) error + + // WaitForCacheSync waits for all the caches to sync. Returns false if it could not sync a cache. + WaitForCacheSync(ctx context.Context) bool + + // Informers knows how to add indices to the caches (informers) that it manages. + client.FieldIndexer +} + +// Informer - informer allows you interact with the underlying informer. +type Informer interface { + // AddEventHandler adds an event handler to the shared informer using the shared informer's resync + // period. Events to a single handler are delivered sequentially, but there is no coordination + // between different handlers. + AddEventHandler(handler toolscache.ResourceEventHandler) + // AddEventHandlerWithResyncPeriod adds an event handler to the shared informer using the + // specified resync period. Events to a single handler are delivered sequentially, but there is + // no coordination between different handlers. + AddEventHandlerWithResyncPeriod(handler toolscache.ResourceEventHandler, resyncPeriod time.Duration) + // AddIndexers adds more indexers to this store. If you call this after you already have data + // in the store, the results are undefined. + AddIndexers(indexers toolscache.Indexers) error + // HasSynced return true if the informers underlying store has synced. + HasSynced() bool +} + +// ObjectSelector is an alias name of internal.Selector. +type ObjectSelector internal.Selector + +// SelectorsByObject associate a client.Object's GVK to a field/label selector. +// There is also `DefaultSelector` to set a global default (which will be overridden by +// a more specific setting here, if any). +type SelectorsByObject map[client.Object]ObjectSelector + +// Options are the optional arguments for creating a new InformersMap object. 
+type Options struct { + // Scheme is the scheme to use for mapping objects to GroupVersionKinds + Scheme *runtime.Scheme + + // Mapper is the RESTMapper to use for mapping GroupVersionKinds to Resources + Mapper meta.RESTMapper + + // Resync is the base frequency the informers are resynced. + // Defaults to defaultResyncTime. + // A 10 percent jitter will be added to the Resync period between informers + // So that all informers will not send list requests simultaneously. + Resync *time.Duration + + // Namespace restricts the cache's ListWatch to the desired namespace + // Default watches all namespaces + Namespace string + + // SelectorsByObject restricts the cache's ListWatch to the desired + // fields per GVK at the specified object, the map's value must implement + // Selector [1] using for example a Set [2] + // [1] https://pkg.go.dev/k8s.io/apimachinery/pkg/fields#Selector + // [2] https://pkg.go.dev/k8s.io/apimachinery/pkg/fields#Set + SelectorsByObject SelectorsByObject + + // DefaultSelector will be used as selectors for all object types + // that do not have a selector in SelectorsByObject defined. + DefaultSelector ObjectSelector + + // UnsafeDisableDeepCopyByObject indicates not to deep copy objects during get or + // list objects per GVK at the specified object. + // Be very careful with this, when enabled you must DeepCopy any object before mutating it, + // otherwise you will mutate the object in the cache. + UnsafeDisableDeepCopyByObject DisableDeepCopyByObject + + // TransformByObject is a map from GVKs to transformer functions which + // get applied when objects of the transformation are about to be committed + // to cache. + // + // This function is called both for new objects to enter the cache, + // and for updated objects. + TransformByObject TransformByObject + + // DefaultTransform is the transform used for all GVKs which do + // not have an explicit transform func set in TransformByObject + DefaultTransform toolscache.TransformFunc +} + +var defaultResyncTime = 10 * time.Hour + +// New initializes and returns a new Cache. +func New(config *rest.Config, opts Options) (Cache, error) { + opts, err := defaultOpts(config, opts) + if err != nil { + return nil, err + } + selectorsByGVK, err := convertToSelectorsByGVK(opts.SelectorsByObject, opts.DefaultSelector, opts.Scheme) + if err != nil { + return nil, err + } + disableDeepCopyByGVK, err := convertToDisableDeepCopyByGVK(opts.UnsafeDisableDeepCopyByObject, opts.Scheme) + if err != nil { + return nil, err + } + transformByGVK, err := convertToTransformByKindAndGVK(opts.TransformByObject, opts.DefaultTransform, opts.Scheme) + if err != nil { + return nil, err + } + + im := internal.NewInformersMap(config, opts.Scheme, opts.Mapper, *opts.Resync, opts.Namespace, selectorsByGVK, disableDeepCopyByGVK, transformByGVK) + return &informerCache{InformersMap: im}, nil +} + +// BuilderWithOptions returns a Cache constructor that will build the a cache +// honoring the options argument, this is useful to specify options like +// SelectorsByObject +// WARNING: if SelectorsByObject is specified. filtered out resources are not +// returned. +// WARNING: if UnsafeDisableDeepCopy is enabled, you must DeepCopy any object +// returned from cache get/list before mutating it. 
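A minimal sketch (editorial aside; the helper name and selector values are assumptions) of how these cache Options and BuilderWithOptions are typically consumed via a manager:

// Assumes imports of corev1, labels, fields, rest, and the cache and manager
// packages from sigs.k8s.io/controller-runtime.
func newFilteredManager(cfg *rest.Config) (manager.Manager, error) {
	return manager.New(cfg, manager.Options{
		// NewCache lets the manager build its cache with these extra options.
		NewCache: cache.BuilderWithOptions(cache.Options{
			SelectorsByObject: cache.SelectorsByObject{
				&corev1.Node{}: {Label: labels.SelectorFromSet(labels.Set{"node-role.kubernetes.io/worker": ""})},
				&corev1.Pod{}:  {Field: fields.OneTermEqualSelector("spec.nodeName", "node-1")},
			},
		}),
	})
}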
+func BuilderWithOptions(options Options) NewCacheFunc { + return func(config *rest.Config, opts Options) (Cache, error) { + if options.Scheme == nil { + options.Scheme = opts.Scheme + } + if options.Mapper == nil { + options.Mapper = opts.Mapper + } + if options.Resync == nil { + options.Resync = opts.Resync + } + if options.Namespace == "" { + options.Namespace = opts.Namespace + } + if opts.Resync == nil { + opts.Resync = options.Resync + } + + return New(config, options) + } +} + +func defaultOpts(config *rest.Config, opts Options) (Options, error) { + // Use the default Kubernetes Scheme if unset + if opts.Scheme == nil { + opts.Scheme = scheme.Scheme + } + + // Construct a new Mapper if unset + if opts.Mapper == nil { + var err error + opts.Mapper, err = apiutil.NewDiscoveryRESTMapper(config) + if err != nil { + log.WithName("setup").Error(err, "Failed to get API Group-Resources") + return opts, fmt.Errorf("could not create RESTMapper from config") + } + } + + // Default the resync period to 10 hours if unset + if opts.Resync == nil { + opts.Resync = &defaultResyncTime + } + return opts, nil +} + +func convertToSelectorsByGVK(selectorsByObject SelectorsByObject, defaultSelector ObjectSelector, scheme *runtime.Scheme) (internal.SelectorsByGVK, error) { + selectorsByGVK := internal.SelectorsByGVK{} + for object, selector := range selectorsByObject { + gvk, err := apiutil.GVKForObject(object, scheme) + if err != nil { + return nil, err + } + selectorsByGVK[gvk] = internal.Selector(selector) + } + selectorsByGVK[schema.GroupVersionKind{}] = internal.Selector(defaultSelector) + return selectorsByGVK, nil +} + +// DisableDeepCopyByObject associate a client.Object's GVK to disable DeepCopy during get or list from cache. +type DisableDeepCopyByObject map[client.Object]bool + +var _ client.Object = &ObjectAll{} + +// ObjectAll is the argument to represent all objects' types. +type ObjectAll struct { + client.Object +} + +func convertToDisableDeepCopyByGVK(disableDeepCopyByObject DisableDeepCopyByObject, scheme *runtime.Scheme) (internal.DisableDeepCopyByGVK, error) { + disableDeepCopyByGVK := internal.DisableDeepCopyByGVK{} + for obj, disable := range disableDeepCopyByObject { + switch obj.(type) { + case ObjectAll, *ObjectAll: + disableDeepCopyByGVK[internal.GroupVersionKindAll] = disable + default: + gvk, err := apiutil.GVKForObject(obj, scheme) + if err != nil { + return nil, err + } + disableDeepCopyByGVK[gvk] = disable + } + } + return disableDeepCopyByGVK, nil +} + +// TransformByObject associate a client.Object's GVK to a transformer function +// to be applied when storing the object into the cache. +type TransformByObject map[client.Object]toolscache.TransformFunc + +func convertToTransformByKindAndGVK(t TransformByObject, defaultTransform toolscache.TransformFunc, scheme *runtime.Scheme) (internal.TransformFuncByObject, error) { + result := internal.NewTransformFuncByObject() + for obj, transformation := range t { + if err := result.Set(obj, scheme, transformation); err != nil { + return nil, err + } + } + result.SetDefault(defaultTransform) + return result, nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/doc.go new file mode 100644 index 000000000..e1742ac0f --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package cache provides object caches that act as caching client.Reader +// instances and help drive Kubernetes-object-based event handlers. +package cache diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go new file mode 100644 index 000000000..ac07be2a6 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go @@ -0,0 +1,217 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "context" + "fmt" + "reflect" + "strings" + + apimeta "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/tools/cache" + "sigs.k8s.io/controller-runtime/pkg/cache/internal" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" +) + +var ( + _ Informers = &informerCache{} + _ client.Reader = &informerCache{} + _ Cache = &informerCache{} +) + +// ErrCacheNotStarted is returned when trying to read from the cache that wasn't started. +type ErrCacheNotStarted struct{} + +func (*ErrCacheNotStarted) Error() string { + return "the cache is not started, can not read objects" +} + +// informerCache is a Kubernetes Object cache populated from InformersMap. informerCache wraps an InformersMap. +type informerCache struct { + *internal.InformersMap +} + +// Get implements Reader. +func (ip *informerCache) Get(ctx context.Context, key client.ObjectKey, out client.Object) error { + gvk, err := apiutil.GVKForObject(out, ip.Scheme) + if err != nil { + return err + } + + started, cache, err := ip.InformersMap.Get(ctx, gvk, out) + if err != nil { + return err + } + + if !started { + return &ErrCacheNotStarted{} + } + return cache.Reader.Get(ctx, key, out) +} + +// List implements Reader. +func (ip *informerCache) List(ctx context.Context, out client.ObjectList, opts ...client.ListOption) error { + gvk, cacheTypeObj, err := ip.objectTypeForListObject(out) + if err != nil { + return err + } + + started, cache, err := ip.InformersMap.Get(ctx, *gvk, cacheTypeObj) + if err != nil { + return err + } + + if !started { + return &ErrCacheNotStarted{} + } + + return cache.Reader.List(ctx, out, opts...) +} + +// objectTypeForListObject tries to find the runtime.Object and associated GVK +// for a single object corresponding to the passed-in list type. 
We need them +// because they are used as cache map key. +func (ip *informerCache) objectTypeForListObject(list client.ObjectList) (*schema.GroupVersionKind, runtime.Object, error) { + gvk, err := apiutil.GVKForObject(list, ip.Scheme) + if err != nil { + return nil, nil, err + } + + // we need the non-list GVK, so chop off the "List" from the end of the kind + if strings.HasSuffix(gvk.Kind, "List") && apimeta.IsListType(list) { + gvk.Kind = gvk.Kind[:len(gvk.Kind)-4] + } + + _, isUnstructured := list.(*unstructured.UnstructuredList) + var cacheTypeObj runtime.Object + if isUnstructured { + u := &unstructured.Unstructured{} + u.SetGroupVersionKind(gvk) + cacheTypeObj = u + } else { + itemsPtr, err := apimeta.GetItemsPtr(list) + if err != nil { + return nil, nil, err + } + // http://knowyourmeme.com/memes/this-is-fine + elemType := reflect.Indirect(reflect.ValueOf(itemsPtr)).Type().Elem() + if elemType.Kind() != reflect.Ptr { + elemType = reflect.PtrTo(elemType) + } + + cacheTypeValue := reflect.Zero(elemType) + var ok bool + cacheTypeObj, ok = cacheTypeValue.Interface().(runtime.Object) + if !ok { + return nil, nil, fmt.Errorf("cannot get cache for %T, its element %T is not a runtime.Object", list, cacheTypeValue.Interface()) + } + } + + return &gvk, cacheTypeObj, nil +} + +// GetInformerForKind returns the informer for the GroupVersionKind. +func (ip *informerCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind) (Informer, error) { + // Map the gvk to an object + obj, err := ip.Scheme.New(gvk) + if err != nil { + return nil, err + } + + _, i, err := ip.InformersMap.Get(ctx, gvk, obj) + if err != nil { + return nil, err + } + return i.Informer, err +} + +// GetInformer returns the informer for the obj. +func (ip *informerCache) GetInformer(ctx context.Context, obj client.Object) (Informer, error) { + gvk, err := apiutil.GVKForObject(obj, ip.Scheme) + if err != nil { + return nil, err + } + + _, i, err := ip.InformersMap.Get(ctx, gvk, obj) + if err != nil { + return nil, err + } + return i.Informer, err +} + +// NeedLeaderElection implements the LeaderElectionRunnable interface +// to indicate that this can be started without requiring the leader lock. +func (ip *informerCache) NeedLeaderElection() bool { + return false +} + +// IndexField adds an indexer to the underlying cache, using extraction function to get +// value(s) from the given field. This index can then be used by passing a field selector +// to List. For one-to-one compatibility with "normal" field selectors, only return one value. +// The values may be anything. They will automatically be prefixed with the namespace of the +// given object, if present. The objects passed are guaranteed to be objects of the correct type. +func (ip *informerCache) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error { + informer, err := ip.GetInformer(ctx, obj) + if err != nil { + return err + } + return indexByField(informer, field, extractValue) +} + +func indexByField(indexer Informer, field string, extractor client.IndexerFunc) error { + indexFunc := func(objRaw interface{}) ([]string, error) { + // TODO(directxman12): check if this is the correct type? 
+ obj, isObj := objRaw.(client.Object) + if !isObj { + return nil, fmt.Errorf("object of type %T is not an Object", objRaw) + } + meta, err := apimeta.Accessor(obj) + if err != nil { + return nil, err + } + ns := meta.GetNamespace() + + rawVals := extractor(obj) + var vals []string + if ns == "" { + // if we're not doubling the keys for the namespaced case, just create a new slice with same length + vals = make([]string, len(rawVals)) + } else { + // if we need to add non-namespaced versions too, double the length + vals = make([]string, len(rawVals)*2) + } + for i, rawVal := range rawVals { + // save a namespaced variant, so that we can ask + // "what are all the object matching a given index *in a given namespace*" + vals[i] = internal.KeyToNamespacedKey(ns, rawVal) + if ns != "" { + // if we have a namespace, also inject a special index key for listing + // regardless of the object namespace + vals[i+len(rawVals)] = internal.KeyToNamespacedKey("", rawVal) + } + } + + return vals, nil + } + + return indexer.AddIndexers(cache.Indexers{internal.FieldIndexName(field): indexFunc}) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go new file mode 100644 index 000000000..b95af18d7 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go @@ -0,0 +1,218 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "context" + "fmt" + "reflect" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + apimeta "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/client-go/tools/cache" + + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// CacheReader is a client.Reader. +var _ client.Reader = &CacheReader{} + +// CacheReader wraps a cache.Index to implement the client.CacheReader interface for a single type. +type CacheReader struct { + // indexer is the underlying indexer wrapped by this cache. + indexer cache.Indexer + + // groupVersionKind is the group-version-kind of the resource. + groupVersionKind schema.GroupVersionKind + + // scopeName is the scope of the resource (namespaced or cluster-scoped). + scopeName apimeta.RESTScopeName + + // disableDeepCopy indicates not to deep copy objects during get or list objects. + // Be very careful with this, when enabled you must DeepCopy any object before mutating it, + // otherwise you will mutate the object in the cache. + disableDeepCopy bool +} + +// Get checks the indexer for the object and writes a copy of it if found. 
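A sketch of the two-step indexing flow that IndexField and indexByField support (editorial aside; the "spec.nodeName" index and node name are illustrative). The index must be registered before the manager's cache is started:

// Assumes imports of context, corev1, and the client and manager packages.
func listPodsOnNode(ctx context.Context, mgr manager.Manager) (*corev1.PodList, error) {
	// Step 1 (at setup time): register the index with the manager's FieldIndexer.
	if err := mgr.GetFieldIndexer().IndexField(ctx, &corev1.Pod{}, "spec.nodeName",
		func(o client.Object) []string {
			return []string{o.(*corev1.Pod).Spec.NodeName}
		}); err != nil {
		return nil, err
	}

	// Step 2 (at runtime): an exact-match field selector is served from that index
	// through indexByField / FieldIndexName.
	pods := &corev1.PodList{}
	err := mgr.GetClient().List(ctx, pods, client.MatchingFields{"spec.nodeName": "node-1"})
	return pods, err
}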
+func (c *CacheReader) Get(_ context.Context, key client.ObjectKey, out client.Object) error { + if c.scopeName == apimeta.RESTScopeNameRoot { + key.Namespace = "" + } + storeKey := objectKeyToStoreKey(key) + + // Lookup the object from the indexer cache + obj, exists, err := c.indexer.GetByKey(storeKey) + if err != nil { + return err + } + + // Not found, return an error + if !exists { + // Resource gets transformed into Kind in the error anyway, so this is fine + return apierrors.NewNotFound(schema.GroupResource{ + Group: c.groupVersionKind.Group, + Resource: c.groupVersionKind.Kind, + }, key.Name) + } + + // Verify the result is a runtime.Object + if _, isObj := obj.(runtime.Object); !isObj { + // This should never happen + return fmt.Errorf("cache contained %T, which is not an Object", obj) + } + + if c.disableDeepCopy { + // skip deep copy which might be unsafe + // you must DeepCopy any object before mutating it outside + } else { + // deep copy to avoid mutating cache + obj = obj.(runtime.Object).DeepCopyObject() + } + + // Copy the value of the item in the cache to the returned value + // TODO(directxman12): this is a terrible hack, pls fix (we should have deepcopyinto) + outVal := reflect.ValueOf(out) + objVal := reflect.ValueOf(obj) + if !objVal.Type().AssignableTo(outVal.Type()) { + return fmt.Errorf("cache had type %s, but %s was asked for", objVal.Type(), outVal.Type()) + } + reflect.Indirect(outVal).Set(reflect.Indirect(objVal)) + if !c.disableDeepCopy { + out.GetObjectKind().SetGroupVersionKind(c.groupVersionKind) + } + + return nil +} + +// List lists items out of the indexer and writes them to out. +func (c *CacheReader) List(_ context.Context, out client.ObjectList, opts ...client.ListOption) error { + var objs []interface{} + var err error + + listOpts := client.ListOptions{} + listOpts.ApplyOptions(opts) + + switch { + case listOpts.FieldSelector != nil: + // TODO(directxman12): support more complicated field selectors by + // combining multiple indices, GetIndexers, etc + field, val, requiresExact := requiresExactMatch(listOpts.FieldSelector) + if !requiresExact { + return fmt.Errorf("non-exact field matches are not supported by the cache") + } + // list all objects by the field selector. If this is namespaced and we have one, ask for the + // namespaced index key. Otherwise, ask for the non-namespaced variant by using the fake "all namespaces" + // namespace. + objs, err = c.indexer.ByIndex(FieldIndexName(field), KeyToNamespacedKey(listOpts.Namespace, val)) + case listOpts.Namespace != "": + objs, err = c.indexer.ByIndex(cache.NamespaceIndex, listOpts.Namespace) + default: + objs = c.indexer.List() + } + if err != nil { + return err + } + var labelSel labels.Selector + if listOpts.LabelSelector != nil { + labelSel = listOpts.LabelSelector + } + + limitSet := listOpts.Limit > 0 + + runtimeObjs := make([]runtime.Object, 0, len(objs)) + for _, item := range objs { + // if the Limit option is set and the number of items + // listed exceeds this limit, then stop reading. 
+ if limitSet && int64(len(runtimeObjs)) >= listOpts.Limit { + break + } + obj, isObj := item.(runtime.Object) + if !isObj { + return fmt.Errorf("cache contained %T, which is not an Object", obj) + } + meta, err := apimeta.Accessor(obj) + if err != nil { + return err + } + if labelSel != nil { + lbls := labels.Set(meta.GetLabels()) + if !labelSel.Matches(lbls) { + continue + } + } + + var outObj runtime.Object + if c.disableDeepCopy { + // skip deep copy which might be unsafe + // you must DeepCopy any object before mutating it outside + outObj = obj + } else { + outObj = obj.DeepCopyObject() + outObj.GetObjectKind().SetGroupVersionKind(c.groupVersionKind) + } + runtimeObjs = append(runtimeObjs, outObj) + } + return apimeta.SetList(out, runtimeObjs) +} + +// objectKeyToStorageKey converts an object key to store key. +// It's akin to MetaNamespaceKeyFunc. It's separate from +// String to allow keeping the key format easily in sync with +// MetaNamespaceKeyFunc. +func objectKeyToStoreKey(k client.ObjectKey) string { + if k.Namespace == "" { + return k.Name + } + return k.Namespace + "/" + k.Name +} + +// requiresExactMatch checks if the given field selector is of the form `k=v` or `k==v`. +func requiresExactMatch(sel fields.Selector) (field, val string, required bool) { + reqs := sel.Requirements() + if len(reqs) != 1 { + return "", "", false + } + req := reqs[0] + if req.Operator != selection.Equals && req.Operator != selection.DoubleEquals { + return "", "", false + } + return req.Field, req.Value, true +} + +// FieldIndexName constructs the name of the index over the given field, +// for use with an indexer. +func FieldIndexName(field string) string { + return "field:" + field +} + +// noNamespaceNamespace is used as the "namespace" when we want to list across all namespaces. +const allNamespacesNamespace = "__all_namespaces" + +// KeyToNamespacedKey prefixes the given index key with a namespace +// for use in field selector indexes. +func KeyToNamespacedKey(ns string, baseKey string) string { + if ns != "" { + return ns + "/" + baseKey + } + return allNamespacesNamespace + "/" + baseKey +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go new file mode 100644 index 000000000..27f46e327 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go @@ -0,0 +1,126 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "context" + "time" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" +) + +// InformersMap create and caches Informers for (runtime.Object, schema.GroupVersionKind) pairs. +// It uses a standard parameter codec constructed based on the given generated Scheme. 
+type InformersMap struct { + // we abstract over the details of structured/unstructured/metadata with the specificInformerMaps + // TODO(directxman12): genericize this over different projections now that we have 3 different maps + + structured *specificInformersMap + unstructured *specificInformersMap + metadata *specificInformersMap + + // Scheme maps runtime.Objects to GroupVersionKinds + Scheme *runtime.Scheme +} + +// NewInformersMap creates a new InformersMap that can create informers for +// both structured and unstructured objects. +func NewInformersMap(config *rest.Config, + scheme *runtime.Scheme, + mapper meta.RESTMapper, + resync time.Duration, + namespace string, + selectors SelectorsByGVK, + disableDeepCopy DisableDeepCopyByGVK, + transformers TransformFuncByObject, +) *InformersMap { + return &InformersMap{ + structured: newStructuredInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers), + unstructured: newUnstructuredInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers), + metadata: newMetadataInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers), + + Scheme: scheme, + } +} + +// Start calls Run on each of the informers and sets started to true. Blocks on the context. +func (m *InformersMap) Start(ctx context.Context) error { + go m.structured.Start(ctx) + go m.unstructured.Start(ctx) + go m.metadata.Start(ctx) + <-ctx.Done() + return nil +} + +// WaitForCacheSync waits until all the caches have been started and synced. +func (m *InformersMap) WaitForCacheSync(ctx context.Context) bool { + syncedFuncs := append([]cache.InformerSynced(nil), m.structured.HasSyncedFuncs()...) + syncedFuncs = append(syncedFuncs, m.unstructured.HasSyncedFuncs()...) + syncedFuncs = append(syncedFuncs, m.metadata.HasSyncedFuncs()...) + + if !m.structured.waitForStarted(ctx) { + return false + } + if !m.unstructured.waitForStarted(ctx) { + return false + } + if !m.metadata.waitForStarted(ctx) { + return false + } + return cache.WaitForCacheSync(ctx.Done(), syncedFuncs...) +} + +// Get will create a new Informer and add it to the map of InformersMap if none exists. Returns +// the Informer from the map. +func (m *InformersMap) Get(ctx context.Context, gvk schema.GroupVersionKind, obj runtime.Object) (bool, *MapEntry, error) { + switch obj.(type) { + case *unstructured.Unstructured: + return m.unstructured.Get(ctx, gvk, obj) + case *unstructured.UnstructuredList: + return m.unstructured.Get(ctx, gvk, obj) + case *metav1.PartialObjectMetadata: + return m.metadata.Get(ctx, gvk, obj) + case *metav1.PartialObjectMetadataList: + return m.metadata.Get(ctx, gvk, obj) + default: + return m.structured.Get(ctx, gvk, obj) + } +} + +// newStructuredInformersMap creates a new InformersMap for structured objects. +func newStructuredInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration, + namespace string, selectors SelectorsByGVK, disableDeepCopy DisableDeepCopyByGVK, transformers TransformFuncByObject) *specificInformersMap { + return newSpecificInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers, createStructuredListWatch) +} + +// newUnstructuredInformersMap creates a new InformersMap for unstructured objects. 
+func newUnstructuredInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration, + namespace string, selectors SelectorsByGVK, disableDeepCopy DisableDeepCopyByGVK, transformers TransformFuncByObject) *specificInformersMap { + return newSpecificInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers, createUnstructuredListWatch) +} + +// newMetadataInformersMap creates a new InformersMap for metadata-only objects. +func newMetadataInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration, + namespace string, selectors SelectorsByGVK, disableDeepCopy DisableDeepCopyByGVK, transformers TransformFuncByObject) *specificInformersMap { + return newSpecificInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers, createMetadataListWatch) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/disabledeepcopy.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/disabledeepcopy.go new file mode 100644 index 000000000..54bd7eec9 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/disabledeepcopy.go @@ -0,0 +1,35 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import "k8s.io/apimachinery/pkg/runtime/schema" + +// GroupVersionKindAll is the argument to represent all GroupVersionKind types. +var GroupVersionKindAll = schema.GroupVersionKind{} + +// DisableDeepCopyByGVK associate a GroupVersionKind to disable DeepCopy during get or list from cache. +type DisableDeepCopyByGVK map[schema.GroupVersionKind]bool + +// IsDisabled returns whether a GroupVersionKind is disabled DeepCopy. +func (disableByGVK DisableDeepCopyByGVK) IsDisabled(gvk schema.GroupVersionKind) bool { + if d, ok := disableByGVK[gvk]; ok { + return d + } else if d, ok = disableByGVK[GroupVersionKindAll]; ok { + return d + } + return false +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go new file mode 100644 index 000000000..1524d2316 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go @@ -0,0 +1,480 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package internal + +import ( + "context" + "fmt" + "math/rand" + "sync" + "time" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/metadata" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" + + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" +) + +func init() { + rand.Seed(time.Now().UnixNano()) +} + +// clientListWatcherFunc knows how to create a ListWatcher. +type createListWatcherFunc func(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) + +// newSpecificInformersMap returns a new specificInformersMap (like +// the generical InformersMap, except that it doesn't implement WaitForCacheSync). +func newSpecificInformersMap(config *rest.Config, + scheme *runtime.Scheme, + mapper meta.RESTMapper, + resync time.Duration, + namespace string, + selectors SelectorsByGVK, + disableDeepCopy DisableDeepCopyByGVK, + transformers TransformFuncByObject, + createListWatcher createListWatcherFunc, +) *specificInformersMap { + ip := &specificInformersMap{ + config: config, + Scheme: scheme, + mapper: mapper, + informersByGVK: make(map[schema.GroupVersionKind]*MapEntry), + codecs: serializer.NewCodecFactory(scheme), + paramCodec: runtime.NewParameterCodec(scheme), + resync: resync, + startWait: make(chan struct{}), + createListWatcher: createListWatcher, + namespace: namespace, + selectors: selectors.forGVK, + disableDeepCopy: disableDeepCopy, + transformers: transformers, + } + return ip +} + +// MapEntry contains the cached data for an Informer. +type MapEntry struct { + // Informer is the cached informer + Informer cache.SharedIndexInformer + + // CacheReader wraps Informer and implements the CacheReader interface for a single type + Reader CacheReader +} + +// specificInformersMap create and caches Informers for (runtime.Object, schema.GroupVersionKind) pairs. +// It uses a standard parameter codec constructed based on the given generated Scheme. +type specificInformersMap struct { + // Scheme maps runtime.Objects to GroupVersionKinds + Scheme *runtime.Scheme + + // config is used to talk to the apiserver + config *rest.Config + + // mapper maps GroupVersionKinds to Resources + mapper meta.RESTMapper + + // informersByGVK is the cache of informers keyed by groupVersionKind + informersByGVK map[schema.GroupVersionKind]*MapEntry + + // codecs is used to create a new REST client + codecs serializer.CodecFactory + + // paramCodec is used by list and watch + paramCodec runtime.ParameterCodec + + // stop is the stop channel to stop informers + stop <-chan struct{} + + // resync is the base frequency the informers are resynced + // a 10 percent jitter will be added to the resync period between informers + // so that all informers will not send list requests simultaneously. + resync time.Duration + + // mu guards access to the map + mu sync.RWMutex + + // start is true if the informers have been started + started bool + + // startWait is a channel that is closed after the + // informer has been started. + startWait chan struct{} + + // createClient knows how to create a client and a list object, + // and allows for abstracting over the particulars of structured vs + // unstructured objects. 
+ createListWatcher createListWatcherFunc + + // namespace is the namespace that all ListWatches are restricted to + // default or empty string means all namespaces + namespace string + + // selectors are the label or field selectors that will be added to the + // ListWatch ListOptions. + selectors func(gvk schema.GroupVersionKind) Selector + + // disableDeepCopy indicates not to deep copy objects during get or list objects. + disableDeepCopy DisableDeepCopyByGVK + + // transform funcs are applied to objects before they are committed to the cache + transformers TransformFuncByObject +} + +// Start calls Run on each of the informers and sets started to true. Blocks on the context. +// It doesn't return start because it can't return an error, and it's not a runnable directly. +func (ip *specificInformersMap) Start(ctx context.Context) { + func() { + ip.mu.Lock() + defer ip.mu.Unlock() + + // Set the stop channel so it can be passed to informers that are added later + ip.stop = ctx.Done() + + // Start each informer + for _, informer := range ip.informersByGVK { + go informer.Informer.Run(ctx.Done()) + } + + // Set started to true so we immediately start any informers added later. + ip.started = true + close(ip.startWait) + }() + <-ctx.Done() +} + +func (ip *specificInformersMap) waitForStarted(ctx context.Context) bool { + select { + case <-ip.startWait: + return true + case <-ctx.Done(): + return false + } +} + +// HasSyncedFuncs returns all the HasSynced functions for the informers in this map. +func (ip *specificInformersMap) HasSyncedFuncs() []cache.InformerSynced { + ip.mu.RLock() + defer ip.mu.RUnlock() + syncedFuncs := make([]cache.InformerSynced, 0, len(ip.informersByGVK)) + for _, informer := range ip.informersByGVK { + syncedFuncs = append(syncedFuncs, informer.Informer.HasSynced) + } + return syncedFuncs +} + +// Get will create a new Informer and add it to the map of specificInformersMap if none exists. Returns +// the Informer from the map. +func (ip *specificInformersMap) Get(ctx context.Context, gvk schema.GroupVersionKind, obj runtime.Object) (bool, *MapEntry, error) { + // Return the informer if it is found + i, started, ok := func() (*MapEntry, bool, bool) { + ip.mu.RLock() + defer ip.mu.RUnlock() + i, ok := ip.informersByGVK[gvk] + return i, ip.started, ok + }() + + if !ok { + var err error + if i, started, err = ip.addInformerToMap(gvk, obj); err != nil { + return started, nil, err + } + } + + if started && !i.Informer.HasSynced() { + // Wait for it to sync before returning the Informer so that folks don't read from a stale cache. + if !cache.WaitForCacheSync(ctx.Done(), i.Informer.HasSynced) { + return started, nil, apierrors.NewTimeoutError(fmt.Sprintf("failed waiting for %T Informer to sync", obj), 0) + } + } + + return started, i, nil +} + +func (ip *specificInformersMap) addInformerToMap(gvk schema.GroupVersionKind, obj runtime.Object) (*MapEntry, bool, error) { + ip.mu.Lock() + defer ip.mu.Unlock() + + // Check the cache to see if we already have an Informer. If we do, return the Informer. + // This is for the case where 2 routines tried to get the informer when it wasn't in the map + // so neither returned early, but the first one created it. + if i, ok := ip.informersByGVK[gvk]; ok { + return i, ip.started, nil + } + + // Create a NewSharedIndexInformer and add it to the map. 
+ var lw *cache.ListWatch + lw, err := ip.createListWatcher(gvk, ip) + if err != nil { + return nil, false, err + } + ni := cache.NewSharedIndexInformer(lw, obj, resyncPeriod(ip.resync)(), cache.Indexers{ + cache.NamespaceIndex: cache.MetaNamespaceIndexFunc, + }) + + // Check to see if there is a transformer for this gvk + if err := ni.SetTransform(ip.transformers.Get(gvk)); err != nil { + return nil, false, err + } + + rm, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return nil, false, err + } + + i := &MapEntry{ + Informer: ni, + Reader: CacheReader{ + indexer: ni.GetIndexer(), + groupVersionKind: gvk, + scopeName: rm.Scope.Name(), + disableDeepCopy: ip.disableDeepCopy.IsDisabled(gvk), + }, + } + ip.informersByGVK[gvk] = i + + // Start the Informer if need by + // TODO(seans): write thorough tests and document what happens here - can you add indexers? + // can you add eventhandlers? + if ip.started { + go i.Informer.Run(ip.stop) + } + return i, ip.started, nil +} + +// newListWatch returns a new ListWatch object that can be used to create a SharedIndexInformer. +func createStructuredListWatch(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) { + // Kubernetes APIs work against Resources, not GroupVersionKinds. Map the + // groupVersionKind to the Resource API we will use. + mapping, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return nil, err + } + + client, err := apiutil.RESTClientForGVK(gvk, false, ip.config, ip.codecs) + if err != nil { + return nil, err + } + listGVK := gvk.GroupVersion().WithKind(gvk.Kind + "List") + listObj, err := ip.Scheme.New(listGVK) + if err != nil { + return nil, err + } + + // TODO: the functions that make use of this ListWatch should be adapted to + // pass in their own contexts instead of relying on this fixed one here. + ctx := context.TODO() + // Create a new ListWatch for the obj + return &cache.ListWatch{ + ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + ip.selectors(gvk).ApplyToList(&opts) + res := listObj.DeepCopyObject() + namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk)) + isNamespaceScoped := namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot + err := client.Get().NamespaceIfScoped(namespace, isNamespaceScoped).Resource(mapping.Resource.Resource).VersionedParams(&opts, ip.paramCodec).Do(ctx).Into(res) + return res, err + }, + // Setup the watch function + WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { + ip.selectors(gvk).ApplyToList(&opts) + // Watch needs to be set to true separately + opts.Watch = true + namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk)) + isNamespaceScoped := namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot + return client.Get().NamespaceIfScoped(namespace, isNamespaceScoped).Resource(mapping.Resource.Resource).VersionedParams(&opts, ip.paramCodec).Watch(ctx) + }, + }, nil +} + +func createUnstructuredListWatch(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) { + // Kubernetes APIs work against Resources, not GroupVersionKinds. Map the + // groupVersionKind to the Resource API we will use. + mapping, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return nil, err + } + + // If the rest configuration has a negotiated serializer passed in, + // we should remove it and use the one that the dynamic client sets for us. 
+ cfg := rest.CopyConfig(ip.config) + cfg.NegotiatedSerializer = nil + dynamicClient, err := dynamic.NewForConfig(cfg) + if err != nil { + return nil, err + } + + // TODO: the functions that make use of this ListWatch should be adapted to + // pass in their own contexts instead of relying on this fixed one here. + ctx := context.TODO() + // Create a new ListWatch for the obj + return &cache.ListWatch{ + ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + ip.selectors(gvk).ApplyToList(&opts) + namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk)) + if namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { + return dynamicClient.Resource(mapping.Resource).Namespace(namespace).List(ctx, opts) + } + return dynamicClient.Resource(mapping.Resource).List(ctx, opts) + }, + // Setup the watch function + WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { + ip.selectors(gvk).ApplyToList(&opts) + // Watch needs to be set to true separately + opts.Watch = true + namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk)) + if namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { + return dynamicClient.Resource(mapping.Resource).Namespace(namespace).Watch(ctx, opts) + } + return dynamicClient.Resource(mapping.Resource).Watch(ctx, opts) + }, + }, nil +} + +func createMetadataListWatch(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) { + // Kubernetes APIs work against Resources, not GroupVersionKinds. Map the + // groupVersionKind to the Resource API we will use. + mapping, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return nil, err + } + + // Always clear the negotiated serializer and use the one + // set from the metadata client. + cfg := rest.CopyConfig(ip.config) + cfg.NegotiatedSerializer = nil + + // grab the metadata client + client, err := metadata.NewForConfig(cfg) + if err != nil { + return nil, err + } + + // TODO: the functions that make use of this ListWatch should be adapted to + // pass in their own contexts instead of relying on this fixed one here. 
+ ctx := context.TODO() + + // create the relevant listwatch + return &cache.ListWatch{ + ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + ip.selectors(gvk).ApplyToList(&opts) + + var ( + list *metav1.PartialObjectMetadataList + err error + ) + namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk)) + if namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { + list, err = client.Resource(mapping.Resource).Namespace(namespace).List(ctx, opts) + } else { + list, err = client.Resource(mapping.Resource).List(ctx, opts) + } + if list != nil { + for i := range list.Items { + list.Items[i].SetGroupVersionKind(gvk) + } + } + return list, err + }, + // Setup the watch function + WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { + ip.selectors(gvk).ApplyToList(&opts) + // Watch needs to be set to true separately + opts.Watch = true + + var ( + watcher watch.Interface + err error + ) + namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk)) + if namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { + watcher, err = client.Resource(mapping.Resource).Namespace(namespace).Watch(ctx, opts) + } else { + watcher, err = client.Resource(mapping.Resource).Watch(ctx, opts) + } + if watcher != nil { + watcher = newGVKFixupWatcher(gvk, watcher) + } + return watcher, err + }, + }, nil +} + +// newGVKFixupWatcher adds a wrapper that preserves the GVK information when +// events come in. +// +// This works around a bug where GVK information is not passed into mapping +// functions when using the OnlyMetadata option in the builder. +// This issue is most likely caused by kubernetes/kubernetes#80609. +// See kubernetes-sigs/controller-runtime#1484. +// +// This was originally implemented as a cache.ResourceEventHandler wrapper but +// that contained a data race which was resolved by setting the GVK in a watch +// wrapper, before the objects are written to the cache. +// See kubernetes-sigs/controller-runtime#1650. +// +// The original watch wrapper was found to be incompatible with +// k8s.io/client-go/tools/cache.Reflector so it has been re-implemented as a +// watch.Filter which is compatible. +// See kubernetes-sigs/controller-runtime#1789. +func newGVKFixupWatcher(gvk schema.GroupVersionKind, watcher watch.Interface) watch.Interface { + return watch.Filter( + watcher, + func(in watch.Event) (watch.Event, bool) { + in.Object.GetObjectKind().SetGroupVersionKind(gvk) + return in, true + }, + ) +} + +// resyncPeriod returns a function which generates a duration each time it is +// invoked; this is so that multiple controllers don't get into lock-step and all +// hammer the apiserver with list requests simultaneously. +func resyncPeriod(resync time.Duration) func() time.Duration { + return func() time.Duration { + // the factor will fall into [0.9, 1.1) + factor := rand.Float64()/5.0 + 0.9 //nolint:gosec + return time.Duration(float64(resync.Nanoseconds()) * factor) + } +} + +// restrictNamespaceBySelector returns either a global restriction for all ListWatches +// if not default/empty, or the namespace that a ListWatch for the specific resource +// is restricted to, based on a specified field selector for metadata.namespace field. 
+func restrictNamespaceBySelector(namespaceOpt string, s Selector) string { + if namespaceOpt != "" { + // namespace is already restricted + return namespaceOpt + } + fieldSelector := s.Field + if fieldSelector == nil || fieldSelector.Empty() { + return "" + } + // check whether a selector includes the namespace field + value, found := fieldSelector.RequiresExactMatch("metadata.namespace") + if found { + return value + } + return "" +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/selector.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/selector.go new file mode 100644 index 000000000..4eff32fb3 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/selector.go @@ -0,0 +1,54 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// SelectorsByGVK associate a GroupVersionKind to a field/label selector. +type SelectorsByGVK map[schema.GroupVersionKind]Selector + +func (s SelectorsByGVK) forGVK(gvk schema.GroupVersionKind) Selector { + if specific, found := s[gvk]; found { + return specific + } + if defaultSelector, found := s[schema.GroupVersionKind{}]; found { + return defaultSelector + } + + return Selector{} +} + +// Selector specify the label/field selector to fill in ListOptions. +type Selector struct { + Label labels.Selector + Field fields.Selector +} + +// ApplyToList fill in ListOptions LabelSelector and FieldSelector if needed. +func (s Selector) ApplyToList(listOpts *metav1.ListOptions) { + if s.Label != nil { + listOpts.LabelSelector = s.Label.String() + } + if s.Field != nil { + listOpts.FieldSelector = s.Field.String() + } +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/transformers.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/transformers.go new file mode 100644 index 000000000..8cf642c4b --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/transformers.go @@ -0,0 +1,50 @@ +package internal + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/tools/cache" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" +) + +// TransformFuncByObject provides access to the correct transform function for +// any given GVK. +type TransformFuncByObject interface { + Set(runtime.Object, *runtime.Scheme, cache.TransformFunc) error + Get(schema.GroupVersionKind) cache.TransformFunc + SetDefault(transformer cache.TransformFunc) +} + +type transformFuncByGVK struct { + defaultTransform cache.TransformFunc + transformers map[schema.GroupVersionKind]cache.TransformFunc +} + +// NewTransformFuncByObject creates a new TransformFuncByObject instance. 
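The Selector type above is internal, but its effect on list calls is easy to reproduce directly with the labels and fields packages. In this sketch the label and the namespace value are hypothetical; note that an exact-match field selector on metadata.namespace is precisely what restrictNamespaceBySelector picks up.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	opts := metav1.ListOptions{}

	// Analogous to what internal.Selector.ApplyToList fills in.
	lbl := labels.SelectorFromSet(labels.Set{"app": "demo"})          // hypothetical label
	fld := fields.OneTermEqualSelector("metadata.namespace", "demo") // hypothetical namespace

	opts.LabelSelector = lbl.String()
	opts.FieldSelector = fld.String()

	fmt.Println(opts.LabelSelector, opts.FieldSelector) // app=demo metadata.namespace=demo
}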
+func NewTransformFuncByObject() TransformFuncByObject { + return &transformFuncByGVK{ + transformers: make(map[schema.GroupVersionKind]cache.TransformFunc), + defaultTransform: nil, + } +} + +func (t *transformFuncByGVK) SetDefault(transformer cache.TransformFunc) { + t.defaultTransform = transformer +} + +func (t *transformFuncByGVK) Set(obj runtime.Object, scheme *runtime.Scheme, transformer cache.TransformFunc) error { + gvk, err := apiutil.GVKForObject(obj, scheme) + if err != nil { + return err + } + + t.transformers[gvk] = transformer + return nil +} + +func (t transformFuncByGVK) Get(gvk schema.GroupVersionKind) cache.TransformFunc { + if val, ok := t.transformers[gvk]; ok { + return val + } + return t.defaultTransform +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go new file mode 100644 index 000000000..47a5bb3b3 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go @@ -0,0 +1,331 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "context" + "fmt" + "time" + + corev1 "k8s.io/api/core/v1" + apimeta "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/rest" + toolscache "k8s.io/client-go/tools/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/internal/objectutil" +) + +// NewCacheFunc - Function for creating a new cache from the options and a rest config. +type NewCacheFunc func(config *rest.Config, opts Options) (Cache, error) + +// a new global namespaced cache to handle cluster scoped resources. +const globalCache = "_cluster-scope" + +// MultiNamespacedCacheBuilder - Builder function to create a new multi-namespaced cache. +// This will scope the cache to a list of namespaces. Listing for all namespaces +// will list for all the namespaces that this knows about. By default this will create +// a global cache for cluster scoped resource. Note that this is not intended +// to be used for excluding namespaces, this is better done via a Predicate. Also note that +// you may face performance issues when using this with a high number of namespaces. 
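A typical transformer registered through this mechanism prunes data before objects enter the cache. The sketch below shows a cache.TransformFunc that drops managedFields from Pods; the choice of Pods and of managedFields is illustrative, not something this change prescribes.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/cache"
)

// dropManagedFields prunes managedFields before Pods are stored, a common use
// for per-GVK transformers like the ones above.
func dropManagedFields(obj interface{}) (interface{}, error) {
	if pod, ok := obj.(*corev1.Pod); ok {
		pod.ManagedFields = nil
	}
	return obj, nil
}

func main() {
	var _ cache.TransformFunc = dropManagedFields
	fmt.Println("dropManagedFields satisfies cache.TransformFunc")
}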
+func MultiNamespacedCacheBuilder(namespaces []string) NewCacheFunc { + return func(config *rest.Config, opts Options) (Cache, error) { + opts, err := defaultOpts(config, opts) + if err != nil { + return nil, err + } + + caches := map[string]Cache{} + + // create a cache for cluster scoped resources + gCache, err := New(config, opts) + if err != nil { + return nil, fmt.Errorf("error creating global cache: %w", err) + } + + for _, ns := range namespaces { + opts.Namespace = ns + c, err := New(config, opts) + if err != nil { + return nil, err + } + caches[ns] = c + } + return &multiNamespaceCache{namespaceToCache: caches, Scheme: opts.Scheme, RESTMapper: opts.Mapper, clusterCache: gCache}, nil + } +} + +// multiNamespaceCache knows how to handle multiple namespaced caches +// Use this feature when scoping permissions for your +// operator to a list of namespaces instead of watching every namespace +// in the cluster. +type multiNamespaceCache struct { + namespaceToCache map[string]Cache + Scheme *runtime.Scheme + RESTMapper apimeta.RESTMapper + clusterCache Cache +} + +var _ Cache = &multiNamespaceCache{} + +// Methods for multiNamespaceCache to conform to the Informers interface. +func (c *multiNamespaceCache) GetInformer(ctx context.Context, obj client.Object) (Informer, error) { + informers := map[string]Informer{} + + // If the object is clusterscoped, get the informer from clusterCache, + // if not use the namespaced caches. + isNamespaced, err := objectutil.IsAPINamespaced(obj, c.Scheme, c.RESTMapper) + if err != nil { + return nil, err + } + if !isNamespaced { + clusterCacheInf, err := c.clusterCache.GetInformer(ctx, obj) + if err != nil { + return nil, err + } + informers[globalCache] = clusterCacheInf + + return &multiNamespaceInformer{namespaceToInformer: informers}, nil + } + + for ns, cache := range c.namespaceToCache { + informer, err := cache.GetInformer(ctx, obj) + if err != nil { + return nil, err + } + informers[ns] = informer + } + + return &multiNamespaceInformer{namespaceToInformer: informers}, nil +} + +func (c *multiNamespaceCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind) (Informer, error) { + informers := map[string]Informer{} + + // If the object is clusterscoped, get the informer from clusterCache, + // if not use the namespaced caches. 
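Callers normally reach MultiNamespacedCacheBuilder through the manager options rather than invoking it directly. A rough wiring sketch, with hypothetical namespace names, assuming the ctrl.Options surface of this controller-runtime version:

package main

import (
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/cache"
)

func main() {
	// Hypothetical namespaces; cluster-scoped objects still go through the
	// extra "_cluster-scope" cache created by the builder.
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		NewCache: cache.MultiNamespacedCacheBuilder([]string{"team-a", "team-b"}),
	})
	if err != nil {
		panic(err)
	}
	_ = mgr
}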
+ isNamespaced, err := objectutil.IsAPINamespacedWithGVK(gvk, c.Scheme, c.RESTMapper) + if err != nil { + return nil, err + } + if !isNamespaced { + clusterCacheInf, err := c.clusterCache.GetInformerForKind(ctx, gvk) + if err != nil { + return nil, err + } + informers[globalCache] = clusterCacheInf + + return &multiNamespaceInformer{namespaceToInformer: informers}, nil + } + + for ns, cache := range c.namespaceToCache { + informer, err := cache.GetInformerForKind(ctx, gvk) + if err != nil { + return nil, err + } + informers[ns] = informer + } + + return &multiNamespaceInformer{namespaceToInformer: informers}, nil +} + +func (c *multiNamespaceCache) Start(ctx context.Context) error { + // start global cache + go func() { + err := c.clusterCache.Start(ctx) + if err != nil { + log.Error(err, "cluster scoped cache failed to start") + } + }() + + // start namespaced caches + for ns, cache := range c.namespaceToCache { + go func(ns string, cache Cache) { + err := cache.Start(ctx) + if err != nil { + log.Error(err, "multinamespace cache failed to start namespaced informer", "namespace", ns) + } + }(ns, cache) + } + + <-ctx.Done() + return nil +} + +func (c *multiNamespaceCache) WaitForCacheSync(ctx context.Context) bool { + synced := true + for _, cache := range c.namespaceToCache { + if s := cache.WaitForCacheSync(ctx); !s { + synced = s + } + } + + // check if cluster scoped cache has synced + if !c.clusterCache.WaitForCacheSync(ctx) { + synced = false + } + return synced +} + +func (c *multiNamespaceCache) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error { + isNamespaced, err := objectutil.IsAPINamespaced(obj, c.Scheme, c.RESTMapper) + if err != nil { + return nil //nolint:nilerr + } + + if !isNamespaced { + return c.clusterCache.IndexField(ctx, obj, field, extractValue) + } + + for _, cache := range c.namespaceToCache { + if err := cache.IndexField(ctx, obj, field, extractValue); err != nil { + return err + } + } + return nil +} + +func (c *multiNamespaceCache) Get(ctx context.Context, key client.ObjectKey, obj client.Object) error { + isNamespaced, err := objectutil.IsAPINamespaced(obj, c.Scheme, c.RESTMapper) + if err != nil { + return err + } + + if !isNamespaced { + // Look into the global cache to fetch the object + return c.clusterCache.Get(ctx, key, obj) + } + + cache, ok := c.namespaceToCache[key.Namespace] + if !ok { + return fmt.Errorf("unable to get: %v because of unknown namespace for the cache", key) + } + return cache.Get(ctx, key, obj) +} + +// List multi namespace cache will get all the objects in the namespaces that the cache is watching if asked for all namespaces. +func (c *multiNamespaceCache) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + listOpts := client.ListOptions{} + listOpts.ApplyOptions(opts) + + isNamespaced, err := objectutil.IsAPINamespaced(list, c.Scheme, c.RESTMapper) + if err != nil { + return err + } + + if !isNamespaced { + // Look at the global cache to get the objects with the specified GVK + return c.clusterCache.List(ctx, list, opts...) + } + + if listOpts.Namespace != corev1.NamespaceAll { + cache, ok := c.namespaceToCache[listOpts.Namespace] + if !ok { + return fmt.Errorf("unable to get: %v because of unknown namespace for the cache", listOpts.Namespace) + } + return cache.List(ctx, list, opts...) 
+ } + + listAccessor, err := apimeta.ListAccessor(list) + if err != nil { + return err + } + + allItems, err := apimeta.ExtractList(list) + if err != nil { + return err + } + + limitSet := listOpts.Limit > 0 + + var resourceVersion string + for _, cache := range c.namespaceToCache { + listObj := list.DeepCopyObject().(client.ObjectList) + err = cache.List(ctx, listObj, &listOpts) + if err != nil { + return err + } + items, err := apimeta.ExtractList(listObj) + if err != nil { + return err + } + accessor, err := apimeta.ListAccessor(listObj) + if err != nil { + return fmt.Errorf("object: %T must be a list type", list) + } + allItems = append(allItems, items...) + // The last list call should have the most correct resource version. + resourceVersion = accessor.GetResourceVersion() + if limitSet { + // decrement Limit by the number of items + // fetched from the current namespace. + listOpts.Limit -= int64(len(items)) + // if a Limit was set and the number of + // items read has reached this set limit, + // then stop reading. + if listOpts.Limit == 0 { + break + } + } + } + listAccessor.SetResourceVersion(resourceVersion) + + return apimeta.SetList(list, allItems) +} + +// multiNamespaceInformer knows how to handle interacting with the underlying informer across multiple namespaces. +type multiNamespaceInformer struct { + namespaceToInformer map[string]Informer +} + +var _ Informer = &multiNamespaceInformer{} + +// AddEventHandler adds the handler to each namespaced informer. +func (i *multiNamespaceInformer) AddEventHandler(handler toolscache.ResourceEventHandler) { + for _, informer := range i.namespaceToInformer { + informer.AddEventHandler(handler) + } +} + +// AddEventHandlerWithResyncPeriod adds the handler with a resync period to each namespaced informer. +func (i *multiNamespaceInformer) AddEventHandlerWithResyncPeriod(handler toolscache.ResourceEventHandler, resyncPeriod time.Duration) { + for _, informer := range i.namespaceToInformer { + informer.AddEventHandlerWithResyncPeriod(handler, resyncPeriod) + } +} + +// AddIndexers adds the indexer for each namespaced informer. +func (i *multiNamespaceInformer) AddIndexers(indexers toolscache.Indexers) error { + for _, informer := range i.namespaceToInformer { + err := informer.AddIndexers(indexers) + if err != nil { + return err + } + } + return nil +} + +// HasSynced checks if each namespaced informer has synced. +func (i *multiNamespaceInformer) HasSynced() bool { + for _, informer := range i.namespaceToInformer { + if ok := informer.HasSynced(); !ok { + return ok + } + } + return true +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/certwatcher.go b/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/certwatcher.go new file mode 100644 index 000000000..1030013db --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/certwatcher.go @@ -0,0 +1,166 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
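From the caller's side, the merge-and-limit behaviour of List above is invisible; the usual client options drive it. A library-style sketch with arbitrary limits and a hypothetical namespace:

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// listAcrossNamespaces shows the all-namespaces path: the multi-namespace
// cache concatenates results from every namespaced cache, decrementing the
// limit as it goes.
func listAcrossNamespaces(ctx context.Context, c client.Client) (*corev1.PodList, error) {
	pods := &corev1.PodList{}
	if err := c.List(ctx, pods, client.Limit(50)); err != nil { // 50 is arbitrary
		return nil, err
	}
	return pods, nil
}

// listOneNamespace hits exactly one of the per-namespace caches.
func listOneNamespace(ctx context.Context, c client.Client) (*corev1.PodList, error) {
	pods := &corev1.PodList{}
	if err := c.List(ctx, pods, client.InNamespace("team-a"), client.Limit(10)); err != nil {
		return nil, err
	}
	return pods, nil
}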
+*/ + +package certwatcher + +import ( + "context" + "crypto/tls" + "sync" + + "github.com/fsnotify/fsnotify" + "sigs.k8s.io/controller-runtime/pkg/certwatcher/metrics" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" +) + +var log = logf.RuntimeLog.WithName("certwatcher") + +// CertWatcher watches certificate and key files for changes. When either file +// changes, it reads and parses both and calls an optional callback with the new +// certificate. +type CertWatcher struct { + sync.RWMutex + + currentCert *tls.Certificate + watcher *fsnotify.Watcher + + certPath string + keyPath string +} + +// New returns a new CertWatcher watching the given certificate and key. +func New(certPath, keyPath string) (*CertWatcher, error) { + var err error + + cw := &CertWatcher{ + certPath: certPath, + keyPath: keyPath, + } + + // Initial read of certificate and key. + if err := cw.ReadCertificate(); err != nil { + return nil, err + } + + cw.watcher, err = fsnotify.NewWatcher() + if err != nil { + return nil, err + } + + return cw, nil +} + +// GetCertificate fetches the currently loaded certificate, which may be nil. +func (cw *CertWatcher) GetCertificate(_ *tls.ClientHelloInfo) (*tls.Certificate, error) { + cw.RLock() + defer cw.RUnlock() + return cw.currentCert, nil +} + +// Start starts the watch on the certificate and key files. +func (cw *CertWatcher) Start(ctx context.Context) error { + files := []string{cw.certPath, cw.keyPath} + + for _, f := range files { + if err := cw.watcher.Add(f); err != nil { + return err + } + } + + go cw.Watch() + + log.Info("Starting certificate watcher") + + // Block until the context is done. + <-ctx.Done() + + return cw.watcher.Close() +} + +// Watch reads events from the watcher's channel and reacts to changes. +func (cw *CertWatcher) Watch() { + for { + select { + case event, ok := <-cw.watcher.Events: + // Channel is closed. + if !ok { + return + } + + cw.handleEvent(event) + + case err, ok := <-cw.watcher.Errors: + // Channel is closed. + if !ok { + return + } + + log.Error(err, "certificate watch error") + } + } +} + +// ReadCertificate reads the certificate and key files from disk, parses them, +// and updates the current certificate on the watcher. If a callback is set, it +// is invoked with the new certificate. +func (cw *CertWatcher) ReadCertificate() error { + metrics.ReadCertificateTotal.Inc() + cert, err := tls.LoadX509KeyPair(cw.certPath, cw.keyPath) + if err != nil { + metrics.ReadCertificateErrors.Inc() + return err + } + + cw.Lock() + cw.currentCert = &cert + cw.Unlock() + + log.Info("Updated current TLS certificate") + + return nil +} + +func (cw *CertWatcher) handleEvent(event fsnotify.Event) { + // Only care about events which may modify the contents of the file. + if !(isWrite(event) || isRemove(event) || isCreate(event)) { + return + } + + log.V(1).Info("certificate event", "event", event) + + // If the file was removed, re-add the watch. 
+ if isRemove(event) { + if err := cw.watcher.Add(event.Name); err != nil { + log.Error(err, "error re-watching file") + } + } + + if err := cw.ReadCertificate(); err != nil { + log.Error(err, "error re-reading certificate") + } +} + +func isWrite(event fsnotify.Event) bool { + return event.Op&fsnotify.Write == fsnotify.Write +} + +func isCreate(event fsnotify.Event) bool { + return event.Op&fsnotify.Create == fsnotify.Create +} + +func isRemove(event fsnotify.Event) bool { + return event.Op&fsnotify.Remove == fsnotify.Remove +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/doc.go new file mode 100644 index 000000000..40c2fc0bf --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package certwatcher is a helper for reloading Certificates from disk to be used +with tls servers. It provides a helper func `GetCertificate` which can be +called from `tls.Config` and passed into your tls.Listener. For a detailed +example server view pkg/webhook/server.go. +*/ +package certwatcher diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/metrics/metrics.go b/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/metrics/metrics.go new file mode 100644 index 000000000..05869eff0 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/metrics/metrics.go @@ -0,0 +1,45 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" + "sigs.k8s.io/controller-runtime/pkg/metrics" +) + +var ( + // ReadCertificateTotal is a prometheus counter metrics which holds the total + // number of certificate reads. + ReadCertificateTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "certwatcher_read_certificate_total", + Help: "Total number of certificate reads", + }) + + // ReadCertificateErrors is a prometheus counter metrics which holds the total + // number of errors from certificate read. 
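A CertWatcher is typically plugged into a TLS server through tls.Config.GetCertificate, so certificate rotation on disk is picked up without restarting the listener. The paths and address below are placeholders:

package main

import (
	"context"
	"crypto/tls"
	"net/http"

	"sigs.k8s.io/controller-runtime/pkg/certwatcher"
)

func main() {
	// Hypothetical certificate and key locations.
	cw, err := certwatcher.New("/tmp/tls.crt", "/tmp/tls.key")
	if err != nil {
		panic(err)
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	go func() {
		if err := cw.Start(ctx); err != nil {
			panic(err)
		}
	}()

	srv := &http.Server{
		Addr:      ":8443",
		TLSConfig: &tls.Config{GetCertificate: cw.GetCertificate},
	}
	// Empty cert/key args: the TLSConfig callback supplies the certificate.
	_ = srv.ListenAndServeTLS("", "")
}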
+ ReadCertificateErrors = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "certwatcher_read_certificate_errors_total", + Help: "Total number of certificate read errors", + }) +) + +func init() { + metrics.Registry.MustRegister( + ReadCertificateTotal, + ReadCertificateErrors, + ) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go new file mode 100644 index 000000000..c92b0eaae --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go @@ -0,0 +1,196 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package apiutil contains utilities for working with raw Kubernetes +// API machinery, such as creating RESTMappers and raw REST clients, +// and extracting the GVK of an object. +package apiutil + +import ( + "fmt" + "reflect" + "sync" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/discovery" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "k8s.io/client-go/restmapper" +) + +var ( + protobufScheme = runtime.NewScheme() + protobufSchemeLock sync.RWMutex +) + +func init() { + // Currently only enabled for built-in resources which are guaranteed to implement Protocol Buffers. + // For custom resources, CRDs can not support Protocol Buffers but Aggregated API can. + // See doc: https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#advanced-features-and-flexibility + if err := clientgoscheme.AddToScheme(protobufScheme); err != nil { + panic(err) + } +} + +// AddToProtobufScheme add the given SchemeBuilder into protobufScheme, which should +// be additional types that do support protobuf. +func AddToProtobufScheme(addToScheme func(*runtime.Scheme) error) error { + protobufSchemeLock.Lock() + defer protobufSchemeLock.Unlock() + return addToScheme(protobufScheme) +} + +// NewDiscoveryRESTMapper constructs a new RESTMapper based on discovery +// information fetched by a new client with the given config. +func NewDiscoveryRESTMapper(c *rest.Config) (meta.RESTMapper, error) { + // Get a mapper + dc, err := discovery.NewDiscoveryClientForConfig(c) + if err != nil { + return nil, err + } + gr, err := restmapper.GetAPIGroupResources(dc) + if err != nil { + return nil, err + } + return restmapper.NewDiscoveryRESTMapper(gr), nil +} + +// GVKForObject finds the GroupVersionKind associated with the given object, if there is only a single such GVK. +func GVKForObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupVersionKind, error) { + // TODO(directxman12): do we want to generalize this to arbitrary container types? + // I think we'd need a generalized form of scheme or something. 
It's a + // shame there's not a reliable "GetGVK" interface that works by default + // for unpopulated static types and populated "dynamic" types + // (unstructured, partial, etc) + + // check for PartialObjectMetadata, which is analogous to unstructured, but isn't handled by ObjectKinds + _, isPartial := obj.(*metav1.PartialObjectMetadata) //nolint:ifshort + _, isPartialList := obj.(*metav1.PartialObjectMetadataList) + if isPartial || isPartialList { + // we require that the GVK be populated in order to recognize the object + gvk := obj.GetObjectKind().GroupVersionKind() + if len(gvk.Kind) == 0 { + return schema.GroupVersionKind{}, runtime.NewMissingKindErr("unstructured object has no kind") + } + if len(gvk.Version) == 0 { + return schema.GroupVersionKind{}, runtime.NewMissingVersionErr("unstructured object has no version") + } + return gvk, nil + } + + gvks, isUnversioned, err := scheme.ObjectKinds(obj) + if err != nil { + return schema.GroupVersionKind{}, err + } + if isUnversioned { + return schema.GroupVersionKind{}, fmt.Errorf("cannot create group-version-kind for unversioned type %T", obj) + } + + if len(gvks) < 1 { + return schema.GroupVersionKind{}, fmt.Errorf("no group-version-kinds associated with type %T", obj) + } + if len(gvks) > 1 { + // this should only trigger for things like metav1.XYZ -- + // normal versioned types should be fine + return schema.GroupVersionKind{}, fmt.Errorf( + "multiple group-version-kinds associated with type %T, refusing to guess at one", obj) + } + return gvks[0], nil +} + +// RESTClientForGVK constructs a new rest.Interface capable of accessing the resource associated +// with the given GroupVersionKind. The REST client will be configured to use the negotiated serializer from +// baseConfig, if set, otherwise a default serializer will be set. +func RESTClientForGVK(gvk schema.GroupVersionKind, isUnstructured bool, baseConfig *rest.Config, codecs serializer.CodecFactory) (rest.Interface, error) { + return rest.RESTClientFor(createRestConfig(gvk, isUnstructured, baseConfig, codecs)) +} + +// serializerWithDecodedGVK is a CodecFactory that overrides the DecoderToVersion of a WithoutConversionCodecFactory +// in order to avoid clearing the GVK from the decoded object. +// +// See https://github.com/kubernetes/kubernetes/issues/80609. +type serializerWithDecodedGVK struct { + serializer.WithoutConversionCodecFactory +} + +// DecoderToVersion returns an decoder that does not do conversion. +func (f serializerWithDecodedGVK) DecoderToVersion(serializer runtime.Decoder, _ runtime.GroupVersioner) runtime.Decoder { + return serializer +} + +// createRestConfig copies the base config and updates needed fields for a new rest config. +func createRestConfig(gvk schema.GroupVersionKind, isUnstructured bool, baseConfig *rest.Config, codecs serializer.CodecFactory) *rest.Config { + gv := gvk.GroupVersion() + + cfg := rest.CopyConfig(baseConfig) + cfg.GroupVersion = &gv + if gvk.Group == "" { + cfg.APIPath = "/api" + } else { + cfg.APIPath = "/apis" + } + if cfg.UserAgent == "" { + cfg.UserAgent = rest.DefaultKubernetesUserAgent() + } + // TODO(FillZpp): In the long run, we want to check discovery or something to make sure that this is actually true. + if cfg.ContentType == "" && !isUnstructured { + protobufSchemeLock.RLock() + if protobufScheme.Recognizes(gvk) { + cfg.ContentType = runtime.ContentTypeProtobuf + } + protobufSchemeLock.RUnlock() + } + + if isUnstructured { + // If the object is unstructured, we need to preserve the GVK information. 
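GVKForObject above is the routine the client uses to resolve a Go type to its GroupVersionKind. A small sketch of both branches it handles: the scheme-backed typed case, and the PartialObjectMetadata case that requires the GVK to be pre-populated.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/scheme"
	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
)

func main() {
	// Typed objects are resolved via the scheme.
	gvk, err := apiutil.GVKForObject(&corev1.Pod{}, scheme.Scheme)
	if err != nil {
		panic(err)
	}
	fmt.Println(gvk) // /v1, Kind=Pod

	// Metadata-only objects must already carry their GVK.
	pm := &metav1.PartialObjectMetadata{}
	pm.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Pod"))
	gvk, err = apiutil.GVKForObject(pm, scheme.Scheme)
	if err != nil {
		panic(err)
	}
	fmt.Println(gvk)
}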
+ // Use our own custom serializer. + cfg.NegotiatedSerializer = serializerWithDecodedGVK{serializer.WithoutConversionCodecFactory{CodecFactory: codecs}} + } else { + cfg.NegotiatedSerializer = serializerWithTargetZeroingDecode{NegotiatedSerializer: serializer.WithoutConversionCodecFactory{CodecFactory: codecs}} + } + + return cfg +} + +type serializerWithTargetZeroingDecode struct { + runtime.NegotiatedSerializer +} + +func (s serializerWithTargetZeroingDecode) DecoderToVersion(serializer runtime.Decoder, r runtime.GroupVersioner) runtime.Decoder { + return targetZeroingDecoder{upstream: s.NegotiatedSerializer.DecoderToVersion(serializer, r)} +} + +type targetZeroingDecoder struct { + upstream runtime.Decoder +} + +func (t targetZeroingDecoder) Decode(data []byte, defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { + zero(into) + return t.upstream.Decode(data, defaults, into) +} + +// zero zeros the value of a pointer. +func zero(x interface{}) { + if x == nil { + return + } + res := reflect.ValueOf(x).Elem() + res.Set(reflect.Zero(res.Type())) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go new file mode 100644 index 000000000..e6cc51c6e --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go @@ -0,0 +1,294 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apiutil + +import ( + "errors" + "sync" + "sync/atomic" + + "golang.org/x/time/rate" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" + "k8s.io/client-go/rest" + "k8s.io/client-go/restmapper" +) + +// dynamicRESTMapper is a RESTMapper that dynamically discovers resource +// types at runtime. +type dynamicRESTMapper struct { + mu sync.RWMutex // protects the following fields + staticMapper meta.RESTMapper + limiter *rate.Limiter + newMapper func() (meta.RESTMapper, error) + + lazy bool + // Used for lazy init. + inited uint32 + initMtx sync.Mutex +} + +// DynamicRESTMapperOption is a functional option on the dynamicRESTMapper. +type DynamicRESTMapperOption func(*dynamicRESTMapper) error + +// WithLimiter sets the RESTMapper's underlying limiter to lim. +func WithLimiter(lim *rate.Limiter) DynamicRESTMapperOption { + return func(drm *dynamicRESTMapper) error { + drm.limiter = lim + return nil + } +} + +// WithLazyDiscovery prevents the RESTMapper from discovering REST mappings +// until an API call is made. +var WithLazyDiscovery DynamicRESTMapperOption = func(drm *dynamicRESTMapper) error { + drm.lazy = true + return nil +} + +// WithCustomMapper supports setting a custom RESTMapper refresher instead of +// the default method, which uses a discovery client. +// +// This exists mainly for testing, but can be useful if you need tighter control +// over how discovery is performed, which discovery endpoints are queried, etc. 
+func WithCustomMapper(newMapper func() (meta.RESTMapper, error)) DynamicRESTMapperOption { + return func(drm *dynamicRESTMapper) error { + drm.newMapper = newMapper + return nil + } +} + +// NewDynamicRESTMapper returns a dynamic RESTMapper for cfg. The dynamic +// RESTMapper dynamically discovers resource types at runtime. opts +// configure the RESTMapper. +func NewDynamicRESTMapper(cfg *rest.Config, opts ...DynamicRESTMapperOption) (meta.RESTMapper, error) { + client, err := discovery.NewDiscoveryClientForConfig(cfg) + if err != nil { + return nil, err + } + drm := &dynamicRESTMapper{ + limiter: rate.NewLimiter(rate.Limit(defaultRefillRate), defaultLimitSize), + newMapper: func() (meta.RESTMapper, error) { + groupResources, err := restmapper.GetAPIGroupResources(client) + if err != nil { + return nil, err + } + return restmapper.NewDiscoveryRESTMapper(groupResources), nil + }, + } + for _, opt := range opts { + if err = opt(drm); err != nil { + return nil, err + } + } + if !drm.lazy { + if err := drm.setStaticMapper(); err != nil { + return nil, err + } + } + return drm, nil +} + +var ( + // defaultRefilRate is the default rate at which potential calls are + // added back to the "bucket" of allowed calls. + defaultRefillRate = 5 + // defaultLimitSize is the default starting/max number of potential calls + // per second. Once a call is used, it's added back to the bucket at a rate + // of defaultRefillRate per second. + defaultLimitSize = 5 +) + +// setStaticMapper sets drm's staticMapper by querying its client, regardless +// of reload backoff. +func (drm *dynamicRESTMapper) setStaticMapper() error { + newMapper, err := drm.newMapper() + if err != nil { + return err + } + drm.staticMapper = newMapper + return nil +} + +// init initializes drm only once if drm is lazy. +func (drm *dynamicRESTMapper) init() (err error) { + // skip init if drm is not lazy or has initialized + if !drm.lazy || atomic.LoadUint32(&drm.inited) != 0 { + return nil + } + + drm.initMtx.Lock() + defer drm.initMtx.Unlock() + if drm.inited == 0 { + if err = drm.setStaticMapper(); err == nil { + atomic.StoreUint32(&drm.inited, 1) + } + } + return err +} + +// checkAndReload attempts to call the given callback, which is assumed to be dependent +// on the data in the restmapper. +// +// If the callback returns an error that matches the given error, it will attempt to reload +// the RESTMapper's data and re-call the callback once that's occurred. +// If the callback returns any other error, the function will return immediately regardless. +// +// It will take care of ensuring that reloads are rate-limited and that extraneous calls +// aren't made. If a reload would exceed the limiters rate, it returns the error return by +// the callback. +// It's thread-safe, and worries about thread-safety for the callback (so the callback does +// not need to attempt to lock the restmapper). +func (drm *dynamicRESTMapper) checkAndReload(needsReloadErr error, checkNeedsReload func() error) error { + // first, check the common path -- data is fresh enough + // (use an IIFE for the lock's defer) + err := func() error { + drm.mu.RLock() + defer drm.mu.RUnlock() + + return checkNeedsReload() + }() + + // NB(directxman12): `Is` and `As` have a confusing relationship -- + // `Is` is like `== or does this implement .Is`, whereas `As` says + // `can I type-assert into` + needsReload := errors.As(err, &needsReloadErr) + if !needsReload { + return err + } + + // if the data wasn't fresh, we'll need to try and update it, so grab the lock... 
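The limiter guarding reloads is a plain token bucket from golang.org/x/time/rate, created with the defaults above (bucket size 5, refilled at 5 per second). A quick sketch of how it behaves under a burst of mapper misses:

package main

import (
	"fmt"

	"golang.org/x/time/rate"
)

func main() {
	// Mirrors the mapper's defaults: a bucket of 5 reloads, refilled at 5/s.
	lim := rate.NewLimiter(rate.Limit(5), 5)

	granted := 0
	for i := 0; i < 20; i++ {
		if lim.Allow() {
			granted++
		}
	}
	// Roughly the first 5 calls are allowed immediately; the rest are rejected
	// until the bucket refills, which is how discovery reloads are throttled.
	fmt.Println("granted:", granted)
}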
+ drm.mu.Lock() + defer drm.mu.Unlock() + + // ... and double-check that we didn't reload in the meantime + err = checkNeedsReload() + needsReload = errors.As(err, &needsReloadErr) + if !needsReload { + return err + } + + // we're still stale, so grab a rate-limit token if we can... + if !drm.limiter.Allow() { + // return error from static mapper here, we have refreshed often enough (exceeding rate of provided limiter) + // so that client's can handle this the same way as a "normal" NoResourceMatchError / NoKindMatchError + return err + } + + // ...reload... + if err := drm.setStaticMapper(); err != nil { + return err + } + + // ...and return the results of the closure regardless + return checkNeedsReload() +} + +// TODO: wrap reload errors on NoKindMatchError with go 1.13 errors. + +func (drm *dynamicRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { + if err := drm.init(); err != nil { + return schema.GroupVersionKind{}, err + } + var gvk schema.GroupVersionKind + err := drm.checkAndReload(&meta.NoResourceMatchError{}, func() error { + var err error + gvk, err = drm.staticMapper.KindFor(resource) + return err + }) + return gvk, err +} + +func (drm *dynamicRESTMapper) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { + if err := drm.init(); err != nil { + return nil, err + } + var gvks []schema.GroupVersionKind + err := drm.checkAndReload(&meta.NoResourceMatchError{}, func() error { + var err error + gvks, err = drm.staticMapper.KindsFor(resource) + return err + }) + return gvks, err +} + +func (drm *dynamicRESTMapper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) { + if err := drm.init(); err != nil { + return schema.GroupVersionResource{}, err + } + + var gvr schema.GroupVersionResource + err := drm.checkAndReload(&meta.NoResourceMatchError{}, func() error { + var err error + gvr, err = drm.staticMapper.ResourceFor(input) + return err + }) + return gvr, err +} + +func (drm *dynamicRESTMapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { + if err := drm.init(); err != nil { + return nil, err + } + var gvrs []schema.GroupVersionResource + err := drm.checkAndReload(&meta.NoResourceMatchError{}, func() error { + var err error + gvrs, err = drm.staticMapper.ResourcesFor(input) + return err + }) + return gvrs, err +} + +func (drm *dynamicRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { + if err := drm.init(); err != nil { + return nil, err + } + var mapping *meta.RESTMapping + err := drm.checkAndReload(&meta.NoKindMatchError{}, func() error { + var err error + mapping, err = drm.staticMapper.RESTMapping(gk, versions...) + return err + }) + return mapping, err +} + +func (drm *dynamicRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { + if err := drm.init(); err != nil { + return nil, err + } + var mappings []*meta.RESTMapping + err := drm.checkAndReload(&meta.NoKindMatchError{}, func() error { + var err error + mappings, err = drm.staticMapper.RESTMappings(gk, versions...) 
+ return err + }) + return mappings, err +} + +func (drm *dynamicRESTMapper) ResourceSingularizer(resource string) (string, error) { + if err := drm.init(); err != nil { + return "", err + } + var singular string + err := drm.checkAndReload(&meta.NoResourceMatchError{}, func() error { + var err error + singular, err = drm.staticMapper.ResourceSingularizer(resource) + return err + }) + return singular, err +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go new file mode 100644 index 000000000..bbe36c467 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go @@ -0,0 +1,328 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/metadata" + "k8s.io/client-go/rest" + + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + "sigs.k8s.io/controller-runtime/pkg/log" +) + +// WarningHandlerOptions are options for configuring a +// warning handler for the client which is responsible +// for surfacing API Server warnings. +type WarningHandlerOptions struct { + // SuppressWarnings decides if the warnings from the + // API server are suppressed or surfaced in the client. + SuppressWarnings bool + // AllowDuplicateLogs does not deduplicate the to-be + // logged surfaced warnings messages. See + // log.WarningHandlerOptions for considerations + // regarding deuplication + AllowDuplicateLogs bool +} + +// Options are creation options for a Client. +type Options struct { + // Scheme, if provided, will be used to map go structs to GroupVersionKinds + Scheme *runtime.Scheme + + // Mapper, if provided, will be used to map GroupVersionKinds to Resources + Mapper meta.RESTMapper + + // Opts is used to configure the warning handler responsible for + // surfacing and handling warnings messages sent by the API server. + Opts WarningHandlerOptions +} + +// New returns a new Client using the provided config and Options. +// The returned client reads *and* writes directly from the server +// (it doesn't use object caches). It understands how to work with +// normal types (both custom resources and aggregated/built-in resources), +// as well as unstructured types. +// +// In the case of normal types, the scheme will be used to look up the +// corresponding group, version, and kind for the given type. In the +// case of unstructured types, the group, version, and kind will be extracted +// from the corresponding fields on the object. 
+func New(config *rest.Config, options Options) (Client, error) { + return newClient(config, options) +} + +func newClient(config *rest.Config, options Options) (*client, error) { + if config == nil { + return nil, fmt.Errorf("must provide non-nil rest.Config to client.New") + } + + if !options.Opts.SuppressWarnings { + // surface warnings + logger := log.Log.WithName("KubeAPIWarningLogger") + // Set a WarningHandler, the default WarningHandler + // is log.KubeAPIWarningLogger with deduplication enabled. + // See log.KubeAPIWarningLoggerOptions for considerations + // regarding deduplication. + rest.SetDefaultWarningHandler( + log.NewKubeAPIWarningLogger( + logger, + log.KubeAPIWarningLoggerOptions{ + Deduplicate: !options.Opts.AllowDuplicateLogs, + }, + ), + ) + } + + // Init a scheme if none provided + if options.Scheme == nil { + options.Scheme = scheme.Scheme + } + + // Init a Mapper if none provided + if options.Mapper == nil { + var err error + options.Mapper, err = apiutil.NewDynamicRESTMapper(config) + if err != nil { + return nil, err + } + } + + clientcache := &clientCache{ + config: config, + scheme: options.Scheme, + mapper: options.Mapper, + codecs: serializer.NewCodecFactory(options.Scheme), + + structuredResourceByType: make(map[schema.GroupVersionKind]*resourceMeta), + unstructuredResourceByType: make(map[schema.GroupVersionKind]*resourceMeta), + } + + rawMetaClient, err := metadata.NewForConfig(config) + if err != nil { + return nil, fmt.Errorf("unable to construct metadata-only client for use as part of client: %w", err) + } + + c := &client{ + typedClient: typedClient{ + cache: clientcache, + paramCodec: runtime.NewParameterCodec(options.Scheme), + }, + unstructuredClient: unstructuredClient{ + cache: clientcache, + paramCodec: noConversionParamCodec{}, + }, + metadataClient: metadataClient{ + client: rawMetaClient, + restMapper: options.Mapper, + }, + scheme: options.Scheme, + mapper: options.Mapper, + } + + return c, nil +} + +var _ Client = &client{} + +// client is a client.Client that reads and writes directly from/to an API server. It lazily initializes +// new clients at the time they are used, and caches the client. +type client struct { + typedClient typedClient + unstructuredClient unstructuredClient + metadataClient metadataClient + scheme *runtime.Scheme + mapper meta.RESTMapper +} + +// resetGroupVersionKind is a helper function to restore and preserve GroupVersionKind on an object. +func (c *client) resetGroupVersionKind(obj runtime.Object, gvk schema.GroupVersionKind) { + if gvk != schema.EmptyObjectKind.GroupVersionKind() { + if v, ok := obj.(schema.ObjectKind); ok { + v.SetGroupVersionKind(gvk) + } + } +} + +// Scheme returns the scheme this client is using. +func (c *client) Scheme() *runtime.Scheme { + return c.scheme +} + +// RESTMapper returns the scheme this client is using. +func (c *client) RESTMapper() meta.RESTMapper { + return c.mapper +} + +// Create implements client.Client. +func (c *client) Create(ctx context.Context, obj Object, opts ...CreateOption) error { + switch obj.(type) { + case *unstructured.Unstructured: + return c.unstructuredClient.Create(ctx, obj, opts...) + case *metav1.PartialObjectMetadata: + return fmt.Errorf("cannot create using only metadata") + default: + return c.typedClient.Create(ctx, obj, opts...) + } +} + +// Update implements client.Client. 
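Constructing a client with these options is straightforward; the sketch below also shows the WarningHandlerOptions knob that controls whether API server warnings are logged or suppressed. Whether suppression is appropriate depends on the caller; this is only an illustration.

package main

import (
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/config"
)

func main() {
	cfg, err := config.GetConfig()
	if err != nil {
		panic(err)
	}

	// Suppress API server warnings instead of logging them through the
	// KubeAPIWarningLogger installed by default.
	c, err := client.New(cfg, client.Options{
		Opts: client.WarningHandlerOptions{SuppressWarnings: true},
	})
	if err != nil {
		panic(err)
	}
	_ = c
}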
+func (c *client) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) + switch obj.(type) { + case *unstructured.Unstructured: + return c.unstructuredClient.Update(ctx, obj, opts...) + case *metav1.PartialObjectMetadata: + return fmt.Errorf("cannot update using only metadata -- did you mean to patch?") + default: + return c.typedClient.Update(ctx, obj, opts...) + } +} + +// Delete implements client.Client. +func (c *client) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { + switch obj.(type) { + case *unstructured.Unstructured: + return c.unstructuredClient.Delete(ctx, obj, opts...) + case *metav1.PartialObjectMetadata: + return c.metadataClient.Delete(ctx, obj, opts...) + default: + return c.typedClient.Delete(ctx, obj, opts...) + } +} + +// DeleteAllOf implements client.Client. +func (c *client) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { + switch obj.(type) { + case *unstructured.Unstructured: + return c.unstructuredClient.DeleteAllOf(ctx, obj, opts...) + case *metav1.PartialObjectMetadata: + return c.metadataClient.DeleteAllOf(ctx, obj, opts...) + default: + return c.typedClient.DeleteAllOf(ctx, obj, opts...) + } +} + +// Patch implements client.Client. +func (c *client) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) + switch obj.(type) { + case *unstructured.Unstructured: + return c.unstructuredClient.Patch(ctx, obj, patch, opts...) + case *metav1.PartialObjectMetadata: + return c.metadataClient.Patch(ctx, obj, patch, opts...) + default: + return c.typedClient.Patch(ctx, obj, patch, opts...) + } +} + +// Get implements client.Client. +func (c *client) Get(ctx context.Context, key ObjectKey, obj Object) error { + switch obj.(type) { + case *unstructured.Unstructured: + return c.unstructuredClient.Get(ctx, key, obj) + case *metav1.PartialObjectMetadata: + // Metadata only object should always preserve the GVK coming in from the caller. + defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) + return c.metadataClient.Get(ctx, key, obj) + default: + return c.typedClient.Get(ctx, key, obj) + } +} + +// List implements client.Client. +func (c *client) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { + switch x := obj.(type) { + case *unstructured.UnstructuredList: + return c.unstructuredClient.List(ctx, obj, opts...) + case *metav1.PartialObjectMetadataList: + // Metadata only object should always preserve the GVK. + gvk := obj.GetObjectKind().GroupVersionKind() + defer c.resetGroupVersionKind(obj, gvk) + + // Call the list client. + if err := c.metadataClient.List(ctx, obj, opts...); err != nil { + return err + } + + // Restore the GVK for each item in the list. + itemGVK := schema.GroupVersionKind{ + Group: gvk.Group, + Version: gvk.Version, + // TODO: this is producing unsafe guesses that don't actually work, + // but it matches ~99% of the cases out there. + Kind: strings.TrimSuffix(gvk.Kind, "List"), + } + for i := range x.Items { + item := &x.Items[i] + item.SetGroupVersionKind(itemGVK) + } + + return nil + default: + return c.typedClient.List(ctx, obj, opts...) + } +} + +// Status implements client.StatusClient. +func (c *client) Status() StatusWriter { + return &statusWriter{client: c} +} + +// statusWriter is client.StatusWriter that writes status subresource. 
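The PartialObjectMetadata branches above mean callers can do metadata-only reads, provided they set the GVK on the object first. A library-style sketch for a Pod; the kind choice is arbitrary.

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// getPodMetadata fetches only metadata for a Pod; the GVK must be set on the
// PartialObjectMetadata up front so the client can route the request.
func getPodMetadata(ctx context.Context, c client.Client, key client.ObjectKey) (*metav1.PartialObjectMetadata, error) {
	pm := &metav1.PartialObjectMetadata{}
	pm.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Pod"))
	if err := c.Get(ctx, key, pm); err != nil {
		return nil, err
	}
	return pm, nil
}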
+type statusWriter struct { + client *client +} + +// ensure statusWriter implements client.StatusWriter. +var _ StatusWriter = &statusWriter{} + +// Update implements client.StatusWriter. +func (sw *statusWriter) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + defer sw.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) + switch obj.(type) { + case *unstructured.Unstructured: + return sw.client.unstructuredClient.UpdateStatus(ctx, obj, opts...) + case *metav1.PartialObjectMetadata: + return fmt.Errorf("cannot update status using only metadata -- did you mean to patch?") + default: + return sw.client.typedClient.UpdateStatus(ctx, obj, opts...) + } +} + +// Patch implements client.Client. +func (sw *statusWriter) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + defer sw.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) + switch obj.(type) { + case *unstructured.Unstructured: + return sw.client.unstructuredClient.PatchStatus(ctx, obj, patch, opts...) + case *metav1.PartialObjectMetadata: + return sw.client.metadataClient.PatchStatus(ctx, obj, patch, opts...) + default: + return sw.client.typedClient.PatchStatus(ctx, obj, patch, opts...) + } +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go new file mode 100644 index 000000000..857a0b38a --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go @@ -0,0 +1,150 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "strings" + "sync" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" +) + +// clientCache creates and caches rest clients and metadata for Kubernetes types. +type clientCache struct { + // config is the rest.Config to talk to an apiserver + config *rest.Config + + // scheme maps go structs to GroupVersionKinds + scheme *runtime.Scheme + + // mapper maps GroupVersionKinds to Resources + mapper meta.RESTMapper + + // codecs are used to create a REST client for a gvk + codecs serializer.CodecFactory + + // structuredResourceByType caches structured type metadata + structuredResourceByType map[schema.GroupVersionKind]*resourceMeta + // unstructuredResourceByType caches unstructured type metadata + unstructuredResourceByType map[schema.GroupVersionKind]*resourceMeta + mu sync.RWMutex +} + +// newResource maps obj to a Kubernetes Resource and constructs a client for that Resource. +// If the object is a list, the resource represents the item's type instead. 
+func (c *clientCache) newResource(gvk schema.GroupVersionKind, isList, isUnstructured bool) (*resourceMeta, error) { + if strings.HasSuffix(gvk.Kind, "List") && isList { + // if this was a list, treat it as a request for the item's resource + gvk.Kind = gvk.Kind[:len(gvk.Kind)-4] + } + + client, err := apiutil.RESTClientForGVK(gvk, isUnstructured, c.config, c.codecs) + if err != nil { + return nil, err + } + mapping, err := c.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return nil, err + } + return &resourceMeta{Interface: client, mapping: mapping, gvk: gvk}, nil +} + +// getResource returns the resource meta information for the given type of object. +// If the object is a list, the resource represents the item's type instead. +func (c *clientCache) getResource(obj runtime.Object) (*resourceMeta, error) { + gvk, err := apiutil.GVKForObject(obj, c.scheme) + if err != nil { + return nil, err + } + + _, isUnstructured := obj.(*unstructured.Unstructured) + _, isUnstructuredList := obj.(*unstructured.UnstructuredList) + isUnstructured = isUnstructured || isUnstructuredList + + // It's better to do creation work twice than to not let multiple + // people make requests at once + c.mu.RLock() + resourceByType := c.structuredResourceByType + if isUnstructured { + resourceByType = c.unstructuredResourceByType + } + r, known := resourceByType[gvk] + c.mu.RUnlock() + + if known { + return r, nil + } + + // Initialize a new Client + c.mu.Lock() + defer c.mu.Unlock() + r, err = c.newResource(gvk, meta.IsListType(obj), isUnstructured) + if err != nil { + return nil, err + } + resourceByType[gvk] = r + return r, err +} + +// getObjMeta returns objMeta containing both type and object metadata and state. +func (c *clientCache) getObjMeta(obj runtime.Object) (*objMeta, error) { + r, err := c.getResource(obj) + if err != nil { + return nil, err + } + m, err := meta.Accessor(obj) + if err != nil { + return nil, err + } + return &objMeta{resourceMeta: r, Object: m}, err +} + +// resourceMeta caches state for a Kubernetes type. +type resourceMeta struct { + // client is the rest client used to talk to the apiserver + rest.Interface + // gvk is the GroupVersionKind of the resourceMeta + gvk schema.GroupVersionKind + // mapping is the rest mapping + mapping *meta.RESTMapping +} + +// isNamespaced returns true if the type is namespaced. +func (r *resourceMeta) isNamespaced() bool { + return r.mapping.Scope.Name() != meta.RESTScopeNameRoot +} + +// resource returns the resource name of the type. +func (r *resourceMeta) resource() string { + return r.mapping.Resource.Resource +} + +// objMeta stores type and object information about a Kubernetes type. +type objMeta struct { + // resourceMeta contains type information for the object + *resourceMeta + + // Object contains meta data for the object instance + metav1.Object +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/codec.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/codec.go new file mode 100644 index 000000000..9c2923106 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/codec.go @@ -0,0 +1,40 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "errors" + "net/url" + + "k8s.io/apimachinery/pkg/conversion/queryparams" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var _ runtime.ParameterCodec = noConversionParamCodec{} + +// noConversionParamCodec is a no-conversion codec for serializing parameters into URL query strings. +// it's useful in scenarios with the unstructured client and arbitrary resources. +type noConversionParamCodec struct{} + +func (noConversionParamCodec) EncodeParameters(obj runtime.Object, to schema.GroupVersion) (url.Values, error) { + return queryparams.Convert(obj) +} + +func (noConversionParamCodec) DecodeParameters(parameters url.Values, from schema.GroupVersion, into runtime.Object) error { + return errors.New("DecodeParameters not implemented on noConversionParamCodec") +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go new file mode 100644 index 000000000..fd2772412 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go @@ -0,0 +1,157 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "flag" + "fmt" + "os" + "os/user" + "path/filepath" + + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" +) + +var ( + kubeconfig string + log = logf.RuntimeLog.WithName("client").WithName("config") +) + +func init() { + // TODO: Fix this to allow double vendoring this library but still register flags on behalf of users + flag.StringVar(&kubeconfig, "kubeconfig", "", + "Paths to a kubeconfig. Only required if out-of-cluster.") +} + +// GetConfig creates a *rest.Config for talking to a Kubernetes API server. +// If --kubeconfig is set, will use the kubeconfig file at that location. Otherwise will assume running +// in cluster and use the cluster provided kubeconfig. +// +// It also applies saner defaults for QPS and burst based on the Kubernetes +// controller manager defaults (20 QPS, 30 burst) +// +// Config precedence +// +// * --kubeconfig flag pointing at a file +// +// * KUBECONFIG environment variable pointing at a file +// +// * In-cluster config if running in cluster +// +// * $HOME/.kube/config if exists. +func GetConfig() (*rest.Config, error) { + return GetConfigWithContext("") +} + +// GetConfigWithContext creates a *rest.Config for talking to a Kubernetes API server with a specific context. 
+// If --kubeconfig is set, will use the kubeconfig file at that location. Otherwise will assume running +// in cluster and use the cluster provided kubeconfig. +// +// It also applies saner defaults for QPS and burst based on the Kubernetes +// controller manager defaults (20 QPS, 30 burst) +// +// Config precedence +// +// * --kubeconfig flag pointing at a file +// +// * KUBECONFIG environment variable pointing at a file +// +// * In-cluster config if running in cluster +// +// * $HOME/.kube/config if exists. +func GetConfigWithContext(context string) (*rest.Config, error) { + cfg, err := loadConfig(context) + if err != nil { + return nil, err + } + + if cfg.QPS == 0.0 { + cfg.QPS = 20.0 + cfg.Burst = 30.0 + } + + return cfg, nil +} + +// loadInClusterConfig is a function used to load the in-cluster +// Kubernetes client config. This variable makes is possible to +// test the precedence of loading the config. +var loadInClusterConfig = rest.InClusterConfig + +// loadConfig loads a REST Config as per the rules specified in GetConfig. +func loadConfig(context string) (*rest.Config, error) { + // If a flag is specified with the config location, use that + if len(kubeconfig) > 0 { + return loadConfigWithContext("", &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfig}, context) + } + + // If the recommended kubeconfig env variable is not specified, + // try the in-cluster config. + kubeconfigPath := os.Getenv(clientcmd.RecommendedConfigPathEnvVar) + if len(kubeconfigPath) == 0 { + if c, err := loadInClusterConfig(); err == nil { + return c, nil + } + } + + // If the recommended kubeconfig env variable is set, or there + // is no in-cluster config, try the default recommended locations. + // + // NOTE: For default config file locations, upstream only checks + // $HOME for the user's home directory, but we can also try + // os/user.HomeDir when $HOME is unset. + // + // TODO(jlanford): could this be done upstream? + loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() + if _, ok := os.LookupEnv("HOME"); !ok { + u, err := user.Current() + if err != nil { + return nil, fmt.Errorf("could not get current user: %w", err) + } + loadingRules.Precedence = append(loadingRules.Precedence, filepath.Join(u.HomeDir, clientcmd.RecommendedHomeDir, clientcmd.RecommendedFileName)) + } + + return loadConfigWithContext("", loadingRules, context) +} + +func loadConfigWithContext(apiServerURL string, loader clientcmd.ClientConfigLoader, context string) (*rest.Config, error) { + return clientcmd.NewNonInteractiveDeferredLoadingClientConfig( + loader, + &clientcmd.ConfigOverrides{ + ClusterInfo: clientcmdapi.Cluster{ + Server: apiServerURL, + }, + CurrentContext: context, + }).ClientConfig() +} + +// GetConfigOrDie creates a *rest.Config for talking to a Kubernetes apiserver. +// If --kubeconfig is set, will use the kubeconfig file at that location. Otherwise will assume running +// in cluster and use the cluster provided kubeconfig. +// +// Will log an error and exit if there is an error creating the rest.Config. 
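As a minimal usage sketch of the config loading above (the kubeconfig context name is a placeholder; the import path is this vendored package):

```go
package main

import (
	"fmt"

	"sigs.k8s.io/controller-runtime/pkg/client/config"
)

func main() {
	// Resolves a rest.Config from --kubeconfig, $KUBECONFIG, the in-cluster
	// environment, or the default kubeconfig locations, pinned to a context.
	cfg, err := config.GetConfigWithContext("my-context") // "my-context" is a placeholder
	if err != nil {
		panic(err)
	}
	// Explicit QPS settings are left alone; the controller-manager style
	// defaults (20 QPS, 30 burst) are only applied when QPS is unset.
	fmt.Printf("API server: %s, QPS: %v, Burst: %d\n", cfg.Host, cfg.QPS, cfg.Burst)
}
```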
+func GetConfigOrDie() *rest.Config { + config, err := GetConfig() + if err != nil { + log.Error(err, "unable to get kubeconfig") + os.Exit(1) + } + return config +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/doc.go new file mode 100644 index 000000000..796c9cf59 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package config contains libraries for initializing REST configs for talking to the Kubernetes API +package config diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go new file mode 100644 index 000000000..2965e5fa9 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go @@ -0,0 +1,49 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package client contains functionality for interacting with Kubernetes API +// servers. +// +// Clients +// +// Clients are split into two interfaces -- Readers and Writers. Readers +// get and list, while writers create, update, and delete. +// +// The New function can be used to create a new client that talks directly +// to the API server. +// +// A common pattern in Kubernetes to read from a cache and write to the API +// server. This pattern is covered by the DelegatingClient type, which can +// be used to have a client whose Reader is different from the Writer. +// +// Options +// +// Many client operations in Kubernetes support options. These options are +// represented as variadic arguments at the end of a given method call. +// For instance, to use a label selector on list, you can call +// err := someReader.List(context.Background(), &podList, client.MatchingLabels{"somelabel": "someval"}) +// +// Indexing +// +// Indexes may be added to caches using a FieldIndexer. This allows you to easily +// and efficiently look up objects with certain properties. You can then make +// use of the index by specifying a field selector on calls to List on the Reader +// corresponding to the given Cache. +// +// For instance, a Secret controller might have an index on the +// `.spec.volumes.secret.secretName` field in Pod objects, so that it could +// easily look up all pods that reference a given secret. 
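A minimal sketch of the client and options pattern described in the package doc above, using built-in types; the namespace and label values are placeholders:

```go
package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/config"
)

func main() {
	// Build a client that talks directly to the API server; the default
	// scheme already knows built-in types such as Pod.
	c, err := client.New(config.GetConfigOrDie(), client.Options{})
	if err != nil {
		panic(err)
	}

	// Options are passed as trailing variadic arguments.
	var pods corev1.PodList
	err = c.List(context.Background(), &pods,
		client.InNamespace("default"),                // placeholder namespace
		client.MatchingLabels{"somelabel": "someval"}) // placeholder label
	if err != nil {
		panic(err)
	}
	fmt.Printf("matched %d pods\n", len(pods.Items))
}
```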
+package client diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go new file mode 100644 index 000000000..ea25ea253 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go @@ -0,0 +1,106 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" +) + +// NewDryRunClient wraps an existing client and enforces DryRun mode +// on all mutating api calls. +func NewDryRunClient(c Client) Client { + return &dryRunClient{client: c} +} + +var _ Client = &dryRunClient{} + +// dryRunClient is a Client that wraps another Client in order to enforce DryRun mode. +type dryRunClient struct { + client Client +} + +// Scheme returns the scheme this client is using. +func (c *dryRunClient) Scheme() *runtime.Scheme { + return c.client.Scheme() +} + +// RESTMapper returns the rest mapper this client is using. +func (c *dryRunClient) RESTMapper() meta.RESTMapper { + return c.client.RESTMapper() +} + +// Create implements client.Client. +func (c *dryRunClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { + return c.client.Create(ctx, obj, append(opts, DryRunAll)...) +} + +// Update implements client.Client. +func (c *dryRunClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + return c.client.Update(ctx, obj, append(opts, DryRunAll)...) +} + +// Delete implements client.Client. +func (c *dryRunClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { + return c.client.Delete(ctx, obj, append(opts, DryRunAll)...) +} + +// DeleteAllOf implements client.Client. +func (c *dryRunClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { + return c.client.DeleteAllOf(ctx, obj, append(opts, DryRunAll)...) +} + +// Patch implements client.Client. +func (c *dryRunClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + return c.client.Patch(ctx, obj, patch, append(opts, DryRunAll)...) +} + +// Get implements client.Client. +func (c *dryRunClient) Get(ctx context.Context, key ObjectKey, obj Object) error { + return c.client.Get(ctx, key, obj) +} + +// List implements client.Client. +func (c *dryRunClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { + return c.client.List(ctx, obj, opts...) +} + +// Status implements client.StatusClient. +func (c *dryRunClient) Status() StatusWriter { + return &dryRunStatusWriter{client: c.client.Status()} +} + +// ensure dryRunStatusWriter implements client.StatusWriter. +var _ StatusWriter = &dryRunStatusWriter{} + +// dryRunStatusWriter is client.StatusWriter that writes status subresource with dryRun mode +// enforced. +type dryRunStatusWriter struct { + client StatusWriter +} + +// Update implements client.StatusWriter. 
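A short sketch of the dry-run wrapper in use; the ConfigMap name and namespace are made up:

```go
package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/config"
)

func main() {
	base, err := client.New(config.GetConfigOrDie(), client.Options{})
	if err != nil {
		panic(err)
	}
	// Every mutating call through dryRun gets DryRunAll appended, so the
	// server validates and admits the request without persisting anything.
	dryRun := client.NewDryRunClient(base)

	cm := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: "default"}, // placeholder names
		Data:       map[string]string{"key": "value"},
	}
	if err := dryRun.Create(context.Background(), cm); err != nil {
		panic(err)
	}
	// Reads (Get/List) are passed through to the wrapped client unchanged.
}
```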
+func (sw *dryRunStatusWriter) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + return sw.client.Update(ctx, obj, append(opts, DryRunAll)...) +} + +// Patch implements client.StatusWriter. +func (sw *dryRunStatusWriter) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + return sw.client.Patch(ctx, obj, patch, append(opts, DryRunAll)...) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go new file mode 100644 index 000000000..58c2ece15 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go @@ -0,0 +1,145 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" +) + +// ObjectKey identifies a Kubernetes Object. +type ObjectKey = types.NamespacedName + +// ObjectKeyFromObject returns the ObjectKey given a runtime.Object. +func ObjectKeyFromObject(obj Object) ObjectKey { + return ObjectKey{Namespace: obj.GetNamespace(), Name: obj.GetName()} +} + +// Patch is a patch that can be applied to a Kubernetes object. +type Patch interface { + // Type is the PatchType of the patch. + Type() types.PatchType + // Data is the raw data representing the patch. + Data(obj Object) ([]byte, error) +} + +// TODO(directxman12): is there a sane way to deal with get/delete options? + +// Reader knows how to read and list Kubernetes objects. +type Reader interface { + // Get retrieves an obj for the given object key from the Kubernetes Cluster. + // obj must be a struct pointer so that obj can be updated with the response + // returned by the Server. + Get(ctx context.Context, key ObjectKey, obj Object) error + + // List retrieves list of objects for a given namespace and list options. On a + // successful call, Items field in the list will be populated with the + // result returned from the server. + List(ctx context.Context, list ObjectList, opts ...ListOption) error +} + +// Writer knows how to create, delete, and update Kubernetes objects. +type Writer interface { + // Create saves the object obj in the Kubernetes cluster. + Create(ctx context.Context, obj Object, opts ...CreateOption) error + + // Delete deletes the given obj from Kubernetes cluster. + Delete(ctx context.Context, obj Object, opts ...DeleteOption) error + + // Update updates the given obj in the Kubernetes cluster. obj must be a + // struct pointer so that obj can be updated with the content returned by the Server. + Update(ctx context.Context, obj Object, opts ...UpdateOption) error + + // Patch patches the given obj in the Kubernetes cluster. obj must be a + // struct pointer so that obj can be updated with the content returned by the Server. 
+ Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error + + // DeleteAllOf deletes all objects of the given type matching the given options. + DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error +} + +// StatusClient knows how to create a client which can update status subresource +// for kubernetes objects. +type StatusClient interface { + Status() StatusWriter +} + +// StatusWriter knows how to update status subresource of a Kubernetes object. +type StatusWriter interface { + // Update updates the fields corresponding to the status subresource for the + // given obj. obj must be a struct pointer so that obj can be updated + // with the content returned by the Server. + Update(ctx context.Context, obj Object, opts ...UpdateOption) error + + // Patch patches the given object's subresource. obj must be a struct + // pointer so that obj can be updated with the content returned by the + // Server. + Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error +} + +// Client knows how to perform CRUD operations on Kubernetes objects. +type Client interface { + Reader + Writer + StatusClient + + // Scheme returns the scheme this client is using. + Scheme() *runtime.Scheme + // RESTMapper returns the rest this client is using. + RESTMapper() meta.RESTMapper +} + +// WithWatch supports Watch on top of the CRUD operations supported by +// the normal Client. Its intended use-case are CLI apps that need to wait for +// events. +type WithWatch interface { + Client + Watch(ctx context.Context, obj ObjectList, opts ...ListOption) (watch.Interface, error) +} + +// IndexerFunc knows how to take an object and turn it into a series +// of non-namespaced keys. Namespaced objects are automatically given +// namespaced and non-spaced variants, so keys do not need to include namespace. +type IndexerFunc func(Object) []string + +// FieldIndexer knows how to index over a particular "field" such that it +// can later be used by a field selector. +type FieldIndexer interface { + // IndexFields adds an index with the given field name on the given object type + // by using the given function to extract the value for that field. If you want + // compatibility with the Kubernetes API server, only return one key, and only use + // fields that the API server supports. Otherwise, you can return multiple keys, + // and "equality" in the field selector means that at least one key matches the value. + // The FieldIndexer will automatically take care of indexing over namespace + // and supporting efficient all-namespace queries. + IndexField(ctx context.Context, obj Object, field string, extractValue IndexerFunc) error +} + +// IgnoreNotFound returns nil on NotFound errors. +// All other values that are not NotFound errors or nil are returned unmodified. +func IgnoreNotFound(err error) error { + if apierrors.IsNotFound(err) { + return nil + } + return err +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/metadata_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/metadata_client.go new file mode 100644 index 000000000..db495ca46 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/metadata_client.go @@ -0,0 +1,193 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
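A minimal sketch of the Reader, StatusClient, and IgnoreNotFound pieces defined above working together; the object key is a placeholder:

```go
package example

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// syncStatus sketches the common Get / mutate / Status().Update flow.
func syncStatus(ctx context.Context, c client.Client) error {
	var dep appsv1.Deployment
	err := c.Get(ctx, client.ObjectKey{Namespace: "default", Name: "example"}, &dep)
	if err != nil {
		// Treat "already gone" as success; all other errors bubble up.
		return client.IgnoreNotFound(err)
	}

	// Only the status subresource is written here; spec changes are ignored
	// by the status writer.
	dep.Status.ObservedGeneration = dep.Generation
	return c.Status().Update(ctx, &dep)
}
```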
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/metadata" +) + +// TODO(directxman12): we could rewrite this on top of the low-level REST +// client to avoid the extra shallow copy at the end, but I'm not sure it's +// worth it -- the metadata client deals with falling back to loading the whole +// object on older API servers, etc, and we'd have to reproduce that. + +// metadataClient is a client that reads & writes metadata-only requests to/from the API server. +type metadataClient struct { + client metadata.Interface + restMapper meta.RESTMapper +} + +func (mc *metadataClient) getResourceInterface(gvk schema.GroupVersionKind, ns string) (metadata.ResourceInterface, error) { + mapping, err := mc.restMapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return nil, err + } + if mapping.Scope.Name() == meta.RESTScopeNameRoot { + return mc.client.Resource(mapping.Resource), nil + } + return mc.client.Resource(mapping.Resource).Namespace(ns), nil +} + +// Delete implements client.Client. +func (mc *metadataClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { + metadata, ok := obj.(*metav1.PartialObjectMetadata) + if !ok { + return fmt.Errorf("metadata client did not understand object: %T", obj) + } + + resInt, err := mc.getResourceInterface(metadata.GroupVersionKind(), metadata.Namespace) + if err != nil { + return err + } + + deleteOpts := DeleteOptions{} + deleteOpts.ApplyOptions(opts) + + return resInt.Delete(ctx, metadata.Name, *deleteOpts.AsDeleteOptions()) +} + +// DeleteAllOf implements client.Client. +func (mc *metadataClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { + metadata, ok := obj.(*metav1.PartialObjectMetadata) + if !ok { + return fmt.Errorf("metadata client did not understand object: %T", obj) + } + + deleteAllOfOpts := DeleteAllOfOptions{} + deleteAllOfOpts.ApplyOptions(opts) + + resInt, err := mc.getResourceInterface(metadata.GroupVersionKind(), deleteAllOfOpts.ListOptions.Namespace) + if err != nil { + return err + } + + return resInt.DeleteCollection(ctx, *deleteAllOfOpts.AsDeleteOptions(), *deleteAllOfOpts.AsListOptions()) +} + +// Patch implements client.Client. 
+func (mc *metadataClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + metadata, ok := obj.(*metav1.PartialObjectMetadata) + if !ok { + return fmt.Errorf("metadata client did not understand object: %T", obj) + } + + gvk := metadata.GroupVersionKind() + resInt, err := mc.getResourceInterface(gvk, metadata.Namespace) + if err != nil { + return err + } + + data, err := patch.Data(obj) + if err != nil { + return err + } + + patchOpts := &PatchOptions{} + patchOpts.ApplyOptions(opts) + + res, err := resInt.Patch(ctx, metadata.Name, patch.Type(), data, *patchOpts.AsPatchOptions()) + if err != nil { + return err + } + *metadata = *res + metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata + return nil +} + +// Get implements client.Client. +func (mc *metadataClient) Get(ctx context.Context, key ObjectKey, obj Object) error { + metadata, ok := obj.(*metav1.PartialObjectMetadata) + if !ok { + return fmt.Errorf("metadata client did not understand object: %T", obj) + } + + gvk := metadata.GroupVersionKind() + + resInt, err := mc.getResourceInterface(gvk, key.Namespace) + if err != nil { + return err + } + + res, err := resInt.Get(ctx, key.Name, metav1.GetOptions{}) + if err != nil { + return err + } + *metadata = *res + metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata + return nil +} + +// List implements client.Client. +func (mc *metadataClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { + metadata, ok := obj.(*metav1.PartialObjectMetadataList) + if !ok { + return fmt.Errorf("metadata client did not understand object: %T", obj) + } + + gvk := metadata.GroupVersionKind() + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") + + listOpts := ListOptions{} + listOpts.ApplyOptions(opts) + + resInt, err := mc.getResourceInterface(gvk, listOpts.Namespace) + if err != nil { + return err + } + + res, err := resInt.List(ctx, *listOpts.AsListOptions()) + if err != nil { + return err + } + *metadata = *res + metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata + return nil +} + +func (mc *metadataClient) PatchStatus(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + metadata, ok := obj.(*metav1.PartialObjectMetadata) + if !ok { + return fmt.Errorf("metadata client did not understand object: %T", obj) + } + + gvk := metadata.GroupVersionKind() + resInt, err := mc.getResourceInterface(gvk, metadata.Namespace) + if err != nil { + return err + } + + data, err := patch.Data(obj) + if err != nil { + return err + } + + patchOpts := &PatchOptions{} + res, err := resInt.Patch(ctx, metadata.Name, patch.Type(), data, *patchOpts.AsPatchOptions(), "status") + if err != nil { + return err + } + *metadata = *res + metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata + return nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go new file mode 100644 index 000000000..45a694543 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go @@ -0,0 +1,213 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
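A sketch of metadata-only reads, assuming a client (such as one from client.New) that routes *metav1.PartialObjectMetadata objects to a metadata-backed implementation like the one above; the GVK and object key are placeholders:

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/config"
)

func main() {
	c, err := client.New(config.GetConfigOrDie(), client.Options{})
	if err != nil {
		panic(err)
	}

	// Asking for PartialObjectMetadata fetches only object metadata
	// (name, labels, annotations, resource version), not the full payload.
	secret := &metav1.PartialObjectMetadata{}
	secret.SetGroupVersionKind(schema.GroupVersionKind{Version: "v1", Kind: "Secret"})

	key := client.ObjectKey{Namespace: "default", Name: "example"} // placeholder
	if err := c.Get(context.Background(), key, secret); err != nil {
		panic(err)
	}
	fmt.Println("resource version:", secret.GetResourceVersion())
}
```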
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/internal/objectutil" +) + +// NewNamespacedClient wraps an existing client enforcing the namespace value. +// All functions using this client will have the same namespace declared here. +func NewNamespacedClient(c Client, ns string) Client { + return &namespacedClient{ + client: c, + namespace: ns, + } +} + +var _ Client = &namespacedClient{} + +// namespacedClient is a Client that wraps another Client in order to enforce the specified namespace value. +type namespacedClient struct { + namespace string + client Client +} + +// Scheme returns the scheme this client is using. +func (n *namespacedClient) Scheme() *runtime.Scheme { + return n.client.Scheme() +} + +// RESTMapper returns the scheme this client is using. +func (n *namespacedClient) RESTMapper() meta.RESTMapper { + return n.client.RESTMapper() +} + +// Create implements clinet.Client. +func (n *namespacedClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { + isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %w", err) + } + + objectNamespace := obj.GetNamespace() + if objectNamespace != n.namespace && objectNamespace != "" { + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) + } + + if isNamespaceScoped && objectNamespace == "" { + obj.SetNamespace(n.namespace) + } + return n.client.Create(ctx, obj, opts...) +} + +// Update implements client.Client. +func (n *namespacedClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %w", err) + } + + objectNamespace := obj.GetNamespace() + if objectNamespace != n.namespace && objectNamespace != "" { + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) + } + + if isNamespaceScoped && objectNamespace == "" { + obj.SetNamespace(n.namespace) + } + return n.client.Update(ctx, obj, opts...) +} + +// Delete implements client.Client. +func (n *namespacedClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { + isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %w", err) + } + + objectNamespace := obj.GetNamespace() + if objectNamespace != n.namespace && objectNamespace != "" { + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) + } + + if isNamespaceScoped && objectNamespace == "" { + obj.SetNamespace(n.namespace) + } + return n.client.Delete(ctx, obj, opts...) +} + +// DeleteAllOf implements client.Client. 
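A short sketch of the namespaced wrapper; the namespace and object names are placeholders:

```go
package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/config"
)

func main() {
	base, err := client.New(config.GetConfigOrDie(), client.Options{})
	if err != nil {
		panic(err)
	}
	// Every namespace-scoped operation through nsClient lands in "team-a";
	// an object that explicitly names a different namespace is rejected.
	nsClient := client.NewNamespacedClient(base, "team-a") // placeholder namespace

	cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "settings"}} // namespace left empty on purpose
	if err := nsClient.Create(context.Background(), cm); err != nil {
		panic(err)
	}
	// cm.Namespace is now "team-a", filled in by the wrapper before the call.
}
```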
+func (n *namespacedClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { + isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %w", err) + } + + if isNamespaceScoped { + opts = append(opts, InNamespace(n.namespace)) + } + return n.client.DeleteAllOf(ctx, obj, opts...) +} + +// Patch implements client.Client. +func (n *namespacedClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %w", err) + } + + objectNamespace := obj.GetNamespace() + if objectNamespace != n.namespace && objectNamespace != "" { + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) + } + + if isNamespaceScoped && objectNamespace == "" { + obj.SetNamespace(n.namespace) + } + return n.client.Patch(ctx, obj, patch, opts...) +} + +// Get implements client.Client. +func (n *namespacedClient) Get(ctx context.Context, key ObjectKey, obj Object) error { + isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %w", err) + } + if isNamespaceScoped { + if key.Namespace != "" && key.Namespace != n.namespace { + return fmt.Errorf("namespace %s provided for the object %s does not match the namesapce %s on the client", key.Namespace, obj.GetName(), n.namespace) + } + key.Namespace = n.namespace + } + return n.client.Get(ctx, key, obj) +} + +// List implements client.Client. +func (n *namespacedClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { + if n.namespace != "" { + opts = append(opts, InNamespace(n.namespace)) + } + return n.client.List(ctx, obj, opts...) +} + +// Status implements client.StatusClient. +func (n *namespacedClient) Status() StatusWriter { + return &namespacedClientStatusWriter{StatusClient: n.client.Status(), namespace: n.namespace, namespacedclient: n} +} + +// ensure namespacedClientStatusWriter implements client.StatusWriter. +var _ StatusWriter = &namespacedClientStatusWriter{} + +type namespacedClientStatusWriter struct { + StatusClient StatusWriter + namespace string + namespacedclient Client +} + +// Update implements client.StatusWriter. +func (nsw *namespacedClientStatusWriter) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, nsw.namespacedclient.Scheme(), nsw.namespacedclient.RESTMapper()) + + if err != nil { + return fmt.Errorf("error finding the scope of the object: %w", err) + } + + objectNamespace := obj.GetNamespace() + if objectNamespace != nsw.namespace && objectNamespace != "" { + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), nsw.namespace) + } + + if isNamespaceScoped && objectNamespace == "" { + obj.SetNamespace(nsw.namespace) + } + return nsw.StatusClient.Update(ctx, obj, opts...) +} + +// Patch implements client.StatusWriter. 
+func (nsw *namespacedClientStatusWriter) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, nsw.namespacedclient.Scheme(), nsw.namespacedclient.RESTMapper()) + + if err != nil { + return fmt.Errorf("error finding the scope of the object: %w", err) + } + + objectNamespace := obj.GetNamespace() + if objectNamespace != nsw.namespace && objectNamespace != "" { + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), nsw.namespace) + } + + if isNamespaceScoped && objectNamespace == "" { + obj.SetNamespace(nsw.namespace) + } + return nsw.StatusClient.Patch(ctx, obj, patch, opts...) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/object.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/object.go new file mode 100644 index 000000000..31e334d6c --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/object.go @@ -0,0 +1,77 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// Object is a Kubernetes object, allows functions to work indistinctly with +// any resource that implements both Object interfaces. +// +// Semantically, these are objects which are both serializable (runtime.Object) +// and identifiable (metav1.Object) -- think any object which you could write +// as YAML or JSON, and then `kubectl create`. +// +// Code-wise, this means that any object which embeds both ObjectMeta (which +// provides metav1.Object) and TypeMeta (which provides half of runtime.Object) +// and has a `DeepCopyObject` implementation (the other half of runtime.Object) +// will implement this by default. +// +// For example, nearly all the built-in types are Objects, as well as all +// KubeBuilder-generated CRDs (unless you do something real funky to them). +// +// By and large, most things that implement runtime.Object also implement +// Object -- it's very rare to have *just* a runtime.Object implementation (the +// cases tend to be funky built-in types like Webhook payloads that don't have +// a `metadata` field). +// +// Notice that XYZList types are distinct: they implement ObjectList instead. +type Object interface { + metav1.Object + runtime.Object +} + +// ObjectList is a Kubernetes object list, allows functions to work +// indistinctly with any resource that implements both runtime.Object and +// metav1.ListInterface interfaces. +// +// Semantically, this is any object which may be serialized (ObjectMeta), and +// is a kubernetes list wrapper (has items, pagination fields, etc) -- think +// the wrapper used in a response from a `kubectl list --output yaml` call. 
+// +// Code-wise, this means that any object which embedds both ListMeta (which +// provides metav1.ListInterface) and TypeMeta (which provides half of +// runtime.Object) and has a `DeepCopyObject` implementation (the other half of +// runtime.Object) will implement this by default. +// +// For example, nearly all the built-in XYZList types are ObjectLists, as well +// as the XYZList types for all KubeBuilder-generated CRDs (unless you do +// something real funky to them). +// +// By and large, most things that are XYZList and implement runtime.Object also +// implement ObjectList -- it's very rare to have *just* a runtime.Object +// implementation (the cases tend to be funky built-in types like Webhook +// payloads that don't have a `metadata` field). +// +// This is similar to Object, which is almost always implemented by the items +// in the list themselves. +type ObjectList interface { + metav1.ListInterface + runtime.Object +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go new file mode 100644 index 000000000..7990f56ab --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go @@ -0,0 +1,697 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" +) + +// {{{ "Functional" Option Interfaces + +// CreateOption is some configuration that modifies options for a create request. +type CreateOption interface { + // ApplyToCreate applies this configuration to the given create options. + ApplyToCreate(*CreateOptions) +} + +// DeleteOption is some configuration that modifies options for a delete request. +type DeleteOption interface { + // ApplyToDelete applies this configuration to the given delete options. + ApplyToDelete(*DeleteOptions) +} + +// ListOption is some configuration that modifies options for a list request. +type ListOption interface { + // ApplyToList applies this configuration to the given list options. + ApplyToList(*ListOptions) +} + +// UpdateOption is some configuration that modifies options for a update request. +type UpdateOption interface { + // ApplyToUpdate applies this configuration to the given update options. + ApplyToUpdate(*UpdateOptions) +} + +// PatchOption is some configuration that modifies options for a patch request. +type PatchOption interface { + // ApplyToPatch applies this configuration to the given patch options. + ApplyToPatch(*PatchOptions) +} + +// DeleteAllOfOption is some configuration that modifies options for a delete request. +type DeleteAllOfOption interface { + // ApplyToDeleteAllOf applies this configuration to the given deletecollection options. 
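The option interfaces above are satisfied by small value types, so callers can define their own. A sketch with a hypothetical custom ListOption; the namespace is a placeholder:

```go
package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// onlyFirstPage is a hypothetical custom option: any type with an
// ApplyToList method can be passed to Reader.List alongside the built-ins.
type onlyFirstPage struct{ size int64 }

func (o onlyFirstPage) ApplyToList(opts *client.ListOptions) {
	opts.Limit = o.size
}

func listSome(ctx context.Context, c client.Client) (*corev1.PodList, error) {
	var pods corev1.PodList
	err := c.List(ctx, &pods, client.InNamespace("default"), onlyFirstPage{size: 10})
	return &pods, err
}
```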
+ ApplyToDeleteAllOf(*DeleteAllOfOptions) +} + +// }}} + +// {{{ Multi-Type Options + +// DryRunAll sets the "dry run" option to "all", executing all +// validation, etc without persisting the change to storage. +var DryRunAll = dryRunAll{} + +type dryRunAll struct{} + +// ApplyToCreate applies this configuration to the given create options. +func (dryRunAll) ApplyToCreate(opts *CreateOptions) { + opts.DryRun = []string{metav1.DryRunAll} +} + +// ApplyToUpdate applies this configuration to the given update options. +func (dryRunAll) ApplyToUpdate(opts *UpdateOptions) { + opts.DryRun = []string{metav1.DryRunAll} +} + +// ApplyToPatch applies this configuration to the given patch options. +func (dryRunAll) ApplyToPatch(opts *PatchOptions) { + opts.DryRun = []string{metav1.DryRunAll} +} + +// ApplyToPatch applies this configuration to the given delete options. +func (dryRunAll) ApplyToDelete(opts *DeleteOptions) { + opts.DryRun = []string{metav1.DryRunAll} +} +func (dryRunAll) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + opts.DryRun = []string{metav1.DryRunAll} +} + +// FieldOwner set the field manager name for the given server-side apply patch. +type FieldOwner string + +// ApplyToPatch applies this configuration to the given patch options. +func (f FieldOwner) ApplyToPatch(opts *PatchOptions) { + opts.FieldManager = string(f) +} + +// ApplyToCreate applies this configuration to the given create options. +func (f FieldOwner) ApplyToCreate(opts *CreateOptions) { + opts.FieldManager = string(f) +} + +// ApplyToUpdate applies this configuration to the given update options. +func (f FieldOwner) ApplyToUpdate(opts *UpdateOptions) { + opts.FieldManager = string(f) +} + +// }}} + +// {{{ Create Options + +// CreateOptions contains options for create requests. It's generally a subset +// of metav1.CreateOptions. +type CreateOptions struct { + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + DryRun []string + + // FieldManager is the name of the user or component submitting + // this request. It must be set with server-side apply. + FieldManager string + + // Raw represents raw CreateOptions, as passed to the API server. + Raw *metav1.CreateOptions +} + +// AsCreateOptions returns these options as a metav1.CreateOptions. +// This may mutate the Raw field. +func (o *CreateOptions) AsCreateOptions() *metav1.CreateOptions { + if o == nil { + return &metav1.CreateOptions{} + } + if o.Raw == nil { + o.Raw = &metav1.CreateOptions{} + } + + o.Raw.DryRun = o.DryRun + o.Raw.FieldManager = o.FieldManager + return o.Raw +} + +// ApplyOptions applies the given create options on these options, +// and then returns itself (for convenient chaining). +func (o *CreateOptions) ApplyOptions(opts []CreateOption) *CreateOptions { + for _, opt := range opts { + opt.ApplyToCreate(o) + } + return o +} + +// ApplyToCreate implements CreateOption. +func (o *CreateOptions) ApplyToCreate(co *CreateOptions) { + if o.DryRun != nil { + co.DryRun = o.DryRun + } + if o.FieldManager != "" { + co.FieldManager = o.FieldManager + } + if o.Raw != nil { + co.Raw = o.Raw + } +} + +var _ CreateOption = &CreateOptions{} + +// }}} + +// {{{ Delete Options + +// DeleteOptions contains options for delete requests. It's generally a subset +// of metav1.DeleteOptions. 
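A small sketch of the multi-type options above on a create call; the manager name and object are placeholders:

```go
package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// createExample records "example-controller" as the field manager and asks
// the server to validate the request without persisting it.
func createExample(ctx context.Context, c client.Client) error {
	cm := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: "default"}, // placeholder names
	}
	return c.Create(ctx, cm,
		client.FieldOwner("example-controller"), // hypothetical manager name
		client.DryRunAll,
	)
}
```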
+type DeleteOptions struct { + // GracePeriodSeconds is the duration in seconds before the object should be + // deleted. Value must be non-negative integer. The value zero indicates + // delete immediately. If this value is nil, the default grace period for the + // specified type will be used. + GracePeriodSeconds *int64 + + // Preconditions must be fulfilled before a deletion is carried out. If not + // possible, a 409 Conflict status will be returned. + Preconditions *metav1.Preconditions + + // PropagationPolicy determined whether and how garbage collection will be + // performed. Either this field or OrphanDependents may be set, but not both. + // The default policy is decided by the existing finalizer set in the + // metadata.finalizers and the resource-specific default policy. + // Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - + // allow the garbage collector to delete the dependents in the background; + // 'Foreground' - a cascading policy that deletes all dependents in the + // foreground. + PropagationPolicy *metav1.DeletionPropagation + + // Raw represents raw DeleteOptions, as passed to the API server. + Raw *metav1.DeleteOptions + + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + DryRun []string +} + +// AsDeleteOptions returns these options as a metav1.DeleteOptions. +// This may mutate the Raw field. +func (o *DeleteOptions) AsDeleteOptions() *metav1.DeleteOptions { + if o == nil { + return &metav1.DeleteOptions{} + } + if o.Raw == nil { + o.Raw = &metav1.DeleteOptions{} + } + + o.Raw.GracePeriodSeconds = o.GracePeriodSeconds + o.Raw.Preconditions = o.Preconditions + o.Raw.PropagationPolicy = o.PropagationPolicy + o.Raw.DryRun = o.DryRun + return o.Raw +} + +// ApplyOptions applies the given delete options on these options, +// and then returns itself (for convenient chaining). +func (o *DeleteOptions) ApplyOptions(opts []DeleteOption) *DeleteOptions { + for _, opt := range opts { + opt.ApplyToDelete(o) + } + return o +} + +var _ DeleteOption = &DeleteOptions{} + +// ApplyToDelete implements DeleteOption. +func (o *DeleteOptions) ApplyToDelete(do *DeleteOptions) { + if o.GracePeriodSeconds != nil { + do.GracePeriodSeconds = o.GracePeriodSeconds + } + if o.Preconditions != nil { + do.Preconditions = o.Preconditions + } + if o.PropagationPolicy != nil { + do.PropagationPolicy = o.PropagationPolicy + } + if o.Raw != nil { + do.Raw = o.Raw + } + if o.DryRun != nil { + do.DryRun = o.DryRun + } +} + +// GracePeriodSeconds sets the grace period for the deletion +// to the given number of seconds. +type GracePeriodSeconds int64 + +// ApplyToDelete applies this configuration to the given delete options. +func (s GracePeriodSeconds) ApplyToDelete(opts *DeleteOptions) { + secs := int64(s) + opts.GracePeriodSeconds = &secs +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (s GracePeriodSeconds) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + s.ApplyToDelete(&opts.DeleteOptions) +} + +// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. +type Preconditions metav1.Preconditions + +// ApplyToDelete applies this configuration to the given delete options. 
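A sketch combining the delete options above: immediate deletion, foreground cascading, and a UID precondition so a recreated object with the same name is not removed by accident. Names are placeholders:

```go
package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func deleteExample(ctx context.Context, c client.Client, uid types.UID) error {
	pod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: "default"},
	}
	return c.Delete(ctx, pod,
		client.GracePeriodSeconds(0),
		client.PropagationPolicy(metav1.DeletePropagationForeground),
		client.Preconditions(metav1.Preconditions{UID: &uid}),
	)
}
```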
+func (p Preconditions) ApplyToDelete(opts *DeleteOptions) { + preconds := metav1.Preconditions(p) + opts.Preconditions = &preconds +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (p Preconditions) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + p.ApplyToDelete(&opts.DeleteOptions) +} + +// PropagationPolicy determined whether and how garbage collection will be +// performed. Either this field or OrphanDependents may be set, but not both. +// The default policy is decided by the existing finalizer set in the +// metadata.finalizers and the resource-specific default policy. +// Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - +// allow the garbage collector to delete the dependents in the background; +// 'Foreground' - a cascading policy that deletes all dependents in the +// foreground. +type PropagationPolicy metav1.DeletionPropagation + +// ApplyToDelete applies the given delete options on these options. +// It will propagate to the dependents of the object to let the garbage collector handle it. +func (p PropagationPolicy) ApplyToDelete(opts *DeleteOptions) { + policy := metav1.DeletionPropagation(p) + opts.PropagationPolicy = &policy +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (p PropagationPolicy) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + p.ApplyToDelete(&opts.DeleteOptions) +} + +// }}} + +// {{{ List Options + +// ListOptions contains options for limiting or filtering results. +// It's generally a subset of metav1.ListOptions, with support for +// pre-parsed selectors (since generally, selectors will be executed +// against the cache). +type ListOptions struct { + // LabelSelector filters results by label. Use labels.Parse() to + // set from raw string form. + LabelSelector labels.Selector + // FieldSelector filters results by a particular field. In order + // to use this with cache-based implementations, restrict usage to + // a single field-value pair that's been added to the indexers. + FieldSelector fields.Selector + + // Namespace represents the namespace to list for, or empty for + // non-namespaced objects, or to list across all namespaces. + Namespace string + + // Limit specifies the maximum number of results to return from the server. The server may + // not support this field on all resource types, but if it does and more results remain it + // will set the continue field on the returned list object. This field is not supported if watch + // is true in the Raw ListOptions. + Limit int64 + // Continue is a token returned by the server that lets a client retrieve chunks of results + // from the server by specifying limit. The server may reject requests for continuation tokens + // it does not recognize and will return a 410 error if the token can no longer be used because + // it has expired. This field is not supported if watch is true in the Raw ListOptions. + Continue string + + // Raw represents raw ListOptions, as passed to the API server. Note + // that these may not be respected by all implementations of interface, + // and the LabelSelector, FieldSelector, Limit and Continue fields are ignored. + Raw *metav1.ListOptions +} + +var _ ListOption = &ListOptions{} + +// ApplyToList implements ListOption for ListOptions. 
+func (o *ListOptions) ApplyToList(lo *ListOptions) { + if o.LabelSelector != nil { + lo.LabelSelector = o.LabelSelector + } + if o.FieldSelector != nil { + lo.FieldSelector = o.FieldSelector + } + if o.Namespace != "" { + lo.Namespace = o.Namespace + } + if o.Raw != nil { + lo.Raw = o.Raw + } + if o.Limit > 0 { + lo.Limit = o.Limit + } + if o.Continue != "" { + lo.Continue = o.Continue + } +} + +// AsListOptions returns these options as a flattened metav1.ListOptions. +// This may mutate the Raw field. +func (o *ListOptions) AsListOptions() *metav1.ListOptions { + if o == nil { + return &metav1.ListOptions{} + } + if o.Raw == nil { + o.Raw = &metav1.ListOptions{} + } + if o.LabelSelector != nil { + o.Raw.LabelSelector = o.LabelSelector.String() + } + if o.FieldSelector != nil { + o.Raw.FieldSelector = o.FieldSelector.String() + } + if !o.Raw.Watch { + o.Raw.Limit = o.Limit + o.Raw.Continue = o.Continue + } + return o.Raw +} + +// ApplyOptions applies the given list options on these options, +// and then returns itself (for convenient chaining). +func (o *ListOptions) ApplyOptions(opts []ListOption) *ListOptions { + for _, opt := range opts { + opt.ApplyToList(o) + } + return o +} + +// MatchingLabels filters the list/delete operation on the given set of labels. +type MatchingLabels map[string]string + +// ApplyToList applies this configuration to the given list options. +func (m MatchingLabels) ApplyToList(opts *ListOptions) { + // TODO(directxman12): can we avoid reserializing this over and over? + sel := labels.SelectorFromValidatedSet(map[string]string(m)) + opts.LabelSelector = sel +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (m MatchingLabels) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + m.ApplyToList(&opts.ListOptions) +} + +// HasLabels filters the list/delete operation checking if the set of labels exists +// without checking their values. +type HasLabels []string + +// ApplyToList applies this configuration to the given list options. +func (m HasLabels) ApplyToList(opts *ListOptions) { + sel := labels.NewSelector() + for _, label := range m { + r, err := labels.NewRequirement(label, selection.Exists, nil) + if err == nil { + sel = sel.Add(*r) + } + } + opts.LabelSelector = sel +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (m HasLabels) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + m.ApplyToList(&opts.ListOptions) +} + +// MatchingLabelsSelector filters the list/delete operation on the given label +// selector (or index in the case of cached lists). A struct is used because +// labels.Selector is an interface, which cannot be aliased. +type MatchingLabelsSelector struct { + labels.Selector +} + +// ApplyToList applies this configuration to the given list options. +func (m MatchingLabelsSelector) ApplyToList(opts *ListOptions) { + opts.LabelSelector = m +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (m MatchingLabelsSelector) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + m.ApplyToList(&opts.ListOptions) +} + +// MatchingFields filters the list/delete operation on the given field Set +// (or index in the case of cached lists). +type MatchingFields fields.Set + +// ApplyToList applies this configuration to the given list options. +func (m MatchingFields) ApplyToList(opts *ListOptions) { + // TODO(directxman12): can we avoid re-serializing this? 
+ sel := fields.Set(m).AsSelector() + opts.FieldSelector = sel +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (m MatchingFields) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + m.ApplyToList(&opts.ListOptions) +} + +// MatchingFieldsSelector filters the list/delete operation on the given field +// selector (or index in the case of cached lists). A struct is used because +// fields.Selector is an interface, which cannot be aliased. +type MatchingFieldsSelector struct { + fields.Selector +} + +// ApplyToList applies this configuration to the given list options. +func (m MatchingFieldsSelector) ApplyToList(opts *ListOptions) { + opts.FieldSelector = m +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (m MatchingFieldsSelector) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + m.ApplyToList(&opts.ListOptions) +} + +// InNamespace restricts the list/delete operation to the given namespace. +type InNamespace string + +// ApplyToList applies this configuration to the given list options. +func (n InNamespace) ApplyToList(opts *ListOptions) { + opts.Namespace = string(n) +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (n InNamespace) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + n.ApplyToList(&opts.ListOptions) +} + +// Limit specifies the maximum number of results to return from the server. +// Limit does not implement DeleteAllOfOption interface because the server +// does not support setting it for deletecollection operations. +type Limit int64 + +// ApplyToList applies this configuration to the given an list options. +func (l Limit) ApplyToList(opts *ListOptions) { + opts.Limit = int64(l) +} + +// Continue sets a continuation token to retrieve chunks of results when using limit. +// Continue does not implement DeleteAllOfOption interface because the server +// does not support setting it for deletecollection operations. +type Continue string + +// ApplyToList applies this configuration to the given an List options. +func (c Continue) ApplyToList(opts *ListOptions) { + opts.Continue = string(c) +} + +// }}} + +// {{{ Update Options + +// UpdateOptions contains options for create requests. It's generally a subset +// of metav1.UpdateOptions. +type UpdateOptions struct { + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + DryRun []string + + // FieldManager is the name of the user or component submitting + // this request. It must be set with server-side apply. + FieldManager string + + // Raw represents raw UpdateOptions, as passed to the API server. + Raw *metav1.UpdateOptions +} + +// AsUpdateOptions returns these options as a metav1.UpdateOptions. +// This may mutate the Raw field. +func (o *UpdateOptions) AsUpdateOptions() *metav1.UpdateOptions { + if o == nil { + return &metav1.UpdateOptions{} + } + if o.Raw == nil { + o.Raw = &metav1.UpdateOptions{} + } + + o.Raw.DryRun = o.DryRun + o.Raw.FieldManager = o.FieldManager + return o.Raw +} + +// ApplyOptions applies the given update options on these options, +// and then returns itself (for convenient chaining). 
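A sketch of chunked listing with the selectors and pagination options above: Limit caps each request and the Continue token from the previous response fetches the next chunk. Namespace, labels, and the field selector are placeholders; against a live API server, field selectors only work for fields the server supports:

```go
package example

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func listPages(ctx context.Context, c client.Client) error {
	token := ""
	for {
		var pods corev1.PodList
		err := c.List(ctx, &pods,
			client.InNamespace("default"),
			client.MatchingLabels{"app": "web"},
			client.MatchingFields{"status.phase": "Running"},
			client.Limit(50),
			client.Continue(token),
		)
		if err != nil {
			return err
		}
		fmt.Printf("got %d pods in this chunk\n", len(pods.Items))

		token = pods.GetContinue()
		if token == "" {
			return nil // no more chunks
		}
	}
}
```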
+func (o *UpdateOptions) ApplyOptions(opts []UpdateOption) *UpdateOptions { + for _, opt := range opts { + opt.ApplyToUpdate(o) + } + return o +} + +var _ UpdateOption = &UpdateOptions{} + +// ApplyToUpdate implements UpdateOption. +func (o *UpdateOptions) ApplyToUpdate(uo *UpdateOptions) { + if o.DryRun != nil { + uo.DryRun = o.DryRun + } + if o.FieldManager != "" { + uo.FieldManager = o.FieldManager + } + if o.Raw != nil { + uo.Raw = o.Raw + } +} + +// }}} + +// {{{ Patch Options + +// PatchOptions contains options for patch requests. +type PatchOptions struct { + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + DryRun []string + + // Force is going to "force" Apply requests. It means user will + // re-acquire conflicting fields owned by other people. Force + // flag must be unset for non-apply patch requests. + // +optional + Force *bool + + // FieldManager is the name of the user or component submitting + // this request. It must be set with server-side apply. + FieldManager string + + // Raw represents raw PatchOptions, as passed to the API server. + Raw *metav1.PatchOptions +} + +// ApplyOptions applies the given patch options on these options, +// and then returns itself (for convenient chaining). +func (o *PatchOptions) ApplyOptions(opts []PatchOption) *PatchOptions { + for _, opt := range opts { + opt.ApplyToPatch(o) + } + return o +} + +// AsPatchOptions returns these options as a metav1.PatchOptions. +// This may mutate the Raw field. +func (o *PatchOptions) AsPatchOptions() *metav1.PatchOptions { + if o == nil { + return &metav1.PatchOptions{} + } + if o.Raw == nil { + o.Raw = &metav1.PatchOptions{} + } + + o.Raw.DryRun = o.DryRun + o.Raw.Force = o.Force + o.Raw.FieldManager = o.FieldManager + return o.Raw +} + +var _ PatchOption = &PatchOptions{} + +// ApplyToPatch implements PatchOptions. +func (o *PatchOptions) ApplyToPatch(po *PatchOptions) { + if o.DryRun != nil { + po.DryRun = o.DryRun + } + if o.Force != nil { + po.Force = o.Force + } + if o.FieldManager != "" { + po.FieldManager = o.FieldManager + } + if o.Raw != nil { + po.Raw = o.Raw + } +} + +// ForceOwnership indicates that in case of conflicts with server-side apply, +// the client should acquire ownership of the conflicting field. Most +// controllers should use this. +var ForceOwnership = forceOwnership{} + +type forceOwnership struct{} + +func (forceOwnership) ApplyToPatch(opts *PatchOptions) { + definitelyTrue := true + opts.Force = &definitelyTrue +} + +// }}} + +// {{{ DeleteAllOf Options + +// these are all just delete options and list options + +// DeleteAllOfOptions contains options for deletecollection (deleteallof) requests. +// It's just list and delete options smooshed together. +type DeleteAllOfOptions struct { + ListOptions + DeleteOptions +} + +// ApplyOptions applies the given deleteallof options on these options, +// and then returns itself (for convenient chaining). +func (o *DeleteAllOfOptions) ApplyOptions(opts []DeleteAllOfOption) *DeleteAllOfOptions { + for _, opt := range opts { + opt.ApplyToDeleteAllOf(o) + } + return o +} + +var _ DeleteAllOfOption = &DeleteAllOfOptions{} + +// ApplyToDeleteAllOf implements DeleteAllOfOption. 
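A short sketch of DeleteAllOf, which accepts both list-style options (to select objects) and delete-style options (to control how they go away); namespace and labels are placeholders:

```go
package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func cleanup(ctx context.Context, c client.Client) error {
	return c.DeleteAllOf(ctx, &corev1.Pod{},
		client.InNamespace("default"),
		client.MatchingLabels{"job": "finished"},
		client.GracePeriodSeconds(30),
	)
}
```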
+func (o *DeleteAllOfOptions) ApplyToDeleteAllOf(do *DeleteAllOfOptions) { + o.ApplyToList(&do.ListOptions) + o.ApplyToDelete(&do.DeleteOptions) +} + +// }}} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go new file mode 100644 index 000000000..10984c534 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go @@ -0,0 +1,213 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "fmt" + + jsonpatch "github.com/evanphx/json-patch" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/json" + "k8s.io/apimachinery/pkg/util/strategicpatch" +) + +var ( + // Apply uses server-side apply to patch the given object. + Apply Patch = applyPatch{} + + // Merge uses the raw object as a merge patch, without modifications. + // Use MergeFrom if you wish to compute a diff instead. + Merge Patch = mergePatch{} +) + +type patch struct { + patchType types.PatchType + data []byte +} + +// Type implements Patch. +func (s *patch) Type() types.PatchType { + return s.patchType +} + +// Data implements Patch. +func (s *patch) Data(obj Object) ([]byte, error) { + return s.data, nil +} + +// RawPatch constructs a new Patch with the given PatchType and data. +func RawPatch(patchType types.PatchType, data []byte) Patch { + return &patch{patchType, data} +} + +// MergeFromWithOptimisticLock can be used if clients want to make sure a patch +// is being applied to the latest resource version of an object. +// +// The behavior is similar to what an Update would do, without the need to send the +// whole object. Usually this method is useful if you might have multiple clients +// acting on the same object and the same API version, but with different versions of the Go structs. +// +// For example, an "older" copy of a Widget that has fields A and B, and a "newer" copy with A, B, and C. +// Sending an update using the older struct definition results in C being dropped, whereas using a patch does not. +type MergeFromWithOptimisticLock struct{} + +// ApplyToMergeFrom applies this configuration to the given patch options. +func (m MergeFromWithOptimisticLock) ApplyToMergeFrom(in *MergeFromOptions) { + in.OptimisticLock = true +} + +// MergeFromOption is some configuration that modifies options for a merge-from patch data. +type MergeFromOption interface { + // ApplyToMergeFrom applies this configuration to the given patch options. + ApplyToMergeFrom(*MergeFromOptions) +} + +// MergeFromOptions contains options to generate a merge-from patch data. +type MergeFromOptions struct { + // OptimisticLock, when true, includes `metadata.resourceVersion` into the final + // patch data. If the `resourceVersion` field doesn't match what's stored, + // the operation results in a conflict and clients will need to try again. 
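A short, hedged sketch of the optimistic-lock flow described above; `c` is assumed to be a client.Client and the Deployment mutation is only illustrative:

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func scaleWithOptimisticLock(ctx context.Context, c client.Client, deploy *appsv1.Deployment) error {
	base := deploy.DeepCopy()
	replicas := int32(3)
	deploy.Spec.Replicas = &replicas

	// The generated merge patch carries metadata.resourceVersion, so the request
	// fails with a conflict if the object changed since it was read.
	patch := client.MergeFromWithOptions(base, client.MergeFromWithOptimisticLock{})
	return c.Patch(ctx, deploy, patch)
}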
+ OptimisticLock bool +} + +type mergeFromPatch struct { + patchType types.PatchType + createPatch func(originalJSON, modifiedJSON []byte, dataStruct interface{}) ([]byte, error) + from Object + opts MergeFromOptions +} + +// Type implements Patch. +func (s *mergeFromPatch) Type() types.PatchType { + return s.patchType +} + +// Data implements Patch. +func (s *mergeFromPatch) Data(obj Object) ([]byte, error) { + original := s.from + modified := obj + + if s.opts.OptimisticLock { + version := original.GetResourceVersion() + if len(version) == 0 { + return nil, fmt.Errorf("cannot use OptimisticLock, object %q does not have any resource version we can use", original) + } + + original = original.DeepCopyObject().(Object) + original.SetResourceVersion("") + + modified = modified.DeepCopyObject().(Object) + modified.SetResourceVersion(version) + } + + originalJSON, err := json.Marshal(original) + if err != nil { + return nil, err + } + + modifiedJSON, err := json.Marshal(modified) + if err != nil { + return nil, err + } + + data, err := s.createPatch(originalJSON, modifiedJSON, obj) + if err != nil { + return nil, err + } + + return data, nil +} + +func createMergePatch(originalJSON, modifiedJSON []byte, _ interface{}) ([]byte, error) { + return jsonpatch.CreateMergePatch(originalJSON, modifiedJSON) +} + +func createStrategicMergePatch(originalJSON, modifiedJSON []byte, dataStruct interface{}) ([]byte, error) { + return strategicpatch.CreateTwoWayMergePatch(originalJSON, modifiedJSON, dataStruct) +} + +// MergeFrom creates a Patch that patches using the merge-patch strategy with the given object as base. +// The difference between MergeFrom and StrategicMergeFrom lays in the handling of modified list fields. +// When using MergeFrom, existing lists will be completely replaced by new lists. +// When using StrategicMergeFrom, the list field's `patchStrategy` is respected if specified in the API type, +// e.g. the existing list is not replaced completely but rather merged with the new one using the list's `patchMergeKey`. +// See https://kubernetes.io/docs/tasks/manage-kubernetes-objects/update-api-object-kubectl-patch/ for more details on +// the difference between merge-patch and strategic-merge-patch. +func MergeFrom(obj Object) Patch { + return &mergeFromPatch{patchType: types.MergePatchType, createPatch: createMergePatch, from: obj} +} + +// MergeFromWithOptions creates a Patch that patches using the merge-patch strategy with the given object as base. +// See MergeFrom for more details. +func MergeFromWithOptions(obj Object, opts ...MergeFromOption) Patch { + options := &MergeFromOptions{} + for _, opt := range opts { + opt.ApplyToMergeFrom(options) + } + return &mergeFromPatch{patchType: types.MergePatchType, createPatch: createMergePatch, from: obj, opts: *options} +} + +// StrategicMergeFrom creates a Patch that patches using the strategic-merge-patch strategy with the given object as base. +// The difference between MergeFrom and StrategicMergeFrom lays in the handling of modified list fields. +// When using MergeFrom, existing lists will be completely replaced by new lists. +// When using StrategicMergeFrom, the list field's `patchStrategy` is respected if specified in the API type, +// e.g. the existing list is not replaced completely but rather merged with the new one using the list's `patchMergeKey`. +// See https://kubernetes.io/docs/tasks/manage-kubernetes-objects/update-api-object-kubectl-patch/ for more details on +// the difference between merge-patch and strategic-merge-patch. 
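To make the list-handling difference concrete, a hedged example against a built-in type (the image is a placeholder): Deployment declares patchStrategy/patchMergeKey on its container list, so a strategic merge patch only carries the changed entry, whereas a plain MergeFrom patch would replace the whole list.

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func bumpImage(ctx context.Context, c client.Client, deploy *appsv1.Deployment) error {
	base := deploy.DeepCopy()
	// Assumes the Deployment has at least one container.
	deploy.Spec.Template.Spec.Containers[0].Image = "quay.io/example/app:v2"
	return c.Patch(ctx, deploy, client.StrategicMergeFrom(base))
}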
+// Please note, that CRDs don't support strategic-merge-patch, see +// https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#advanced-features-and-flexibility +func StrategicMergeFrom(obj Object, opts ...MergeFromOption) Patch { + options := &MergeFromOptions{} + for _, opt := range opts { + opt.ApplyToMergeFrom(options) + } + return &mergeFromPatch{patchType: types.StrategicMergePatchType, createPatch: createStrategicMergePatch, from: obj, opts: *options} +} + +// mergePatch uses a raw merge strategy to patch the object. +type mergePatch struct{} + +// Type implements Patch. +func (p mergePatch) Type() types.PatchType { + return types.MergePatchType +} + +// Data implements Patch. +func (p mergePatch) Data(obj Object) ([]byte, error) { + // NB(directxman12): we might technically want to be using an actual encoder + // here (in case some more performant encoder is introduced) but this is + // correct and sufficient for our uses (it's what the JSON serializer in + // client-go does, more-or-less). + return json.Marshal(obj) +} + +// applyPatch uses server-side apply to patch the object. +type applyPatch struct{} + +// Type implements Patch. +func (p applyPatch) Type() types.PatchType { + return types.ApplyPatchType +} + +// Data implements Patch. +func (p applyPatch) Data(obj Object) ([]byte, error) { + // NB(directxman12): we might technically want to be using an actual encoder + // here (in case some more performant encoder is introduced) but this is + // correct and sufficient for our uses (it's what the JSON serializer in + // client-go does, more-or-less). + return json.Marshal(obj) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go new file mode 100644 index 000000000..bf4b861f3 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go @@ -0,0 +1,141 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + "strings" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" +) + +// NewDelegatingClientInput encapsulates the input parameters to create a new delegating client. +type NewDelegatingClientInput struct { + CacheReader Reader + Client Client + UncachedObjects []Object + CacheUnstructured bool +} + +// NewDelegatingClient creates a new delegating client. +// +// A delegating client forms a Client by composing separate reader, writer and +// statusclient interfaces. This way, you can have an Client that reads from a +// cache and writes to the API server. 
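A minimal sketch of wiring the delegating client by hand (cluster.New further down in this change does this automatically); the cache and live client are assumed to already exist, and excluding Secrets from the cache is only an example:

import (
	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func newCachedClient(ca cache.Cache, live client.Client) (client.Client, error) {
	return client.NewDelegatingClient(client.NewDelegatingClientInput{
		CacheReader:     ca,   // reads are served from the informer cache
		Client:          live, // writes and uncached reads go to the API server
		UncachedObjects: []client.Object{&corev1.Secret{}},
	})
}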
+func NewDelegatingClient(in NewDelegatingClientInput) (Client, error) { + uncachedGVKs := map[schema.GroupVersionKind]struct{}{} + for _, obj := range in.UncachedObjects { + gvk, err := apiutil.GVKForObject(obj, in.Client.Scheme()) + if err != nil { + return nil, err + } + uncachedGVKs[gvk] = struct{}{} + } + + return &delegatingClient{ + scheme: in.Client.Scheme(), + mapper: in.Client.RESTMapper(), + Reader: &delegatingReader{ + CacheReader: in.CacheReader, + ClientReader: in.Client, + scheme: in.Client.Scheme(), + uncachedGVKs: uncachedGVKs, + cacheUnstructured: in.CacheUnstructured, + }, + Writer: in.Client, + StatusClient: in.Client, + }, nil +} + +type delegatingClient struct { + Reader + Writer + StatusClient + + scheme *runtime.Scheme + mapper meta.RESTMapper +} + +// Scheme returns the scheme this client is using. +func (d *delegatingClient) Scheme() *runtime.Scheme { + return d.scheme +} + +// RESTMapper returns the rest mapper this client is using. +func (d *delegatingClient) RESTMapper() meta.RESTMapper { + return d.mapper +} + +// delegatingReader forms a Reader that will cause Get and List requests for +// unstructured types to use the ClientReader while requests for any other type +// of object with use the CacheReader. This avoids accidentally caching the +// entire cluster in the common case of loading arbitrary unstructured objects +// (e.g. from OwnerReferences). +type delegatingReader struct { + CacheReader Reader + ClientReader Reader + + uncachedGVKs map[schema.GroupVersionKind]struct{} + scheme *runtime.Scheme + cacheUnstructured bool +} + +func (d *delegatingReader) shouldBypassCache(obj runtime.Object) (bool, error) { + gvk, err := apiutil.GVKForObject(obj, d.scheme) + if err != nil { + return false, err + } + // TODO: this is producing unsafe guesses that don't actually work, + // but it matches ~99% of the cases out there. + if meta.IsListType(obj) { + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") + } + if _, isUncached := d.uncachedGVKs[gvk]; isUncached { + return true, nil + } + if !d.cacheUnstructured { + _, isUnstructured := obj.(*unstructured.Unstructured) + _, isUnstructuredList := obj.(*unstructured.UnstructuredList) + return isUnstructured || isUnstructuredList, nil + } + return false, nil +} + +// Get retrieves an obj for a given object key from the Kubernetes Cluster. +func (d *delegatingReader) Get(ctx context.Context, key ObjectKey, obj Object) error { + if isUncached, err := d.shouldBypassCache(obj); err != nil { + return err + } else if isUncached { + return d.ClientReader.Get(ctx, key, obj) + } + return d.CacheReader.Get(ctx, key, obj) +} + +// List retrieves list of objects for a given namespace and list options. +func (d *delegatingReader) List(ctx context.Context, list ObjectList, opts ...ListOption) error { + if isUncached, err := d.shouldBypassCache(list); err != nil { + return err + } else if isUncached { + return d.ClientReader.List(ctx, list, opts...) + } + return d.CacheReader.List(ctx, list, opts...) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go new file mode 100644 index 000000000..dde7b21f2 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go @@ -0,0 +1,205 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime" +) + +var _ Reader = &typedClient{} +var _ Writer = &typedClient{} +var _ StatusWriter = &typedClient{} + +// client is a client.Client that reads and writes directly from/to an API server. It lazily initializes +// new clients at the time they are used, and caches the client. +type typedClient struct { + cache *clientCache + paramCodec runtime.ParameterCodec +} + +// Create implements client.Client. +func (c *typedClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + createOpts := &CreateOptions{} + createOpts.ApplyOptions(opts) + return o.Post(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Body(obj). + VersionedParams(createOpts.AsCreateOptions(), c.paramCodec). + Do(ctx). + Into(obj) +} + +// Update implements client.Client. +func (c *typedClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + updateOpts := &UpdateOptions{} + updateOpts.ApplyOptions(opts) + return o.Put(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + Body(obj). + VersionedParams(updateOpts.AsUpdateOptions(), c.paramCodec). + Do(ctx). + Into(obj) +} + +// Delete implements client.Client. +func (c *typedClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + deleteOpts := DeleteOptions{} + deleteOpts.ApplyOptions(opts) + + return o.Delete(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + Body(deleteOpts.AsDeleteOptions()). + Do(ctx). + Error() +} + +// DeleteAllOf implements client.Client. +func (c *typedClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + deleteAllOfOpts := DeleteAllOfOptions{} + deleteAllOfOpts.ApplyOptions(opts) + + return o.Delete(). + NamespaceIfScoped(deleteAllOfOpts.ListOptions.Namespace, o.isNamespaced()). + Resource(o.resource()). + VersionedParams(deleteAllOfOpts.AsListOptions(), c.paramCodec). + Body(deleteAllOfOpts.AsDeleteOptions()). + Do(ctx). + Error() +} + +// Patch implements client.Client. +func (c *typedClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + data, err := patch.Data(obj) + if err != nil { + return err + } + + patchOpts := &PatchOptions{} + return o.Patch(patch.Type()). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + VersionedParams(patchOpts.ApplyOptions(opts).AsPatchOptions(), c.paramCodec). + Body(data). + Do(ctx). + Into(obj) +} + +// Get implements client.Client. 
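Through the public client, DeleteAllOf takes the same combined list and delete options; a hedged example removing labelled Pods from one namespace (namespace and labels are placeholders):

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func cleanupTestPods(ctx context.Context, c client.Client) error {
	return c.DeleteAllOf(ctx, &corev1.Pod{},
		client.InNamespace("ci-workloads"),
		client.MatchingLabels{"job": "smoke-test"},
		client.PropagationPolicy(metav1.DeletePropagationBackground),
	)
}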
+func (c *typedClient) Get(ctx context.Context, key ObjectKey, obj Object) error { + r, err := c.cache.getResource(obj) + if err != nil { + return err + } + return r.Get(). + NamespaceIfScoped(key.Namespace, r.isNamespaced()). + Resource(r.resource()). + Name(key.Name).Do(ctx).Into(obj) +} + +// List implements client.Client. +func (c *typedClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { + r, err := c.cache.getResource(obj) + if err != nil { + return err + } + listOpts := ListOptions{} + listOpts.ApplyOptions(opts) + return r.Get(). + NamespaceIfScoped(listOpts.Namespace, r.isNamespaced()). + Resource(r.resource()). + VersionedParams(listOpts.AsListOptions(), c.paramCodec). + Do(ctx). + Into(obj) +} + +// UpdateStatus used by StatusWriter to write status. +func (c *typedClient) UpdateStatus(ctx context.Context, obj Object, opts ...UpdateOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + // TODO(droot): examine the returned error and check if it error needs to be + // wrapped to improve the UX ? + // It will be nice to receive an error saying the object doesn't implement + // status subresource and check CRD definition + return o.Put(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + SubResource("status"). + Body(obj). + VersionedParams((&UpdateOptions{}).ApplyOptions(opts).AsUpdateOptions(), c.paramCodec). + Do(ctx). + Into(obj) +} + +// PatchStatus used by StatusWriter to write status. +func (c *typedClient) PatchStatus(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + data, err := patch.Data(obj) + if err != nil { + return err + } + + patchOpts := &PatchOptions{} + return o.Patch(patch.Type()). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + SubResource("status"). + Body(data). + VersionedParams(patchOpts.ApplyOptions(opts).AsPatchOptions(), c.paramCodec). + Do(ctx). + Into(obj) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go new file mode 100644 index 000000000..819527e70 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go @@ -0,0 +1,271 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" +) + +var _ Reader = &unstructuredClient{} +var _ Writer = &unstructuredClient{} +var _ StatusWriter = &unstructuredClient{} + +// client is a client.Client that reads and writes directly from/to an API server. It lazily initializes +// new clients at the time they are used, and caches the client. 
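UpdateStatus and PatchStatus back the Status() writer on the public client. A hedged fragment, where `machine` stands in for any object whose CRD has the /status subresource enabled (without it the server rejects the request, as the TODO above notes) and `c` is a client.Client:

	machine.Status.Phase = "Running" // hypothetical status field
	if err := c.Status().Update(ctx, machine); err != nil {
		return err
	}
	// A diff-style alternative that tolerates concurrent status writers:
	//   base := machine.DeepCopy(); ...mutate machine...
	//   c.Status().Patch(ctx, machine, client.MergeFrom(base))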
+type unstructuredClient struct { + cache *clientCache + paramCodec runtime.ParameterCodec +} + +// Create implements client.Client. +func (uc *unstructuredClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { + u, ok := obj.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + gvk := u.GroupVersionKind() + + o, err := uc.cache.getObjMeta(obj) + if err != nil { + return err + } + + createOpts := &CreateOptions{} + createOpts.ApplyOptions(opts) + result := o.Post(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Body(obj). + VersionedParams(createOpts.AsCreateOptions(), uc.paramCodec). + Do(ctx). + Into(obj) + + u.SetGroupVersionKind(gvk) + return result +} + +// Update implements client.Client. +func (uc *unstructuredClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + u, ok := obj.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + gvk := u.GroupVersionKind() + + o, err := uc.cache.getObjMeta(obj) + if err != nil { + return err + } + + updateOpts := UpdateOptions{} + updateOpts.ApplyOptions(opts) + result := o.Put(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + Body(obj). + VersionedParams(updateOpts.AsUpdateOptions(), uc.paramCodec). + Do(ctx). + Into(obj) + + u.SetGroupVersionKind(gvk) + return result +} + +// Delete implements client.Client. +func (uc *unstructuredClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { + if _, ok := obj.(*unstructured.Unstructured); !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + o, err := uc.cache.getObjMeta(obj) + if err != nil { + return err + } + + deleteOpts := DeleteOptions{} + deleteOpts.ApplyOptions(opts) + return o.Delete(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + Body(deleteOpts.AsDeleteOptions()). + Do(ctx). + Error() +} + +// DeleteAllOf implements client.Client. +func (uc *unstructuredClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { + if _, ok := obj.(*unstructured.Unstructured); !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + o, err := uc.cache.getObjMeta(obj) + if err != nil { + return err + } + + deleteAllOfOpts := DeleteAllOfOptions{} + deleteAllOfOpts.ApplyOptions(opts) + return o.Delete(). + NamespaceIfScoped(deleteAllOfOpts.ListOptions.Namespace, o.isNamespaced()). + Resource(o.resource()). + VersionedParams(deleteAllOfOpts.AsListOptions(), uc.paramCodec). + Body(deleteAllOfOpts.AsDeleteOptions()). + Do(ctx). + Error() +} + +// Patch implements client.Client. +func (uc *unstructuredClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + if _, ok := obj.(*unstructured.Unstructured); !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + o, err := uc.cache.getObjMeta(obj) + if err != nil { + return err + } + + data, err := patch.Data(obj) + if err != nil { + return err + } + + patchOpts := &PatchOptions{} + return o.Patch(patch.Type()). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + VersionedParams(patchOpts.ApplyOptions(opts).AsPatchOptions(), uc.paramCodec). + Body(data). + Do(ctx). 
+ Into(obj) +} + +// Get implements client.Client. +func (uc *unstructuredClient) Get(ctx context.Context, key ObjectKey, obj Object) error { + u, ok := obj.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + gvk := u.GroupVersionKind() + + r, err := uc.cache.getResource(obj) + if err != nil { + return err + } + + result := r.Get(). + NamespaceIfScoped(key.Namespace, r.isNamespaced()). + Resource(r.resource()). + Name(key.Name). + Do(ctx). + Into(obj) + + u.SetGroupVersionKind(gvk) + + return result +} + +// List implements client.Client. +func (uc *unstructuredClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { + u, ok := obj.(*unstructured.UnstructuredList) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + gvk := u.GroupVersionKind() + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") + + listOpts := ListOptions{} + listOpts.ApplyOptions(opts) + + r, err := uc.cache.getResource(obj) + if err != nil { + return err + } + + return r.Get(). + NamespaceIfScoped(listOpts.Namespace, r.isNamespaced()). + Resource(r.resource()). + VersionedParams(listOpts.AsListOptions(), uc.paramCodec). + Do(ctx). + Into(obj) +} + +func (uc *unstructuredClient) UpdateStatus(ctx context.Context, obj Object, opts ...UpdateOption) error { + if _, ok := obj.(*unstructured.Unstructured); !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + o, err := uc.cache.getObjMeta(obj) + if err != nil { + return err + } + + return o.Put(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + SubResource("status"). + Body(obj). + VersionedParams((&UpdateOptions{}).ApplyOptions(opts).AsUpdateOptions(), uc.paramCodec). + Do(ctx). + Into(obj) +} + +func (uc *unstructuredClient) PatchStatus(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + u, ok := obj.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + gvk := u.GroupVersionKind() + + o, err := uc.cache.getObjMeta(obj) + if err != nil { + return err + } + + data, err := patch.Data(obj) + if err != nil { + return err + } + + patchOpts := &PatchOptions{} + result := o.Patch(patch.Type()). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + SubResource("status"). + Body(data). + VersionedParams(patchOpts.ApplyOptions(opts).AsPatchOptions(), uc.paramCodec). + Do(ctx). + Into(u) + + u.SetGroupVersionKind(gvk) + return result +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/watch.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/watch.go new file mode 100644 index 000000000..70490664b --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/watch.go @@ -0,0 +1,114 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package client + +import ( + "context" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/rest" +) + +// NewWithWatch returns a new WithWatch. +func NewWithWatch(config *rest.Config, options Options) (WithWatch, error) { + client, err := newClient(config, options) + if err != nil { + return nil, err + } + dynamicClient, err := dynamic.NewForConfig(config) + if err != nil { + return nil, err + } + return &watchingClient{client: client, dynamic: dynamicClient}, nil +} + +type watchingClient struct { + *client + dynamic dynamic.Interface +} + +func (w *watchingClient) Watch(ctx context.Context, list ObjectList, opts ...ListOption) (watch.Interface, error) { + switch l := list.(type) { + case *unstructured.UnstructuredList: + return w.unstructuredWatch(ctx, l, opts...) + case *metav1.PartialObjectMetadataList: + return w.metadataWatch(ctx, l, opts...) + default: + return w.typedWatch(ctx, l, opts...) + } +} + +func (w *watchingClient) listOpts(opts ...ListOption) ListOptions { + listOpts := ListOptions{} + listOpts.ApplyOptions(opts) + if listOpts.Raw == nil { + listOpts.Raw = &metav1.ListOptions{} + } + listOpts.Raw.Watch = true + + return listOpts +} + +func (w *watchingClient) metadataWatch(ctx context.Context, obj *metav1.PartialObjectMetadataList, opts ...ListOption) (watch.Interface, error) { + gvk := obj.GroupVersionKind() + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") + + listOpts := w.listOpts(opts...) + + resInt, err := w.client.metadataClient.getResourceInterface(gvk, listOpts.Namespace) + if err != nil { + return nil, err + } + + return resInt.Watch(ctx, *listOpts.AsListOptions()) +} + +func (w *watchingClient) unstructuredWatch(ctx context.Context, obj *unstructured.UnstructuredList, opts ...ListOption) (watch.Interface, error) { + gvk := obj.GroupVersionKind() + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") + + r, err := w.client.unstructuredClient.cache.getResource(obj) + if err != nil { + return nil, err + } + + listOpts := w.listOpts(opts...) + + if listOpts.Namespace != "" && r.isNamespaced() { + return w.dynamic.Resource(r.mapping.Resource).Namespace(listOpts.Namespace).Watch(ctx, *listOpts.AsListOptions()) + } + return w.dynamic.Resource(r.mapping.Resource).Watch(ctx, *listOpts.AsListOptions()) +} + +func (w *watchingClient) typedWatch(ctx context.Context, obj ObjectList, opts ...ListOption) (watch.Interface, error) { + r, err := w.client.typedClient.cache.getResource(obj) + if err != nil { + return nil, err + } + + listOpts := w.listOpts(opts...) + + return r.Get(). + NamespaceIfScoped(listOpts.Namespace, r.isNamespaced()). + Resource(r.resource()). + VersionedParams(listOpts.AsListOptions(), w.client.typedClient.paramCodec). + Watch(ctx) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/cluster.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/cluster.go new file mode 100644 index 000000000..4b8ee8e7c --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/cluster.go @@ -0,0 +1,270 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cluster + +import ( + "context" + "errors" + "time" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" + + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + intrec "sigs.k8s.io/controller-runtime/pkg/internal/recorder" +) + +// Cluster provides various methods to interact with a cluster. +type Cluster interface { + // SetFields will set any dependencies on an object for which the object has implemented the inject + // interface - e.g. inject.Client. + // Deprecated: use the equivalent Options field to set a field. This method will be removed in v0.10. + SetFields(interface{}) error + + // GetConfig returns an initialized Config + GetConfig() *rest.Config + + // GetScheme returns an initialized Scheme + GetScheme() *runtime.Scheme + + // GetClient returns a client configured with the Config. This client may + // not be a fully "direct" client -- it may read from a cache, for + // instance. See Options.NewClient for more information on how the default + // implementation works. + GetClient() client.Client + + // GetFieldIndexer returns a client.FieldIndexer configured with the client + GetFieldIndexer() client.FieldIndexer + + // GetCache returns a cache.Cache + GetCache() cache.Cache + + // GetEventRecorderFor returns a new EventRecorder for the provided name + GetEventRecorderFor(name string) record.EventRecorder + + // GetRESTMapper returns a RESTMapper + GetRESTMapper() meta.RESTMapper + + // GetAPIReader returns a reader that will be configured to use the API server. + // This should be used sparingly and only when the client does not fit your + // use case. + GetAPIReader() client.Reader + + // Start starts the cluster + Start(ctx context.Context) error +} + +// Options are the possible options that can be configured for a Cluster. +type Options struct { + // Scheme is the scheme used to resolve runtime.Objects to GroupVersionKinds / Resources + // Defaults to the kubernetes/client-go scheme.Scheme, but it's almost always better + // idea to pass your own scheme in. See the documentation in pkg/scheme for more information. + Scheme *runtime.Scheme + + // MapperProvider provides the rest mapper used to map go types to Kubernetes APIs + MapperProvider func(c *rest.Config) (meta.RESTMapper, error) + + // Logger is the logger that should be used by this Cluster. + // If none is set, it defaults to log.Log global logger. + Logger logr.Logger + + // SyncPeriod determines the minimum frequency at which watched resources are + // reconciled. A lower period will correct entropy more quickly, but reduce + // responsiveness to change if there are many watched resources. Change this + // value only if you know what you are doing. Defaults to 10 hours if unset. 
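GetFieldIndexer above is what makes the MatchingFields list option work against the cache; a hedged sketch of registering an index at startup (the field name and extractor are illustrative):

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/cluster"
)

func addPodNodeIndex(ctx context.Context, cl cluster.Cluster) error {
	return cl.GetFieldIndexer().IndexField(ctx, &corev1.Pod{}, "spec.nodeName",
		func(o client.Object) []string {
			return []string{o.(*corev1.Pod).Spec.NodeName}
		})
}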
+ // there will a 10 percent jitter between the SyncPeriod of all controllers + // so that all controllers will not send list requests simultaneously. + SyncPeriod *time.Duration + + // Namespace if specified restricts the manager's cache to watch objects in + // the desired namespace Defaults to all namespaces + // + // Note: If a namespace is specified, controllers can still Watch for a + // cluster-scoped resource (e.g Node). For namespaced resources the cache + // will only hold objects from the desired namespace. + Namespace string + + // NewCache is the function that will create the cache to be used + // by the manager. If not set this will use the default new cache function. + NewCache cache.NewCacheFunc + + // NewClient is the func that creates the client to be used by the manager. + // If not set this will create the default DelegatingClient that will + // use the cache for reads and the client for writes. + NewClient NewClientFunc + + // ClientDisableCacheFor tells the client that, if any cache is used, to bypass it + // for the given objects. + ClientDisableCacheFor []client.Object + + // DryRunClient specifies whether the client should be configured to enforce + // dryRun mode. + DryRunClient bool + + // EventBroadcaster records Events emitted by the manager and sends them to the Kubernetes API + // Use this to customize the event correlator and spam filter + // + // Deprecated: using this may cause goroutine leaks if the lifetime of your manager or controllers + // is shorter than the lifetime of your process. + EventBroadcaster record.EventBroadcaster + + // makeBroadcaster allows deferring the creation of the broadcaster to + // avoid leaking goroutines if we never call Start on this manager. It also + // returns whether or not this is a "owned" broadcaster, and as such should be + // stopped with the manager. + makeBroadcaster intrec.EventBroadcasterProducer + + // Dependency injection for testing + newRecorderProvider func(config *rest.Config, scheme *runtime.Scheme, logger logr.Logger, makeBroadcaster intrec.EventBroadcasterProducer) (*intrec.Provider, error) +} + +// Option can be used to manipulate Options. +type Option func(*Options) + +// New constructs a brand new cluster. +func New(config *rest.Config, opts ...Option) (Cluster, error) { + if config == nil { + return nil, errors.New("must specify Config") + } + + options := Options{} + for _, opt := range opts { + opt(&options) + } + options = setOptionsDefaults(options) + + // Create the mapper provider + mapper, err := options.MapperProvider(config) + if err != nil { + options.Logger.Error(err, "Failed to get API Group-Resources") + return nil, err + } + + // Create the cache for the cached read client and registering informers + cache, err := options.NewCache(config, cache.Options{Scheme: options.Scheme, Mapper: mapper, Resync: options.SyncPeriod, Namespace: options.Namespace}) + if err != nil { + return nil, err + } + + clientOptions := client.Options{Scheme: options.Scheme, Mapper: mapper} + + apiReader, err := client.New(config, clientOptions) + if err != nil { + return nil, err + } + + writeObj, err := options.NewClient(cache, config, clientOptions, options.ClientDisableCacheFor...) + if err != nil { + return nil, err + } + + if options.DryRunClient { + writeObj = client.NewDryRunClient(writeObj) + } + + // Create the recorder provider to inject event recorders for the components. 
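Constructing a Cluster with the functional options above might look like this minimal, hedged sketch (the namespace restriction and resync period are placeholders):

import (
	"time"

	"k8s.io/client-go/kubernetes/scheme"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/cluster"
)

func newCluster() (cluster.Cluster, error) {
	cfg, err := ctrl.GetConfig() // in-cluster config or kubeconfig discovery
	if err != nil {
		return nil, err
	}
	resync := 10 * time.Hour
	return cluster.New(cfg, func(o *cluster.Options) {
		o.Scheme = scheme.Scheme              // the default; shown for clarity
		o.Namespace = "openshift-machine-api" // hypothetical: restrict the cache
		o.SyncPeriod = &resync
	})
}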
+ // TODO(directxman12): the log for the event provider should have a context (name, tags, etc) specific + // to the particular controller that it's being injected into, rather than a generic one like is here. + recorderProvider, err := options.newRecorderProvider(config, options.Scheme, options.Logger.WithName("events"), options.makeBroadcaster) + if err != nil { + return nil, err + } + + return &cluster{ + config: config, + scheme: options.Scheme, + cache: cache, + fieldIndexes: cache, + client: writeObj, + apiReader: apiReader, + recorderProvider: recorderProvider, + mapper: mapper, + logger: options.Logger, + }, nil +} + +// setOptionsDefaults set default values for Options fields. +func setOptionsDefaults(options Options) Options { + // Use the Kubernetes client-go scheme if none is specified + if options.Scheme == nil { + options.Scheme = scheme.Scheme + } + + if options.MapperProvider == nil { + options.MapperProvider = func(c *rest.Config) (meta.RESTMapper, error) { + return apiutil.NewDynamicRESTMapper(c) + } + } + + // Allow users to define how to create a new client + if options.NewClient == nil { + options.NewClient = DefaultNewClient + } + + // Allow newCache to be mocked + if options.NewCache == nil { + options.NewCache = cache.New + } + + // Allow newRecorderProvider to be mocked + if options.newRecorderProvider == nil { + options.newRecorderProvider = intrec.NewProvider + } + + // This is duplicated with pkg/manager, we need it here to provide + // the user with an EventBroadcaster and there for the Leader election + if options.EventBroadcaster == nil { + // defer initialization to avoid leaking by default + options.makeBroadcaster = func() (record.EventBroadcaster, bool) { + return record.NewBroadcaster(), true + } + } else { + options.makeBroadcaster = func() (record.EventBroadcaster, bool) { + return options.EventBroadcaster, false + } + } + + if options.Logger.GetSink() == nil { + options.Logger = logf.RuntimeLog.WithName("cluster") + } + + return options +} + +// NewClientFunc allows a user to define how to create a client. +type NewClientFunc func(cache cache.Cache, config *rest.Config, options client.Options, uncachedObjects ...client.Object) (client.Client, error) + +// DefaultNewClient creates the default caching client. +func DefaultNewClient(cache cache.Cache, config *rest.Config, options client.Options, uncachedObjects ...client.Object) (client.Client, error) { + c, err := client.New(config, options) + if err != nil { + return nil, err + } + + return client.NewDelegatingClient(client.NewDelegatingClientInput{ + CacheReader: cache, + Client: c, + UncachedObjects: uncachedObjects, + }) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/internal.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/internal.go new file mode 100644 index 000000000..125e1d144 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/internal.go @@ -0,0 +1,128 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cluster + +import ( + "context" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record" + + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + intrec "sigs.k8s.io/controller-runtime/pkg/internal/recorder" + "sigs.k8s.io/controller-runtime/pkg/runtime/inject" +) + +type cluster struct { + // config is the rest.config used to talk to the apiserver. Required. + config *rest.Config + + // scheme is the scheme injected into Controllers, EventHandlers, Sources and Predicates. Defaults + // to scheme.scheme. + scheme *runtime.Scheme + + cache cache.Cache + + // TODO(directxman12): Provide an escape hatch to get individual indexers + // client is the client injected into Controllers (and EventHandlers, Sources and Predicates). + client client.Client + + // apiReader is the reader that will make requests to the api server and not the cache. + apiReader client.Reader + + // fieldIndexes knows how to add field indexes over the Cache used by this controller, + // which can later be consumed via field selectors from the injected client. + fieldIndexes client.FieldIndexer + + // recorderProvider is used to generate event recorders that will be injected into Controllers + // (and EventHandlers, Sources and Predicates). + recorderProvider *intrec.Provider + + // mapper is used to map resources to kind, and map kind and version. + mapper meta.RESTMapper + + // Logger is the logger that should be used by this manager. + // If none is set, it defaults to log.Log global logger. + logger logr.Logger +} + +func (c *cluster) SetFields(i interface{}) error { + if _, err := inject.ConfigInto(c.config, i); err != nil { + return err + } + if _, err := inject.ClientInto(c.client, i); err != nil { + return err + } + if _, err := inject.APIReaderInto(c.apiReader, i); err != nil { + return err + } + if _, err := inject.SchemeInto(c.scheme, i); err != nil { + return err + } + if _, err := inject.CacheInto(c.cache, i); err != nil { + return err + } + if _, err := inject.MapperInto(c.mapper, i); err != nil { + return err + } + return nil +} + +func (c *cluster) GetConfig() *rest.Config { + return c.config +} + +func (c *cluster) GetClient() client.Client { + return c.client +} + +func (c *cluster) GetScheme() *runtime.Scheme { + return c.scheme +} + +func (c *cluster) GetFieldIndexer() client.FieldIndexer { + return c.fieldIndexes +} + +func (c *cluster) GetCache() cache.Cache { + return c.cache +} + +func (c *cluster) GetEventRecorderFor(name string) record.EventRecorder { + return c.recorderProvider.GetEventRecorderFor(name) +} + +func (c *cluster) GetRESTMapper() meta.RESTMapper { + return c.mapper +} + +func (c *cluster) GetAPIReader() client.Reader { + return c.apiReader +} + +func (c *cluster) GetLogger() logr.Logger { + return c.logger +} + +func (c *cluster) Start(ctx context.Context) error { + defer c.recorderProvider.Stop(ctx) + return c.cache.Start(ctx) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/config/config.go b/vendor/sigs.k8s.io/controller-runtime/pkg/config/config.go new file mode 100644 index 000000000..517b172e5 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/config/config.go @@ -0,0 +1,112 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "fmt" + "os" + "sync" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "sigs.k8s.io/controller-runtime/pkg/config/v1alpha1" +) + +// ControllerManagerConfiguration defines the functions necessary to parse a config file +// and to configure the Options struct for the ctrl.Manager. +type ControllerManagerConfiguration interface { + runtime.Object + + // Complete returns the versioned configuration + Complete() (v1alpha1.ControllerManagerConfigurationSpec, error) +} + +// DeferredFileLoader is used to configure the decoder for loading controller +// runtime component config types. +type DeferredFileLoader struct { + ControllerManagerConfiguration + path string + scheme *runtime.Scheme + once sync.Once + err error +} + +// File will set up the deferred file loader for the configuration +// this will also configure the defaults for the loader if nothing is +// +// Defaults: +// Path: "./config.yaml" +// Kind: GenericControllerManagerConfiguration +func File() *DeferredFileLoader { + scheme := runtime.NewScheme() + utilruntime.Must(v1alpha1.AddToScheme(scheme)) + return &DeferredFileLoader{ + path: "./config.yaml", + ControllerManagerConfiguration: &v1alpha1.ControllerManagerConfiguration{}, + scheme: scheme, + } +} + +// Complete will use sync.Once to set the scheme. +func (d *DeferredFileLoader) Complete() (v1alpha1.ControllerManagerConfigurationSpec, error) { + d.once.Do(d.loadFile) + if d.err != nil { + return v1alpha1.ControllerManagerConfigurationSpec{}, d.err + } + return d.ControllerManagerConfiguration.Complete() +} + +// AtPath will set the path to load the file for the decoder. +func (d *DeferredFileLoader) AtPath(path string) *DeferredFileLoader { + d.path = path + return d +} + +// OfKind will set the type to be used for decoding the file into. +func (d *DeferredFileLoader) OfKind(obj ControllerManagerConfiguration) *DeferredFileLoader { + d.ControllerManagerConfiguration = obj + return d +} + +// InjectScheme will configure the scheme to be used for decoding the file. +func (d *DeferredFileLoader) InjectScheme(scheme *runtime.Scheme) error { + d.scheme = scheme + return nil +} + +// loadFile is used from the mutex.Once to load the file. 
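A hedged sketch of the intended call pattern for this loader: point it at a config file and fold the result into the manager options via AndFrom (the path is illustrative):

import (
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/config"
)

func managerOptionsFromFile() (ctrl.Options, error) {
	loader := config.File().AtPath("/etc/manager/controller_manager_config.yaml")
	// AndFrom copies sync period, leader election, cache namespace, metrics,
	// health and webhook settings from the file into the manager options.
	return ctrl.Options{}.AndFrom(loader)
}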
+func (d *DeferredFileLoader) loadFile() { + if d.scheme == nil { + d.err = fmt.Errorf("scheme not supplied to controller configuration loader") + return + } + + content, err := os.ReadFile(d.path) + if err != nil { + d.err = fmt.Errorf("could not read file at %s", d.path) + return + } + + codecs := serializer.NewCodecFactory(d.scheme) + + // Regardless of if the bytes are of any external version, + // it will be read successfully and converted into the internal version + if err = runtime.DecodeInto(codecs.UniversalDecoder(), content, d.ControllerManagerConfiguration); err != nil { + d.err = fmt.Errorf("could not decode file into runtime.Object") + } +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/config/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/config/doc.go new file mode 100644 index 000000000..ebd8243f3 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/config/doc.go @@ -0,0 +1,25 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package config contains functionality for interacting with ComponentConfig +// files +// +// DeferredFileLoader +// +// This uses a deferred file decoding allowing you to chain your configuration +// setup. You can pass this into manager.Options#File and it will load your +// config. +package config diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/doc.go new file mode 100644 index 000000000..1e3adbafb --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1alpha1 provides the ControllerManagerConfiguration used for +// configuring ctrl.Manager +// +kubebuilder:object:generate=true +package v1alpha1 diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/register.go b/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/register.go new file mode 100644 index 000000000..9efdbc066 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/register.go @@ -0,0 +1,37 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects. + GroupVersion = schema.GroupVersion{Group: "controller-runtime.sigs.k8s.io", Version: "v1alpha1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) + +func init() { + SchemeBuilder.Register(&ControllerManagerConfiguration{}) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/types.go b/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/types.go new file mode 100644 index 000000000..e67b62e51 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/types.go @@ -0,0 +1,157 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + configv1alpha1 "k8s.io/component-base/config/v1alpha1" +) + +// ControllerManagerConfigurationSpec defines the desired state of GenericControllerManagerConfiguration. +type ControllerManagerConfigurationSpec struct { + // SyncPeriod determines the minimum frequency at which watched resources are + // reconciled. A lower period will correct entropy more quickly, but reduce + // responsiveness to change if there are many watched resources. Change this + // value only if you know what you are doing. Defaults to 10 hours if unset. + // there will a 10 percent jitter between the SyncPeriod of all controllers + // so that all controllers will not send list requests simultaneously. + // +optional + SyncPeriod *metav1.Duration `json:"syncPeriod,omitempty"` + + // LeaderElection is the LeaderElection config to be used when configuring + // the manager.Manager leader election + // +optional + LeaderElection *configv1alpha1.LeaderElectionConfiguration `json:"leaderElection,omitempty"` + + // CacheNamespace if specified restricts the manager's cache to watch objects in + // the desired namespace Defaults to all namespaces + // + // Note: If a namespace is specified, controllers can still Watch for a + // cluster-scoped resource (e.g Node). For namespaced resources the cache + // will only hold objects from the desired namespace. + // +optional + CacheNamespace string `json:"cacheNamespace,omitempty"` + + // GracefulShutdownTimeout is the duration given to runnable to stop before the manager actually returns on stop. 
+ // To disable graceful shutdown, set to time.Duration(0) + // To use graceful shutdown without timeout, set to a negative duration, e.G. time.Duration(-1) + // The graceful shutdown is skipped for safety reasons in case the leader election lease is lost. + GracefulShutdownTimeout *metav1.Duration `json:"gracefulShutDown,omitempty"` + + // Controller contains global configuration options for controllers + // registered within this manager. + // +optional + Controller *ControllerConfigurationSpec `json:"controller,omitempty"` + + // Metrics contains thw controller metrics configuration + // +optional + Metrics ControllerMetrics `json:"metrics,omitempty"` + + // Health contains the controller health configuration + // +optional + Health ControllerHealth `json:"health,omitempty"` + + // Webhook contains the controllers webhook configuration + // +optional + Webhook ControllerWebhook `json:"webhook,omitempty"` +} + +// ControllerConfigurationSpec defines the global configuration for +// controllers registered with the manager. +type ControllerConfigurationSpec struct { + // GroupKindConcurrency is a map from a Kind to the number of concurrent reconciliation + // allowed for that controller. + // + // When a controller is registered within this manager using the builder utilities, + // users have to specify the type the controller reconciles in the For(...) call. + // If the object's kind passed matches one of the keys in this map, the concurrency + // for that controller is set to the number specified. + // + // The key is expected to be consistent in form with GroupKind.String(), + // e.g. ReplicaSet in apps group (regardless of version) would be `ReplicaSet.apps`. + // + // +optional + GroupKindConcurrency map[string]int `json:"groupKindConcurrency,omitempty"` + + // CacheSyncTimeout refers to the time limit set to wait for syncing caches. + // Defaults to 2 minutes if not set. + // +optional + CacheSyncTimeout *time.Duration `json:"cacheSyncTimeout,omitempty"` +} + +// ControllerMetrics defines the metrics configs. +type ControllerMetrics struct { + // BindAddress is the TCP address that the controller should bind to + // for serving prometheus metrics. + // It can be set to "0" to disable the metrics serving. + // +optional + BindAddress string `json:"bindAddress,omitempty"` +} + +// ControllerHealth defines the health configs. +type ControllerHealth struct { + // HealthProbeBindAddress is the TCP address that the controller should bind to + // for serving health probes + // +optional + HealthProbeBindAddress string `json:"healthProbeBindAddress,omitempty"` + + // ReadinessEndpointName, defaults to "readyz" + // +optional + ReadinessEndpointName string `json:"readinessEndpointName,omitempty"` + + // LivenessEndpointName, defaults to "healthz" + // +optional + LivenessEndpointName string `json:"livenessEndpointName,omitempty"` +} + +// ControllerWebhook defines the webhook server for the controller. +type ControllerWebhook struct { + // Port is the port that the webhook server serves at. + // It is used to set webhook.Server.Port. + // +optional + Port *int `json:"port,omitempty"` + + // Host is the hostname that the webhook server binds to. + // It is used to set webhook.Server.Host. + // +optional + Host string `json:"host,omitempty"` + + // CertDir is the directory that contains the server key and certificate. + // if not set, webhook server would look up the server key and certificate in + // {TempDir}/k8s-webhook-server/serving-certs. 
The server key and certificate + // must be named tls.key and tls.crt, respectively. + // +optional + CertDir string `json:"certDir,omitempty"` +} + +// +kubebuilder:object:root=true + +// ControllerManagerConfiguration is the Schema for the GenericControllerManagerConfigurations API. +type ControllerManagerConfiguration struct { + metav1.TypeMeta `json:",inline"` + + // ControllerManagerConfiguration returns the contfigurations for controllers + ControllerManagerConfigurationSpec `json:",inline"` +} + +// Complete returns the configuration for controller-runtime. +func (c *ControllerManagerConfigurationSpec) Complete() (ControllerManagerConfigurationSpec, error) { + return *c, nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/zz_generated.deepcopy.go b/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..5329bef66 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,153 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + configv1alpha1 "k8s.io/component-base/config/v1alpha1" + timex "time" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerConfigurationSpec) DeepCopyInto(out *ControllerConfigurationSpec) { + *out = *in + if in.GroupKindConcurrency != nil { + in, out := &in.GroupKindConcurrency, &out.GroupKindConcurrency + *out = make(map[string]int, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.CacheSyncTimeout != nil { + in, out := &in.CacheSyncTimeout, &out.CacheSyncTimeout + *out = new(timex.Duration) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerConfigurationSpec. +func (in *ControllerConfigurationSpec) DeepCopy() *ControllerConfigurationSpec { + if in == nil { + return nil + } + out := new(ControllerConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerHealth) DeepCopyInto(out *ControllerHealth) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerHealth. +func (in *ControllerHealth) DeepCopy() *ControllerHealth { + if in == nil { + return nil + } + out := new(ControllerHealth) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerManagerConfiguration) DeepCopyInto(out *ControllerManagerConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ControllerManagerConfigurationSpec.DeepCopyInto(&out.ControllerManagerConfigurationSpec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerManagerConfiguration. +func (in *ControllerManagerConfiguration) DeepCopy() *ControllerManagerConfiguration { + if in == nil { + return nil + } + out := new(ControllerManagerConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
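Editor's note, not part of the vendored file: a minimal sketch of how a project might populate the ControllerManagerConfigurationSpec defined above directly in Go. Field names come from types.go above; the lease name and bind addresses are hypothetical, and most projects would instead unmarshal this from a component config file.

package main

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	configv1alpha1 "k8s.io/component-base/config/v1alpha1"
	"k8s.io/utils/pointer"

	"sigs.k8s.io/controller-runtime/pkg/config/v1alpha1"
)

// exampleConfig assembles the manager configuration in code.
func exampleConfig() v1alpha1.ControllerManagerConfigurationSpec {
	syncPeriod := metav1.Duration{Duration: 10 * time.Hour}
	return v1alpha1.ControllerManagerConfigurationSpec{
		SyncPeriod: &syncPeriod,
		LeaderElection: &configv1alpha1.LeaderElectionConfiguration{
			LeaderElect:  pointer.BoolPtr(true),
			ResourceName: "example-controller-leader", // hypothetical lease name
		},
		Metrics: v1alpha1.ControllerMetrics{BindAddress: ":8080"},
		Health:  v1alpha1.ControllerHealth{HealthProbeBindAddress: ":8081"},
	}
}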
+func (in *ControllerManagerConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerManagerConfigurationSpec) DeepCopyInto(out *ControllerManagerConfigurationSpec) { + *out = *in + if in.SyncPeriod != nil { + in, out := &in.SyncPeriod, &out.SyncPeriod + *out = new(v1.Duration) + **out = **in + } + if in.LeaderElection != nil { + in, out := &in.LeaderElection, &out.LeaderElection + *out = new(configv1alpha1.LeaderElectionConfiguration) + (*in).DeepCopyInto(*out) + } + if in.GracefulShutdownTimeout != nil { + in, out := &in.GracefulShutdownTimeout, &out.GracefulShutdownTimeout + *out = new(v1.Duration) + **out = **in + } + if in.Controller != nil { + in, out := &in.Controller, &out.Controller + *out = new(ControllerConfigurationSpec) + (*in).DeepCopyInto(*out) + } + out.Metrics = in.Metrics + out.Health = in.Health + in.Webhook.DeepCopyInto(&out.Webhook) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerManagerConfigurationSpec. +func (in *ControllerManagerConfigurationSpec) DeepCopy() *ControllerManagerConfigurationSpec { + if in == nil { + return nil + } + out := new(ControllerManagerConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerMetrics) DeepCopyInto(out *ControllerMetrics) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerMetrics. +func (in *ControllerMetrics) DeepCopy() *ControllerMetrics { + if in == nil { + return nil + } + out := new(ControllerMetrics) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerWebhook) DeepCopyInto(out *ControllerWebhook) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerWebhook. +func (in *ControllerWebhook) DeepCopy() *ControllerWebhook { + if in == nil { + return nil + } + out := new(ControllerWebhook) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go new file mode 100644 index 000000000..8e3d8591d --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go @@ -0,0 +1,155 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controller + +import ( + "context" + "fmt" + "time" + + "github.com/go-logr/logr" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" + + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/internal/controller" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/ratelimiter" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +// Options are the arguments for creating a new Controller. +type Options struct { + // MaxConcurrentReconciles is the maximum number of concurrent Reconciles which can be run. Defaults to 1. + MaxConcurrentReconciles int + + // Reconciler reconciles an object + Reconciler reconcile.Reconciler + + // RateLimiter is used to limit how frequently requests may be queued. + // Defaults to MaxOfRateLimiter which has both overall and per-item rate limiting. + // The overall is a token bucket and the per-item is exponential. + RateLimiter ratelimiter.RateLimiter + + // LogConstructor is used to construct a logger used for this controller and passed + // to each reconciliation via the context field. + LogConstructor func(request *reconcile.Request) logr.Logger + + // CacheSyncTimeout refers to the time limit set to wait for syncing caches. + // Defaults to 2 minutes if not set. + CacheSyncTimeout time.Duration + + // RecoverPanic indicates whether the panic caused by reconcile should be recovered. + RecoverPanic bool +} + +// Controller implements a Kubernetes API. A Controller manages a work queue fed reconcile.Requests +// from source.Sources. Work is performed through the reconcile.Reconciler for each enqueued item. +// Work typically is reads and writes Kubernetes objects to make the system state match the state specified +// in the object Spec. +type Controller interface { + // Reconciler is called to reconcile an object by Namespace/Name + reconcile.Reconciler + + // Watch takes events provided by a Source and uses the EventHandler to + // enqueue reconcile.Requests in response to the events. + // + // Watch may be provided one or more Predicates to filter events before + // they are given to the EventHandler. Events will be passed to the + // EventHandler if all provided Predicates evaluate to true. + Watch(src source.Source, eventhandler handler.EventHandler, predicates ...predicate.Predicate) error + + // Start starts the controller. Start blocks until the context is closed or a + // controller has an error starting. + Start(ctx context.Context) error + + // GetLogger returns this controller logger prefilled with basic information. + GetLogger() logr.Logger +} + +// New returns a new Controller registered with the Manager. The Manager will ensure that shared Caches have +// been synced before the Controller is Started. +func New(name string, mgr manager.Manager, options Options) (Controller, error) { + c, err := NewUnmanaged(name, mgr, options) + if err != nil { + return nil, err + } + + // Add the controller as a Manager components + return c, mgr.Add(c) +} + +// NewUnmanaged returns a new controller without adding it to the manager. The +// caller is responsible for starting the returned controller. 
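Editor's note, not part of the vendored file: a short sketch of the Options/New API described above, wiring a managed controller and a Watch. The manager, reconciler, and controller name are assumptions supplied by the caller.

package main

import (
	corev1 "k8s.io/api/core/v1"

	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
	"sigs.k8s.io/controller-runtime/pkg/source"
)

// addController registers a controller with the manager using the Options
// described above; mgr and r are supplied by the caller.
func addController(mgr manager.Manager, r reconcile.Reconciler) error {
	c, err := controller.New("example-controller", mgr, controller.Options{
		MaxConcurrentReconciles: 2,
		Reconciler:              r,
	})
	if err != nil {
		return err
	}
	// Enqueue a reconcile.Request for every Pod create/update/delete event.
	return c.Watch(&source.Kind{Type: &corev1.Pod{}}, &handler.EnqueueRequestForObject{})
}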
+func NewUnmanaged(name string, mgr manager.Manager, options Options) (Controller, error) { + if options.Reconciler == nil { + return nil, fmt.Errorf("must specify Reconciler") + } + + if len(name) == 0 { + return nil, fmt.Errorf("must specify Name for Controller") + } + + if options.LogConstructor == nil { + log := mgr.GetLogger().WithValues( + "controller", name, + ) + options.LogConstructor = func(req *reconcile.Request) logr.Logger { + log := log + if req != nil { + log = log.WithValues( + "object", klog.KRef(req.Namespace, req.Name), + "namespace", req.Namespace, "name", req.Name, + ) + } + return log + } + } + + if options.MaxConcurrentReconciles <= 0 { + options.MaxConcurrentReconciles = 1 + } + + if options.CacheSyncTimeout == 0 { + options.CacheSyncTimeout = 2 * time.Minute + } + + if options.RateLimiter == nil { + options.RateLimiter = workqueue.DefaultControllerRateLimiter() + } + + // Inject dependencies into Reconciler + if err := mgr.SetFields(options.Reconciler); err != nil { + return nil, err + } + + // Create controller with dependencies set + return &controller.Controller{ + Do: options.Reconciler, + MakeQueue: func() workqueue.RateLimitingInterface { + return workqueue.NewNamedRateLimitingQueue(options.RateLimiter, name) + }, + MaxConcurrentReconciles: options.MaxConcurrentReconciles, + CacheSyncTimeout: options.CacheSyncTimeout, + SetFields: mgr.SetFields, + Name: name, + LogConstructor: options.LogConstructor, + RecoverPanic: options.RecoverPanic, + }, nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go new file mode 100644 index 000000000..aa53a77d4 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go @@ -0,0 +1,394 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllerutil + +import ( + "context" + "fmt" + "reflect" + + "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" +) + +// AlreadyOwnedError is an error returned if the object you are trying to assign +// a controller reference is already owned by another controller Object is the +// subject and Owner is the reference for the current owner. 
+type AlreadyOwnedError struct { + Object metav1.Object + Owner metav1.OwnerReference +} + +func (e *AlreadyOwnedError) Error() string { + return fmt.Sprintf("Object %s/%s is already owned by another %s controller %s", e.Object.GetNamespace(), e.Object.GetName(), e.Owner.Kind, e.Owner.Name) +} + +func newAlreadyOwnedError(obj metav1.Object, owner metav1.OwnerReference) *AlreadyOwnedError { + return &AlreadyOwnedError{ + Object: obj, + Owner: owner, + } +} + +// SetControllerReference sets owner as a Controller OwnerReference on controlled. +// This is used for garbage collection of the controlled object and for +// reconciling the owner object on changes to controlled (with a Watch + EnqueueRequestForOwner). +// Since only one OwnerReference can be a controller, it returns an error if +// there is another OwnerReference with Controller flag set. +func SetControllerReference(owner, controlled metav1.Object, scheme *runtime.Scheme) error { + // Validate the owner. + ro, ok := owner.(runtime.Object) + if !ok { + return fmt.Errorf("%T is not a runtime.Object, cannot call SetControllerReference", owner) + } + if err := validateOwner(owner, controlled); err != nil { + return err + } + + // Create a new controller ref. + gvk, err := apiutil.GVKForObject(ro, scheme) + if err != nil { + return err + } + ref := metav1.OwnerReference{ + APIVersion: gvk.GroupVersion().String(), + Kind: gvk.Kind, + Name: owner.GetName(), + UID: owner.GetUID(), + BlockOwnerDeletion: pointer.BoolPtr(true), + Controller: pointer.BoolPtr(true), + } + + // Return early with an error if the object is already controlled. + if existing := metav1.GetControllerOf(controlled); existing != nil && !referSameObject(*existing, ref) { + return newAlreadyOwnedError(controlled, *existing) + } + + // Update owner references and return. + upsertOwnerRef(ref, controlled) + return nil +} + +// SetOwnerReference is a helper method to make sure the given object contains an object reference to the object provided. +// This allows you to declare that owner has a dependency on the object without specifying it as a controller. +// If a reference to the same object already exists, it'll be overwritten with the newly provided version. +func SetOwnerReference(owner, object metav1.Object, scheme *runtime.Scheme) error { + // Validate the owner. + ro, ok := owner.(runtime.Object) + if !ok { + return fmt.Errorf("%T is not a runtime.Object, cannot call SetOwnerReference", owner) + } + if err := validateOwner(owner, object); err != nil { + return err + } + + // Create a new owner ref. + gvk, err := apiutil.GVKForObject(ro, scheme) + if err != nil { + return err + } + ref := metav1.OwnerReference{ + APIVersion: gvk.GroupVersion().String(), + Kind: gvk.Kind, + UID: owner.GetUID(), + Name: owner.GetName(), + } + + // Update owner references and return. + upsertOwnerRef(ref, object) + return nil +} + +func upsertOwnerRef(ref metav1.OwnerReference, object metav1.Object) { + owners := object.GetOwnerReferences() + if idx := indexOwnerRef(owners, ref); idx == -1 { + owners = append(owners, ref) + } else { + owners[idx] = ref + } + object.SetOwnerReferences(owners) +} + +// indexOwnerRef returns the index of the owner reference in the slice if found, or -1. 
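Editor's note, not part of the vendored file: a small sketch of SetControllerReference as documented above, marking a ConfigMap as controlled by a Deployment so it is garbage-collected with its owner and can be reconciled via an owner watch. The object names are placeholders.

package main

import (
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"

	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

// ownedConfigMap builds a ConfigMap controlled by the given Deployment.
// The scheme must know both types.
func ownedConfigMap(owner *appsv1.Deployment, scheme *runtime.Scheme) (*corev1.ConfigMap, error) {
	cm := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: owner.Name + "-config", Namespace: owner.Namespace},
		Data:       map[string]string{"mode": "enabled"},
	}
	if err := controllerutil.SetControllerReference(owner, cm, scheme); err != nil {
		return nil, err
	}
	return cm, nil
}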
+func indexOwnerRef(ownerReferences []metav1.OwnerReference, ref metav1.OwnerReference) int { + for index, r := range ownerReferences { + if referSameObject(r, ref) { + return index + } + } + return -1 +} + +func validateOwner(owner, object metav1.Object) error { + ownerNs := owner.GetNamespace() + if ownerNs != "" { + objNs := object.GetNamespace() + if objNs == "" { + return fmt.Errorf("cluster-scoped resource must not have a namespace-scoped owner, owner's namespace %s", ownerNs) + } + if ownerNs != objNs { + return fmt.Errorf("cross-namespace owner references are disallowed, owner's namespace %s, obj's namespace %s", owner.GetNamespace(), object.GetNamespace()) + } + } + return nil +} + +// Returns true if a and b point to the same object. +func referSameObject(a, b metav1.OwnerReference) bool { + aGV, err := schema.ParseGroupVersion(a.APIVersion) + if err != nil { + return false + } + + bGV, err := schema.ParseGroupVersion(b.APIVersion) + if err != nil { + return false + } + + return aGV.Group == bGV.Group && a.Kind == b.Kind && a.Name == b.Name +} + +// OperationResult is the action result of a CreateOrUpdate call. +type OperationResult string + +const ( // They should complete the sentence "Deployment default/foo has been ..." + // OperationResultNone means that the resource has not been changed. + OperationResultNone OperationResult = "unchanged" + // OperationResultCreated means that a new resource is created. + OperationResultCreated OperationResult = "created" + // OperationResultUpdated means that an existing resource is updated. + OperationResultUpdated OperationResult = "updated" + // OperationResultUpdatedStatus means that an existing resource and its status is updated. + OperationResultUpdatedStatus OperationResult = "updatedStatus" + // OperationResultUpdatedStatusOnly means that only an existing status is updated. + OperationResultUpdatedStatusOnly OperationResult = "updatedStatusOnly" +) + +// CreateOrUpdate creates or updates the given object in the Kubernetes +// cluster. The object's desired state must be reconciled with the existing +// state inside the passed in callback MutateFn. +// +// The MutateFn is called regardless of creating or updating an object. +// +// It returns the executed operation and an error. +func CreateOrUpdate(ctx context.Context, c client.Client, obj client.Object, f MutateFn) (OperationResult, error) { + key := client.ObjectKeyFromObject(obj) + if err := c.Get(ctx, key, obj); err != nil { + if !apierrors.IsNotFound(err) { + return OperationResultNone, err + } + if err := mutate(f, key, obj); err != nil { + return OperationResultNone, err + } + if err := c.Create(ctx, obj); err != nil { + return OperationResultNone, err + } + return OperationResultCreated, nil + } + + existing := obj.DeepCopyObject() //nolint + if err := mutate(f, key, obj); err != nil { + return OperationResultNone, err + } + + if equality.Semantic.DeepEqual(existing, obj) { + return OperationResultNone, nil + } + + if err := c.Update(ctx, obj); err != nil { + return OperationResultNone, err + } + return OperationResultUpdated, nil +} + +// CreateOrPatch creates or patches the given object in the Kubernetes +// cluster. The object's desired state must be reconciled with the before +// state inside the passed in callback MutateFn. +// +// The MutateFn is called regardless of creating or updating an object. +// +// It returns the executed operation and an error. 
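Editor's note, not part of the vendored file: a usage sketch for CreateOrUpdate as documented above. The ConfigMap name, namespace, and key are placeholders; the MutateFn expresses the desired state and runs whether the object is being created or updated.

package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

// ensureConfigMap creates the ConfigMap if it is missing, or updates it so
// the desired key is present; the MutateFn runs on both paths.
func ensureConfigMap(ctx context.Context, c client.Client) (controllerutil.OperationResult, error) {
	cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: "default"}}
	return controllerutil.CreateOrUpdate(ctx, c, cm, func() error {
		if cm.Data == nil {
			cm.Data = map[string]string{}
		}
		cm.Data["mode"] = "enabled" // desired state, re-applied on every reconcile
		return nil
	})
}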
+func CreateOrPatch(ctx context.Context, c client.Client, obj client.Object, f MutateFn) (OperationResult, error) { + key := client.ObjectKeyFromObject(obj) + if err := c.Get(ctx, key, obj); err != nil { + if !apierrors.IsNotFound(err) { + return OperationResultNone, err + } + if f != nil { + if err := mutate(f, key, obj); err != nil { + return OperationResultNone, err + } + } + if err := c.Create(ctx, obj); err != nil { + return OperationResultNone, err + } + return OperationResultCreated, nil + } + + // Create patches for the object and its possible status. + objPatch := client.MergeFrom(obj.DeepCopyObject().(client.Object)) + statusPatch := client.MergeFrom(obj.DeepCopyObject().(client.Object)) + + // Create a copy of the original object as well as converting that copy to + // unstructured data. + before, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj.DeepCopyObject()) + if err != nil { + return OperationResultNone, err + } + + // Attempt to extract the status from the resource for easier comparison later + beforeStatus, hasBeforeStatus, err := unstructured.NestedFieldCopy(before, "status") + if err != nil { + return OperationResultNone, err + } + + // If the resource contains a status then remove it from the unstructured + // copy to avoid unnecessary patching later. + if hasBeforeStatus { + unstructured.RemoveNestedField(before, "status") + } + + // Mutate the original object. + if f != nil { + if err := mutate(f, key, obj); err != nil { + return OperationResultNone, err + } + } + + // Convert the resource to unstructured to compare against our before copy. + after, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) + if err != nil { + return OperationResultNone, err + } + + // Attempt to extract the status from the resource for easier comparison later + afterStatus, hasAfterStatus, err := unstructured.NestedFieldCopy(after, "status") + if err != nil { + return OperationResultNone, err + } + + // If the resource contains a status then remove it from the unstructured + // copy to avoid unnecessary patching later. + if hasAfterStatus { + unstructured.RemoveNestedField(after, "status") + } + + result := OperationResultNone + + if !reflect.DeepEqual(before, after) { + // Only issue a Patch if the before and after resources (minus status) differ + if err := c.Patch(ctx, obj, objPatch); err != nil { + return result, err + } + result = OperationResultUpdated + } + + if (hasBeforeStatus || hasAfterStatus) && !reflect.DeepEqual(beforeStatus, afterStatus) { + // Only issue a Status Patch if the resource has a status and the beforeStatus + // and afterStatus copies differ + if result == OperationResultUpdated { + // If Status was replaced by Patch before, set it to afterStatus + objectAfterPatch, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) + if err != nil { + return result, err + } + if err = unstructured.SetNestedField(objectAfterPatch, afterStatus, "status"); err != nil { + return result, err + } + // If Status was replaced by Patch before, restore patched structure to the obj + if err = runtime.DefaultUnstructuredConverter.FromUnstructured(objectAfterPatch, obj); err != nil { + return result, err + } + } + if err := c.Status().Patch(ctx, obj, statusPatch); err != nil { + return result, err + } + if result == OperationResultUpdated { + result = OperationResultUpdatedStatus + } else { + result = OperationResultUpdatedStatusOnly + } + } + + return result, nil +} + +// mutate wraps a MutateFn and applies validation to its result. 
+func mutate(f MutateFn, key client.ObjectKey, obj client.Object) error { + if err := f(); err != nil { + return err + } + if newKey := client.ObjectKeyFromObject(obj); key != newKey { + return fmt.Errorf("MutateFn cannot mutate object name and/or object namespace") + } + return nil +} + +// MutateFn is a function which mutates the existing object into its desired state. +type MutateFn func() error + +// AddFinalizer accepts an Object and adds the provided finalizer if not present. +// It returns an indication of whether it updated the object's list of finalizers. +func AddFinalizer(o client.Object, finalizer string) (finalizersUpdated bool) { + f := o.GetFinalizers() + for _, e := range f { + if e == finalizer { + return false + } + } + o.SetFinalizers(append(f, finalizer)) + return true +} + +// RemoveFinalizer accepts an Object and removes the provided finalizer if present. +// It returns an indication of whether it updated the object's list of finalizers. +func RemoveFinalizer(o client.Object, finalizer string) (finalizersUpdated bool) { + f := o.GetFinalizers() + for i := 0; i < len(f); i++ { + if f[i] == finalizer { + f = append(f[:i], f[i+1:]...) + i-- + finalizersUpdated = true + } + } + o.SetFinalizers(f) + return +} + +// ContainsFinalizer checks an Object that the provided finalizer is present. +func ContainsFinalizer(o client.Object, finalizer string) bool { + f := o.GetFinalizers() + for _, e := range f { + if e == finalizer { + return true + } + } + return false +} + +// Object allows functions to work indistinctly with any resource that +// implements both Object interfaces. +// +// Deprecated: Use client.Object instead. +type Object = client.Object diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/doc.go new file mode 100644 index 000000000..ab386b29c --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package controllerutil contains utility functions for working with and implementing Controllers. +*/ +package controllerutil diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/doc.go new file mode 100644 index 000000000..667b14fdd --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/doc.go @@ -0,0 +1,25 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
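Editor's note, not part of the vendored file: a sketch of the finalizer helpers (AddFinalizer, ContainsFinalizer, RemoveFinalizer) defined above, following the usual pattern of adding the finalizer while the object is live and removing it after cleanup once deletion is requested. The finalizer name and cleanup step are hypothetical.

package main

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

const cleanupFinalizer = "example.my.domain/cleanup" // hypothetical finalizer name

// reconcileFinalizer adds the finalizer while the object is live, and runs
// cleanup then removes it once deletion has been requested.
func reconcileFinalizer(ctx context.Context, c client.Client, obj client.Object) error {
	if obj.GetDeletionTimestamp().IsZero() {
		if controllerutil.AddFinalizer(obj, cleanupFinalizer) {
			return c.Update(ctx, obj)
		}
		return nil
	}
	if controllerutil.ContainsFinalizer(obj, cleanupFinalizer) {
		// ... external cleanup would run here ...
		controllerutil.RemoveFinalizer(obj, cleanupFinalizer)
		return c.Update(ctx, obj)
	}
	return nil
}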
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package controller provides types and functions for building Controllers. Controllers implement Kubernetes APIs. + +Creation + +To create a new Controller, first create a manager.Manager and pass it to the controller.New function. +The Controller MUST be started by calling Manager.Start. +*/ +package controller diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/conversion/conversion.go b/vendor/sigs.k8s.io/controller-runtime/pkg/conversion/conversion.go new file mode 100644 index 000000000..da32ab48e --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/conversion/conversion.go @@ -0,0 +1,40 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package conversion provides interface definitions that an API Type needs to +implement for it to be supported by the generic conversion webhook handler +defined under pkg/webhook/conversion. +*/ +package conversion + +import "k8s.io/apimachinery/pkg/runtime" + +// Convertible defines capability of a type to convertible i.e. it can be converted to/from a hub type. +type Convertible interface { + runtime.Object + ConvertTo(dst Hub) error + ConvertFrom(src Hub) error +} + +// Hub marks that a given type is the hub type for conversion. This means that +// all conversions will first convert to the hub type, then convert from the hub +// type to the destination type. All types besides the hub type should implement +// Convertible. +type Hub interface { + runtime.Object + Hub() +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/event/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/event/doc.go new file mode 100644 index 000000000..adba3bbc1 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/event/doc.go @@ -0,0 +1,28 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package event contains the definitions for the Event types produced by source.Sources and transformed into +reconcile.Requests by handler.EventHandler. + +You should rarely need to work with these directly -- instead, use Controller.Watch with +source.Sources and handler.EventHandlers. + +Events generally contain both a full runtime.Object that caused the event, as well +as a direct handle to that object's metadata. This saves a lot of typecasting in +code that works with Events. 
+*/ +package event diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/event/event.go b/vendor/sigs.k8s.io/controller-runtime/pkg/event/event.go new file mode 100644 index 000000000..271b3c00f --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/event/event.go @@ -0,0 +1,55 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package event + +import "sigs.k8s.io/controller-runtime/pkg/client" + +// CreateEvent is an event where a Kubernetes object was created. CreateEvent should be generated +// by a source.Source and transformed into a reconcile.Request by an handler.EventHandler. +type CreateEvent struct { + // Object is the object from the event + Object client.Object +} + +// UpdateEvent is an event where a Kubernetes object was updated. UpdateEvent should be generated +// by a source.Source and transformed into a reconcile.Request by an handler.EventHandler. +type UpdateEvent struct { + // ObjectOld is the object from the event + ObjectOld client.Object + + // ObjectNew is the object from the event + ObjectNew client.Object +} + +// DeleteEvent is an event where a Kubernetes object was deleted. DeleteEvent should be generated +// by a source.Source and transformed into a reconcile.Request by an handler.EventHandler. +type DeleteEvent struct { + // Object is the object from the event + Object client.Object + + // DeleteStateUnknown is true if the Delete event was missed but we identified the object + // as having been deleted. + DeleteStateUnknown bool +} + +// GenericEvent is an event where the operation type is unknown (e.g. polling or event originating outside the cluster). +// GenericEvent should be generated by a source.Source and transformed into a reconcile.Request by an +// handler.EventHandler. +type GenericEvent struct { + // Object is the object from the event + Object client.Object +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/doc.go new file mode 100644 index 000000000..3b5b79048 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/doc.go @@ -0,0 +1,38 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package handler defines EventHandlers that enqueue reconcile.Requests in response to Create, Update, Deletion Events +observed from Watching Kubernetes APIs. Users should provide a source.Source and handler.EventHandler to +Controller.Watch in order to generate and enqueue reconcile.Request work items. 
+ +Generally, following premade event handlers should be sufficient for most use cases: + +EventHandlers + +EnqueueRequestForObject - Enqueues a reconcile.Request containing the Name and Namespace of the object in the Event. This will +cause the object that was the source of the Event (e.g. the created / deleted / updated object) to be +reconciled. + +EnqueueRequestForOwner - Enqueues a reconcile.Request containing the Name and Namespace of the Owner of the object in the Event. +This will cause owner of the object that was the source of the Event (e.g. the owner object that created the object) +to be reconciled. + +EnqueueRequestsFromMapFunc - Enqueues reconcile.Requests resulting from a user provided transformation function run against the +object in the Event. This will cause an arbitrary collection of objects (defined from a transformation of the +source object) to be reconciled. +*/ +package handler diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue.go b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue.go new file mode 100644 index 000000000..e6d3a4eaa --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue.go @@ -0,0 +1,90 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handler + +import ( + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/event" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +var enqueueLog = logf.RuntimeLog.WithName("eventhandler").WithName("EnqueueRequestForObject") + +type empty struct{} + +var _ EventHandler = &EnqueueRequestForObject{} + +// EnqueueRequestForObject enqueues a Request containing the Name and Namespace of the object that is the source of the Event. +// (e.g. the created / deleted / updated objects Name and Namespace). handler.EnqueueRequestForObject is used by almost all +// Controllers that have associated Resources (e.g. CRDs) to reconcile the associated Resource. +type EnqueueRequestForObject struct{} + +// Create implements EventHandler. +func (e *EnqueueRequestForObject) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) { + if evt.Object == nil { + enqueueLog.Error(nil, "CreateEvent received with no metadata", "event", evt) + return + } + q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ + Name: evt.Object.GetName(), + Namespace: evt.Object.GetNamespace(), + }}) +} + +// Update implements EventHandler. 
+func (e *EnqueueRequestForObject) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { + switch { + case evt.ObjectNew != nil: + q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ + Name: evt.ObjectNew.GetName(), + Namespace: evt.ObjectNew.GetNamespace(), + }}) + case evt.ObjectOld != nil: + q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ + Name: evt.ObjectOld.GetName(), + Namespace: evt.ObjectOld.GetNamespace(), + }}) + default: + enqueueLog.Error(nil, "UpdateEvent received with no metadata", "event", evt) + } +} + +// Delete implements EventHandler. +func (e *EnqueueRequestForObject) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { + if evt.Object == nil { + enqueueLog.Error(nil, "DeleteEvent received with no metadata", "event", evt) + return + } + q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ + Name: evt.Object.GetName(), + Namespace: evt.Object.GetNamespace(), + }}) +} + +// Generic implements EventHandler. +func (e *EnqueueRequestForObject) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) { + if evt.Object == nil { + enqueueLog.Error(nil, "GenericEvent received with no metadata", "event", evt) + return + } + q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ + Name: evt.Object.GetName(), + Namespace: evt.Object.GetNamespace(), + }}) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go new file mode 100644 index 000000000..17401b1fd --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go @@ -0,0 +1,97 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handler + +import ( + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/runtime/inject" +) + +// MapFunc is the signature required for enqueueing requests from a generic function. +// This type is usually used with EnqueueRequestsFromMapFunc when registering an event handler. +type MapFunc func(client.Object) []reconcile.Request + +// EnqueueRequestsFromMapFunc enqueues Requests by running a transformation function that outputs a collection +// of reconcile.Requests on each Event. The reconcile.Requests may be for an arbitrary set of objects +// defined by some user specified transformation of the source Event. (e.g. trigger Reconciler for a set of objects +// in response to a cluster resize event caused by adding or deleting a Node) +// +// EnqueueRequestsFromMapFunc is frequently used to fan-out updates from one object to one or more other +// objects of a differing type. +// +// For UpdateEvents which contain both a new and old object, the transformation function is run on both +// objects and both sets of Requests are enqueue. 
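Editor's note, not part of the vendored file: a sketch of EnqueueRequestsFromMapFunc as documented above, fanning every Node event into a single request for a hypothetical cluster-wide object named "cluster-scaler".

package main

import (
	"k8s.io/apimachinery/pkg/types"

	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// nodeMapper maps any Node event to one fixed reconcile.Request.
var nodeMapper = handler.EnqueueRequestsFromMapFunc(func(obj client.Object) []reconcile.Request {
	return []reconcile.Request{
		{NamespacedName: types.NamespacedName{Namespace: "kube-system", Name: "cluster-scaler"}},
	}
})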
+func EnqueueRequestsFromMapFunc(fn MapFunc) EventHandler { + return &enqueueRequestsFromMapFunc{ + toRequests: fn, + } +} + +var _ EventHandler = &enqueueRequestsFromMapFunc{} + +type enqueueRequestsFromMapFunc struct { + // Mapper transforms the argument into a slice of keys to be reconciled + toRequests MapFunc +} + +// Create implements EventHandler. +func (e *enqueueRequestsFromMapFunc) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) { + reqs := map[reconcile.Request]empty{} + e.mapAndEnqueue(q, evt.Object, reqs) +} + +// Update implements EventHandler. +func (e *enqueueRequestsFromMapFunc) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { + reqs := map[reconcile.Request]empty{} + e.mapAndEnqueue(q, evt.ObjectOld, reqs) + e.mapAndEnqueue(q, evt.ObjectNew, reqs) +} + +// Delete implements EventHandler. +func (e *enqueueRequestsFromMapFunc) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { + reqs := map[reconcile.Request]empty{} + e.mapAndEnqueue(q, evt.Object, reqs) +} + +// Generic implements EventHandler. +func (e *enqueueRequestsFromMapFunc) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) { + reqs := map[reconcile.Request]empty{} + e.mapAndEnqueue(q, evt.Object, reqs) +} + +func (e *enqueueRequestsFromMapFunc) mapAndEnqueue(q workqueue.RateLimitingInterface, object client.Object, reqs map[reconcile.Request]empty) { + for _, req := range e.toRequests(object) { + _, ok := reqs[req] + if !ok { + q.Add(req) + reqs[req] = empty{} + } + } +} + +// EnqueueRequestsFromMapFunc can inject fields into the mapper. + +// InjectFunc implements inject.Injector. +func (e *enqueueRequestsFromMapFunc) InjectFunc(f inject.Func) error { + if f == nil { + return nil + } + return f(e.toRequests) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_owner.go b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_owner.go new file mode 100644 index 000000000..63699893f --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_owner.go @@ -0,0 +1,189 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handler + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/event" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/runtime/inject" +) + +var _ EventHandler = &EnqueueRequestForOwner{} + +var log = logf.RuntimeLog.WithName("eventhandler").WithName("EnqueueRequestForOwner") + +// EnqueueRequestForOwner enqueues Requests for the Owners of an object. E.g. the object that created +// the object that was the source of the Event. 
+// +// If a ReplicaSet creates Pods, users may reconcile the ReplicaSet in response to Pod Events using: +// +// - a source.Kind Source with Type of Pod. +// +// - a handler.EnqueueRequestForOwner EventHandler with an OwnerType of ReplicaSet and IsController set to true. +type EnqueueRequestForOwner struct { + // OwnerType is the type of the Owner object to look for in OwnerReferences. Only Group and Kind are compared. + OwnerType runtime.Object + + // IsController if set will only look at the first OwnerReference with Controller: true. + IsController bool + + // groupKind is the cached Group and Kind from OwnerType + groupKind schema.GroupKind + + // mapper maps GroupVersionKinds to Resources + mapper meta.RESTMapper +} + +// Create implements EventHandler. +func (e *EnqueueRequestForOwner) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) { + reqs := map[reconcile.Request]empty{} + e.getOwnerReconcileRequest(evt.Object, reqs) + for req := range reqs { + q.Add(req) + } +} + +// Update implements EventHandler. +func (e *EnqueueRequestForOwner) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { + reqs := map[reconcile.Request]empty{} + e.getOwnerReconcileRequest(evt.ObjectOld, reqs) + e.getOwnerReconcileRequest(evt.ObjectNew, reqs) + for req := range reqs { + q.Add(req) + } +} + +// Delete implements EventHandler. +func (e *EnqueueRequestForOwner) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { + reqs := map[reconcile.Request]empty{} + e.getOwnerReconcileRequest(evt.Object, reqs) + for req := range reqs { + q.Add(req) + } +} + +// Generic implements EventHandler. +func (e *EnqueueRequestForOwner) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) { + reqs := map[reconcile.Request]empty{} + e.getOwnerReconcileRequest(evt.Object, reqs) + for req := range reqs { + q.Add(req) + } +} + +// parseOwnerTypeGroupKind parses the OwnerType into a Group and Kind and caches the result. Returns false +// if the OwnerType could not be parsed using the scheme. +func (e *EnqueueRequestForOwner) parseOwnerTypeGroupKind(scheme *runtime.Scheme) error { + // Get the kinds of the type + kinds, _, err := scheme.ObjectKinds(e.OwnerType) + if err != nil { + log.Error(err, "Could not get ObjectKinds for OwnerType", "owner type", fmt.Sprintf("%T", e.OwnerType)) + return err + } + // Expect only 1 kind. If there is more than one kind this is probably an edge case such as ListOptions. + if len(kinds) != 1 { + err := fmt.Errorf("expected exactly 1 kind for OwnerType %T, but found %s kinds", e.OwnerType, kinds) + log.Error(nil, "expected exactly 1 kind for OwnerType", "owner type", fmt.Sprintf("%T", e.OwnerType), "kinds", kinds) + return err + } + // Cache the Group and Kind for the OwnerType + e.groupKind = schema.GroupKind{Group: kinds[0].Group, Kind: kinds[0].Kind} + return nil +} + +// getOwnerReconcileRequest looks at object and builds a map of reconcile.Request to reconcile +// owners of object that match e.OwnerType. 
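Editor's note, not part of the vendored file: a sketch of the ReplicaSet/Pod scenario from the EnqueueRequestForOwner doc comment above; the controller value is assumed to come from controller.New.

package main

import (
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"

	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/source"
)

// watchOwnedPods enqueues the owning ReplicaSet whenever one of its Pods changes.
func watchOwnedPods(c controller.Controller) error {
	return c.Watch(
		&source.Kind{Type: &corev1.Pod{}},
		&handler.EnqueueRequestForOwner{OwnerType: &appsv1.ReplicaSet{}, IsController: true},
	)
}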
+func (e *EnqueueRequestForOwner) getOwnerReconcileRequest(object metav1.Object, result map[reconcile.Request]empty) { + // Iterate through the OwnerReferences looking for a match on Group and Kind against what was requested + // by the user + for _, ref := range e.getOwnersReferences(object) { + // Parse the Group out of the OwnerReference to compare it to what was parsed out of the requested OwnerType + refGV, err := schema.ParseGroupVersion(ref.APIVersion) + if err != nil { + log.Error(err, "Could not parse OwnerReference APIVersion", + "api version", ref.APIVersion) + return + } + + // Compare the OwnerReference Group and Kind against the OwnerType Group and Kind specified by the user. + // If the two match, create a Request for the objected referred to by + // the OwnerReference. Use the Name from the OwnerReference and the Namespace from the + // object in the event. + if ref.Kind == e.groupKind.Kind && refGV.Group == e.groupKind.Group { + // Match found - add a Request for the object referred to in the OwnerReference + request := reconcile.Request{NamespacedName: types.NamespacedName{ + Name: ref.Name, + }} + + // if owner is not namespaced then we should set the namespace to the empty + mapping, err := e.mapper.RESTMapping(e.groupKind, refGV.Version) + if err != nil { + log.Error(err, "Could not retrieve rest mapping", "kind", e.groupKind) + return + } + if mapping.Scope.Name() != meta.RESTScopeNameRoot { + request.Namespace = object.GetNamespace() + } + + result[request] = empty{} + } + } +} + +// getOwnersReferences returns the OwnerReferences for an object as specified by the EnqueueRequestForOwner +// - if IsController is true: only take the Controller OwnerReference (if found) +// - if IsController is false: take all OwnerReferences. +func (e *EnqueueRequestForOwner) getOwnersReferences(object metav1.Object) []metav1.OwnerReference { + if object == nil { + return nil + } + + // If not filtered as Controller only, then use all the OwnerReferences + if !e.IsController { + return object.GetOwnerReferences() + } + // If filtered to a Controller, only take the Controller OwnerReference + if ownerRef := metav1.GetControllerOf(object); ownerRef != nil { + return []metav1.OwnerReference{*ownerRef} + } + // No Controller OwnerReference found + return nil +} + +var _ inject.Scheme = &EnqueueRequestForOwner{} + +// InjectScheme is called by the Controller to provide a singleton scheme to the EnqueueRequestForOwner. +func (e *EnqueueRequestForOwner) InjectScheme(s *runtime.Scheme) error { + return e.parseOwnerTypeGroupKind(s) +} + +var _ inject.Mapper = &EnqueueRequestForOwner{} + +// InjectMapper is called by the Controller to provide the rest mapper used by the manager. +func (e *EnqueueRequestForOwner) InjectMapper(m meta.RESTMapper) error { + e.mapper = m + return nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go new file mode 100644 index 000000000..8652d22d7 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go @@ -0,0 +1,104 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handler + +import ( + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/event" +) + +// EventHandler enqueues reconcile.Requests in response to events (e.g. Pod Create). EventHandlers map an Event +// for one object to trigger Reconciles for either the same object or different objects - e.g. if there is an +// Event for object with type Foo (using source.KindSource) then reconcile one or more object(s) with type Bar. +// +// Identical reconcile.Requests will be batched together through the queuing mechanism before reconcile is called. +// +// * Use EnqueueRequestForObject to reconcile the object the event is for +// - do this for events for the type the Controller Reconciles. (e.g. Deployment for a Deployment Controller) +// +// * Use EnqueueRequestForOwner to reconcile the owner of the object the event is for +// - do this for events for the types the Controller creates. (e.g. ReplicaSets created by a Deployment Controller) +// +// * Use EnqueueRequestsFromMapFunc to transform an event for an object to a reconcile of an object +// of a different type - do this for events for types the Controller may be interested in, but doesn't create. +// (e.g. If Foo responds to cluster size events, map Node events to Foo objects.) +// +// Unless you are implementing your own EventHandler, you can ignore the functions on the EventHandler interface. +// Most users shouldn't need to implement their own EventHandler. +type EventHandler interface { + // Create is called in response to an create event - e.g. Pod Creation. + Create(event.CreateEvent, workqueue.RateLimitingInterface) + + // Update is called in response to an update event - e.g. Pod Updated. + Update(event.UpdateEvent, workqueue.RateLimitingInterface) + + // Delete is called in response to a delete event - e.g. Pod Deleted. + Delete(event.DeleteEvent, workqueue.RateLimitingInterface) + + // Generic is called in response to an event of an unknown type or a synthetic event triggered as a cron or + // external trigger request - e.g. reconcile Autoscaling, or a Webhook. + Generic(event.GenericEvent, workqueue.RateLimitingInterface) +} + +var _ EventHandler = Funcs{} + +// Funcs implements EventHandler. +type Funcs struct { + // Create is called in response to an add event. Defaults to no-op. + // RateLimitingInterface is used to enqueue reconcile.Requests. + CreateFunc func(event.CreateEvent, workqueue.RateLimitingInterface) + + // Update is called in response to an update event. Defaults to no-op. + // RateLimitingInterface is used to enqueue reconcile.Requests. + UpdateFunc func(event.UpdateEvent, workqueue.RateLimitingInterface) + + // Delete is called in response to a delete event. Defaults to no-op. + // RateLimitingInterface is used to enqueue reconcile.Requests. + DeleteFunc func(event.DeleteEvent, workqueue.RateLimitingInterface) + + // GenericFunc is called in response to a generic event. Defaults to no-op. + // RateLimitingInterface is used to enqueue reconcile.Requests. + GenericFunc func(event.GenericEvent, workqueue.RateLimitingInterface) +} + +// Create implements EventHandler. 
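Editor's note, not part of the vendored file: a sketch of handler.Funcs from above, handling only Delete events while the unset fields default to no-ops.

package main

import (
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/util/workqueue"

	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// deleteOnly enqueues a request only for Delete events.
var deleteOnly = handler.Funcs{
	DeleteFunc: func(e event.DeleteEvent, q workqueue.RateLimitingInterface) {
		q.Add(reconcile.Request{NamespacedName: types.NamespacedName{
			Namespace: e.Object.GetNamespace(),
			Name:      e.Object.GetName(),
		}})
	},
}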
+func (h Funcs) Create(e event.CreateEvent, q workqueue.RateLimitingInterface) { + if h.CreateFunc != nil { + h.CreateFunc(e, q) + } +} + +// Delete implements EventHandler. +func (h Funcs) Delete(e event.DeleteEvent, q workqueue.RateLimitingInterface) { + if h.DeleteFunc != nil { + h.DeleteFunc(e, q) + } +} + +// Update implements EventHandler. +func (h Funcs) Update(e event.UpdateEvent, q workqueue.RateLimitingInterface) { + if h.UpdateFunc != nil { + h.UpdateFunc(e, q) + } +} + +// Generic implements EventHandler. +func (h Funcs) Generic(e event.GenericEvent, q workqueue.RateLimitingInterface) { + if h.GenericFunc != nil { + h.GenericFunc(e, q) + } +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/healthz/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/healthz/doc.go new file mode 100644 index 000000000..9827eeafe --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/healthz/doc.go @@ -0,0 +1,32 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package healthz contains helpers from supporting liveness and readiness endpoints. +// (often referred to as healthz and readyz, respectively). +// +// This package draws heavily from the apiserver's healthz package +// ( https://github.com/kubernetes/apiserver/tree/master/pkg/server/healthz ) +// but has some changes to bring it in line with controller-runtime's style. +// +// The main entrypoint is the Handler -- this serves both aggregated health status +// and individual health check endpoints. +package healthz + +import ( + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" +) + +var log = logf.RuntimeLog.WithName("healthz") diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/healthz/healthz.go b/vendor/sigs.k8s.io/controller-runtime/pkg/healthz/healthz.go new file mode 100644 index 000000000..bd1cc151a --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/healthz/healthz.go @@ -0,0 +1,206 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package healthz + +import ( + "fmt" + "net/http" + "path" + "sort" + "strings" + + "k8s.io/apimachinery/pkg/util/sets" +) + +// Handler is an http.Handler that aggregates the results of the given +// checkers to the root path, and supports calling individual checkers on +// subpaths of the name of the checker. +// +// Adding checks on the fly is *not* threadsafe -- use a wrapper. +type Handler struct { + Checks map[string]Checker +} + +// checkStatus holds the output of a particular check. 
+type checkStatus struct { + name string + healthy bool + excluded bool +} + +func (h *Handler) serveAggregated(resp http.ResponseWriter, req *http.Request) { + failed := false + excluded := getExcludedChecks(req) + + parts := make([]checkStatus, 0, len(h.Checks)) + + // calculate the results... + for checkName, check := range h.Checks { + // no-op the check if we've specified we want to exclude the check + if excluded.Has(checkName) { + excluded.Delete(checkName) + parts = append(parts, checkStatus{name: checkName, healthy: true, excluded: true}) + continue + } + if err := check(req); err != nil { + log.V(1).Info("healthz check failed", "checker", checkName, "error", err) + parts = append(parts, checkStatus{name: checkName, healthy: false}) + failed = true + } else { + parts = append(parts, checkStatus{name: checkName, healthy: true}) + } + } + + // ...default a check if none is present... + if len(h.Checks) == 0 { + parts = append(parts, checkStatus{name: "ping", healthy: true}) + } + + for _, c := range excluded.List() { + log.V(1).Info("cannot exclude health check, no matches for it", "checker", c) + } + + // ...sort to be consistent... + sort.Slice(parts, func(i, j int) bool { return parts[i].name < parts[j].name }) + + // ...and write out the result + // TODO(directxman12): this should also accept a request for JSON content (via a accept header) + _, forceVerbose := req.URL.Query()["verbose"] + writeStatusesAsText(resp, parts, excluded, failed, forceVerbose) +} + +// writeStatusAsText writes out the given check statuses in some semi-arbitrary +// bespoke text format that we copied from Kubernetes. unknownExcludes lists +// any checks that the user requested to have excluded, but weren't actually +// known checks. writeStatusAsText is always verbose on failure, and can be +// forced to be verbose on success using the given argument. +func writeStatusesAsText(resp http.ResponseWriter, parts []checkStatus, unknownExcludes sets.String, failed, forceVerbose bool) { + resp.Header().Set("Content-Type", "text/plain; charset=utf-8") + resp.Header().Set("X-Content-Type-Options", "nosniff") + + // always write status code first + if failed { + resp.WriteHeader(http.StatusInternalServerError) + } else { + resp.WriteHeader(http.StatusOK) + } + + // shortcut for easy non-verbose success + if !failed && !forceVerbose { + fmt.Fprint(resp, "ok") + return + } + + // we're always verbose on failure, so from this point on we're guaranteed to be verbose + + for _, checkOut := range parts { + switch { + case checkOut.excluded: + fmt.Fprintf(resp, "[+]%s excluded: ok\n", checkOut.name) + case checkOut.healthy: + fmt.Fprintf(resp, "[+]%s ok\n", checkOut.name) + default: + // don't include the error since this endpoint is public. If someone wants more detail + // they should have explicit permission to the detailed checks. 
+ fmt.Fprintf(resp, "[-]%s failed: reason withheld\n", checkOut.name) + } + } + + if unknownExcludes.Len() > 0 { + fmt.Fprintf(resp, "warn: some health checks cannot be excluded: no matches for %s\n", formatQuoted(unknownExcludes.List()...)) + } + + if failed { + log.Info("healthz check failed", "statuses", parts) + fmt.Fprintf(resp, "healthz check failed\n") + } else { + fmt.Fprint(resp, "healthz check passed\n") + } +} + +func (h *Handler) ServeHTTP(resp http.ResponseWriter, req *http.Request) { + // clean up the request (duplicating the internal logic of http.ServeMux a bit) + // clean up the path a bit + reqPath := req.URL.Path + if reqPath == "" || reqPath[0] != '/' { + reqPath = "/" + reqPath + } + // path.Clean removes the trailing slash except for root for us + // (which is fine, since we're only serving one layer of sub-paths) + reqPath = path.Clean(reqPath) + + // either serve the root endpoint... + if reqPath == "/" { + h.serveAggregated(resp, req) + return + } + + // ...the default check (if nothing else is present)... + if len(h.Checks) == 0 && reqPath[1:] == "ping" { + CheckHandler{Checker: Ping}.ServeHTTP(resp, req) + return + } + + // ...or an individual checker + checkName := reqPath[1:] // ignore the leading slash + checker, known := h.Checks[checkName] + if !known { + http.NotFoundHandler().ServeHTTP(resp, req) + return + } + + CheckHandler{Checker: checker}.ServeHTTP(resp, req) +} + +// CheckHandler is an http.Handler that serves a health check endpoint at the root path, +// based on its checker. +type CheckHandler struct { + Checker +} + +func (h CheckHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) { + if err := h.Checker(req); err != nil { + http.Error(resp, fmt.Sprintf("internal server error: %v", err), http.StatusInternalServerError) + } else { + fmt.Fprint(resp, "ok") + } +} + +// Checker knows how to perform a health check. +type Checker func(req *http.Request) error + +// Ping returns true automatically when checked. +var Ping Checker = func(_ *http.Request) error { return nil } + +// getExcludedChecks extracts the health check names to be excluded from the query param. +func getExcludedChecks(r *http.Request) sets.String { + checks, found := r.URL.Query()["exclude"] + if found { + return sets.NewString(checks...) + } + return sets.NewString() +} + +// formatQuoted returns a formatted string of the health check names, +// preserving the order passed in. +func formatQuoted(names ...string) string { + quoted := make([]string, 0, len(names)) + for _, name := range names { + quoted = append(quoted, fmt.Sprintf("%q", name)) + } + return strings.Join(quoted, ",") +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go new file mode 100644 index 000000000..3732eea16 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go @@ -0,0 +1,360 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "github.com/go-logr/logr" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/handler" + ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/runtime/inject" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +var _ inject.Injector = &Controller{} + +// Controller implements controller.Controller. +type Controller struct { + // Name is used to uniquely identify a Controller in tracing, logging and monitoring. Name is required. + Name string + + // MaxConcurrentReconciles is the maximum number of concurrent Reconciles which can be run. Defaults to 1. + MaxConcurrentReconciles int + + // Reconciler is a function that can be called at any time with the Name / Namespace of an object and + // ensures that the state of the system matches the state specified in the object. + // Defaults to the DefaultReconcileFunc. + Do reconcile.Reconciler + + // MakeQueue constructs the queue for this controller once the controller is ready to start. + // This exists because the standard Kubernetes workqueues start themselves immediately, which + // leads to goroutine leaks if something calls controller.New repeatedly. + MakeQueue func() workqueue.RateLimitingInterface + + // Queue is an listeningQueue that listens for events from Informers and adds object keys to + // the Queue for processing + Queue workqueue.RateLimitingInterface + + // SetFields is used to inject dependencies into other objects such as Sources, EventHandlers and Predicates + // Deprecated: the caller should handle injected fields itself. + SetFields func(i interface{}) error + + // mu is used to synchronize Controller setup + mu sync.Mutex + + // Started is true if the Controller has been Started + Started bool + + // ctx is the context that was passed to Start() and used when starting watches. + // + // According to the docs, contexts should not be stored in a struct: https://golang.org/pkg/context, + // while we usually always strive to follow best practices, we consider this a legacy case and it should + // undergo a major refactoring and redesign to allow for context to not be stored in a struct. + ctx context.Context + + // CacheSyncTimeout refers to the time limit set on waiting for cache to sync + // Defaults to 2 minutes if not set. + CacheSyncTimeout time.Duration + + // startWatches maintains a list of sources, handlers, and predicates to start when the controller is started. + startWatches []watchDescription + + // LogConstructor is used to construct a logger to then log messages to users during reconciliation, + // or for example when a watch is started. + // Note: LogConstructor has to be able to handle nil requests as we are also using it + // outside the context of a reconciliation. + LogConstructor func(request *reconcile.Request) logr.Logger + + // RecoverPanic indicates whether the panic caused by reconcile should be recovered. + RecoverPanic bool +} + +// watchDescription contains all the information necessary to start a watch. 
+type watchDescription struct { + src source.Source + handler handler.EventHandler + predicates []predicate.Predicate +} + +// Reconcile implements reconcile.Reconciler. +func (c *Controller) Reconcile(ctx context.Context, req reconcile.Request) (_ reconcile.Result, err error) { + defer func() { + if r := recover(); r != nil { + if c.RecoverPanic { + for _, fn := range utilruntime.PanicHandlers { + fn(r) + } + err = fmt.Errorf("panic: %v [recovered]", r) + return + } + + log := logf.FromContext(ctx) + log.Info(fmt.Sprintf("Observed a panic in reconciler: %v", r)) + panic(r) + } + }() + return c.Do.Reconcile(ctx, req) +} + +// Watch implements controller.Controller. +func (c *Controller) Watch(src source.Source, evthdler handler.EventHandler, prct ...predicate.Predicate) error { + c.mu.Lock() + defer c.mu.Unlock() + + // Inject Cache into arguments + if err := c.SetFields(src); err != nil { + return err + } + if err := c.SetFields(evthdler); err != nil { + return err + } + for _, pr := range prct { + if err := c.SetFields(pr); err != nil { + return err + } + } + + // Controller hasn't started yet, store the watches locally and return. + // + // These watches are going to be held on the controller struct until the manager or user calls Start(...). + if !c.Started { + c.startWatches = append(c.startWatches, watchDescription{src: src, handler: evthdler, predicates: prct}) + return nil + } + + c.LogConstructor(nil).Info("Starting EventSource", "source", src) + return src.Start(c.ctx, evthdler, c.Queue, prct...) +} + +// Start implements controller.Controller. +func (c *Controller) Start(ctx context.Context) error { + // use an IIFE to get proper lock handling + // but lock outside to get proper handling of the queue shutdown + c.mu.Lock() + if c.Started { + return errors.New("controller was started more than once. This is likely to be caused by being added to a manager multiple times") + } + + c.initMetrics() + + // Set the internal context. + c.ctx = ctx + + c.Queue = c.MakeQueue() + go func() { + <-ctx.Done() + c.Queue.ShutDown() + }() + + wg := &sync.WaitGroup{} + err := func() error { + defer c.mu.Unlock() + + // TODO(pwittrock): Reconsider HandleCrash + defer utilruntime.HandleCrash() + + // NB(directxman12): launch the sources *before* trying to wait for the + // caches to sync so that they have a chance to register their intendeded + // caches. + for _, watch := range c.startWatches { + c.LogConstructor(nil).Info("Starting EventSource", "source", fmt.Sprintf("%s", watch.src)) + + if err := watch.src.Start(ctx, watch.handler, c.Queue, watch.predicates...); err != nil { + return err + } + } + + // Start the SharedIndexInformer factories to begin populating the SharedIndexInformer caches + c.LogConstructor(nil).Info("Starting Controller") + + for _, watch := range c.startWatches { + syncingSource, ok := watch.src.(source.SyncingSource) + if !ok { + continue + } + + if err := func() error { + // use a context with timeout for launching sources and syncing caches. 
+ sourceStartCtx, cancel := context.WithTimeout(ctx, c.CacheSyncTimeout) + defer cancel() + + // WaitForSync waits for a definitive timeout, and returns if there + // is an error or a timeout + if err := syncingSource.WaitForSync(sourceStartCtx); err != nil { + err := fmt.Errorf("failed to wait for %s caches to sync: %w", c.Name, err) + c.LogConstructor(nil).Error(err, "Could not wait for Cache to sync") + return err + } + + return nil + }(); err != nil { + return err + } + } + + // All the watches have been started, we can reset the local slice. + // + // We should never hold watches more than necessary, each watch source can hold a backing cache, + // which won't be garbage collected if we hold a reference to it. + c.startWatches = nil + + // Launch workers to process resources + c.LogConstructor(nil).Info("Starting workers", "worker count", c.MaxConcurrentReconciles) + wg.Add(c.MaxConcurrentReconciles) + for i := 0; i < c.MaxConcurrentReconciles; i++ { + go func() { + defer wg.Done() + // Run a worker thread that just dequeues items, processes them, and marks them done. + // It enforces that the reconcileHandler is never invoked concurrently with the same object. + for c.processNextWorkItem(ctx) { + } + }() + } + + c.Started = true + return nil + }() + if err != nil { + return err + } + + <-ctx.Done() + c.LogConstructor(nil).Info("Shutdown signal received, waiting for all workers to finish") + wg.Wait() + c.LogConstructor(nil).Info("All workers finished") + return nil +} + +// processNextWorkItem will read a single work item off the workqueue and +// attempt to process it, by calling the reconcileHandler. +func (c *Controller) processNextWorkItem(ctx context.Context) bool { + obj, shutdown := c.Queue.Get() + if shutdown { + // Stop working + return false + } + + // We call Done here so the workqueue knows we have finished + // processing this item. We also must remember to call Forget if we + // do not want this work item being re-queued. For example, we do + // not call Forget if a transient error occurs, instead the item is + // put back on the workqueue and attempted again after a back-off + // period. + defer c.Queue.Done(obj) + + ctrlmetrics.ActiveWorkers.WithLabelValues(c.Name).Add(1) + defer ctrlmetrics.ActiveWorkers.WithLabelValues(c.Name).Add(-1) + + c.reconcileHandler(ctx, obj) + return true +} + +const ( + labelError = "error" + labelRequeueAfter = "requeue_after" + labelRequeue = "requeue" + labelSuccess = "success" +) + +func (c *Controller) initMetrics() { + ctrlmetrics.ActiveWorkers.WithLabelValues(c.Name).Set(0) + ctrlmetrics.ReconcileErrors.WithLabelValues(c.Name).Add(0) + ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelError).Add(0) + ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelRequeueAfter).Add(0) + ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelRequeue).Add(0) + ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelSuccess).Add(0) + ctrlmetrics.WorkerCount.WithLabelValues(c.Name).Set(float64(c.MaxConcurrentReconciles)) +} + +func (c *Controller) reconcileHandler(ctx context.Context, obj interface{}) { + // Update metrics after processing each item + reconcileStartTS := time.Now() + defer func() { + c.updateMetrics(time.Since(reconcileStartTS)) + }() + + // Make sure that the object is a valid request. + req, ok := obj.(reconcile.Request) + if !ok { + // As the item in the workqueue is actually invalid, we call + // Forget here else we'd go into a loop of attempting to + // process a work item that is invalid. 
+ c.Queue.Forget(obj) + c.LogConstructor(nil).Error(nil, "Queue item was not a Request", "type", fmt.Sprintf("%T", obj), "value", obj) + // Return true, don't take a break + return + } + + log := c.LogConstructor(&req) + + log = log.WithValues("reconcileID", uuid.NewUUID()) + ctx = logf.IntoContext(ctx, log) + + // RunInformersAndControllers the syncHandler, passing it the Namespace/Name string of the + // resource to be synced. + result, err := c.Reconcile(ctx, req) + switch { + case err != nil: + c.Queue.AddRateLimited(req) + ctrlmetrics.ReconcileErrors.WithLabelValues(c.Name).Inc() + ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelError).Inc() + log.Error(err, "Reconciler error") + case result.RequeueAfter > 0: + // The result.RequeueAfter request will be lost, if it is returned + // along with a non-nil error. But this is intended as + // We need to drive to stable reconcile loops before queuing due + // to result.RequestAfter + c.Queue.Forget(obj) + c.Queue.AddAfter(req, result.RequeueAfter) + ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelRequeueAfter).Inc() + case result.Requeue: + c.Queue.AddRateLimited(req) + ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelRequeue).Inc() + default: + // Finally, if no error occurs we Forget this item so it does not + // get queued again until another change happens. + c.Queue.Forget(obj) + ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelSuccess).Inc() + } +} + +// GetLogger returns this controller's logger. +func (c *Controller) GetLogger() logr.Logger { + return c.LogConstructor(nil) +} + +// InjectFunc implement SetFields.Injector. +func (c *Controller) InjectFunc(f inject.Func) error { + c.SetFields = f + return nil +} + +// updateMetrics updates prometheus metrics within the controller. +func (c *Controller) updateMetrics(reconcileTime time.Duration) { + ctrlmetrics.ReconcileTime.WithLabelValues(c.Name).Observe(reconcileTime.Seconds()) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics/metrics.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics/metrics.go new file mode 100644 index 000000000..baec66927 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics/metrics.go @@ -0,0 +1,78 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/collectors" + "sigs.k8s.io/controller-runtime/pkg/metrics" +) + +var ( + // ReconcileTotal is a prometheus counter metrics which holds the total + // number of reconciliations per controller. It has two labels. controller label refers + // to the controller name and result label refers to the reconcile result i.e + // success, error, requeue, requeue_after. 
+ ReconcileTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: "controller_runtime_reconcile_total", + Help: "Total number of reconciliations per controller", + }, []string{"controller", "result"}) + + // ReconcileErrors is a prometheus counter metrics which holds the total + // number of errors from the Reconciler. + ReconcileErrors = prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: "controller_runtime_reconcile_errors_total", + Help: "Total number of reconciliation errors per controller", + }, []string{"controller"}) + + // ReconcileTime is a prometheus metric which keeps track of the duration + // of reconciliations. + ReconcileTime = prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Name: "controller_runtime_reconcile_time_seconds", + Help: "Length of time per reconciliation per controller", + Buckets: []float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, + 1.25, 1.5, 1.75, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5, 6, 7, 8, 9, 10, 15, 20, 25, 30, 40, 50, 60}, + }, []string{"controller"}) + + // WorkerCount is a prometheus metric which holds the number of + // concurrent reconciles per controller. + WorkerCount = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "controller_runtime_max_concurrent_reconciles", + Help: "Maximum number of concurrent reconciles per controller", + }, []string{"controller"}) + + // ActiveWorkers is a prometheus metric which holds the number + // of active workers per controller. + ActiveWorkers = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "controller_runtime_active_workers", + Help: "Number of currently used workers per controller", + }, []string{"controller"}) +) + +func init() { + metrics.Registry.MustRegister( + ReconcileTotal, + ReconcileErrors, + ReconcileTime, + WorkerCount, + ActiveWorkers, + // expose process metrics like CPU, Memory, file descriptor usage etc. + collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}), + // expose Go runtime metrics like GC stats, memory stats etc. + collectors.NewGoCollector(), + ) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/httpserver/server.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/httpserver/server.go new file mode 100644 index 000000000..b5f91f18e --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/httpserver/server.go @@ -0,0 +1,16 @@ +package httpserver + +import ( + "net/http" + "time" +) + +// New returns a new server with sane defaults. +func New(handler http.Handler) *http.Server { + return &http.Server{ + Handler: handler, + MaxHeaderBytes: 1 << 20, + IdleTimeout: 90 * time.Second, // matches http.DefaultTransport keep-alive timeout + ReadHeaderTimeout: 32 * time.Second, + } +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/log/log.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/log/log.go new file mode 100644 index 000000000..d91a0ca50 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/log/log.go @@ -0,0 +1,32 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package log + +import ( + "github.com/go-logr/logr" + + "sigs.k8s.io/controller-runtime/pkg/log" +) + +var ( + // RuntimeLog is a base parent logger for use inside controller-runtime. + RuntimeLog logr.Logger +) + +func init() { + RuntimeLog = log.Log.WithName("controller-runtime") +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go new file mode 100644 index 000000000..7057f3dbe --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go @@ -0,0 +1,78 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package objectutil + +import ( + "errors" + "fmt" + + apimeta "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" +) + +// FilterWithLabels returns a copy of the items in objs matching labelSel. +func FilterWithLabels(objs []runtime.Object, labelSel labels.Selector) ([]runtime.Object, error) { + outItems := make([]runtime.Object, 0, len(objs)) + for _, obj := range objs { + meta, err := apimeta.Accessor(obj) + if err != nil { + return nil, err + } + if labelSel != nil { + lbls := labels.Set(meta.GetLabels()) + if !labelSel.Matches(lbls) { + continue + } + } + outItems = append(outItems, obj.DeepCopyObject()) + } + return outItems, nil +} + +// IsAPINamespaced returns true if the object is namespace scoped. +// For unstructured objects the gvk is found from the object itself. +func IsAPINamespaced(obj runtime.Object, scheme *runtime.Scheme, restmapper apimeta.RESTMapper) (bool, error) { + gvk, err := apiutil.GVKForObject(obj, scheme) + if err != nil { + return false, err + } + + return IsAPINamespacedWithGVK(gvk, scheme, restmapper) +} + +// IsAPINamespacedWithGVK returns true if the object having the provided +// GVK is namespace scoped. +func IsAPINamespacedWithGVK(gk schema.GroupVersionKind, scheme *runtime.Scheme, restmapper apimeta.RESTMapper) (bool, error) { + restmapping, err := restmapper.RESTMapping(schema.GroupKind{Group: gk.Group, Kind: gk.Kind}) + if err != nil { + return false, fmt.Errorf("failed to get restmapping: %w", err) + } + + scope := restmapping.Scope.Name() + + if scope == "" { + return false, errors.New("scope cannot be identified, empty scope returned") + } + + if scope != apimeta.RESTScopeNameRoot { + return true, nil + } + return false, nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/recorder/recorder.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/recorder/recorder.go new file mode 100644 index 000000000..46cc1714b --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/recorder/recorder.go @@ -0,0 +1,176 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package recorder + +import ( + "context" + "fmt" + "sync" + + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record" +) + +// EventBroadcasterProducer makes an event broadcaster, returning +// whether or not the broadcaster should be stopped with the Provider, +// or not (e.g. if it's shared, it shouldn't be stopped with the Provider). +type EventBroadcasterProducer func() (caster record.EventBroadcaster, stopWithProvider bool) + +// Provider is a recorder.Provider that records events to the k8s API server +// and to a logr Logger. +type Provider struct { + lock sync.RWMutex + stopped bool + + // scheme to specify when creating a recorder + scheme *runtime.Scheme + // logger is the logger to use when logging diagnostic event info + logger logr.Logger + evtClient corev1client.EventInterface + makeBroadcaster EventBroadcasterProducer + + broadcasterOnce sync.Once + broadcaster record.EventBroadcaster + stopBroadcaster bool +} + +// NB(directxman12): this manually implements Stop instead of Being a runnable because we need to +// stop it *after* everything else shuts down, otherwise we'll cause panics as the leader election +// code finishes up and tries to continue emitting events. + +// Stop attempts to stop this provider, stopping the underlying broadcaster +// if the broadcaster asked to be stopped. It kinda tries to honor the given +// context, but the underlying broadcaster has an indefinite wait that doesn't +// return until all queued events are flushed, so this may end up just returning +// before the underlying wait has finished instead of cancelling the wait. +// This is Very Frustrating™. +func (p *Provider) Stop(shutdownCtx context.Context) { + doneCh := make(chan struct{}) + + go func() { + // technically, this could start the broadcaster, but practically, it's + // almost certainly already been started (e.g. by leader election). We + // need to invoke this to ensure that we don't inadvertently race with + // an invocation of getBroadcaster. + broadcaster := p.getBroadcaster() + if p.stopBroadcaster { + p.lock.Lock() + broadcaster.Shutdown() + p.stopped = true + p.lock.Unlock() + } + close(doneCh) + }() + + select { + case <-shutdownCtx.Done(): + case <-doneCh: + } +} + +// getBroadcaster ensures that a broadcaster is started for this +// provider, and returns it. It's threadsafe. +func (p *Provider) getBroadcaster() record.EventBroadcaster { + // NB(directxman12): this can technically still leak if something calls + // "getBroadcaster" (i.e. Emits an Event) but never calls Start, but if we + // create the broadcaster in start, we could race with other things that + // are started at the same time & want to emit events. The alternative is + // silently swallowing events and more locking, but that seems suboptimal. 
+ + p.broadcasterOnce.Do(func() { + broadcaster, stop := p.makeBroadcaster() + broadcaster.StartRecordingToSink(&corev1client.EventSinkImpl{Interface: p.evtClient}) + broadcaster.StartEventWatcher( + func(e *corev1.Event) { + p.logger.V(1).Info(e.Type, "object", e.InvolvedObject, "reason", e.Reason, "message", e.Message) + }) + p.broadcaster = broadcaster + p.stopBroadcaster = stop + }) + + return p.broadcaster +} + +// NewProvider create a new Provider instance. +func NewProvider(config *rest.Config, scheme *runtime.Scheme, logger logr.Logger, makeBroadcaster EventBroadcasterProducer) (*Provider, error) { + corev1Client, err := corev1client.NewForConfig(config) + if err != nil { + return nil, fmt.Errorf("failed to init client: %w", err) + } + + p := &Provider{scheme: scheme, logger: logger, makeBroadcaster: makeBroadcaster, evtClient: corev1Client.Events("")} + return p, nil +} + +// GetEventRecorderFor returns an event recorder that broadcasts to this provider's +// broadcaster. All events will be associated with a component of the given name. +func (p *Provider) GetEventRecorderFor(name string) record.EventRecorder { + return &lazyRecorder{ + prov: p, + name: name, + } +} + +// lazyRecorder is a recorder that doesn't actually instantiate any underlying +// recorder until the first event is emitted. +type lazyRecorder struct { + prov *Provider + name string + + recOnce sync.Once + rec record.EventRecorder +} + +// ensureRecording ensures that a concrete recorder is populated for this recorder. +func (l *lazyRecorder) ensureRecording() { + l.recOnce.Do(func() { + broadcaster := l.prov.getBroadcaster() + l.rec = broadcaster.NewRecorder(l.prov.scheme, corev1.EventSource{Component: l.name}) + }) +} + +func (l *lazyRecorder) Event(object runtime.Object, eventtype, reason, message string) { + l.ensureRecording() + + l.prov.lock.RLock() + if !l.prov.stopped { + l.rec.Event(object, eventtype, reason, message) + } + l.prov.lock.RUnlock() +} +func (l *lazyRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { + l.ensureRecording() + + l.prov.lock.RLock() + if !l.prov.stopped { + l.rec.Eventf(object, eventtype, reason, messageFmt, args...) + } + l.prov.lock.RUnlock() +} +func (l *lazyRecorder) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) { + l.ensureRecording() + + l.prov.lock.RLock() + if !l.prov.stopped { + l.rec.AnnotatedEventf(object, annotations, eventtype, reason, messageFmt, args...) + } + l.prov.lock.RUnlock() +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/doc.go new file mode 100644 index 000000000..37a9aefab --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package leaderelection contains a constructor for a leader election resource lock. 
+This is used to ensure that multiple copies of a controller manager can be run with +only one active set of controllers, for active-passive HA. + +It uses built-in Kubernetes leader election APIs. +*/ +package leaderelection diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/leader_election.go b/vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/leader_election.go new file mode 100644 index 000000000..ee4fcf4cb --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/leader_election.go @@ -0,0 +1,127 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package leaderelection + +import ( + "errors" + "fmt" + "os" + + "k8s.io/apimachinery/pkg/util/uuid" + coordinationv1client "k8s.io/client-go/kubernetes/typed/coordination/v1" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/leaderelection/resourcelock" + + "sigs.k8s.io/controller-runtime/pkg/recorder" +) + +const inClusterNamespacePath = "/var/run/secrets/kubernetes.io/serviceaccount/namespace" + +// Options provides the required configuration to create a new resource lock. +type Options struct { + // LeaderElection determines whether or not to use leader election when + // starting the manager. + LeaderElection bool + + // LeaderElectionResourceLock determines which resource lock to use for leader election, + // defaults to "leases". + LeaderElectionResourceLock string + + // LeaderElectionNamespace determines the namespace in which the leader + // election resource will be created. + LeaderElectionNamespace string + + // LeaderElectionID determines the name of the resource that leader election + // will use for holding the leader lock. + LeaderElectionID string +} + +// NewResourceLock creates a new resource lock for use in a leader election loop. +func NewResourceLock(config *rest.Config, recorderProvider recorder.Provider, options Options) (resourcelock.Interface, error) { + if !options.LeaderElection { + return nil, nil + } + + // Default resource lock to "leases". The previous default (from v0.7.0 to v0.11.x) was configmapsleases, which was + // used to migrate from configmaps to leases. Since the default was "configmapsleases" for over a year, spanning + // five minor releases, any actively maintained operators are very likely to have a released version that uses + // "configmapsleases". Therefore defaulting to "leases" should be safe. 
+ if options.LeaderElectionResourceLock == "" { + options.LeaderElectionResourceLock = resourcelock.LeasesResourceLock + } + + // LeaderElectionID must be provided to prevent clashes + if options.LeaderElectionID == "" { + return nil, errors.New("LeaderElectionID must be configured") + } + + // Default the namespace (if running in cluster) + if options.LeaderElectionNamespace == "" { + var err error + options.LeaderElectionNamespace, err = getInClusterNamespace() + if err != nil { + return nil, fmt.Errorf("unable to find leader election namespace: %w", err) + } + } + + // Leader id, needs to be unique + id, err := os.Hostname() + if err != nil { + return nil, err + } + id = id + "_" + string(uuid.NewUUID()) + + // Construct clients for leader election + rest.AddUserAgent(config, "leader-election") + corev1Client, err := corev1client.NewForConfig(config) + if err != nil { + return nil, err + } + + coordinationClient, err := coordinationv1client.NewForConfig(config) + if err != nil { + return nil, err + } + + return resourcelock.New(options.LeaderElectionResourceLock, + options.LeaderElectionNamespace, + options.LeaderElectionID, + corev1Client, + coordinationClient, + resourcelock.ResourceLockConfig{ + Identity: id, + EventRecorder: recorderProvider.GetEventRecorderFor(id), + }) +} + +func getInClusterNamespace() (string, error) { + // Check whether the namespace file exists. + // If not, we are not running in cluster so can't guess the namespace. + if _, err := os.Stat(inClusterNamespacePath); os.IsNotExist(err) { + return "", fmt.Errorf("not running in-cluster, please specify LeaderElectionNamespace") + } else if err != nil { + return "", fmt.Errorf("error checking namespace file: %w", err) + } + + // Load the namespace file and return its content + namespace, err := os.ReadFile(inClusterNamespacePath) + if err != nil { + return "", fmt.Errorf("error reading namespace file: %w", err) + } + return string(namespace), nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go b/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go new file mode 100644 index 000000000..6fca63063 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go @@ -0,0 +1,188 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package log + +import ( + "sync" + + "github.com/go-logr/logr" +) + +// loggerPromise knows how to populate a concrete logr.Logger +// with options, given an actual base logger later on down the line. +type loggerPromise struct { + logger *DelegatingLogSink + childPromises []*loggerPromise + promisesLock sync.Mutex + + name *string + tags []interface{} +} + +func (p *loggerPromise) WithName(l *DelegatingLogSink, name string) *loggerPromise { + res := &loggerPromise{ + logger: l, + name: &name, + promisesLock: sync.Mutex{}, + } + + p.promisesLock.Lock() + defer p.promisesLock.Unlock() + p.childPromises = append(p.childPromises, res) + return res +} + +// WithValues provides a new Logger with the tags appended. 
+func (p *loggerPromise) WithValues(l *DelegatingLogSink, tags ...interface{}) *loggerPromise { + res := &loggerPromise{ + logger: l, + tags: tags, + promisesLock: sync.Mutex{}, + } + + p.promisesLock.Lock() + defer p.promisesLock.Unlock() + p.childPromises = append(p.childPromises, res) + return res +} + +// Fulfill instantiates the Logger with the provided logger. +func (p *loggerPromise) Fulfill(parentLogSink logr.LogSink) { + sink := parentLogSink + if p.name != nil { + sink = sink.WithName(*p.name) + } + + if p.tags != nil { + sink = sink.WithValues(p.tags...) + } + + p.logger.lock.Lock() + p.logger.logger = sink + p.logger.promise = nil + p.logger.lock.Unlock() + + for _, childPromise := range p.childPromises { + childPromise.Fulfill(sink) + } +} + +// DelegatingLogSink is a logsink that delegates to another logr.LogSink. +// If the underlying promise is not nil, it registers calls to sub-loggers with +// the logging factory to be populated later, and returns a new delegating +// logger. It expects to have *some* logr.Logger set at all times (generally +// a no-op logger before the promises are fulfilled). +type DelegatingLogSink struct { + lock sync.RWMutex + logger logr.LogSink + promise *loggerPromise + info logr.RuntimeInfo +} + +// Init implements logr.LogSink. +func (l *DelegatingLogSink) Init(info logr.RuntimeInfo) { + l.lock.Lock() + defer l.lock.Unlock() + l.info = info +} + +// Enabled tests whether this Logger is enabled. For example, commandline +// flags might be used to set the logging verbosity and disable some info +// logs. +func (l *DelegatingLogSink) Enabled(level int) bool { + l.lock.RLock() + defer l.lock.RUnlock() + return l.logger.Enabled(level) +} + +// Info logs a non-error message with the given key/value pairs as context. +// +// The msg argument should be used to add some constant description to +// the log line. The key/value pairs can then be used to add additional +// variable information. The key/value pairs should alternate string +// keys and arbitrary values. +func (l *DelegatingLogSink) Info(level int, msg string, keysAndValues ...interface{}) { + l.lock.RLock() + defer l.lock.RUnlock() + l.logger.Info(level, msg, keysAndValues...) +} + +// Error logs an error, with the given message and key/value pairs as context. +// It functions similarly to calling Info with the "error" named value, but may +// have unique behavior, and should be preferred for logging errors (see the +// package documentations for more information). +// +// The msg field should be used to add context to any underlying error, +// while the err field should be used to attach the actual error that +// triggered this log line, if present. +func (l *DelegatingLogSink) Error(err error, msg string, keysAndValues ...interface{}) { + l.lock.RLock() + defer l.lock.RUnlock() + l.logger.Error(err, msg, keysAndValues...) +} + +// WithName provides a new Logger with the name appended. +func (l *DelegatingLogSink) WithName(name string) logr.LogSink { + l.lock.RLock() + defer l.lock.RUnlock() + + if l.promise == nil { + return l.logger.WithName(name) + } + + res := &DelegatingLogSink{logger: l.logger} + promise := l.promise.WithName(res, name) + res.promise = promise + + return res +} + +// WithValues provides a new Logger with the tags appended. +func (l *DelegatingLogSink) WithValues(tags ...interface{}) logr.LogSink { + l.lock.RLock() + defer l.lock.RUnlock() + + if l.promise == nil { + return l.logger.WithValues(tags...) 
+ } + + res := &DelegatingLogSink{logger: l.logger} + promise := l.promise.WithValues(res, tags...) + res.promise = promise + + return res +} + +// Fulfill switches the logger over to use the actual logger +// provided, instead of the temporary initial one, if this method +// has not been previously called. +func (l *DelegatingLogSink) Fulfill(actual logr.LogSink) { + if l.promise != nil { + l.promise.Fulfill(actual) + } +} + +// NewDelegatingLogSink constructs a new DelegatingLogSink which uses +// the given logger before its promise is fulfilled. +func NewDelegatingLogSink(initial logr.LogSink) *DelegatingLogSink { + l := &DelegatingLogSink{ + logger: initial, + promise: &loggerPromise{promisesLock: sync.Mutex{}}, + } + l.promise.logger = l + return l +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go b/vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go new file mode 100644 index 000000000..3965769c3 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go @@ -0,0 +1,102 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package log contains utilities for fetching a new logger +// when one is not already available. +// +// The Log Handle +// +// This package contains a root logr.Logger Log. It may be used to +// get a handle to whatever the root logging implementation is. By +// default, no implementation exists, and the handle returns "promises" +// to loggers. When the implementation is set using SetLogger, these +// "promises" will be converted over to real loggers. +// +// Logr +// +// All logging in controller-runtime is structured, using a set of interfaces +// defined by a package called logr +// (https://pkg.go.dev/github.com/go-logr/logr). The sub-package zap provides +// helpers for setting up logr backed by Zap (go.uber.org/zap). +package log + +import ( + "context" + "sync" + "time" + + "github.com/go-logr/logr" +) + +// SetLogger sets a concrete logging implementation for all deferred Loggers. +func SetLogger(l logr.Logger) { + loggerWasSetLock.Lock() + defer loggerWasSetLock.Unlock() + + loggerWasSet = true + dlog.Fulfill(l.GetSink()) +} + +// It is safe to assume that if this wasn't set within the first 30 seconds of a binaries +// lifetime, it will never get set. The DelegatingLogSink causes a high number of memory +// allocations when not given an actual Logger, so we set a NullLogSink to avoid that. +// +// We need to keep the DelegatingLogSink because we have various inits() that get a logger from +// here. They will always get executed before any code that imports controller-runtime +// has a chance to run and hence to set an actual logger. +func init() { + // Init is blocking, so start a new goroutine + go func() { + time.Sleep(30 * time.Second) + loggerWasSetLock.Lock() + defer loggerWasSetLock.Unlock() + if !loggerWasSet { + dlog.Fulfill(NullLogSink{}) + } + }() +} + +var ( + loggerWasSetLock sync.Mutex + loggerWasSet bool +) + +// Log is the base logger used by kubebuilder. 
It delegates +// to another logr.Logger. You *must* call SetLogger to +// get any actual logging. If SetLogger is not called within +// the first 30 seconds of a binaries lifetime, it will get +// set to a NullLogSink. +var ( + dlog = NewDelegatingLogSink(NullLogSink{}) + Log = logr.New(dlog) +) + +// FromContext returns a logger with predefined values from a context.Context. +func FromContext(ctx context.Context, keysAndValues ...interface{}) logr.Logger { + log := Log + if ctx != nil { + if logger, err := logr.FromContext(ctx); err == nil { + log = logger + } + } + return log.WithValues(keysAndValues...) +} + +// IntoContext takes a context and sets the logger as one of its values. +// Use FromContext function to retrieve the logger. +func IntoContext(ctx context.Context, log logr.Logger) context.Context { + return logr.NewContext(ctx, log) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/log/null.go b/vendor/sigs.k8s.io/controller-runtime/pkg/log/null.go new file mode 100644 index 000000000..f3e81074f --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/log/null.go @@ -0,0 +1,59 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package log + +import ( + "github.com/go-logr/logr" +) + +// NB: this is the same as the null logger logr/testing, +// but avoids accidentally adding the testing flags to +// all binaries. + +// NullLogSink is a logr.Logger that does nothing. +type NullLogSink struct{} + +var _ logr.LogSink = NullLogSink{} + +// Init implements logr.LogSink. +func (log NullLogSink) Init(logr.RuntimeInfo) { +} + +// Info implements logr.InfoLogger. +func (NullLogSink) Info(_ int, _ string, _ ...interface{}) { + // Do nothing. +} + +// Enabled implements logr.InfoLogger. +func (NullLogSink) Enabled(level int) bool { + return false +} + +// Error implements logr.Logger. +func (NullLogSink) Error(_ error, _ string, _ ...interface{}) { + // Do nothing. +} + +// WithName implements logr.Logger. +func (log NullLogSink) WithName(_ string) logr.LogSink { + return log +} + +// WithValues implements logr.Logger. +func (log NullLogSink) WithValues(_ ...interface{}) logr.LogSink { + return log +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/log/warning_handler.go b/vendor/sigs.k8s.io/controller-runtime/pkg/log/warning_handler.go new file mode 100644 index 000000000..e9522632d --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/log/warning_handler.go @@ -0,0 +1,76 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package log + +import ( + "sync" + + "github.com/go-logr/logr" +) + +// KubeAPIWarningLoggerOptions controls the behavior +// of a rest.WarningHandler constructed using NewKubeAPIWarningLogger(). +type KubeAPIWarningLoggerOptions struct { + // Deduplicate indicates a given warning message should only be written once. + // Setting this to true in a long-running process handling many warnings can + // result in increased memory use. + Deduplicate bool +} + +// KubeAPIWarningLogger is a wrapper around +// a provided logr.Logger that implements the +// rest.WarningHandler interface. +type KubeAPIWarningLogger struct { + // logger is used to log responses with the warning header + logger logr.Logger + // opts contain options controlling warning output + opts KubeAPIWarningLoggerOptions + // writtenLock gurads written + writtenLock sync.Mutex + // used to keep track of already logged messages + // and help in de-duplication. + written map[string]struct{} +} + +// HandleWarningHeader handles logging for responses from API server that are +// warnings with code being 299 and uses a logr.Logger for its logging purposes. +func (l *KubeAPIWarningLogger) HandleWarningHeader(code int, agent string, message string) { + if code != 299 || len(message) == 0 { + return + } + + if l.opts.Deduplicate { + l.writtenLock.Lock() + defer l.writtenLock.Unlock() + + if _, alreadyLogged := l.written[message]; alreadyLogged { + return + } + l.written[message] = struct{}{} + } + l.logger.Info(message) +} + +// NewKubeAPIWarningLogger returns an implementation of rest.WarningHandler that logs warnings +// with code = 299 to the provided logr.Logger. +func NewKubeAPIWarningLogger(l logr.Logger, opts KubeAPIWarningLoggerOptions) *KubeAPIWarningLogger { + h := &KubeAPIWarningLogger{logger: l, opts: opts} + if opts.Deduplicate { + h.written = map[string]struct{}{} + } + return h +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/doc.go new file mode 100644 index 000000000..f2976c7f7 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package manager is required to create Controllers and provides shared dependencies such as clients, caches, schemes, +etc. Controllers must be started by calling Manager.Start. +*/ +package manager diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go new file mode 100644 index 000000000..5b22c628f --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go @@ -0,0 +1,652 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package manager + +import ( + "context" + "errors" + "fmt" + "net" + "net/http" + "sync" + "sync/atomic" + "time" + + "github.com/go-logr/logr" + "github.com/prometheus/client_golang/prometheus/promhttp" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/leaderelection" + "k8s.io/client-go/tools/leaderelection/resourcelock" + "k8s.io/client-go/tools/record" + + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/cluster" + "sigs.k8s.io/controller-runtime/pkg/config/v1alpha1" + "sigs.k8s.io/controller-runtime/pkg/healthz" + "sigs.k8s.io/controller-runtime/pkg/internal/httpserver" + intrec "sigs.k8s.io/controller-runtime/pkg/internal/recorder" + "sigs.k8s.io/controller-runtime/pkg/metrics" + "sigs.k8s.io/controller-runtime/pkg/runtime/inject" + "sigs.k8s.io/controller-runtime/pkg/webhook" +) + +const ( + // Values taken from: https://github.com/kubernetes/component-base/blob/master/config/v1alpha1/defaults.go + defaultLeaseDuration = 15 * time.Second + defaultRenewDeadline = 10 * time.Second + defaultRetryPeriod = 2 * time.Second + defaultGracefulShutdownPeriod = 30 * time.Second + + defaultReadinessEndpoint = "/readyz" + defaultLivenessEndpoint = "/healthz" + defaultMetricsEndpoint = "/metrics" +) + +var _ Runnable = &controllerManager{} + +type controllerManager struct { + sync.Mutex + started bool + + stopProcedureEngaged *int64 + errChan chan error + runnables *runnables + + // cluster holds a variety of methods to interact with a cluster. Required. + cluster cluster.Cluster + + // recorderProvider is used to generate event recorders that will be injected into Controllers + // (and EventHandlers, Sources and Predicates). + recorderProvider *intrec.Provider + + // resourceLock forms the basis for leader election + resourceLock resourcelock.Interface + + // leaderElectionReleaseOnCancel defines if the manager should step back from the leader lease + // on shutdown + leaderElectionReleaseOnCancel bool + + // metricsListener is used to serve prometheus metrics + metricsListener net.Listener + + // metricsExtraHandlers contains extra handlers to register on http server that serves metrics. + metricsExtraHandlers map[string]http.Handler + + // healthProbeListener is used to serve liveness probe + healthProbeListener net.Listener + + // Readiness probe endpoint name + readinessEndpointName string + + // Liveness probe endpoint name + livenessEndpointName string + + // Readyz probe handler + readyzHandler *healthz.Handler + + // Healthz probe handler + healthzHandler *healthz.Handler + + // controllerOptions are the global controller options. + controllerOptions v1alpha1.ControllerConfigurationSpec + + // Logger is the logger that should be used by this manager. + // If none is set, it defaults to log.Log global logger. 
+ logger logr.Logger + + // leaderElectionStopped is an internal channel used to signal the stopping procedure that the + // LeaderElection.Run(...) function has returned and the shutdown can proceed. + leaderElectionStopped chan struct{} + + // leaderElectionCancel is used to cancel the leader election. It is distinct from internalStopper, + // because for safety reasons we need to os.Exit() when we lose the leader election, meaning that + // it must be deferred until after gracefulShutdown is done. + leaderElectionCancel context.CancelFunc + + // elected is closed when this manager becomes the leader of a group of + // managers, either because it won a leader election or because no leader + // election was configured. + elected chan struct{} + + // port is the port that the webhook server serves at. + port int + // host is the hostname that the webhook server binds to. + host string + // CertDir is the directory that contains the server key and certificate. + // if not set, webhook server would look up the server key and certificate in + // {TempDir}/k8s-webhook-server/serving-certs + certDir string + + webhookServer *webhook.Server + // webhookServerOnce will be called in GetWebhookServer() to optionally initialize + // webhookServer if unset, and Add() it to controllerManager. + webhookServerOnce sync.Once + + // leaseDuration is the duration that non-leader candidates will + // wait to force acquire leadership. + leaseDuration time.Duration + // renewDeadline is the duration that the acting controlplane will retry + // refreshing leadership before giving up. + renewDeadline time.Duration + // retryPeriod is the duration the LeaderElector clients should wait + // between tries of actions. + retryPeriod time.Duration + + // gracefulShutdownTimeout is the duration given to runnable to stop + // before the manager actually returns on stop. + gracefulShutdownTimeout time.Duration + + // onStoppedLeading is callled when the leader election lease is lost. + // It can be overridden for tests. + onStoppedLeading func() + + // shutdownCtx is the context that can be used during shutdown. It will be cancelled + // after the gracefulShutdownTimeout ended. It must not be accessed before internalStop + // is closed because it will be nil. + shutdownCtx context.Context + + internalCtx context.Context + internalCancel context.CancelFunc + + // internalProceduresStop channel is used internally to the manager when coordinating + // the proper shutdown of servers. This channel is also used for dependency injection. + internalProceduresStop chan struct{} +} + +type hasCache interface { + Runnable + GetCache() cache.Cache +} + +// Add sets dependencies on i, and adds it to the list of Runnables to start. +func (cm *controllerManager) Add(r Runnable) error { + cm.Lock() + defer cm.Unlock() + return cm.add(r) +} + +func (cm *controllerManager) add(r Runnable) error { + // Set dependencies on the object + if err := cm.SetFields(r); err != nil { + return err + } + return cm.runnables.Add(r) +} + +// Deprecated: use the equivalent Options field to set a field. This method will be removed in v0.10. 
+func (cm *controllerManager) SetFields(i interface{}) error { + if err := cm.cluster.SetFields(i); err != nil { + return err + } + if _, err := inject.InjectorInto(cm.SetFields, i); err != nil { + return err + } + if _, err := inject.StopChannelInto(cm.internalProceduresStop, i); err != nil { + return err + } + if _, err := inject.LoggerInto(cm.logger, i); err != nil { + return err + } + + return nil +} + +// AddMetricsExtraHandler adds extra handler served on path to the http server that serves metrics. +func (cm *controllerManager) AddMetricsExtraHandler(path string, handler http.Handler) error { + cm.Lock() + defer cm.Unlock() + + if cm.started { + return fmt.Errorf("unable to add new metrics handler because metrics endpoint has already been created") + } + + if path == defaultMetricsEndpoint { + return fmt.Errorf("overriding builtin %s endpoint is not allowed", defaultMetricsEndpoint) + } + + if _, found := cm.metricsExtraHandlers[path]; found { + return fmt.Errorf("can't register extra handler by duplicate path %q on metrics http server", path) + } + + cm.metricsExtraHandlers[path] = handler + cm.logger.V(2).Info("Registering metrics http server extra handler", "path", path) + return nil +} + +// AddHealthzCheck allows you to add Healthz checker. +func (cm *controllerManager) AddHealthzCheck(name string, check healthz.Checker) error { + cm.Lock() + defer cm.Unlock() + + if cm.started { + return fmt.Errorf("unable to add new checker because healthz endpoint has already been created") + } + + if cm.healthzHandler == nil { + cm.healthzHandler = &healthz.Handler{Checks: map[string]healthz.Checker{}} + } + + cm.healthzHandler.Checks[name] = check + return nil +} + +// AddReadyzCheck allows you to add Readyz checker. +func (cm *controllerManager) AddReadyzCheck(name string, check healthz.Checker) error { + cm.Lock() + defer cm.Unlock() + + if cm.started { + return fmt.Errorf("unable to add new checker because healthz endpoint has already been created") + } + + if cm.readyzHandler == nil { + cm.readyzHandler = &healthz.Handler{Checks: map[string]healthz.Checker{}} + } + + cm.readyzHandler.Checks[name] = check + return nil +} + +func (cm *controllerManager) GetConfig() *rest.Config { + return cm.cluster.GetConfig() +} + +func (cm *controllerManager) GetClient() client.Client { + return cm.cluster.GetClient() +} + +func (cm *controllerManager) GetScheme() *runtime.Scheme { + return cm.cluster.GetScheme() +} + +func (cm *controllerManager) GetFieldIndexer() client.FieldIndexer { + return cm.cluster.GetFieldIndexer() +} + +func (cm *controllerManager) GetCache() cache.Cache { + return cm.cluster.GetCache() +} + +func (cm *controllerManager) GetEventRecorderFor(name string) record.EventRecorder { + return cm.cluster.GetEventRecorderFor(name) +} + +func (cm *controllerManager) GetRESTMapper() meta.RESTMapper { + return cm.cluster.GetRESTMapper() +} + +func (cm *controllerManager) GetAPIReader() client.Reader { + return cm.cluster.GetAPIReader() +} + +func (cm *controllerManager) GetWebhookServer() *webhook.Server { + cm.webhookServerOnce.Do(func() { + if cm.webhookServer == nil { + cm.webhookServer = &webhook.Server{ + Port: cm.port, + Host: cm.host, + CertDir: cm.certDir, + } + } + if err := cm.Add(cm.webhookServer); err != nil { + panic(fmt.Sprintf("unable to add webhook server to the controller manager: %s", err)) + } + }) + return cm.webhookServer +} + +func (cm *controllerManager) GetLogger() logr.Logger { + return cm.logger +} + +func (cm *controllerManager) GetControllerOptions() 
v1alpha1.ControllerConfigurationSpec { + return cm.controllerOptions +} + +func (cm *controllerManager) serveMetrics() { + handler := promhttp.HandlerFor(metrics.Registry, promhttp.HandlerOpts{ + ErrorHandling: promhttp.HTTPErrorOnError, + }) + // TODO(JoelSpeed): Use existing Kubernetes machinery for serving metrics + mux := http.NewServeMux() + mux.Handle(defaultMetricsEndpoint, handler) + for path, extraHandler := range cm.metricsExtraHandlers { + mux.Handle(path, extraHandler) + } + + server := httpserver.New(mux) + go cm.httpServe("metrics", cm.logger.WithValues("path", defaultMetricsEndpoint), server, cm.metricsListener) +} + +func (cm *controllerManager) serveHealthProbes() { + mux := http.NewServeMux() + server := httpserver.New(mux) + + if cm.readyzHandler != nil { + mux.Handle(cm.readinessEndpointName, http.StripPrefix(cm.readinessEndpointName, cm.readyzHandler)) + // Append '/' suffix to handle subpaths + mux.Handle(cm.readinessEndpointName+"/", http.StripPrefix(cm.readinessEndpointName, cm.readyzHandler)) + } + if cm.healthzHandler != nil { + mux.Handle(cm.livenessEndpointName, http.StripPrefix(cm.livenessEndpointName, cm.healthzHandler)) + // Append '/' suffix to handle subpaths + mux.Handle(cm.livenessEndpointName+"/", http.StripPrefix(cm.livenessEndpointName, cm.healthzHandler)) + } + + go cm.httpServe("health probe", cm.logger, server, cm.healthProbeListener) +} + +func (cm *controllerManager) httpServe(kind string, log logr.Logger, server *http.Server, ln net.Listener) { + log = log.WithValues("kind", kind, "addr", ln.Addr()) + + go func() { + log.Info("Starting server") + if err := server.Serve(ln); err != nil { + if errors.Is(err, http.ErrServerClosed) { + return + } + if atomic.LoadInt64(cm.stopProcedureEngaged) > 0 { + // There might be cases where connections are still open and we try to shutdown + // but not having enough time to close the connection causes an error in Serve + // + // In that case we want to avoid returning an error to the main error channel. + log.Error(err, "error on Serve after stop has been engaged") + return + } + cm.errChan <- err + } + }() + + // Shutdown the server when stop is closed. + <-cm.internalProceduresStop + if err := server.Shutdown(cm.shutdownCtx); err != nil { + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + // Avoid logging context related errors. + return + } + if atomic.LoadInt64(cm.stopProcedureEngaged) > 0 { + cm.logger.Error(err, "error on Shutdown after stop has been engaged") + return + } + cm.errChan <- err + } +} + +// Start starts the manager and waits indefinitely. +// There is only two ways to have start return: +// An error has occurred during in one of the internal operations, +// such as leader election, cache start, webhooks, and so on. +// Or, the context is cancelled. +func (cm *controllerManager) Start(ctx context.Context) (err error) { + cm.Lock() + if cm.started { + cm.Unlock() + return errors.New("manager already started") + } + var ready bool + defer func() { + // Only unlock the manager if we haven't reached + // the internal readiness condition. + if !ready { + cm.Unlock() + } + }() + + // Initialize the internal context. + cm.internalCtx, cm.internalCancel = context.WithCancel(ctx) + + // This chan indicates that stop is complete, in other words all runnables have returned or timeout on stop request + stopComplete := make(chan struct{}) + defer close(stopComplete) + // This must be deferred after closing stopComplete, otherwise we deadlock. 
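// Illustrative sketch (assumed usage, not part of the vendored change) of how the
// probe and extra-handler registration described above is consumed by callers;
// mgr is assumed to be an already constructed Manager and the pprof path is only
// an example.
package main

import (
	"net/http"
	"net/http/pprof"

	"sigs.k8s.io/controller-runtime/pkg/healthz"
	"sigs.k8s.io/controller-runtime/pkg/manager"
)

// registerProbes adds trivial liveness/readiness checks and an extra pprof
// handler on the metrics server; all of this must happen before Start().
func registerProbes(mgr manager.Manager) error {
	if err := mgr.AddHealthzCheck("ping", healthz.Ping); err != nil {
		return err
	}
	if err := mgr.AddReadyzCheck("ping", healthz.Ping); err != nil {
		return err
	}
	return mgr.AddMetricsExtraHandler("/debug/pprof/", http.HandlerFunc(pprof.Index))
}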
+ defer func() { + // https://hips.hearstapps.com/hmg-prod.s3.amazonaws.com/images/gettyimages-459889618-1533579787.jpg + stopErr := cm.engageStopProcedure(stopComplete) + if stopErr != nil { + if err != nil { + // Utilerrors.Aggregate allows to use errors.Is for all contained errors + // whereas fmt.Errorf allows wrapping at most one error which means the + // other one can not be found anymore. + err = kerrors.NewAggregate([]error{err, stopErr}) + } else { + err = stopErr + } + } + }() + + // Add the cluster runnable. + if err := cm.add(cm.cluster); err != nil { + return fmt.Errorf("failed to add cluster to runnables: %w", err) + } + + // Metrics should be served whether the controller is leader or not. + // (If we don't serve metrics for non-leaders, prometheus will still scrape + // the pod but will get a connection refused). + if cm.metricsListener != nil { + cm.serveMetrics() + } + + // Serve health probes. + if cm.healthProbeListener != nil { + cm.serveHealthProbes() + } + + // First start any webhook servers, which includes conversion, validation, and defaulting + // webhooks that are registered. + // + // WARNING: Webhooks MUST start before any cache is populated, otherwise there is a race condition + // between conversion webhooks and the cache sync (usually initial list) which causes the webhooks + // to never start because no cache can be populated. + if err := cm.runnables.Webhooks.Start(cm.internalCtx); err != nil { + if !errors.Is(err, wait.ErrWaitTimeout) { + return err + } + } + + // Start and wait for caches. + if err := cm.runnables.Caches.Start(cm.internalCtx); err != nil { + if !errors.Is(err, wait.ErrWaitTimeout) { + return err + } + } + + // Start the non-leaderelection Runnables after the cache has synced. + if err := cm.runnables.Others.Start(cm.internalCtx); err != nil { + if !errors.Is(err, wait.ErrWaitTimeout) { + return err + } + } + + // Start the leader election and all required runnables. + { + ctx, cancel := context.WithCancel(context.Background()) + cm.leaderElectionCancel = cancel + go func() { + if cm.resourceLock != nil { + if err := cm.startLeaderElection(ctx); err != nil { + cm.errChan <- err + } + } else { + // Treat not having leader election enabled the same as being elected. + if err := cm.startLeaderElectionRunnables(); err != nil { + cm.errChan <- err + } + close(cm.elected) + } + }() + } + + ready = true + cm.Unlock() + select { + case <-ctx.Done(): + // We are done + return nil + case err := <-cm.errChan: + // Error starting or running a runnable + return err + } +} + +// engageStopProcedure signals all runnables to stop, reads potential errors +// from the errChan and waits for them to end. It must not be called more than once. +func (cm *controllerManager) engageStopProcedure(stopComplete <-chan struct{}) error { + if !atomic.CompareAndSwapInt64(cm.stopProcedureEngaged, 0, 1) { + return errors.New("stop procedure already engaged") + } + + // Populate the shutdown context, this operation MUST be done before + // closing the internalProceduresStop channel. + // + // The shutdown context immediately expires if the gracefulShutdownTimeout is not set. + var shutdownCancel context.CancelFunc + cm.shutdownCtx, shutdownCancel = context.WithTimeout(context.Background(), cm.gracefulShutdownTimeout) + defer shutdownCancel() + + // Start draining the errors before acquiring the lock to make sure we don't deadlock + // if something that has the lock is blocked on trying to write into the unbuffered + // channel after something else already wrote into it. 
+ var closeOnce sync.Once + go func() { + for { + // Closing in the for loop is required to avoid race conditions between + // the closure of all internal procedures and making sure to have a reader off the error channel. + closeOnce.Do(func() { + // Cancel the internal stop channel and wait for the procedures to stop and complete. + close(cm.internalProceduresStop) + cm.internalCancel() + }) + select { + case err, ok := <-cm.errChan: + if ok { + cm.logger.Error(err, "error received after stop sequence was engaged") + } + case <-stopComplete: + return + } + } + }() + + // We want to close this after the other runnables stop, because we don't + // want things like leader election to try and emit events on a closed + // channel + defer cm.recorderProvider.Stop(cm.shutdownCtx) + defer func() { + // Cancel leader election only after we waited. It will os.Exit() the app for safety. + if cm.resourceLock != nil { + // After asking the context to be cancelled, make sure + // we wait for the leader stopped channel to be closed, otherwise + // we might encounter race conditions between this code + // and the event recorder, which is used within leader election code. + cm.leaderElectionCancel() + <-cm.leaderElectionStopped + } + }() + + go func() { + // First stop the non-leader election runnables. + cm.logger.Info("Stopping and waiting for non leader election runnables") + cm.runnables.Others.StopAndWait(cm.shutdownCtx) + + // Stop all the leader election runnables, which includes reconcilers. + cm.logger.Info("Stopping and waiting for leader election runnables") + cm.runnables.LeaderElection.StopAndWait(cm.shutdownCtx) + + // Stop the caches before the leader election runnables, this is an important + // step to make sure that we don't race with the reconcilers by receiving more events + // from the API servers and enqueueing them. + cm.logger.Info("Stopping and waiting for caches") + cm.runnables.Caches.StopAndWait(cm.shutdownCtx) + + // Webhooks should come last, as they might be still serving some requests. + cm.logger.Info("Stopping and waiting for webhooks") + cm.runnables.Webhooks.StopAndWait(cm.shutdownCtx) + + // Proceed to close the manager and overall shutdown context. + cm.logger.Info("Wait completed, proceeding to shutdown the manager") + shutdownCancel() + }() + + <-cm.shutdownCtx.Done() + if err := cm.shutdownCtx.Err(); err != nil && !errors.Is(err, context.Canceled) { + if errors.Is(err, context.DeadlineExceeded) { + if cm.gracefulShutdownTimeout > 0 { + return fmt.Errorf("failed waiting for all runnables to end within grace period of %s: %w", cm.gracefulShutdownTimeout, err) + } + return nil + } + // For any other error, return the error. + return err + } + + return nil +} + +func (cm *controllerManager) startLeaderElectionRunnables() error { + return cm.runnables.LeaderElection.Start(cm.internalCtx) +} + +func (cm *controllerManager) startLeaderElection(ctx context.Context) (err error) { + l, err := leaderelection.NewLeaderElector(leaderelection.LeaderElectionConfig{ + Lock: cm.resourceLock, + LeaseDuration: cm.leaseDuration, + RenewDeadline: cm.renewDeadline, + RetryPeriod: cm.retryPeriod, + Callbacks: leaderelection.LeaderCallbacks{ + OnStartedLeading: func(_ context.Context) { + if err := cm.startLeaderElectionRunnables(); err != nil { + cm.errChan <- err + return + } + close(cm.elected) + }, + OnStoppedLeading: func() { + if cm.onStoppedLeading != nil { + cm.onStoppedLeading() + } + // Make sure graceful shutdown is skipped if we lost the leader lock without + // intending to. 
+ cm.gracefulShutdownTimeout = time.Duration(0) + // Most implementations of leader election log.Fatal() here. + // Since Start is wrapped in log.Fatal when called, we can just return + // an error here which will cause the program to exit. + cm.errChan <- errors.New("leader election lost") + }, + }, + ReleaseOnCancel: cm.leaderElectionReleaseOnCancel, + }) + if err != nil { + return err + } + + // Start the leader elector process + go func() { + l.Run(ctx) + <-ctx.Done() + close(cm.leaderElectionStopped) + }() + return nil +} + +func (cm *controllerManager) Elected() <-chan struct{} { + return cm.elected +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go new file mode 100644 index 000000000..5f76b5a83 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go @@ -0,0 +1,620 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package manager + +import ( + "context" + "fmt" + "net" + "net/http" + "reflect" + "time" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/leaderelection/resourcelock" + "k8s.io/client-go/tools/record" + "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/cluster" + "sigs.k8s.io/controller-runtime/pkg/config" + "sigs.k8s.io/controller-runtime/pkg/config/v1alpha1" + "sigs.k8s.io/controller-runtime/pkg/healthz" + intrec "sigs.k8s.io/controller-runtime/pkg/internal/recorder" + "sigs.k8s.io/controller-runtime/pkg/leaderelection" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/metrics" + "sigs.k8s.io/controller-runtime/pkg/recorder" + "sigs.k8s.io/controller-runtime/pkg/runtime/inject" + "sigs.k8s.io/controller-runtime/pkg/webhook" +) + +// Manager initializes shared dependencies such as Caches and Clients, and provides them to Runnables. +// A Manager is required to create Controllers. +type Manager interface { + // Cluster holds a variety of methods to interact with a cluster. + cluster.Cluster + + // Add will set requested dependencies on the component, and cause the component to be + // started when Start is called. Add will inject any dependencies for which the argument + // implements the inject interface - e.g. inject.Client. + // Depending on if a Runnable implements LeaderElectionRunnable interface, a Runnable can be run in either + // non-leaderelection mode (always running) or leader election mode (managed by leader election if enabled). + Add(Runnable) error + + // Elected is closed when this manager is elected leader of a group of + // managers, either because it won a leader election or because no leader + // election was configured. 
+ Elected() <-chan struct{} + + // AddMetricsExtraHandler adds an extra handler served on path to the http server that serves metrics. + // Might be useful to register some diagnostic endpoints e.g. pprof. Note that these endpoints meant to be + // sensitive and shouldn't be exposed publicly. + // If the simple path -> handler mapping offered here is not enough, a new http server/listener should be added as + // Runnable to the manager via Add method. + AddMetricsExtraHandler(path string, handler http.Handler) error + + // AddHealthzCheck allows you to add Healthz checker + AddHealthzCheck(name string, check healthz.Checker) error + + // AddReadyzCheck allows you to add Readyz checker + AddReadyzCheck(name string, check healthz.Checker) error + + // Start starts all registered Controllers and blocks until the context is cancelled. + // Returns an error if there is an error starting any controller. + // + // If LeaderElection is used, the binary must be exited immediately after this returns, + // otherwise components that need leader election might continue to run after the leader + // lock was lost. + Start(ctx context.Context) error + + // GetWebhookServer returns a webhook.Server + GetWebhookServer() *webhook.Server + + // GetLogger returns this manager's logger. + GetLogger() logr.Logger + + // GetControllerOptions returns controller global configuration options. + GetControllerOptions() v1alpha1.ControllerConfigurationSpec +} + +// Options are the arguments for creating a new Manager. +type Options struct { + // Scheme is the scheme used to resolve runtime.Objects to GroupVersionKinds / Resources. + // Defaults to the kubernetes/client-go scheme.Scheme, but it's almost always better + // to pass your own scheme in. See the documentation in pkg/scheme for more information. + Scheme *runtime.Scheme + + // MapperProvider provides the rest mapper used to map go types to Kubernetes APIs + MapperProvider func(c *rest.Config) (meta.RESTMapper, error) + + // SyncPeriod determines the minimum frequency at which watched resources are + // reconciled. A lower period will correct entropy more quickly, but reduce + // responsiveness to change if there are many watched resources. Change this + // value only if you know what you are doing. Defaults to 10 hours if unset. + // there will a 10 percent jitter between the SyncPeriod of all controllers + // so that all controllers will not send list requests simultaneously. + // + // This applies to all controllers. + // + // A period sync happens for two reasons: + // 1. To insure against a bug in the controller that causes an object to not + // be requeued, when it otherwise should be requeued. + // 2. To insure against an unknown bug in controller-runtime, or its dependencies, + // that causes an object to not be requeued, when it otherwise should be + // requeued, or to be removed from the queue, when it otherwise should not + // be removed. + // + // If you want + // 1. to insure against missed watch events, or + // 2. to poll services that cannot be watched, + // then we recommend that, instead of changing the default period, the + // controller requeue, with a constant duration `t`, whenever the controller + // is "done" with an object, and would otherwise not requeue it, i.e., we + // recommend the `Reconcile` function return `reconcile.Result{RequeueAfter: t}`, + // instead of `reconcile.Result{}`. + SyncPeriod *time.Duration + + // Logger is the logger that should be used by this manager. + // If none is set, it defaults to log.Log global logger. 
+ Logger logr.Logger + + // LeaderElection determines whether or not to use leader election when + // starting the manager. + LeaderElection bool + + // LeaderElectionResourceLock determines which resource lock to use for leader election, + // defaults to "configmapsleases". Change this value only if you know what you are doing. + // Otherwise, users of your controller might end up with multiple running instances that + // each acquired leadership through different resource locks during upgrades and thus + // act on the same resources concurrently. + // If you want to migrate to the "leases" resource lock, you might do so by migrating to the + // respective multilock first ("configmapsleases" or "endpointsleases"), which will acquire a + // leader lock on both resources. After all your users have migrated to the multilock, you can + // go ahead and migrate to "leases". Please also keep in mind, that users might skip versions + // of your controller. + // + // Note: before controller-runtime version v0.7, the resource lock was set to "configmaps". + // Please keep this in mind, when planning a proper migration path for your controller. + LeaderElectionResourceLock string + + // LeaderElectionNamespace determines the namespace in which the leader + // election resource will be created. + LeaderElectionNamespace string + + // LeaderElectionID determines the name of the resource that leader election + // will use for holding the leader lock. + LeaderElectionID string + + // LeaderElectionConfig can be specified to override the default configuration + // that is used to build the leader election client. + LeaderElectionConfig *rest.Config + + // LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily + // when the Manager ends. This requires the binary to immediately end when the + // Manager is stopped, otherwise this setting is unsafe. Setting this significantly + // speeds up voluntary leader transitions as the new leader doesn't have to wait + // LeaseDuration time first. + LeaderElectionReleaseOnCancel bool + + // LeaseDuration is the duration that non-leader candidates will + // wait to force acquire leadership. This is measured against time of + // last observed ack. Default is 15 seconds. + LeaseDuration *time.Duration + // RenewDeadline is the duration that the acting controlplane will retry + // refreshing leadership before giving up. Default is 10 seconds. + RenewDeadline *time.Duration + // RetryPeriod is the duration the LeaderElector clients should wait + // between tries of actions. Default is 2 seconds. + RetryPeriod *time.Duration + + // Namespace, if specified, restricts the manager's cache to watch objects in + // the desired namespace. Defaults to all namespaces. + // + // Note: If a namespace is specified, controllers can still Watch for a + // cluster-scoped resource (e.g Node). For namespaced resources, the cache + // will only hold objects from the desired namespace. + Namespace string + + // MetricsBindAddress is the TCP address that the controller should bind to + // for serving prometheus metrics. + // It can be set to "0" to disable the metrics serving. 
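// Illustrative sketch (assumed usage, not part of the vendored change) of the leader
// election fields above being filled in; the ID and namespace are placeholders.
package main

import (
	"k8s.io/client-go/tools/leaderelection/resourcelock"

	"sigs.k8s.io/controller-runtime/pkg/manager"
)

// leaderElectionOptions opts into leader election using the "leases" lock.
func leaderElectionOptions() manager.Options {
	return manager.Options{
		LeaderElection:             true,
		LeaderElectionResourceLock: resourcelock.LeasesResourceLock,
		LeaderElectionID:           "example-controller-leader", // placeholder
		LeaderElectionNamespace:    "example-namespace",         // placeholder
	}
}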
+ MetricsBindAddress string + + // HealthProbeBindAddress is the TCP address that the controller should bind to + // for serving health probes + HealthProbeBindAddress string + + // Readiness probe endpoint name, defaults to "readyz" + ReadinessEndpointName string + + // Liveness probe endpoint name, defaults to "healthz" + LivenessEndpointName string + + // Port is the port that the webhook server serves at. + // It is used to set webhook.Server.Port if WebhookServer is not set. + Port int + // Host is the hostname that the webhook server binds to. + // It is used to set webhook.Server.Host if WebhookServer is not set. + Host string + + // CertDir is the directory that contains the server key and certificate. + // If not set, webhook server would look up the server key and certificate in + // {TempDir}/k8s-webhook-server/serving-certs. The server key and certificate + // must be named tls.key and tls.crt, respectively. + // It is used to set webhook.Server.CertDir if WebhookServer is not set. + CertDir string + + // WebhookServer is an externally configured webhook.Server. By default, + // a Manager will create a default server using Port, Host, and CertDir; + // if this is set, the Manager will use this server instead. + WebhookServer *webhook.Server + + // Functions to allow for a user to customize values that will be injected. + + // NewCache is the function that will create the cache to be used + // by the manager. If not set this will use the default new cache function. + NewCache cache.NewCacheFunc + + // NewClient is the func that creates the client to be used by the manager. + // If not set this will create the default DelegatingClient that will + // use the cache for reads and the client for writes. + NewClient cluster.NewClientFunc + + // BaseContext is the function that provides Context values to Runnables + // managed by the Manager. If a BaseContext function isn't provided, Runnables + // will receive a new Background Context instead. + BaseContext BaseContextFunc + + // ClientDisableCacheFor tells the client that, if any cache is used, to bypass it + // for the given objects. + ClientDisableCacheFor []client.Object + + // DryRunClient specifies whether the client should be configured to enforce + // dryRun mode. + DryRunClient bool + + // EventBroadcaster records Events emitted by the manager and sends them to the Kubernetes API + // Use this to customize the event correlator and spam filter + // + // Deprecated: using this may cause goroutine leaks if the lifetime of your manager or controllers + // is shorter than the lifetime of your process. + EventBroadcaster record.EventBroadcaster + + // GracefulShutdownTimeout is the duration given to runnable to stop before the manager actually returns on stop. + // To disable graceful shutdown, set to time.Duration(0) + // To use graceful shutdown without timeout, set to a negative duration, e.G. time.Duration(-1) + // The graceful shutdown is skipped for safety reasons in case the leader election lease is lost. + GracefulShutdownTimeout *time.Duration + + // Controller contains global configuration options for controllers + // registered within this manager. + // +optional + Controller v1alpha1.ControllerConfigurationSpec + + // makeBroadcaster allows deferring the creation of the broadcaster to + // avoid leaking goroutines if we never call Start on this manager. It also + // returns whether or not this is a "owned" broadcaster, and as such should be + // stopped with the manager. 
+ makeBroadcaster intrec.EventBroadcasterProducer + + // Dependency injection for testing + newRecorderProvider func(config *rest.Config, scheme *runtime.Scheme, logger logr.Logger, makeBroadcaster intrec.EventBroadcasterProducer) (*intrec.Provider, error) + newResourceLock func(config *rest.Config, recorderProvider recorder.Provider, options leaderelection.Options) (resourcelock.Interface, error) + newMetricsListener func(addr string) (net.Listener, error) + newHealthProbeListener func(addr string) (net.Listener, error) +} + +// BaseContextFunc is a function used to provide a base Context to Runnables +// managed by a Manager. +type BaseContextFunc func() context.Context + +// Runnable allows a component to be started. +// It's very important that Start blocks until +// it's done running. +type Runnable interface { + // Start starts running the component. The component will stop running + // when the context is closed. Start blocks until the context is closed or + // an error occurs. + Start(context.Context) error +} + +// RunnableFunc implements Runnable using a function. +// It's very important that the given function block +// until it's done running. +type RunnableFunc func(context.Context) error + +// Start implements Runnable. +func (r RunnableFunc) Start(ctx context.Context) error { + return r(ctx) +} + +// LeaderElectionRunnable knows if a Runnable needs to be run in the leader election mode. +type LeaderElectionRunnable interface { + // NeedLeaderElection returns true if the Runnable needs to be run in the leader election mode. + // e.g. controllers need to be run in leader election mode, while webhook server doesn't. + NeedLeaderElection() bool +} + +// New returns a new Manager for creating Controllers. +func New(config *rest.Config, options Options) (Manager, error) { + // Set default values for options fields + options = setOptionsDefaults(options) + + cluster, err := cluster.New(config, func(clusterOptions *cluster.Options) { + clusterOptions.Scheme = options.Scheme + clusterOptions.MapperProvider = options.MapperProvider + clusterOptions.Logger = options.Logger + clusterOptions.SyncPeriod = options.SyncPeriod + clusterOptions.Namespace = options.Namespace + clusterOptions.NewCache = options.NewCache + clusterOptions.NewClient = options.NewClient + clusterOptions.ClientDisableCacheFor = options.ClientDisableCacheFor + clusterOptions.DryRunClient = options.DryRunClient + clusterOptions.EventBroadcaster = options.EventBroadcaster //nolint:staticcheck + }) + if err != nil { + return nil, err + } + + // Create the recorder provider to inject event recorders for the components. + // TODO(directxman12): the log for the event provider should have a context (name, tags, etc) specific + // to the particular controller that it's being injected into, rather than a generic one like is here. 
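// Illustrative sketch (assumed usage, not part of the vendored change) of the
// Runnable contract above: a RunnableFunc added to an assumed, already constructed
// manager; Start blocks until the context is cancelled.
package main

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/manager"
)

// addBackgroundTask registers a long-running task with the manager.
func addBackgroundTask(mgr manager.Manager) error {
	return mgr.Add(manager.RunnableFunc(func(ctx context.Context) error {
		// Do background work here; return only when the context is done.
		<-ctx.Done()
		return nil
	}))
}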
+ recorderProvider, err := options.newRecorderProvider(config, cluster.GetScheme(), options.Logger.WithName("events"), options.makeBroadcaster) + if err != nil { + return nil, err + } + + // Create the resource lock to enable leader election) + var leaderConfig *rest.Config + var leaderRecorderProvider *intrec.Provider + + if options.LeaderElectionConfig == nil { + leaderConfig = rest.CopyConfig(config) + leaderRecorderProvider = recorderProvider + } else { + leaderConfig = rest.CopyConfig(options.LeaderElectionConfig) + leaderRecorderProvider, err = options.newRecorderProvider(leaderConfig, cluster.GetScheme(), options.Logger.WithName("events"), options.makeBroadcaster) + if err != nil { + return nil, err + } + } + + resourceLock, err := options.newResourceLock(leaderConfig, leaderRecorderProvider, leaderelection.Options{ + LeaderElection: options.LeaderElection, + LeaderElectionResourceLock: options.LeaderElectionResourceLock, + LeaderElectionID: options.LeaderElectionID, + LeaderElectionNamespace: options.LeaderElectionNamespace, + }) + if err != nil { + return nil, err + } + + // Create the metrics listener. This will throw an error if the metrics bind + // address is invalid or already in use. + metricsListener, err := options.newMetricsListener(options.MetricsBindAddress) + if err != nil { + return nil, err + } + + // By default we have no extra endpoints to expose on metrics http server. + metricsExtraHandlers := make(map[string]http.Handler) + + // Create health probes listener. This will throw an error if the bind + // address is invalid or already in use. + healthProbeListener, err := options.newHealthProbeListener(options.HealthProbeBindAddress) + if err != nil { + return nil, err + } + + errChan := make(chan error) + runnables := newRunnables(options.BaseContext, errChan) + + return &controllerManager{ + stopProcedureEngaged: pointer.Int64(0), + cluster: cluster, + runnables: runnables, + errChan: errChan, + recorderProvider: recorderProvider, + resourceLock: resourceLock, + metricsListener: metricsListener, + metricsExtraHandlers: metricsExtraHandlers, + controllerOptions: options.Controller, + logger: options.Logger, + elected: make(chan struct{}), + port: options.Port, + host: options.Host, + certDir: options.CertDir, + webhookServer: options.WebhookServer, + leaseDuration: *options.LeaseDuration, + renewDeadline: *options.RenewDeadline, + retryPeriod: *options.RetryPeriod, + healthProbeListener: healthProbeListener, + readinessEndpointName: options.ReadinessEndpointName, + livenessEndpointName: options.LivenessEndpointName, + gracefulShutdownTimeout: *options.GracefulShutdownTimeout, + internalProceduresStop: make(chan struct{}), + leaderElectionStopped: make(chan struct{}), + leaderElectionReleaseOnCancel: options.LeaderElectionReleaseOnCancel, + }, nil +} + +// AndFrom will use a supplied type and convert to Options +// any options already set on Options will be ignored, this is used to allow +// cli flags to override anything specified in the config file. 
+func (o Options) AndFrom(loader config.ControllerManagerConfiguration) (Options, error) { + if inj, wantsScheme := loader.(inject.Scheme); wantsScheme { + err := inj.InjectScheme(o.Scheme) + if err != nil { + return o, err + } + } + + newObj, err := loader.Complete() + if err != nil { + return o, err + } + + o = o.setLeaderElectionConfig(newObj) + + if o.SyncPeriod == nil && newObj.SyncPeriod != nil { + o.SyncPeriod = &newObj.SyncPeriod.Duration + } + + if o.Namespace == "" && newObj.CacheNamespace != "" { + o.Namespace = newObj.CacheNamespace + } + + if o.MetricsBindAddress == "" && newObj.Metrics.BindAddress != "" { + o.MetricsBindAddress = newObj.Metrics.BindAddress + } + + if o.HealthProbeBindAddress == "" && newObj.Health.HealthProbeBindAddress != "" { + o.HealthProbeBindAddress = newObj.Health.HealthProbeBindAddress + } + + if o.ReadinessEndpointName == "" && newObj.Health.ReadinessEndpointName != "" { + o.ReadinessEndpointName = newObj.Health.ReadinessEndpointName + } + + if o.LivenessEndpointName == "" && newObj.Health.LivenessEndpointName != "" { + o.LivenessEndpointName = newObj.Health.LivenessEndpointName + } + + if o.Port == 0 && newObj.Webhook.Port != nil { + o.Port = *newObj.Webhook.Port + } + + if o.Host == "" && newObj.Webhook.Host != "" { + o.Host = newObj.Webhook.Host + } + + if o.CertDir == "" && newObj.Webhook.CertDir != "" { + o.CertDir = newObj.Webhook.CertDir + } + + if newObj.Controller != nil { + if o.Controller.CacheSyncTimeout == nil && newObj.Controller.CacheSyncTimeout != nil { + o.Controller.CacheSyncTimeout = newObj.Controller.CacheSyncTimeout + } + + if len(o.Controller.GroupKindConcurrency) == 0 && len(newObj.Controller.GroupKindConcurrency) > 0 { + o.Controller.GroupKindConcurrency = newObj.Controller.GroupKindConcurrency + } + } + + return o, nil +} + +// AndFromOrDie will use options.AndFrom() and will panic if there are errors. +func (o Options) AndFromOrDie(loader config.ControllerManagerConfiguration) Options { + o, err := o.AndFrom(loader) + if err != nil { + panic(fmt.Sprintf("could not parse config file: %v", err)) + } + return o +} + +func (o Options) setLeaderElectionConfig(obj v1alpha1.ControllerManagerConfigurationSpec) Options { + if obj.LeaderElection == nil { + // The source does not have any configuration; noop + return o + } + + if !o.LeaderElection && obj.LeaderElection.LeaderElect != nil { + o.LeaderElection = *obj.LeaderElection.LeaderElect + } + + if o.LeaderElectionResourceLock == "" && obj.LeaderElection.ResourceLock != "" { + o.LeaderElectionResourceLock = obj.LeaderElection.ResourceLock + } + + if o.LeaderElectionNamespace == "" && obj.LeaderElection.ResourceNamespace != "" { + o.LeaderElectionNamespace = obj.LeaderElection.ResourceNamespace + } + + if o.LeaderElectionID == "" && obj.LeaderElection.ResourceName != "" { + o.LeaderElectionID = obj.LeaderElection.ResourceName + } + + if o.LeaseDuration == nil && !reflect.DeepEqual(obj.LeaderElection.LeaseDuration, metav1.Duration{}) { + o.LeaseDuration = &obj.LeaderElection.LeaseDuration.Duration + } + + if o.RenewDeadline == nil && !reflect.DeepEqual(obj.LeaderElection.RenewDeadline, metav1.Duration{}) { + o.RenewDeadline = &obj.LeaderElection.RenewDeadline.Duration + } + + if o.RetryPeriod == nil && !reflect.DeepEqual(obj.LeaderElection.RetryPeriod, metav1.Duration{}) { + o.RetryPeriod = &obj.LeaderElection.RetryPeriod.Duration + } + + return o +} + +// defaultHealthProbeListener creates the default health probes listener bound to the given address. 
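// Illustrative sketch (assumed usage, not part of the vendored change) of AndFrom
// layering a component config file under programmatic options; the file path and
// the top-level ctrl.ConfigFile helper are assumptions for the example.
package main

import (
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/manager"
)

// optionsFromFile fills any unset Options fields from a config file; values
// already set programmatically take precedence, per AndFrom's semantics.
func optionsFromFile() (manager.Options, error) {
	opts := manager.Options{
		LeaderElectionID: "example-controller-leader", // placeholder, wins over the file
	}
	return opts.AndFrom(ctrl.ConfigFile().AtPath("/etc/controller/config.yaml"))
}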
+func defaultHealthProbeListener(addr string) (net.Listener, error) { + if addr == "" || addr == "0" { + return nil, nil + } + + ln, err := net.Listen("tcp", addr) + if err != nil { + return nil, fmt.Errorf("error listening on %s: %w", addr, err) + } + return ln, nil +} + +// defaultBaseContext is used as the BaseContext value in Options if one +// has not already been set. +func defaultBaseContext() context.Context { + return context.Background() +} + +// setOptionsDefaults set default values for Options fields. +func setOptionsDefaults(options Options) Options { + // Allow newResourceLock to be mocked + if options.newResourceLock == nil { + options.newResourceLock = leaderelection.NewResourceLock + } + + // Allow newRecorderProvider to be mocked + if options.newRecorderProvider == nil { + options.newRecorderProvider = intrec.NewProvider + } + + // This is duplicated with pkg/cluster, we need it here + // for the leader election and there to provide the user with + // an EventBroadcaster + if options.EventBroadcaster == nil { + // defer initialization to avoid leaking by default + options.makeBroadcaster = func() (record.EventBroadcaster, bool) { + return record.NewBroadcaster(), true + } + } else { + options.makeBroadcaster = func() (record.EventBroadcaster, bool) { + return options.EventBroadcaster, false + } + } + + if options.newMetricsListener == nil { + options.newMetricsListener = metrics.NewListener + } + leaseDuration, renewDeadline, retryPeriod := defaultLeaseDuration, defaultRenewDeadline, defaultRetryPeriod + if options.LeaseDuration == nil { + options.LeaseDuration = &leaseDuration + } + + if options.RenewDeadline == nil { + options.RenewDeadline = &renewDeadline + } + + if options.RetryPeriod == nil { + options.RetryPeriod = &retryPeriod + } + + if options.ReadinessEndpointName == "" { + options.ReadinessEndpointName = defaultReadinessEndpoint + } + + if options.LivenessEndpointName == "" { + options.LivenessEndpointName = defaultLivenessEndpoint + } + + if options.newHealthProbeListener == nil { + options.newHealthProbeListener = defaultHealthProbeListener + } + + if options.GracefulShutdownTimeout == nil { + gracefulShutdownTimeout := defaultGracefulShutdownPeriod + options.GracefulShutdownTimeout = &gracefulShutdownTimeout + } + + if options.Logger.GetSink() == nil { + options.Logger = log.Log + } + + if options.BaseContext == nil { + options.BaseContext = defaultBaseContext + } + + return options +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/runnable_group.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/runnable_group.go new file mode 100644 index 000000000..f7b91a209 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/runnable_group.go @@ -0,0 +1,297 @@ +package manager + +import ( + "context" + "errors" + "sync" + + "sigs.k8s.io/controller-runtime/pkg/webhook" +) + +var ( + errRunnableGroupStopped = errors.New("can't accept new runnable as stop procedure is already engaged") +) + +// readyRunnable encapsulates a runnable with +// a ready check. +type readyRunnable struct { + Runnable + Check runnableCheck + signalReady bool +} + +// runnableCheck can be passed to Add() to let the runnable group determine that a +// runnable is ready. A runnable check should block until a runnable is ready, +// if the returned result is false, the runnable is considered not ready and failed. 
+type runnableCheck func(ctx context.Context) bool + +// runnables handles all the runnables for a manager by grouping them accordingly to their +// type (webhooks, caches etc.). +type runnables struct { + Webhooks *runnableGroup + Caches *runnableGroup + LeaderElection *runnableGroup + Others *runnableGroup +} + +// newRunnables creates a new runnables object. +func newRunnables(baseContext BaseContextFunc, errChan chan error) *runnables { + return &runnables{ + Webhooks: newRunnableGroup(baseContext, errChan), + Caches: newRunnableGroup(baseContext, errChan), + LeaderElection: newRunnableGroup(baseContext, errChan), + Others: newRunnableGroup(baseContext, errChan), + } +} + +// Add adds a runnable to closest group of runnable that they belong to. +// +// Add should be able to be called before and after Start, but not after StopAndWait. +// Add should return an error when called during StopAndWait. +// The runnables added before Start are started when Start is called. +// The runnables added after Start are started directly. +func (r *runnables) Add(fn Runnable) error { + switch runnable := fn.(type) { + case hasCache: + return r.Caches.Add(fn, func(ctx context.Context) bool { + return runnable.GetCache().WaitForCacheSync(ctx) + }) + case *webhook.Server: + return r.Webhooks.Add(fn, nil) + case LeaderElectionRunnable: + if !runnable.NeedLeaderElection() { + return r.Others.Add(fn, nil) + } + return r.LeaderElection.Add(fn, nil) + default: + return r.LeaderElection.Add(fn, nil) + } +} + +// runnableGroup manages a group of runnables that are +// meant to be running together until StopAndWait is called. +// +// Runnables can be added to a group after the group has started +// but not after it's stopped or while shutting down. +type runnableGroup struct { + ctx context.Context + cancel context.CancelFunc + + start sync.Mutex + startOnce sync.Once + started bool + startQueue []*readyRunnable + startReadyCh chan *readyRunnable + + stop sync.RWMutex + stopOnce sync.Once + stopped bool + + // errChan is the error channel passed by the caller + // when the group is created. + // All errors are forwarded to this channel once they occur. + errChan chan error + + // ch is the internal channel where the runnables are read off from. + ch chan *readyRunnable + + // wg is an internal sync.WaitGroup that allows us to properly stop + // and wait for all the runnables to finish before returning. + wg *sync.WaitGroup +} + +func newRunnableGroup(baseContext BaseContextFunc, errChan chan error) *runnableGroup { + r := &runnableGroup{ + startReadyCh: make(chan *readyRunnable), + errChan: errChan, + ch: make(chan *readyRunnable), + wg: new(sync.WaitGroup), + } + + r.ctx, r.cancel = context.WithCancel(baseContext()) + return r +} + +// Started returns true if the group has started. +func (r *runnableGroup) Started() bool { + r.start.Lock() + defer r.start.Unlock() + return r.started +} + +// Start starts the group and waits for all +// initially registered runnables to start. +// It can only be called once, subsequent calls have no effect. +func (r *runnableGroup) Start(ctx context.Context) error { + var retErr error + + r.startOnce.Do(func() { + defer close(r.startReadyCh) + + // Start the internal reconciler. + go r.reconcile() + + // Start the group and queue up all + // the runnables that were added prior. + r.start.Lock() + r.started = true + for _, rn := range r.startQueue { + rn.signalReady = true + r.ch <- rn + } + r.start.Unlock() + + // If we don't have any queue, return. 
+ if len(r.startQueue) == 0 { + return + } + + // Wait for all runnables to signal. + for { + select { + case <-ctx.Done(): + if err := ctx.Err(); !errors.Is(err, context.Canceled) { + retErr = err + } + case rn := <-r.startReadyCh: + for i, existing := range r.startQueue { + if existing == rn { + // Remove the item from the start queue. + r.startQueue = append(r.startQueue[:i], r.startQueue[i+1:]...) + break + } + } + // We're done waiting if the queue is empty, return. + if len(r.startQueue) == 0 { + return + } + } + } + }) + + return retErr +} + +// reconcile is our main entrypoint for every runnable added +// to this group. Its primary job is to read off the internal channel +// and schedule runnables while tracking their state. +func (r *runnableGroup) reconcile() { + for runnable := range r.ch { + // Handle stop. + // If the shutdown has been called we want to avoid + // adding new goroutines to the WaitGroup because Wait() + // panics if Add() is called after it. + { + r.stop.RLock() + if r.stopped { + // Drop any runnables if we're stopped. + r.errChan <- errRunnableGroupStopped + r.stop.RUnlock() + continue + } + + // Why is this here? + // When StopAndWait is called, if a runnable is in the process + // of being added, we could end up in a situation where + // the WaitGroup is incremented while StopAndWait has called Wait(), + // which would result in a panic. + r.wg.Add(1) + r.stop.RUnlock() + } + + // Start the runnable. + go func(rn *readyRunnable) { + go func() { + if rn.Check(r.ctx) { + if rn.signalReady { + r.startReadyCh <- rn + } + } + }() + + // If we return, the runnable ended cleanly + // or returned an error to the channel. + // + // We should always decrement the WaitGroup here. + defer r.wg.Done() + + // Start the runnable. + if err := rn.Start(r.ctx); err != nil { + r.errChan <- err + } + }(runnable) + } +} + +// Add should be able to be called before and after Start, but not after StopAndWait. +// Add should return an error when called during StopAndWait. +func (r *runnableGroup) Add(rn Runnable, ready runnableCheck) error { + r.stop.RLock() + if r.stopped { + r.stop.RUnlock() + return errRunnableGroupStopped + } + r.stop.RUnlock() + + if ready == nil { + ready = func(_ context.Context) bool { return true } + } + + readyRunnable := &readyRunnable{ + Runnable: rn, + Check: ready, + } + + // Handle start. + // If the overall runnable group isn't started yet + // we want to buffer the runnables and let Start() + // queue them up again later. + { + r.start.Lock() + + // Check if we're already started. + if !r.started { + // Store the runnable in the internal if not. + r.startQueue = append(r.startQueue, readyRunnable) + r.start.Unlock() + return nil + } + r.start.Unlock() + } + + // Enqueue the runnable. + r.ch <- readyRunnable + return nil +} + +// StopAndWait waits for all the runnables to finish before returning. +func (r *runnableGroup) StopAndWait(ctx context.Context) { + r.stopOnce.Do(func() { + // Close the reconciler channel once we're done. + defer close(r.ch) + + _ = r.Start(ctx) + r.stop.Lock() + // Store the stopped variable so we don't accept any new + // runnables for the time being. + r.stopped = true + r.stop.Unlock() + + // Cancel the internal channel. + r.cancel() + + done := make(chan struct{}) + go func() { + defer close(done) + // Wait for all the runnables to finish. + r.wg.Wait() + }() + + select { + case <-done: + // We're done, exit. + case <-ctx.Done(): + // Calling context has expired, exit. 
+ } + }) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/doc.go new file mode 100644 index 000000000..737cc7eff --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package signals contains libraries for handling signals to gracefully +// shutdown the manager in combination with Kubernetes pod graceful termination +// policy. +package signals diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/signal.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/signal.go new file mode 100644 index 000000000..a79cfb42d --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/signal.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package signals + +import ( + "context" + "os" + "os/signal" +) + +var onlyOneSignalHandler = make(chan struct{}) + +// SetupSignalHandler registers for SIGTERM and SIGINT. A context is returned +// which is canceled on one of these signals. If a second signal is caught, the program +// is terminated with exit code 1. +func SetupSignalHandler() context.Context { + close(onlyOneSignalHandler) // panics when called twice + + ctx, cancel := context.WithCancel(context.Background()) + + c := make(chan os.Signal, 2) + signal.Notify(c, shutdownSignals...) + go func() { + <-c + cancel() + <-c + os.Exit(1) // second signal. Exit directly. + }() + + return ctx +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/signal_posix.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/signal_posix.go new file mode 100644 index 000000000..a0f00a732 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/signal_posix.go @@ -0,0 +1,27 @@ +//go:build !windows +// +build !windows + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
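// Illustrative end-to-end sketch (assumed usage, not part of the vendored change)
// tying SetupSignalHandler to manager startup; the empty Options and the exit
// handling are assumptions for the example.
package main

import (
	"os"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/manager/signals"
)

func main() {
	mgr, err := manager.New(ctrl.GetConfigOrDie(), manager.Options{})
	if err != nil {
		os.Exit(1)
	}
	// The returned context is cancelled on SIGTERM/SIGINT; a second signal
	// exits immediately, matching pod graceful-termination expectations.
	if err := mgr.Start(signals.SetupSignalHandler()); err != nil {
		os.Exit(1)
	}
}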
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package signals + +import ( + "os" + "syscall" +) + +var shutdownSignals = []os.Signal{os.Interrupt, syscall.SIGTERM} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/signal_windows.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/signal_windows.go new file mode 100644 index 000000000..4907d573f --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/signals/signal_windows.go @@ -0,0 +1,23 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package signals + +import ( + "os" +) + +var shutdownSignals = []os.Signal{os.Interrupt} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/client_go_adapter.go b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/client_go_adapter.go new file mode 100644 index 000000000..d32ce2534 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/client_go_adapter.go @@ -0,0 +1,231 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "context" + "net/url" + "time" + + "github.com/prometheus/client_golang/prometheus" + reflectormetrics "k8s.io/client-go/tools/cache" + clientmetrics "k8s.io/client-go/tools/metrics" +) + +// this file contains setup logic to initialize the myriad of places +// that client-go registers metrics. We copy the names and formats +// from Kubernetes so that we match the core controllers. + +// Metrics subsystem and all of the keys used by the rest client. +const ( + RestClientSubsystem = "rest_client" + LatencyKey = "request_latency_seconds" + ResultKey = "requests_total" +) + +// Metrics subsystem and all keys used by the reflectors. +const ( + ReflectorSubsystem = "reflector" + ListsTotalKey = "lists_total" + ListsDurationKey = "list_duration_seconds" + ItemsPerListKey = "items_per_list" + WatchesTotalKey = "watches_total" + ShortWatchesTotalKey = "short_watches_total" + WatchDurationKey = "watch_duration_seconds" + ItemsPerWatchKey = "items_per_watch" + LastResourceVersionKey = "last_resource_version" +) + +var ( + // client metrics. + + // RequestLatency reports the request latency in seconds per verb/URL. + // Deprecated: This metric is deprecated for removal in a future release: using the URL as a + // dimension results in cardinality explosion for some consumers. 
It was deprecated upstream + // in k8s v1.14 and hidden in v1.17 via https://github.com/kubernetes/kubernetes/pull/83836. + // It is not registered by default. To register: + // import ( + // clientmetrics "k8s.io/client-go/tools/metrics" + // clmetrics "sigs.k8s.io/controller-runtime/metrics" + // ) + // + // func init() { + // clmetrics.Registry.MustRegister(clmetrics.RequestLatency) + // clientmetrics.Register(clientmetrics.RegisterOpts{ + // RequestLatency: clmetrics.LatencyAdapter + // }) + // } + RequestLatency = prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Subsystem: RestClientSubsystem, + Name: LatencyKey, + Help: "Request latency in seconds. Broken down by verb and URL.", + Buckets: prometheus.ExponentialBuckets(0.001, 2, 10), + }, []string{"verb", "url"}) + + requestResult = prometheus.NewCounterVec(prometheus.CounterOpts{ + Subsystem: RestClientSubsystem, + Name: ResultKey, + Help: "Number of HTTP requests, partitioned by status code, method, and host.", + }, []string{"code", "method", "host"}) + + // reflector metrics. + + // TODO(directxman12): update these to be histograms once the metrics overhaul KEP + // PRs start landing. + + listsTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ + Subsystem: ReflectorSubsystem, + Name: ListsTotalKey, + Help: "Total number of API lists done by the reflectors", + }, []string{"name"}) + + listsDuration = prometheus.NewSummaryVec(prometheus.SummaryOpts{ + Subsystem: ReflectorSubsystem, + Name: ListsDurationKey, + Help: "How long an API list takes to return and decode for the reflectors", + }, []string{"name"}) + + itemsPerList = prometheus.NewSummaryVec(prometheus.SummaryOpts{ + Subsystem: ReflectorSubsystem, + Name: ItemsPerListKey, + Help: "How many items an API list returns to the reflectors", + }, []string{"name"}) + + watchesTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ + Subsystem: ReflectorSubsystem, + Name: WatchesTotalKey, + Help: "Total number of API watches done by the reflectors", + }, []string{"name"}) + + shortWatchesTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ + Subsystem: ReflectorSubsystem, + Name: ShortWatchesTotalKey, + Help: "Total number of short API watches done by the reflectors", + }, []string{"name"}) + + watchDuration = prometheus.NewSummaryVec(prometheus.SummaryOpts{ + Subsystem: ReflectorSubsystem, + Name: WatchDurationKey, + Help: "How long an API watch takes to return and decode for the reflectors", + }, []string{"name"}) + + itemsPerWatch = prometheus.NewSummaryVec(prometheus.SummaryOpts{ + Subsystem: ReflectorSubsystem, + Name: ItemsPerWatchKey, + Help: "How many items an API watch returns to the reflectors", + }, []string{"name"}) + + lastResourceVersion = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Subsystem: ReflectorSubsystem, + Name: LastResourceVersionKey, + Help: "Last resource version seen for the reflectors", + }, []string{"name"}) +) + +func init() { + registerClientMetrics() + registerReflectorMetrics() +} + +// registerClientMetrics sets up the client latency metrics from client-go. +func registerClientMetrics() { + // register the metrics with our registry + Registry.MustRegister(requestResult) + + // register the metrics with client-go + clientmetrics.Register(clientmetrics.RegisterOpts{ + RequestResult: &resultAdapter{metric: requestResult}, + }) +} + +// registerReflectorMetrics sets up reflector (reconcile) loop metrics. 
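For reference, controllers built on top of this vendored package can expose their own metrics through the same Registry that the adapters above register into; a minimal sketch (the metric name, help text, and label are illustrative):

    package controllers

    import (
        "github.com/prometheus/client_golang/prometheus"

        "sigs.k8s.io/controller-runtime/pkg/metrics"
    )

    // reconcileErrors is a hypothetical custom metric owned by the consuming project.
    var reconcileErrors = prometheus.NewCounterVec(prometheus.CounterOpts{
        Name: "myapp_reconcile_errors_total",
        Help: "Number of reconcile errors, partitioned by controller.",
    }, []string{"controller"})

    func init() {
        // Anything registered on metrics.Registry is served by the manager's
        // metrics endpoint alongside the client-go and workqueue metrics.
        metrics.Registry.MustRegister(reconcileErrors)
    }

Incrementing it from a reconciler is then just reconcileErrors.WithLabelValues("my-controller").Inc().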
+func registerReflectorMetrics() { + Registry.MustRegister(listsTotal) + Registry.MustRegister(listsDuration) + Registry.MustRegister(itemsPerList) + Registry.MustRegister(watchesTotal) + Registry.MustRegister(shortWatchesTotal) + Registry.MustRegister(watchDuration) + Registry.MustRegister(itemsPerWatch) + Registry.MustRegister(lastResourceVersion) + + reflectormetrics.SetReflectorMetricsProvider(reflectorMetricsProvider{}) +} + +// this section contains adapters, implementations, and other sundry organic, artisanally +// hand-crafted syntax trees required to convince client-go that it actually wants to let +// someone use its metrics. + +// Client metrics adapters (method #1 for client-go metrics), +// copied (more-or-less directly) from k8s.io/kubernetes setup code +// (which isn't anywhere in an easily-importable place). + +// LatencyAdapter implements LatencyMetric. +type LatencyAdapter struct { + metric *prometheus.HistogramVec +} + +// Observe increments the request latency metric for the given verb/URL. +func (l *LatencyAdapter) Observe(_ context.Context, verb string, u url.URL, latency time.Duration) { + l.metric.WithLabelValues(verb, u.String()).Observe(latency.Seconds()) +} + +type resultAdapter struct { + metric *prometheus.CounterVec +} + +func (r *resultAdapter) Increment(_ context.Context, code, method, host string) { + r.metric.WithLabelValues(code, method, host).Inc() +} + +// Reflector metrics provider (method #2 for client-go metrics), +// copied (more-or-less directly) from k8s.io/kubernetes setup code +// (which isn't anywhere in an easily-importable place). + +type reflectorMetricsProvider struct{} + +func (reflectorMetricsProvider) NewListsMetric(name string) reflectormetrics.CounterMetric { + return listsTotal.WithLabelValues(name) +} + +func (reflectorMetricsProvider) NewListDurationMetric(name string) reflectormetrics.SummaryMetric { + return listsDuration.WithLabelValues(name) +} + +func (reflectorMetricsProvider) NewItemsInListMetric(name string) reflectormetrics.SummaryMetric { + return itemsPerList.WithLabelValues(name) +} + +func (reflectorMetricsProvider) NewWatchesMetric(name string) reflectormetrics.CounterMetric { + return watchesTotal.WithLabelValues(name) +} + +func (reflectorMetricsProvider) NewShortWatchesMetric(name string) reflectormetrics.CounterMetric { + return shortWatchesTotal.WithLabelValues(name) +} + +func (reflectorMetricsProvider) NewWatchDurationMetric(name string) reflectormetrics.SummaryMetric { + return watchDuration.WithLabelValues(name) +} + +func (reflectorMetricsProvider) NewItemsInWatchMetric(name string) reflectormetrics.SummaryMetric { + return itemsPerWatch.WithLabelValues(name) +} + +func (reflectorMetricsProvider) NewLastResourceVersionMetric(name string) reflectormetrics.GaugeMetric { + return lastResourceVersion.WithLabelValues(name) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/doc.go new file mode 100644 index 000000000..6ed9df951 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package metrics contains controller related metrics utilities +*/ +package metrics diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/listener.go b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/listener.go new file mode 100644 index 000000000..123d8c15f --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/listener.go @@ -0,0 +1,52 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "fmt" + "net" + + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" +) + +var log = logf.RuntimeLog.WithName("metrics") + +// DefaultBindAddress sets the default bind address for the metrics listener +// The metrics is on by default. +var DefaultBindAddress = ":8080" + +// NewListener creates a new TCP listener bound to the given address. +func NewListener(addr string) (net.Listener, error) { + if addr == "" { + // If the metrics bind address is empty, default to ":8080" + addr = DefaultBindAddress + } + + // Add a case to disable metrics altogether + if addr == "0" { + return nil, nil + } + + log.Info("Metrics server is starting to listen", "addr", addr) + ln, err := net.Listen("tcp", addr) + if err != nil { + er := fmt.Errorf("error listening on %s: %w", addr, err) + log.Error(er, "metrics server failed to listen. You may want to disable the metrics server or use another port if it is due to conflicts") + return nil, er + } + return ln, nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/registry.go b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/registry.go new file mode 100644 index 000000000..ce17124d5 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/registry.go @@ -0,0 +1,30 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import "github.com/prometheus/client_golang/prometheus" + +// RegistererGatherer combines both parts of the API of a Prometheus +// registry, both the Registerer and the Gatherer interfaces. 
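The listener above is what backs the manager's metrics endpoint; a sketch of the two common settings, assuming the usual controller-runtime entrypoint (":8080" keeps the default, "0" disables the endpoint, matching the special case in NewListener):

    package main

    import (
        ctrl "sigs.k8s.io/controller-runtime"
    )

    func main() {
        mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
            // Use "0" here to disable the metrics listener entirely.
            MetricsBindAddress: ":8080",
        })
        if err != nil {
            panic(err)
        }
        // SetupSignalHandler is the vendored signals package shown earlier in this diff.
        if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
            panic(err)
        }
    }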
+type RegistererGatherer interface { + prometheus.Registerer + prometheus.Gatherer +} + +// Registry is a prometheus registry for storing metrics within the +// controller-runtime. +var Registry RegistererGatherer = prometheus.NewRegistry() diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/workqueue.go b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/workqueue.go new file mode 100644 index 000000000..8ca47235d --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/workqueue.go @@ -0,0 +1,130 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" + "k8s.io/client-go/util/workqueue" +) + +// This file is copied and adapted from k8s.io/kubernetes/pkg/util/workqueue/prometheus +// which registers metrics to the default prometheus Registry. We require very +// similar functionality, but must register metrics to a different Registry. + +// Metrics subsystem and all keys used by the workqueue. +const ( + WorkQueueSubsystem = "workqueue" + DepthKey = "depth" + AddsKey = "adds_total" + QueueLatencyKey = "queue_duration_seconds" + WorkDurationKey = "work_duration_seconds" + UnfinishedWorkKey = "unfinished_work_seconds" + LongestRunningProcessorKey = "longest_running_processor_seconds" + RetriesKey = "retries_total" +) + +var ( + depth = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Subsystem: WorkQueueSubsystem, + Name: DepthKey, + Help: "Current depth of workqueue", + }, []string{"name"}) + + adds = prometheus.NewCounterVec(prometheus.CounterOpts{ + Subsystem: WorkQueueSubsystem, + Name: AddsKey, + Help: "Total number of adds handled by workqueue", + }, []string{"name"}) + + latency = prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Subsystem: WorkQueueSubsystem, + Name: QueueLatencyKey, + Help: "How long in seconds an item stays in workqueue before being requested", + Buckets: prometheus.ExponentialBuckets(10e-9, 10, 10), + }, []string{"name"}) + + workDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Subsystem: WorkQueueSubsystem, + Name: WorkDurationKey, + Help: "How long in seconds processing an item from workqueue takes.", + Buckets: prometheus.ExponentialBuckets(10e-9, 10, 10), + }, []string{"name"}) + + unfinished = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Subsystem: WorkQueueSubsystem, + Name: UnfinishedWorkKey, + Help: "How many seconds of work has been done that " + + "is in progress and hasn't been observed by work_duration. Large " + + "values indicate stuck threads. 
One can deduce the number of stuck " + + "threads by observing the rate at which this increases.", + }, []string{"name"}) + + longestRunningProcessor = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Subsystem: WorkQueueSubsystem, + Name: LongestRunningProcessorKey, + Help: "How many seconds has the longest running " + + "processor for workqueue been running.", + }, []string{"name"}) + + retries = prometheus.NewCounterVec(prometheus.CounterOpts{ + Subsystem: WorkQueueSubsystem, + Name: RetriesKey, + Help: "Total number of retries handled by workqueue", + }, []string{"name"}) +) + +func init() { + Registry.MustRegister(depth) + Registry.MustRegister(adds) + Registry.MustRegister(latency) + Registry.MustRegister(workDuration) + Registry.MustRegister(unfinished) + Registry.MustRegister(longestRunningProcessor) + Registry.MustRegister(retries) + + workqueue.SetProvider(workqueueMetricsProvider{}) +} + +type workqueueMetricsProvider struct{} + +func (workqueueMetricsProvider) NewDepthMetric(name string) workqueue.GaugeMetric { + return depth.WithLabelValues(name) +} + +func (workqueueMetricsProvider) NewAddsMetric(name string) workqueue.CounterMetric { + return adds.WithLabelValues(name) +} + +func (workqueueMetricsProvider) NewLatencyMetric(name string) workqueue.HistogramMetric { + return latency.WithLabelValues(name) +} + +func (workqueueMetricsProvider) NewWorkDurationMetric(name string) workqueue.HistogramMetric { + return workDuration.WithLabelValues(name) +} + +func (workqueueMetricsProvider) NewUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric { + return unfinished.WithLabelValues(name) +} + +func (workqueueMetricsProvider) NewLongestRunningProcessorSecondsMetric(name string) workqueue.SettableGaugeMetric { + return longestRunningProcessor.WithLabelValues(name) +} + +func (workqueueMetricsProvider) NewRetriesMetric(name string) workqueue.CounterMetric { + return retries.WithLabelValues(name) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/doc.go new file mode 100644 index 000000000..e498107ef --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package predicate defines Predicates used by Controllers to filter Events before they are provided to EventHandlers. +*/ +package predicate diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go b/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go new file mode 100644 index 000000000..8608e22ac --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go @@ -0,0 +1,352 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package predicate + +import ( + "reflect" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" + "sigs.k8s.io/controller-runtime/pkg/runtime/inject" +) + +var log = logf.RuntimeLog.WithName("predicate").WithName("eventFilters") + +// Predicate filters events before enqueuing the keys. +type Predicate interface { + // Create returns true if the Create event should be processed + Create(event.CreateEvent) bool + + // Delete returns true if the Delete event should be processed + Delete(event.DeleteEvent) bool + + // Update returns true if the Update event should be processed + Update(event.UpdateEvent) bool + + // Generic returns true if the Generic event should be processed + Generic(event.GenericEvent) bool +} + +var _ Predicate = Funcs{} +var _ Predicate = ResourceVersionChangedPredicate{} +var _ Predicate = GenerationChangedPredicate{} +var _ Predicate = AnnotationChangedPredicate{} +var _ Predicate = or{} +var _ Predicate = and{} + +// Funcs is a function that implements Predicate. +type Funcs struct { + // Create returns true if the Create event should be processed + CreateFunc func(event.CreateEvent) bool + + // Delete returns true if the Delete event should be processed + DeleteFunc func(event.DeleteEvent) bool + + // Update returns true if the Update event should be processed + UpdateFunc func(event.UpdateEvent) bool + + // Generic returns true if the Generic event should be processed + GenericFunc func(event.GenericEvent) bool +} + +// Create implements Predicate. +func (p Funcs) Create(e event.CreateEvent) bool { + if p.CreateFunc != nil { + return p.CreateFunc(e) + } + return true +} + +// Delete implements Predicate. +func (p Funcs) Delete(e event.DeleteEvent) bool { + if p.DeleteFunc != nil { + return p.DeleteFunc(e) + } + return true +} + +// Update implements Predicate. +func (p Funcs) Update(e event.UpdateEvent) bool { + if p.UpdateFunc != nil { + return p.UpdateFunc(e) + } + return true +} + +// Generic implements Predicate. +func (p Funcs) Generic(e event.GenericEvent) bool { + if p.GenericFunc != nil { + return p.GenericFunc(e) + } + return true +} + +// NewPredicateFuncs returns a predicate funcs that applies the given filter function +// on CREATE, UPDATE, DELETE and GENERIC events. For UPDATE events, the filter is applied +// to the new object. +func NewPredicateFuncs(filter func(object client.Object) bool) Funcs { + return Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + return filter(e.Object) + }, + UpdateFunc: func(e event.UpdateEvent) bool { + return filter(e.ObjectNew) + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return filter(e.Object) + }, + GenericFunc: func(e event.GenericEvent) bool { + return filter(e.Object) + }, + } +} + +// ResourceVersionChangedPredicate implements a default update predicate function on resource version change. 
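As a usage sketch for the helpers above, a namespace filter built with NewPredicateFuncs and attached through the controller builder; the namespace value and controller wiring are illustrative:

    package controllers

    import (
        corev1 "k8s.io/api/core/v1"
        ctrl "sigs.k8s.io/controller-runtime"
        "sigs.k8s.io/controller-runtime/pkg/client"
        "sigs.k8s.io/controller-runtime/pkg/predicate"
        "sigs.k8s.io/controller-runtime/pkg/reconcile"
    )

    func addPodController(mgr ctrl.Manager, r reconcile.Reconciler) error {
        // Only let events through for objects in the "production" namespace.
        inProduction := predicate.NewPredicateFuncs(func(o client.Object) bool {
            return o.GetNamespace() == "production"
        })

        return ctrl.NewControllerManagedBy(mgr).
            For(&corev1.Pod{}).
            WithEventFilter(inProduction).
            Complete(r)
    }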
+type ResourceVersionChangedPredicate struct { + Funcs +} + +// Update implements default UpdateEvent filter for validating resource version change. +func (ResourceVersionChangedPredicate) Update(e event.UpdateEvent) bool { + if e.ObjectOld == nil { + log.Error(nil, "Update event has no old object to update", "event", e) + return false + } + if e.ObjectNew == nil { + log.Error(nil, "Update event has no new object to update", "event", e) + return false + } + + return e.ObjectNew.GetResourceVersion() != e.ObjectOld.GetResourceVersion() +} + +// GenerationChangedPredicate implements a default update predicate function on Generation change. +// +// This predicate will skip update events that have no change in the object's metadata.generation field. +// The metadata.generation field of an object is incremented by the API server when writes are made to the spec field of an object. +// This allows a controller to ignore update events where the spec is unchanged, and only the metadata and/or status fields are changed. +// +// For CustomResource objects the Generation is only incremented when the status subresource is enabled. +// +// Caveats: +// +// * The assumption that the Generation is incremented only on writing to the spec does not hold for all APIs. +// E.g For Deployment objects the Generation is also incremented on writes to the metadata.annotations field. +// For object types other than CustomResources be sure to verify which fields will trigger a Generation increment when they are written to. +// +// * With this predicate, any update events with writes only to the status field will not be reconciled. +// So in the event that the status block is overwritten or wiped by someone else the controller will not self-correct to restore the correct status. +type GenerationChangedPredicate struct { + Funcs +} + +// Update implements default UpdateEvent filter for validating generation change. +func (GenerationChangedPredicate) Update(e event.UpdateEvent) bool { + if e.ObjectOld == nil { + log.Error(nil, "Update event has no old object to update", "event", e) + return false + } + if e.ObjectNew == nil { + log.Error(nil, "Update event has no new object for update", "event", e) + return false + } + + return e.ObjectNew.GetGeneration() != e.ObjectOld.GetGeneration() +} + +// AnnotationChangedPredicate implements a default update predicate function on annotation change. +// +// This predicate will skip update events that have no change in the object's annotation. +// It is intended to be used in conjunction with the GenerationChangedPredicate, as in the following example: +// +// Controller.Watch( +// &source.Kind{Type: v1.MyCustomKind}, +// &handler.EnqueueRequestForObject{}, +// predicate.Or(predicate.GenerationChangedPredicate{}, predicate.AnnotationChangedPredicate{})) +// +// This is mostly useful for controllers that needs to trigger both when the resource's generation is incremented +// (i.e., when the resource' .spec changes), or an annotation changes (e.g., for a staging/alpha API). +type AnnotationChangedPredicate struct { + Funcs +} + +// Update implements default UpdateEvent filter for validating annotation change. 
+func (AnnotationChangedPredicate) Update(e event.UpdateEvent) bool { + if e.ObjectOld == nil { + log.Error(nil, "Update event has no old object to update", "event", e) + return false + } + if e.ObjectNew == nil { + log.Error(nil, "Update event has no new object for update", "event", e) + return false + } + + return !reflect.DeepEqual(e.ObjectNew.GetAnnotations(), e.ObjectOld.GetAnnotations()) +} + +// LabelChangedPredicate implements a default update predicate function on label change. +// +// This predicate will skip update events that have no change in the object's label. +// It is intended to be used in conjunction with the GenerationChangedPredicate, as in the following example: +// +// Controller.Watch( +// &source.Kind{Type: v1.MyCustomKind}, +// &handler.EnqueueRequestForObject{}, +// predicate.Or(predicate.GenerationChangedPredicate{}, predicate.LabelChangedPredicate{})) +// +// This will be helpful when object's labels is carrying some extra specification information beyond object's spec, +// and the controller will be triggered if any valid spec change (not only in spec, but also in labels) happens. +type LabelChangedPredicate struct { + Funcs +} + +// Update implements default UpdateEvent filter for checking label change. +func (LabelChangedPredicate) Update(e event.UpdateEvent) bool { + if e.ObjectOld == nil { + log.Error(nil, "Update event has no old object to update", "event", e) + return false + } + if e.ObjectNew == nil { + log.Error(nil, "Update event has no new object for update", "event", e) + return false + } + + return !reflect.DeepEqual(e.ObjectNew.GetLabels(), e.ObjectOld.GetLabels()) +} + +// And returns a composite predicate that implements a logical AND of the predicates passed to it. +func And(predicates ...Predicate) Predicate { + return and{predicates} +} + +type and struct { + predicates []Predicate +} + +func (a and) InjectFunc(f inject.Func) error { + for _, p := range a.predicates { + if err := f(p); err != nil { + return err + } + } + return nil +} + +func (a and) Create(e event.CreateEvent) bool { + for _, p := range a.predicates { + if !p.Create(e) { + return false + } + } + return true +} + +func (a and) Update(e event.UpdateEvent) bool { + for _, p := range a.predicates { + if !p.Update(e) { + return false + } + } + return true +} + +func (a and) Delete(e event.DeleteEvent) bool { + for _, p := range a.predicates { + if !p.Delete(e) { + return false + } + } + return true +} + +func (a and) Generic(e event.GenericEvent) bool { + for _, p := range a.predicates { + if !p.Generic(e) { + return false + } + } + return true +} + +// Or returns a composite predicate that implements a logical OR of the predicates passed to it. 
+func Or(predicates ...Predicate) Predicate { + return or{predicates} +} + +type or struct { + predicates []Predicate +} + +func (o or) InjectFunc(f inject.Func) error { + for _, p := range o.predicates { + if err := f(p); err != nil { + return err + } + } + return nil +} + +func (o or) Create(e event.CreateEvent) bool { + for _, p := range o.predicates { + if p.Create(e) { + return true + } + } + return false +} + +func (o or) Update(e event.UpdateEvent) bool { + for _, p := range o.predicates { + if p.Update(e) { + return true + } + } + return false +} + +func (o or) Delete(e event.DeleteEvent) bool { + for _, p := range o.predicates { + if p.Delete(e) { + return true + } + } + return false +} + +func (o or) Generic(e event.GenericEvent) bool { + for _, p := range o.predicates { + if p.Generic(e) { + return true + } + } + return false +} + +// LabelSelectorPredicate constructs a Predicate from a LabelSelector. +// Only objects matching the LabelSelector will be admitted. +func LabelSelectorPredicate(s metav1.LabelSelector) (Predicate, error) { + selector, err := metav1.LabelSelectorAsSelector(&s) + if err != nil { + return Funcs{}, err + } + return NewPredicateFuncs(func(o client.Object) bool { + return selector.Matches(labels.Set(o.GetLabels())) + }), nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/ratelimiter/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/ratelimiter/doc.go new file mode 100644 index 000000000..a01d603fe --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/ratelimiter/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package ratelimiter defines rate limiters used by Controllers to limit how frequently requests may be queued. + +Typical rate limiters that can be used are implemented in client-go's workqueue package. +*/ +package ratelimiter diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/ratelimiter/ratelimiter.go b/vendor/sigs.k8s.io/controller-runtime/pkg/ratelimiter/ratelimiter.go new file mode 100644 index 000000000..565a3a227 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/ratelimiter/ratelimiter.go @@ -0,0 +1,30 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ratelimiter + +import "time" + +// RateLimiter is an identical interface of client-go workqueue RateLimiter. 
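As a sketch of where this interface ends up being consumed, a controller can swap in one of client-go's workqueue rate limiters through the builder; the 30-second cap is an arbitrary illustration (the default per-item backoff grows much larger):

    package controllers

    import (
        "time"

        appsv1 "k8s.io/api/apps/v1"
        "k8s.io/client-go/util/workqueue"
        ctrl "sigs.k8s.io/controller-runtime"
        "sigs.k8s.io/controller-runtime/pkg/controller"
        "sigs.k8s.io/controller-runtime/pkg/reconcile"
    )

    func addDeploymentController(mgr ctrl.Manager, r reconcile.Reconciler) error {
        return ctrl.NewControllerManagedBy(mgr).
            For(&appsv1.Deployment{}).
            WithOptions(controller.Options{
                // Retry failed items with exponential backoff, capped at 30s per item.
                RateLimiter: workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 30*time.Second),
            }).
            Complete(r)
    }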
+type RateLimiter interface { + // When gets an item and gets to decide how long that item should wait + When(item interface{}) time.Duration + // Forget indicates that an item is finished being retried. Doesn't matter whether its for perm failing + // or for success, we'll stop tracking it + Forget(item interface{}) + // NumRequeues returns back how many failures the item has had + NumRequeues(item interface{}) int +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/doc.go new file mode 100644 index 000000000..d221dd7b3 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package reconcile defines the Reconciler interface to implement Kubernetes APIs. Reconciler is provided +to Controllers at creation time as the API implementation. +*/ +package reconcile diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/reconcile.go b/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/reconcile.go new file mode 100644 index 000000000..b044e6594 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/reconcile.go @@ -0,0 +1,102 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package reconcile + +import ( + "context" + "time" + + "k8s.io/apimachinery/pkg/types" +) + +// Result contains the result of a Reconciler invocation. +type Result struct { + // Requeue tells the Controller to requeue the reconcile key. Defaults to false. + Requeue bool + + // RequeueAfter if greater than 0, tells the Controller to requeue the reconcile key after the Duration. + // Implies that Requeue is true, there is no need to set Requeue to true at the same time as RequeueAfter. + RequeueAfter time.Duration +} + +// IsZero returns true if this result is empty. +func (r *Result) IsZero() bool { + if r == nil { + return true + } + return *r == Result{} +} + +// Request contains the information necessary to reconcile a Kubernetes object. This includes the +// information to uniquely identify the object - its Name and Namespace. It does NOT contain information about +// any specific Event or the object contents itself. +type Request struct { + // NamespacedName is the name and namespace of the object to reconcile. 
+ types.NamespacedName +} + +/* +Reconciler implements a Kubernetes API for a specific Resource by Creating, Updating or Deleting Kubernetes +objects, or by making changes to systems external to the cluster (e.g. cloudproviders, github, etc). + +reconcile implementations compare the state specified in an object by a user against the actual cluster state, +and then perform operations to make the actual cluster state reflect the state specified by the user. + +Typically, reconcile is triggered by a Controller in response to cluster Events (e.g. Creating, Updating, +Deleting Kubernetes objects) or external Events (GitHub Webhooks, polling external sources, etc). + +Example reconcile Logic: + + * Read an object and all the Pods it owns. + * Observe that the object spec specifies 5 replicas but actual cluster contains only 1 Pod replica. + * Create 4 Pods and set their OwnerReferences to the object. + +reconcile may be implemented as either a type: + + type reconcile struct {} + + func (reconcile) reconcile(controller.Request) (controller.Result, error) { + // Implement business logic of reading and writing objects here + return controller.Result{}, nil + } + +Or as a function: + + controller.Func(func(o controller.Request) (controller.Result, error) { + // Implement business logic of reading and writing objects here + return controller.Result{}, nil + }) + +Reconciliation is level-based, meaning action isn't driven off changes in individual Events, but instead is +driven by actual cluster state read from the apiserver or a local cache. +For example if responding to a Pod Delete Event, the Request won't contain that a Pod was deleted, +instead the reconcile function observes this when reading the cluster state and seeing the Pod as missing. +*/ +type Reconciler interface { + // Reconcile performs a full reconciliation for the object referred to by the Request. + // The Controller will requeue the Request to be processed again if an error is non-nil or + // Result.Requeue is true, otherwise upon completion it will remove the work from the queue. + Reconcile(context.Context, Request) (Result, error) +} + +// Func is a function that implements the reconcile interface. +type Func func(context.Context, Request) (Result, error) + +var _ Reconciler = Func(nil) + +// Reconcile implements Reconciler. +func (r Func) Reconcile(ctx context.Context, o Request) (Result, error) { return r(ctx, o) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/recorder/recorder.go b/vendor/sigs.k8s.io/controller-runtime/pkg/recorder/recorder.go new file mode 100644 index 000000000..f093f0a72 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/recorder/recorder.go @@ -0,0 +1,31 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package recorder defines interfaces for working with Kubernetes event recorders. +// +// You can use these to emit Kubernetes events associated with a particular Kubernetes +// object. 
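Tying the reconcile and recorder packages together, a minimal Reconciler sketch that fetches the object and emits an event through a recorder obtained from the manager; the controller name, reason, and message strings are illustrative:

    package controllers

    import (
        "context"

        corev1 "k8s.io/api/core/v1"
        apierrors "k8s.io/apimachinery/pkg/api/errors"
        "k8s.io/client-go/tools/record"
        ctrl "sigs.k8s.io/controller-runtime"
        "sigs.k8s.io/controller-runtime/pkg/client"
    )

    type PodReconciler struct {
        Client   client.Client
        Recorder record.EventRecorder // e.g. mgr.GetEventRecorderFor("pod-controller")
    }

    func (r *PodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
        pod := &corev1.Pod{}
        if err := r.Client.Get(ctx, req.NamespacedName, pod); err != nil {
            if apierrors.IsNotFound(err) {
                // The object is gone; reconciliation is level-based, so there is nothing to do.
                return ctrl.Result{}, nil
            }
            return ctrl.Result{}, err
        }

        r.Recorder.Event(pod, corev1.EventTypeNormal, "Reconciled", "observed pod")
        return ctrl.Result{}, nil
    }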
+package recorder + +import ( + "k8s.io/client-go/tools/record" +) + +// Provider knows how to generate new event recorders with given name. +type Provider interface { + // NewRecorder returns an EventRecorder with given name. + GetEventRecorderFor(name string) record.EventRecorder +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/doc.go new file mode 100644 index 000000000..17c60895f --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package inject defines interfaces and functions for propagating dependencies from a ControllerManager to +the components registered with it. Dependencies are propagated to Reconciler, Source, EventHandler and Predicate +objects which implement the Injectable interfaces. +*/ +package inject diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/inject.go b/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/inject.go new file mode 100644 index 000000000..c8c56ba81 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/inject.go @@ -0,0 +1,164 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package inject is used by a Manager to inject types into Sources, EventHandlers, Predicates, and Reconciles. +// Deprecated: Use manager.Options fields directly. This package will be removed in v0.10. +package inject + +import ( + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/rest" + + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// Cache is used by the ControllerManager to inject Cache into Sources, EventHandlers, Predicates, and +// Reconciles. +type Cache interface { + InjectCache(cache cache.Cache) error +} + +// CacheInto will set informers on i and return the result if it implements Cache. Returns +// false if i does not implement Cache. +func CacheInto(c cache.Cache, i interface{}) (bool, error) { + if s, ok := i.(Cache); ok { + return true, s.InjectCache(c) + } + return false, nil +} + +// APIReader is used by the Manager to inject the APIReader into necessary types. +type APIReader interface { + InjectAPIReader(client.Reader) error +} + +// APIReaderInto will set APIReader on i and return the result if it implements APIReaderInto. 
+// Returns false if i does not implement APIReader. +func APIReaderInto(reader client.Reader, i interface{}) (bool, error) { + if s, ok := i.(APIReader); ok { + return true, s.InjectAPIReader(reader) + } + return false, nil +} + +// Config is used by the ControllerManager to inject Config into Sources, EventHandlers, Predicates, and +// Reconciles. +type Config interface { + InjectConfig(*rest.Config) error +} + +// ConfigInto will set config on i and return the result if it implements Config. Returns +// false if i does not implement Config. +func ConfigInto(config *rest.Config, i interface{}) (bool, error) { + if s, ok := i.(Config); ok { + return true, s.InjectConfig(config) + } + return false, nil +} + +// Client is used by the ControllerManager to inject client into Sources, EventHandlers, Predicates, and +// Reconciles. +type Client interface { + InjectClient(client.Client) error +} + +// ClientInto will set client on i and return the result if it implements Client. Returns +// false if i does not implement Client. +func ClientInto(client client.Client, i interface{}) (bool, error) { + if s, ok := i.(Client); ok { + return true, s.InjectClient(client) + } + return false, nil +} + +// Scheme is used by the ControllerManager to inject Scheme into Sources, EventHandlers, Predicates, and +// Reconciles. +type Scheme interface { + InjectScheme(scheme *runtime.Scheme) error +} + +// SchemeInto will set scheme and return the result on i if it implements Scheme. Returns +// false if i does not implement Scheme. +func SchemeInto(scheme *runtime.Scheme, i interface{}) (bool, error) { + if is, ok := i.(Scheme); ok { + return true, is.InjectScheme(scheme) + } + return false, nil +} + +// Stoppable is used by the ControllerManager to inject stop channel into Sources, +// EventHandlers, Predicates, and Reconciles. +type Stoppable interface { + InjectStopChannel(<-chan struct{}) error +} + +// StopChannelInto will set stop channel on i and return the result if it implements Stoppable. +// Returns false if i does not implement Stoppable. +func StopChannelInto(stop <-chan struct{}, i interface{}) (bool, error) { + if s, ok := i.(Stoppable); ok { + return true, s.InjectStopChannel(stop) + } + return false, nil +} + +// Mapper is used to inject the rest mapper to components that may need it. +type Mapper interface { + InjectMapper(meta.RESTMapper) error +} + +// MapperInto will set the rest mapper on i and return the result if it implements Mapper. +// Returns false if i does not implement Mapper. +func MapperInto(mapper meta.RESTMapper, i interface{}) (bool, error) { + if m, ok := i.(Mapper); ok { + return true, m.InjectMapper(mapper) + } + return false, nil +} + +// Func injects dependencies into i. +type Func func(i interface{}) error + +// Injector is used by the ControllerManager to inject Func into Controllers. +type Injector interface { + InjectFunc(f Func) error +} + +// InjectorInto will set f and return the result on i if it implements Injector. Returns +// false if i does not implement Injector. +func InjectorInto(f Func, i interface{}) (bool, error) { + if ii, ok := i.(Injector); ok { + return true, ii.InjectFunc(f) + } + return false, nil +} + +// Logger is used to inject Loggers into components that need them +// and don't otherwise have opinions. +type Logger interface { + InjectLogger(l logr.Logger) error +} + +// LoggerInto will set the logger on the given object if it implements inject.Logger, +// returning true if a InjectLogger was called, and false otherwise. 
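For completeness, a sketch of the consumer side of these interfaces: a hypothetical event handler that asks for the manager's client via InjectClient. Note the package doc above marks inject as deprecated, so newer code should prefer passing dependencies explicitly:

    package handlers

    import (
        "sigs.k8s.io/controller-runtime/pkg/client"
        "sigs.k8s.io/controller-runtime/pkg/runtime/inject"
    )

    // enqueueForBackend is a hypothetical component that needs a client at runtime.
    type enqueueForBackend struct {
        client client.Client
    }

    // InjectClient satisfies inject.Client, so ClientInto (and therefore the
    // controller machinery) can hand over the manager's client during setup.
    func (e *enqueueForBackend) InjectClient(c client.Client) error {
        e.client = c
        return nil
    }

    var _ inject.Client = &enqueueForBackend{}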
+func LoggerInto(l logr.Logger, i interface{}) (bool, error) { + if injectable, wantsLogger := i.(Logger); wantsLogger { + return true, injectable.InjectLogger(l) + } + return false, nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/scheme/scheme.go b/vendor/sigs.k8s.io/controller-runtime/pkg/scheme/scheme.go new file mode 100644 index 000000000..9dc93a9b2 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/scheme/scheme.go @@ -0,0 +1,94 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package scheme contains utilities for gradually building Schemes, +// which contain information associating Go types with Kubernetes +// groups, versions, and kinds. +// +// Each API group should define a utility function +// called AddToScheme for adding its types to a Scheme: +// +// // in package myapigroupv1... +// var ( +// SchemeGroupVersion = schema.GroupVersion{Group: "my.api.group", Version: "v1"} +// SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} +// AddToScheme = SchemeBuilder.AddToScheme +// ) +// +// func init() { +// SchemeBuilder.Register(&MyType{}, &MyTypeList) +// } +// var ( +// scheme *runtime.Scheme = runtime.NewScheme() +// ) +// +// This also true of the built-in Kubernetes types. Then, in the entrypoint for +// your manager, assemble the scheme containing exactly the types you need, +// panicing if scheme registration failed. For instance, if our controller needs +// types from the core/v1 API group (e.g. Pod), plus types from my.api.group/v1: +// +// func init() { +// utilruntime.Must(myapigroupv1.AddToScheme(scheme)) +// utilruntime.Must(kubernetesscheme.AddToScheme(scheme)) +// } +// +// func main() { +// mgr := controllers.NewManager(context.Background(), controllers.GetConfigOrDie(), manager.Options{ +// Scheme: scheme, +// }) +// // ... +// } +// +package scheme + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// Builder builds a new Scheme for mapping go types to Kubernetes GroupVersionKinds. +type Builder struct { + GroupVersion schema.GroupVersion + runtime.SchemeBuilder +} + +// Register adds one or more objects to the SchemeBuilder so they can be added to a Scheme. Register mutates bld. +func (bld *Builder) Register(object ...runtime.Object) *Builder { + bld.SchemeBuilder.Register(func(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(bld.GroupVersion, object...) + metav1.AddToGroupVersion(scheme, bld.GroupVersion) + return nil + }) + return bld +} + +// RegisterAll registers all types from the Builder argument. RegisterAll mutates bld. +func (bld *Builder) RegisterAll(b *Builder) *Builder { + bld.SchemeBuilder = append(bld.SchemeBuilder, b.SchemeBuilder...) + return bld +} + +// AddToScheme adds all registered types to s. +func (bld *Builder) AddToScheme(s *runtime.Scheme) error { + return bld.SchemeBuilder.AddToScheme(s) +} + +// Build returns a new Scheme containing the registered types. 
+func (bld *Builder) Build() (*runtime.Scheme, error) { + s := runtime.NewScheme() + return s, bld.AddToScheme(s) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/source/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/source/doc.go new file mode 100644 index 000000000..31935c83c --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/source/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package source provides event streams to hook up to Controllers with Controller.Watch. Events are +used with handler.EventHandlers to enqueue reconcile.Requests and trigger Reconciles for Kubernetes +objects. +*/ +package source diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/source/internal/eventsource.go b/vendor/sigs.k8s.io/controller-runtime/pkg/source/internal/eventsource.go new file mode 100644 index 000000000..f0cfe212e --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/source/internal/eventsource.go @@ -0,0 +1,138 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "fmt" + + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" + + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +var log = logf.RuntimeLog.WithName("source").WithName("EventHandler") + +var _ cache.ResourceEventHandler = EventHandler{} + +// EventHandler adapts a handler.EventHandler interface to a cache.ResourceEventHandler interface. +type EventHandler struct { + EventHandler handler.EventHandler + Queue workqueue.RateLimitingInterface + Predicates []predicate.Predicate +} + +// OnAdd creates CreateEvent and calls Create on EventHandler. +func (e EventHandler) OnAdd(obj interface{}) { + c := event.CreateEvent{} + + // Pull Object out of the object + if o, ok := obj.(client.Object); ok { + c.Object = o + } else { + log.Error(nil, "OnAdd missing Object", + "object", obj, "type", fmt.Sprintf("%T", obj)) + return + } + + for _, p := range e.Predicates { + if !p.Create(c) { + return + } + } + + // Invoke create handler + e.EventHandler.Create(c, e.Queue) +} + +// OnUpdate creates UpdateEvent and calls Update on EventHandler. 
+func (e EventHandler) OnUpdate(oldObj, newObj interface{}) { + u := event.UpdateEvent{} + + if o, ok := oldObj.(client.Object); ok { + u.ObjectOld = o + } else { + log.Error(nil, "OnUpdate missing ObjectOld", + "object", oldObj, "type", fmt.Sprintf("%T", oldObj)) + return + } + + // Pull Object out of the object + if o, ok := newObj.(client.Object); ok { + u.ObjectNew = o + } else { + log.Error(nil, "OnUpdate missing ObjectNew", + "object", newObj, "type", fmt.Sprintf("%T", newObj)) + return + } + + for _, p := range e.Predicates { + if !p.Update(u) { + return + } + } + + // Invoke update handler + e.EventHandler.Update(u, e.Queue) +} + +// OnDelete creates DeleteEvent and calls Delete on EventHandler. +func (e EventHandler) OnDelete(obj interface{}) { + d := event.DeleteEvent{} + + // Deal with tombstone events by pulling the object out. Tombstone events wrap the object in a + // DeleteFinalStateUnknown struct, so the object needs to be pulled out. + // Copied from sample-controller + // This should never happen if we aren't missing events, which we have concluded that we are not + // and made decisions off of this belief. Maybe this shouldn't be here? + var ok bool + if _, ok = obj.(client.Object); !ok { + // If the object doesn't have Metadata, assume it is a tombstone object of type DeletedFinalStateUnknown + tombstone, ok := obj.(cache.DeletedFinalStateUnknown) + if !ok { + log.Error(nil, "Error decoding objects. Expected cache.DeletedFinalStateUnknown", + "type", fmt.Sprintf("%T", obj), + "object", obj) + return + } + + // Set obj to the tombstone obj + obj = tombstone.Obj + } + + // Pull Object out of the object + if o, ok := obj.(client.Object); ok { + d.Object = o + } else { + log.Error(nil, "OnDelete missing Object", + "object", obj, "type", fmt.Sprintf("%T", obj)) + return + } + + for _, p := range e.Predicates { + if !p.Delete(d) { + return + } + } + + // Invoke delete handler + e.EventHandler.Delete(d, e.Queue) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go b/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go new file mode 100644 index 000000000..241c582ef --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go @@ -0,0 +1,375 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package source + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" + "sigs.k8s.io/controller-runtime/pkg/runtime/inject" + "sigs.k8s.io/controller-runtime/pkg/source/internal" + + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +var log = logf.RuntimeLog.WithName("source") + +const ( + // defaultBufferSize is the default number of event notifications that can be buffered. + defaultBufferSize = 1024 +) + +// Source is a source of events (eh.g. Create, Update, Delete operations on Kubernetes Objects, Webhook callbacks, etc) +// which should be processed by event.EventHandlers to enqueue reconcile.Requests. +// +// * Use Kind for events originating in the cluster (e.g. Pod Create, Pod Update, Deployment Update). +// +// * Use Channel for events originating outside the cluster (eh.g. GitHub Webhook callback, Polling external urls). +// +// Users may build their own Source implementations. If their implementations implement any of the inject package +// interfaces, the dependencies will be injected by the Controller when Watch is called. +type Source interface { + // Start is internal and should be called only by the Controller to register an EventHandler with the Informer + // to enqueue reconcile.Requests. + Start(context.Context, handler.EventHandler, workqueue.RateLimitingInterface, ...predicate.Predicate) error +} + +// SyncingSource is a source that needs syncing prior to being usable. The controller +// will call its WaitForSync prior to starting workers. +type SyncingSource interface { + Source + WaitForSync(ctx context.Context) error +} + +// NewKindWithCache creates a Source without InjectCache, so that it is assured that the given cache is used +// and not overwritten. It can be used to watch objects in a different cluster by passing the cache +// from that other cluster. +func NewKindWithCache(object client.Object, cache cache.Cache) SyncingSource { + return &kindWithCache{kind: Kind{Type: object, cache: cache}} +} + +type kindWithCache struct { + kind Kind +} + +func (ks *kindWithCache) Start(ctx context.Context, handler handler.EventHandler, queue workqueue.RateLimitingInterface, + prct ...predicate.Predicate) error { + return ks.kind.Start(ctx, handler, queue, prct...) +} + +func (ks *kindWithCache) WaitForSync(ctx context.Context) error { + return ks.kind.WaitForSync(ctx) +} + +// Kind is used to provide a source of events originating inside the cluster from Watches (e.g. Pod Create). +type Kind struct { + // Type is the type of object to watch. e.g. &v1.Pod{} + Type client.Object + + // cache used to watch APIs + cache cache.Cache + + // started may contain an error if one was encountered during startup. If its closed and does not + // contain an error, startup and syncing finished. + started chan error + startCancel func() +} + +var _ SyncingSource = &Kind{} + +// Start is internal and should be called only by the Controller to register an EventHandler with the Informer +// to enqueue reconcile.Requests. 
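From the user side, Kind is normally handed to Controller.Watch rather than started directly; a sketch that watches ReplicaSets and enqueues the owning Deployment:

    package controllers

    import (
        appsv1 "k8s.io/api/apps/v1"
        "sigs.k8s.io/controller-runtime/pkg/controller"
        "sigs.k8s.io/controller-runtime/pkg/handler"
        "sigs.k8s.io/controller-runtime/pkg/source"
    )

    func watchReplicaSets(c controller.Controller) error {
        return c.Watch(
            &source.Kind{Type: &appsv1.ReplicaSet{}},
            &handler.EnqueueRequestForOwner{
                OwnerType:    &appsv1.Deployment{},
                IsController: true,
            },
        )
    }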
+func (ks *Kind) Start(ctx context.Context, handler handler.EventHandler, queue workqueue.RateLimitingInterface, + prct ...predicate.Predicate) error { + // Type should have been specified by the user. + if ks.Type == nil { + return fmt.Errorf("must specify Kind.Type") + } + + // cache should have been injected before Start was called + if ks.cache == nil { + return fmt.Errorf("must call CacheInto on Kind before calling Start") + } + + // cache.GetInformer will block until its context is cancelled if the cache was already started and it can not + // sync that informer (most commonly due to RBAC issues). + ctx, ks.startCancel = context.WithCancel(ctx) + ks.started = make(chan error) + go func() { + var ( + i cache.Informer + lastErr error + ) + + // Tries to get an informer until it returns true, + // an error or the specified context is cancelled or expired. + if err := wait.PollImmediateUntilWithContext(ctx, 10*time.Second, func(ctx context.Context) (bool, error) { + // Lookup the Informer from the Cache and add an EventHandler which populates the Queue + i, lastErr = ks.cache.GetInformer(ctx, ks.Type) + if lastErr != nil { + kindMatchErr := &meta.NoKindMatchError{} + switch { + case errors.As(lastErr, &kindMatchErr): + log.Error(lastErr, "if kind is a CRD, it should be installed before calling Start", + "kind", kindMatchErr.GroupKind) + case runtime.IsNotRegisteredError(lastErr): + log.Error(lastErr, "kind must be registered to the Scheme") + default: + log.Error(lastErr, "failed to get informer from cache") + } + return false, nil // Retry. + } + return true, nil + }); err != nil { + if lastErr != nil { + ks.started <- fmt.Errorf("failed to get informer from cache: %w", lastErr) + return + } + ks.started <- err + return + } + + i.AddEventHandler(internal.EventHandler{Queue: queue, EventHandler: handler, Predicates: prct}) + if !ks.cache.WaitForCacheSync(ctx) { + // Would be great to return something more informative here + ks.started <- errors.New("cache did not sync") + } + close(ks.started) + }() + + return nil +} + +func (ks *Kind) String() string { + if ks.Type != nil { + return fmt.Sprintf("kind source: %T", ks.Type) + } + return "kind source: unknown type" +} + +// WaitForSync implements SyncingSource to allow controllers to wait with starting +// workers until the cache is synced. +func (ks *Kind) WaitForSync(ctx context.Context) error { + select { + case err := <-ks.started: + return err + case <-ctx.Done(): + ks.startCancel() + if errors.Is(ctx.Err(), context.Canceled) { + return nil + } + return errors.New("timed out waiting for cache to be synced") + } +} + +var _ inject.Cache = &Kind{} + +// InjectCache is internal should be called only by the Controller. InjectCache is used to inject +// the Cache dependency initialized by the ControllerManager. +func (ks *Kind) InjectCache(c cache.Cache) error { + if ks.cache == nil { + ks.cache = c + } + return nil +} + +var _ Source = &Channel{} + +// Channel is used to provide a source of events originating outside the cluster +// (e.g. GitHub Webhook callback). Channel requires the user to wire the external +// source (eh.g. http handler) to write GenericEvents to the underlying channel. 
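A matching sketch for Channel: the caller owns a GenericEvent channel (fed by a webhook, poller, or similar) and wires it into the same Watch call:

    package controllers

    import (
        "sigs.k8s.io/controller-runtime/pkg/controller"
        "sigs.k8s.io/controller-runtime/pkg/event"
        "sigs.k8s.io/controller-runtime/pkg/handler"
        "sigs.k8s.io/controller-runtime/pkg/source"
    )

    func watchExternalEvents(c controller.Controller) (chan<- event.GenericEvent, error) {
        events := make(chan event.GenericEvent)

        // Anything written to the returned channel is turned into a
        // reconcile.Request for the event's Object.
        err := c.Watch(
            &source.Channel{Source: events},
            &handler.EnqueueRequestForObject{},
        )
        return events, err
    }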
+type Channel struct { + // once ensures the event distribution goroutine will be performed only once + once sync.Once + + // Source is the source channel to fetch GenericEvents + Source <-chan event.GenericEvent + + // stop is to end ongoing goroutine, and close the channels + stop <-chan struct{} + + // dest is the destination channels of the added event handlers + dest []chan event.GenericEvent + + // DestBufferSize is the specified buffer size of dest channels. + // Default to 1024 if not specified. + DestBufferSize int + + // destLock is to ensure the destination channels are safely added/removed + destLock sync.Mutex +} + +func (cs *Channel) String() string { + return fmt.Sprintf("channel source: %p", cs) +} + +var _ inject.Stoppable = &Channel{} + +// InjectStopChannel is internal should be called only by the Controller. +// It is used to inject the stop channel initialized by the ControllerManager. +func (cs *Channel) InjectStopChannel(stop <-chan struct{}) error { + if cs.stop == nil { + cs.stop = stop + } + + return nil +} + +// Start implements Source and should only be called by the Controller. +func (cs *Channel) Start( + ctx context.Context, + handler handler.EventHandler, + queue workqueue.RateLimitingInterface, + prct ...predicate.Predicate) error { + // Source should have been specified by the user. + if cs.Source == nil { + return fmt.Errorf("must specify Channel.Source") + } + + // stop should have been injected before Start was called + if cs.stop == nil { + return fmt.Errorf("must call InjectStop on Channel before calling Start") + } + + // use default value if DestBufferSize not specified + if cs.DestBufferSize == 0 { + cs.DestBufferSize = defaultBufferSize + } + + dst := make(chan event.GenericEvent, cs.DestBufferSize) + + cs.destLock.Lock() + cs.dest = append(cs.dest, dst) + cs.destLock.Unlock() + + cs.once.Do(func() { + // Distribute GenericEvents to all EventHandler / Queue pairs Watching this source + go cs.syncLoop(ctx) + }) + + go func() { + for evt := range dst { + shouldHandle := true + for _, p := range prct { + if !p.Generic(evt) { + shouldHandle = false + break + } + } + + if shouldHandle { + handler.Generic(evt, queue) + } + } + }() + + return nil +} + +func (cs *Channel) doStop() { + cs.destLock.Lock() + defer cs.destLock.Unlock() + + for _, dst := range cs.dest { + close(dst) + } +} + +func (cs *Channel) distribute(evt event.GenericEvent) { + cs.destLock.Lock() + defer cs.destLock.Unlock() + + for _, dst := range cs.dest { + // We cannot make it under goroutine here, or we'll meet the + // race condition of writing message to closed channels. + // To avoid blocking, the dest channels are expected to be of + // proper buffer size. If we still see it blocked, then + // the controller is thought to be in an abnormal state. + dst <- evt + } +} + +func (cs *Channel) syncLoop(ctx context.Context) { + for { + select { + case <-ctx.Done(): + // Close destination channels + cs.doStop() + return + case evt, stillOpen := <-cs.Source: + if !stillOpen { + // if the source channel is closed, we're never gonna get + // anything more on it, so stop & bail + cs.doStop() + return + } + cs.distribute(evt) + } + } +} + +// Informer is used to provide a source of events originating inside the cluster from Watches (e.g. Pod Create). 
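As a usage sketch for the Channel source above (the event channel, producer function, and Pod object are illustrative assumptions): an external producer writes GenericEvents to a channel, and the controller enqueues a reconcile request for the referenced object.

package main

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/source"
)

// setupChannelWatch registers a Channel source on an existing controller and
// returns the write side of the channel for an external producer to use.
func setupChannelWatch(c controller.Controller) (chan<- event.GenericEvent, error) {
	events := make(chan event.GenericEvent)
	if err := c.Watch(&source.Channel{Source: events}, &handler.EnqueueRequestForObject{}); err != nil {
		return nil, err
	}
	return events, nil
}

// notify is what an external source (e.g. a webhook callback) would call to
// trigger reconciliation of a specific object.
func notify(events chan<- event.GenericEvent, namespace, name string) {
	events <- event.GenericEvent{
		Object: &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}},
	}
}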
+type Informer struct { + // Informer is the controller-runtime Informer + Informer cache.Informer +} + +var _ Source = &Informer{} + +// Start is internal and should be called only by the Controller to register an EventHandler with the Informer +// to enqueue reconcile.Requests. +func (is *Informer) Start(ctx context.Context, handler handler.EventHandler, queue workqueue.RateLimitingInterface, + prct ...predicate.Predicate) error { + // Informer should have been specified by the user. + if is.Informer == nil { + return fmt.Errorf("must specify Informer.Informer") + } + + is.Informer.AddEventHandler(internal.EventHandler{Queue: queue, EventHandler: handler, Predicates: prct}) + return nil +} + +func (is *Informer) String() string { + return fmt.Sprintf("informer source: %p", is.Informer) +} + +var _ Source = Func(nil) + +// Func is a function that implements Source. +type Func func(context.Context, handler.EventHandler, workqueue.RateLimitingInterface, ...predicate.Predicate) error + +// Start implements Source. +func (f Func) Start(ctx context.Context, evt handler.EventHandler, queue workqueue.RateLimitingInterface, + pr ...predicate.Predicate) error { + return f(ctx, evt, queue, pr...) +} + +func (f Func) String() string { + return fmt.Sprintf("func source: %p", f) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/decode.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/decode.go new file mode 100644 index 000000000..c7cb71b75 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/decode.go @@ -0,0 +1,72 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/util/json" +) + +// Decoder knows how to decode the contents of an admission +// request into a concrete object. +type Decoder struct { + codecs serializer.CodecFactory +} + +// NewDecoder creates a Decoder given the runtime.Scheme. +func NewDecoder(scheme *runtime.Scheme) (*Decoder, error) { + return &Decoder{codecs: serializer.NewCodecFactory(scheme)}, nil +} + +// Decode decodes the inlined object in the AdmissionRequest into the passed-in runtime.Object. +// If you want decode the OldObject in the AdmissionRequest, use DecodeRaw. +// It errors out if req.Object.Raw is empty i.e. containing 0 raw bytes. +func (d *Decoder) Decode(req Request, into runtime.Object) error { + // we error out if rawObj is an empty object. + if len(req.Object.Raw) == 0 { + return fmt.Errorf("there is no content to decode") + } + return d.DecodeRaw(req.Object, into) +} + +// DecodeRaw decodes a RawExtension object into the passed-in runtime.Object. +// It errors out if rawObj is empty i.e. containing 0 raw bytes. 
+func (d *Decoder) DecodeRaw(rawObj runtime.RawExtension, into runtime.Object) error { + // NB(directxman12): there's a bug/weird interaction between decoders and + // the API server where the API server doesn't send a GVK on the embedded + // objects, which means the unstructured decoder refuses to decode. It + // also means we can't pass the unstructured directly in, since it'll try + // and call unstructured's special Unmarshal implementation, which calls + // back into that same decoder :-/ + // See kubernetes/kubernetes#74373. + + // we error out if rawObj is an empty object. + if len(rawObj.Raw) == 0 { + return fmt.Errorf("there is no content to decode") + } + if unstructuredInto, isUnstructured := into.(*unstructured.Unstructured); isUnstructured { + // unmarshal into unstructured's underlying object to avoid calling the decoder + return json.Unmarshal(rawObj.Raw, &unstructuredInto.Object) + } + + deserializer := d.codecs.UniversalDeserializer() + return runtime.DecodeInto(deserializer, rawObj.Raw, into) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter.go new file mode 100644 index 000000000..0d9aa7a83 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter.go @@ -0,0 +1,74 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import ( + "context" + "encoding/json" + "net/http" + + "k8s.io/apimachinery/pkg/runtime" +) + +// Defaulter defines functions for setting defaults on resources. +type Defaulter interface { + runtime.Object + Default() +} + +// DefaultingWebhookFor creates a new Webhook for Defaulting the provided type. +func DefaultingWebhookFor(defaulter Defaulter) *Webhook { + return &Webhook{ + Handler: &mutatingHandler{defaulter: defaulter}, + } +} + +type mutatingHandler struct { + defaulter Defaulter + decoder *Decoder +} + +var _ DecoderInjector = &mutatingHandler{} + +// InjectDecoder injects the decoder into a mutatingHandler. +func (h *mutatingHandler) InjectDecoder(d *Decoder) error { + h.decoder = d + return nil +} + +// Handle handles admission requests. 
+func (h *mutatingHandler) Handle(ctx context.Context, req Request) Response { + if h.defaulter == nil { + panic("defaulter should never be nil") + } + + // Get the object in the request + obj := h.defaulter.DeepCopyObject().(Defaulter) + if err := h.decoder.Decode(req, obj); err != nil { + return Errored(http.StatusBadRequest, err) + } + + // Default the object + obj.Default() + marshalled, err := json.Marshal(obj) + if err != nil { + return Errored(http.StatusInternalServerError, err) + } + + // Create the patch + return PatchResponseFromRaw(req.Object.Raw, marshalled) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter_custom.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter_custom.go new file mode 100644 index 000000000..d65727e62 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter_custom.go @@ -0,0 +1,86 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import ( + "context" + "encoding/json" + "errors" + "net/http" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" +) + +// CustomDefaulter defines functions for setting defaults on resources. +type CustomDefaulter interface { + Default(ctx context.Context, obj runtime.Object) error +} + +// WithCustomDefaulter creates a new Webhook for a CustomDefaulter interface. +func WithCustomDefaulter(obj runtime.Object, defaulter CustomDefaulter) *Webhook { + return &Webhook{ + Handler: &defaulterForType{object: obj, defaulter: defaulter}, + } +} + +type defaulterForType struct { + defaulter CustomDefaulter + object runtime.Object + decoder *Decoder +} + +var _ DecoderInjector = &defaulterForType{} + +func (h *defaulterForType) InjectDecoder(d *Decoder) error { + h.decoder = d + return nil +} + +// Handle handles admission requests. 
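A minimal sketch of a CustomDefaulter used with WithCustomDefaulter; the podAnnotator type, annotation key, and the registration path mentioned in the comment are hypothetical:

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

// podAnnotator stamps a marker annotation onto every Pod that passes through
// the mutating webhook.
type podAnnotator struct{}

var _ admission.CustomDefaulter = &podAnnotator{}

func (a *podAnnotator) Default(ctx context.Context, obj runtime.Object) error {
	pod, ok := obj.(*corev1.Pod)
	if !ok {
		return fmt.Errorf("expected a Pod but got a %T", obj)
	}
	if pod.Annotations == nil {
		pod.Annotations = map[string]string{}
	}
	pod.Annotations["example.com/defaulted"] = "true"
	return nil
}

// The resulting *admission.Webhook is an http.Handler and can be registered
// on a webhook server, e.g. at a path such as /mutate-pod.
var podDefaultingWebhook = admission.WithCustomDefaulter(&corev1.Pod{}, &podAnnotator{})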
+func (h *defaulterForType) Handle(ctx context.Context, req Request) Response { + if h.defaulter == nil { + panic("defaulter should never be nil") + } + if h.object == nil { + panic("object should never be nil") + } + + ctx = NewContextWithRequest(ctx, req) + + // Get the object in the request + obj := h.object.DeepCopyObject() + if err := h.decoder.Decode(req, obj); err != nil { + return Errored(http.StatusBadRequest, err) + } + + // Default the object + if err := h.defaulter.Default(ctx, obj); err != nil { + var apiStatus apierrors.APIStatus + if errors.As(err, &apiStatus) { + return validationResponseFromStatus(false, apiStatus.Status()) + } + return Denied(err.Error()) + } + + // Create the patch + marshalled, err := json.Marshal(obj) + if err != nil { + return Errored(http.StatusInternalServerError, err) + } + return PatchResponseFromRaw(req.Object.Raw, marshalled) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/doc.go new file mode 100644 index 000000000..0b274dd02 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/doc.go @@ -0,0 +1,28 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package admission provides implementation for admission webhook and methods to implement admission webhook handlers. + +See examples/mutatingwebhook.go and examples/validatingwebhook.go for examples of admission webhooks. +*/ +package admission + +import ( + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" +) + +var log = logf.RuntimeLog.WithName("admission") diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go new file mode 100644 index 000000000..066cc4225 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go @@ -0,0 +1,153 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package admission + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + + v1 "k8s.io/api/admission/v1" + "k8s.io/api/admission/v1beta1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var admissionScheme = runtime.NewScheme() +var admissionCodecs = serializer.NewCodecFactory(admissionScheme) + +func init() { + utilruntime.Must(v1.AddToScheme(admissionScheme)) + utilruntime.Must(v1beta1.AddToScheme(admissionScheme)) +} + +var _ http.Handler = &Webhook{} + +func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { + var body []byte + var err error + ctx := r.Context() + if wh.WithContextFunc != nil { + ctx = wh.WithContextFunc(ctx, r) + } + + var reviewResponse Response + if r.Body == nil { + err = errors.New("request body is empty") + wh.log.Error(err, "bad request") + reviewResponse = Errored(http.StatusBadRequest, err) + wh.writeResponse(w, reviewResponse) + return + } + + defer r.Body.Close() + if body, err = io.ReadAll(r.Body); err != nil { + wh.log.Error(err, "unable to read the body from the incoming request") + reviewResponse = Errored(http.StatusBadRequest, err) + wh.writeResponse(w, reviewResponse) + return + } + + // verify the content type is accurate + if contentType := r.Header.Get("Content-Type"); contentType != "application/json" { + err = fmt.Errorf("contentType=%s, expected application/json", contentType) + wh.log.Error(err, "unable to process a request with an unknown content type", "content type", contentType) + reviewResponse = Errored(http.StatusBadRequest, err) + wh.writeResponse(w, reviewResponse) + return + } + + // Both v1 and v1beta1 AdmissionReview types are exactly the same, so the v1beta1 type can + // be decoded into the v1 type. However the runtime codec's decoder guesses which type to + // decode into by type name if an Object's TypeMeta isn't set. By setting TypeMeta of an + // unregistered type to the v1 GVK, the decoder will coerce a v1beta1 AdmissionReview to v1. + // The actual AdmissionReview GVK will be used to write a typed response in case the + // webhook config permits multiple versions, otherwise this response will fail. + req := Request{} + ar := unversionedAdmissionReview{} + // avoid an extra copy + ar.Request = &req.AdmissionRequest + ar.SetGroupVersionKind(v1.SchemeGroupVersion.WithKind("AdmissionReview")) + _, actualAdmRevGVK, err := admissionCodecs.UniversalDeserializer().Decode(body, nil, &ar) + if err != nil { + wh.log.Error(err, "unable to decode the request") + reviewResponse = Errored(http.StatusBadRequest, err) + wh.writeResponse(w, reviewResponse) + return + } + wh.log.V(1).Info("received request", "UID", req.UID, "kind", req.Kind, "resource", req.Resource) + + reviewResponse = wh.Handle(ctx, req) + wh.writeResponseTyped(w, reviewResponse, actualAdmRevGVK) +} + +// writeResponse writes response to w generically, i.e. without encoding GVK information. +func (wh *Webhook) writeResponse(w io.Writer, response Response) { + wh.writeAdmissionResponse(w, v1.AdmissionReview{Response: &response.AdmissionResponse}) +} + +// writeResponseTyped writes response to w with GVK set to admRevGVK, which is necessary +// if multiple AdmissionReview versions are permitted by the webhook. 
+func (wh *Webhook) writeResponseTyped(w io.Writer, response Response, admRevGVK *schema.GroupVersionKind) { + ar := v1.AdmissionReview{ + Response: &response.AdmissionResponse, + } + // Default to a v1 AdmissionReview, otherwise the API server may not recognize the request + // if multiple AdmissionReview versions are permitted by the webhook config. + // TODO(estroz): this should be configurable since older API servers won't know about v1. + if admRevGVK == nil || *admRevGVK == (schema.GroupVersionKind{}) { + ar.SetGroupVersionKind(v1.SchemeGroupVersion.WithKind("AdmissionReview")) + } else { + ar.SetGroupVersionKind(*admRevGVK) + } + wh.writeAdmissionResponse(w, ar) +} + +// writeAdmissionResponse writes ar to w. +func (wh *Webhook) writeAdmissionResponse(w io.Writer, ar v1.AdmissionReview) { + if err := json.NewEncoder(w).Encode(ar); err != nil { + wh.log.Error(err, "unable to encode and write the response") + // Since the `ar v1.AdmissionReview` is a clear and legal object, + // it should not have problem to be marshalled into bytes. + // The error here is probably caused by the abnormal HTTP connection, + // e.g., broken pipe, so we can only write the error response once, + // to avoid endless circular calling. + serverError := Errored(http.StatusInternalServerError, err) + if err = json.NewEncoder(w).Encode(v1.AdmissionReview{Response: &serverError.AdmissionResponse}); err != nil { + wh.log.Error(err, "still unable to encode and write the InternalServerError response") + } + } else { + res := ar.Response + if log := wh.log; log.V(1).Enabled() { + if res.Result != nil { + log = log.WithValues("code", res.Result.Code, "reason", res.Result.Reason) + } + log.V(1).Info("wrote response", "UID", res.UID, "allowed", res.Allowed) + } + } +} + +// unversionedAdmissionReview is used to decode both v1 and v1beta1 AdmissionReview types. +type unversionedAdmissionReview struct { + v1.AdmissionReview +} + +var _ runtime.Object = &unversionedAdmissionReview{} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/inject.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/inject.go new file mode 100644 index 000000000..d5af0d598 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/inject.go @@ -0,0 +1,31 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +// DecoderInjector is used by the ControllerManager to inject decoder into webhook handlers. +type DecoderInjector interface { + InjectDecoder(*Decoder) error +} + +// InjectDecoderInto will set decoder on i and return the result if it implements Decoder. Returns +// false if i does not implement Decoder. 
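For reference, a sketch of a hand-written Handler that receives its Decoder through the DecoderInjector interface above; the podLabelChecker type and the required label are hypothetical:

package main

import (
	"context"
	"net/http"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

// podLabelChecker decodes the incoming Pod itself rather than going through
// the Validator/CustomValidator helpers.
type podLabelChecker struct {
	decoder *admission.Decoder
}

var _ admission.Handler = &podLabelChecker{}
var _ admission.DecoderInjector = &podLabelChecker{}

// InjectDecoder satisfies DecoderInjector, so the webhook machinery hands the
// handler a Decoder built from the server's scheme before requests arrive.
func (p *podLabelChecker) InjectDecoder(d *admission.Decoder) error {
	p.decoder = d
	return nil
}

func (p *podLabelChecker) Handle(ctx context.Context, req admission.Request) admission.Response {
	pod := &corev1.Pod{}
	if err := p.decoder.Decode(req, pod); err != nil {
		return admission.Errored(http.StatusBadRequest, err)
	}
	if _, ok := pod.Labels["team"]; !ok {
		return admission.Denied("pods must carry a team label")
	}
	return admission.Allowed("")
}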
+func InjectDecoderInto(decoder *Decoder, i interface{}) (bool, error) { + if s, ok := i.(DecoderInjector); ok { + return true, s.InjectDecoder(decoder) + } + return false, nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/multi.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/multi.go new file mode 100644 index 000000000..26900cf2e --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/multi.go @@ -0,0 +1,147 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + + jsonpatch "gomodules.xyz/jsonpatch/v2" + admissionv1 "k8s.io/api/admission/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "sigs.k8s.io/controller-runtime/pkg/runtime/inject" +) + +type multiMutating []Handler + +func (hs multiMutating) Handle(ctx context.Context, req Request) Response { + patches := []jsonpatch.JsonPatchOperation{} + for _, handler := range hs { + resp := handler.Handle(ctx, req) + if !resp.Allowed { + return resp + } + if resp.PatchType != nil && *resp.PatchType != admissionv1.PatchTypeJSONPatch { + return Errored(http.StatusInternalServerError, + fmt.Errorf("unexpected patch type returned by the handler: %v, only allow: %v", + resp.PatchType, admissionv1.PatchTypeJSONPatch)) + } + patches = append(patches, resp.Patches...) + } + var err error + marshaledPatch, err := json.Marshal(patches) + if err != nil { + return Errored(http.StatusBadRequest, fmt.Errorf("error when marshaling the patch: %w", err)) + } + return Response{ + AdmissionResponse: admissionv1.AdmissionResponse{ + Allowed: true, + Result: &metav1.Status{ + Code: http.StatusOK, + }, + Patch: marshaledPatch, + PatchType: func() *admissionv1.PatchType { pt := admissionv1.PatchTypeJSONPatch; return &pt }(), + }, + } +} + +// InjectFunc injects the field setter into the handlers. +func (hs multiMutating) InjectFunc(f inject.Func) error { + // inject directly into the handlers. It would be more correct + // to do this in a sync.Once in Handle (since we don't have some + // other start/finalize-type method), but it's more efficient to + // do it here, presumably. + for _, handler := range hs { + if err := f(handler); err != nil { + return err + } + } + + return nil +} + +// InjectDecoder injects the decoder into the handlers. +func (hs multiMutating) InjectDecoder(d *Decoder) error { + for _, handler := range hs { + if _, err := InjectDecoderInto(d, handler); err != nil { + return err + } + } + return nil +} + +// MultiMutatingHandler combines multiple mutating webhook handlers into a single +// mutating webhook handler. Handlers are called in sequential order, and the first +// `allowed: false` response may short-circuit the rest. Users must take care to +// ensure patches are disjoint. 
+func MultiMutatingHandler(handlers ...Handler) Handler { + return multiMutating(handlers) +} + +type multiValidating []Handler + +func (hs multiValidating) Handle(ctx context.Context, req Request) Response { + for _, handler := range hs { + resp := handler.Handle(ctx, req) + if !resp.Allowed { + return resp + } + } + return Response{ + AdmissionResponse: admissionv1.AdmissionResponse{ + Allowed: true, + Result: &metav1.Status{ + Code: http.StatusOK, + }, + }, + } +} + +// MultiValidatingHandler combines multiple validating webhook handlers into a single +// validating webhook handler. Handlers are called in sequential order, and the first +// `allowed: false` response may short-circuit the rest. +func MultiValidatingHandler(handlers ...Handler) Handler { + return multiValidating(handlers) +} + +// InjectFunc injects the field setter into the handlers. +func (hs multiValidating) InjectFunc(f inject.Func) error { + // inject directly into the handlers. It would be more correct + // to do this in a sync.Once in Handle (since we don't have some + // other start/finalize-type method), but it's more efficient to + // do it here, presumably. + for _, handler := range hs { + if err := f(handler); err != nil { + return err + } + } + + return nil +} + +// InjectDecoder injects the decoder into the handlers. +func (hs multiValidating) InjectDecoder(d *Decoder) error { + for _, handler := range hs { + if _, err := InjectDecoderInto(d, handler); err != nil { + return err + } + } + return nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/response.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/response.go new file mode 100644 index 000000000..24ff1dee3 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/response.go @@ -0,0 +1,121 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import ( + "net/http" + + jsonpatch "gomodules.xyz/jsonpatch/v2" + admissionv1 "k8s.io/api/admission/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Allowed constructs a response indicating that the given operation +// is allowed (without any patches). +func Allowed(reason string) Response { + return ValidationResponse(true, reason) +} + +// Denied constructs a response indicating that the given operation +// is not allowed. +func Denied(reason string) Response { + return ValidationResponse(false, reason) +} + +// Patched constructs a response indicating that the given operation is +// allowed, and that the target object should be modified by the given +// JSONPatch operations. +func Patched(reason string, patches ...jsonpatch.JsonPatchOperation) Response { + resp := Allowed(reason) + resp.Patches = patches + + return resp +} + +// Errored creates a new Response for error-handling a request. 
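A short sketch of composing several mutating handlers behind one endpoint with MultiMutatingHandler; the two handler parameters are hypothetical and, as noted above, must produce disjoint patches:

package main

import (
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

// buildMutatingWebhook chains two mutating handlers into a single webhook;
// their JSON patches are concatenated in order.
func buildMutatingWebhook(addSidecar, addLabels admission.Handler) *admission.Webhook {
	return &admission.Webhook{
		Handler: admission.MultiMutatingHandler(addSidecar, addLabels),
	}
}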
+func Errored(code int32, err error) Response { + return Response{ + AdmissionResponse: admissionv1.AdmissionResponse{ + Allowed: false, + Result: &metav1.Status{ + Code: code, + Message: err.Error(), + }, + }, + } +} + +// ValidationResponse returns a response for admitting a request. +func ValidationResponse(allowed bool, reason string) Response { + code := http.StatusForbidden + if allowed { + code = http.StatusOK + } + resp := Response{ + AdmissionResponse: admissionv1.AdmissionResponse{ + Allowed: allowed, + Result: &metav1.Status{ + Code: int32(code), + }, + }, + } + if len(reason) > 0 { + resp.Result.Reason = metav1.StatusReason(reason) + } + return resp +} + +// PatchResponseFromRaw takes 2 byte arrays and returns a new response with json patch. +// The original object should be passed in as raw bytes to avoid the roundtripping problem +// described in https://github.com/kubernetes-sigs/kubebuilder/issues/510. +func PatchResponseFromRaw(original, current []byte) Response { + patches, err := jsonpatch.CreatePatch(original, current) + if err != nil { + return Errored(http.StatusInternalServerError, err) + } + return Response{ + Patches: patches, + AdmissionResponse: admissionv1.AdmissionResponse{ + Allowed: true, + PatchType: func() *admissionv1.PatchType { + if len(patches) == 0 { + return nil + } + pt := admissionv1.PatchTypeJSONPatch + return &pt + }(), + }, + } +} + +// validationResponseFromStatus returns a response for admitting a request with provided Status object. +func validationResponseFromStatus(allowed bool, status metav1.Status) Response { + resp := Response{ + AdmissionResponse: admissionv1.AdmissionResponse{ + Allowed: allowed, + Result: &status, + }, + } + return resp +} + +// WithWarnings adds the given warnings to the Response. +// If any warnings were already given, they will not be overwritten. +func (r Response) WithWarnings(warnings ...string) Response { + r.AdmissionResponse.Warnings = append(r.AdmissionResponse.Warnings, warnings...) + return r +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go new file mode 100644 index 000000000..4b27e75ed --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go @@ -0,0 +1,122 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import ( + "context" + goerrors "errors" + "net/http" + + v1 "k8s.io/api/admission/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" +) + +// Validator defines functions for validating an operation. +type Validator interface { + runtime.Object + ValidateCreate() error + ValidateUpdate(old runtime.Object) error + ValidateDelete() error +} + +// ValidatingWebhookFor creates a new Webhook for validating the provided type. 
+func ValidatingWebhookFor(validator Validator) *Webhook { + return &Webhook{ + Handler: &validatingHandler{validator: validator}, + } +} + +type validatingHandler struct { + validator Validator + decoder *Decoder +} + +var _ DecoderInjector = &validatingHandler{} + +// InjectDecoder injects the decoder into a validatingHandler. +func (h *validatingHandler) InjectDecoder(d *Decoder) error { + h.decoder = d + return nil +} + +// Handle handles admission requests. +func (h *validatingHandler) Handle(ctx context.Context, req Request) Response { + if h.validator == nil { + panic("validator should never be nil") + } + + // Get the object in the request + obj := h.validator.DeepCopyObject().(Validator) + if req.Operation == v1.Create { + err := h.decoder.Decode(req, obj) + if err != nil { + return Errored(http.StatusBadRequest, err) + } + + err = obj.ValidateCreate() + if err != nil { + var apiStatus apierrors.APIStatus + if goerrors.As(err, &apiStatus) { + return validationResponseFromStatus(false, apiStatus.Status()) + } + return Denied(err.Error()) + } + } + + if req.Operation == v1.Update { + oldObj := obj.DeepCopyObject() + + err := h.decoder.DecodeRaw(req.Object, obj) + if err != nil { + return Errored(http.StatusBadRequest, err) + } + err = h.decoder.DecodeRaw(req.OldObject, oldObj) + if err != nil { + return Errored(http.StatusBadRequest, err) + } + + err = obj.ValidateUpdate(oldObj) + if err != nil { + var apiStatus apierrors.APIStatus + if goerrors.As(err, &apiStatus) { + return validationResponseFromStatus(false, apiStatus.Status()) + } + return Denied(err.Error()) + } + } + + if req.Operation == v1.Delete { + // In reference to PR: https://github.com/kubernetes/kubernetes/pull/76346 + // OldObject contains the object being deleted + err := h.decoder.DecodeRaw(req.OldObject, obj) + if err != nil { + return Errored(http.StatusBadRequest, err) + } + + err = obj.ValidateDelete() + if err != nil { + var apiStatus apierrors.APIStatus + if goerrors.As(err, &apiStatus) { + return validationResponseFromStatus(false, apiStatus.Status()) + } + return Denied(err.Error()) + } + } + + return Allowed("") +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator_custom.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator_custom.go new file mode 100644 index 000000000..33252f113 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator_custom.go @@ -0,0 +1,113 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package admission + +import ( + "context" + "errors" + "fmt" + "net/http" + + v1 "k8s.io/api/admission/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" +) + +// CustomValidator defines functions for validating an operation. 
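A minimal sketch of a CustomValidator wired up through WithCustomValidator; the podPolicy type and the specific rule it enforces are hypothetical:

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

// podPolicy rejects Pods that declare no containers.
type podPolicy struct{}

var _ admission.CustomValidator = podPolicy{}

func (podPolicy) ValidateCreate(ctx context.Context, obj runtime.Object) error {
	pod, ok := obj.(*corev1.Pod)
	if !ok {
		return fmt.Errorf("expected a Pod but got a %T", obj)
	}
	if len(pod.Spec.Containers) == 0 {
		return fmt.Errorf("pod must declare at least one container")
	}
	return nil
}

func (p podPolicy) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) error {
	// Re-run the create-time checks against the updated object.
	return p.ValidateCreate(ctx, newObj)
}

func (podPolicy) ValidateDelete(ctx context.Context, obj runtime.Object) error {
	// Deletion is always allowed in this sketch.
	return nil
}

// podValidatingWebhook can be registered on a webhook server like any other
// admission handler.
var podValidatingWebhook = admission.WithCustomValidator(&corev1.Pod{}, podPolicy{})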
+type CustomValidator interface { + ValidateCreate(ctx context.Context, obj runtime.Object) error + ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) error + ValidateDelete(ctx context.Context, obj runtime.Object) error +} + +// WithCustomValidator creates a new Webhook for validating the provided type. +func WithCustomValidator(obj runtime.Object, validator CustomValidator) *Webhook { + return &Webhook{ + Handler: &validatorForType{object: obj, validator: validator}, + } +} + +type validatorForType struct { + validator CustomValidator + object runtime.Object + decoder *Decoder +} + +var _ DecoderInjector = &validatorForType{} + +// InjectDecoder injects the decoder into a validatingHandler. +func (h *validatorForType) InjectDecoder(d *Decoder) error { + h.decoder = d + return nil +} + +// Handle handles admission requests. +func (h *validatorForType) Handle(ctx context.Context, req Request) Response { + if h.validator == nil { + panic("validator should never be nil") + } + if h.object == nil { + panic("object should never be nil") + } + + ctx = NewContextWithRequest(ctx, req) + + // Get the object in the request + obj := h.object.DeepCopyObject() + + var err error + switch req.Operation { + case v1.Create: + if err := h.decoder.Decode(req, obj); err != nil { + return Errored(http.StatusBadRequest, err) + } + + err = h.validator.ValidateCreate(ctx, obj) + case v1.Update: + oldObj := obj.DeepCopyObject() + if err := h.decoder.DecodeRaw(req.Object, obj); err != nil { + return Errored(http.StatusBadRequest, err) + } + if err := h.decoder.DecodeRaw(req.OldObject, oldObj); err != nil { + return Errored(http.StatusBadRequest, err) + } + + err = h.validator.ValidateUpdate(ctx, oldObj, obj) + case v1.Delete: + // In reference to PR: https://github.com/kubernetes/kubernetes/pull/76346 + // OldObject contains the object being deleted + if err := h.decoder.DecodeRaw(req.OldObject, obj); err != nil { + return Errored(http.StatusBadRequest, err) + } + + err = h.validator.ValidateDelete(ctx, obj) + default: + return Errored(http.StatusBadRequest, fmt.Errorf("unknown operation request %q", req.Operation)) + } + + // Check the error message first. + if err != nil { + var apiStatus apierrors.APIStatus + if errors.As(err, &apiStatus) { + return validationResponseFromStatus(false, apiStatus.Status()) + } + return Denied(err.Error()) + } + + // Return allowed if everything succeeded. + return Allowed("") +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go new file mode 100644 index 000000000..cfc46637c --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go @@ -0,0 +1,273 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package admission + +import ( + "context" + "errors" + "net/http" + + "github.com/go-logr/logr" + jsonpatch "gomodules.xyz/jsonpatch/v2" + admissionv1 "k8s.io/api/admission/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/json" + "k8s.io/client-go/kubernetes/scheme" + + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" + "sigs.k8s.io/controller-runtime/pkg/runtime/inject" + "sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics" +) + +var ( + errUnableToEncodeResponse = errors.New("unable to encode response") +) + +// Request defines the input for an admission handler. +// It contains information to identify the object in +// question (group, version, kind, resource, subresource, +// name, namespace), as well as the operation in question +// (e.g. Get, Create, etc), and the object itself. +type Request struct { + admissionv1.AdmissionRequest +} + +// Response is the output of an admission handler. +// It contains a response indicating if a given +// operation is allowed, as well as a set of patches +// to mutate the object in the case of a mutating admission handler. +type Response struct { + // Patches are the JSON patches for mutating webhooks. + // Using this instead of setting Response.Patch to minimize + // overhead of serialization and deserialization. + // Patches set here will override any patches in the response, + // so leave this empty if you want to set the patch response directly. + Patches []jsonpatch.JsonPatchOperation + // AdmissionResponse is the raw admission response. + // The Patch field in it will be overwritten by the listed patches. + admissionv1.AdmissionResponse +} + +// Complete populates any fields that are yet to be set in +// the underlying AdmissionResponse, It mutates the response. +func (r *Response) Complete(req Request) error { + r.UID = req.UID + + // ensure that we have a valid status code + if r.Result == nil { + r.Result = &metav1.Status{} + } + if r.Result.Code == 0 { + r.Result.Code = http.StatusOK + } + // TODO(directxman12): do we need to populate this further, and/or + // is code actually necessary (the same webhook doesn't use it) + + if len(r.Patches) == 0 { + return nil + } + + var err error + r.Patch, err = json.Marshal(r.Patches) + if err != nil { + return err + } + patchType := admissionv1.PatchTypeJSONPatch + r.PatchType = &patchType + + return nil +} + +// Handler can handle an AdmissionRequest. +type Handler interface { + // Handle yields a response to an AdmissionRequest. + // + // The supplied context is extracted from the received http.Request, allowing wrapping + // http.Handlers to inject values into and control cancelation of downstream request processing. + Handle(context.Context, Request) Response +} + +// HandlerFunc implements Handler interface using a single function. +type HandlerFunc func(context.Context, Request) Response + +var _ Handler = HandlerFunc(nil) + +// Handle process the AdmissionRequest by invoking the underlying function. +func (f HandlerFunc) Handle(ctx context.Context, req Request) Response { + return f(ctx, req) +} + +// Webhook represents each individual webhook. +// +// It must be registered with a webhook.Server or +// populated by StandaloneWebhook to be ran on an arbitrary HTTP server. +type Webhook struct { + // Handler actually processes an admission request returning whether it was allowed or denied, + // and potentially patches to apply to the handler. 
+ Handler Handler + + // WithContextFunc will allow you to take the http.Request.Context() and + // add any additional information such as passing the request path or + // headers thus allowing you to read them from within the handler + WithContextFunc func(context.Context, *http.Request) context.Context + + // decoder is constructed on receiving a scheme and passed down to then handler + decoder *Decoder + + log logr.Logger +} + +// InjectLogger gets a handle to a logging instance, hopefully with more info about this particular webhook. +func (wh *Webhook) InjectLogger(l logr.Logger) error { + wh.log = l + return nil +} + +// Handle processes AdmissionRequest. +// If the webhook is mutating type, it delegates the AdmissionRequest to each handler and merge the patches. +// If the webhook is validating type, it delegates the AdmissionRequest to each handler and +// deny the request if anyone denies. +func (wh *Webhook) Handle(ctx context.Context, req Request) Response { + resp := wh.Handler.Handle(ctx, req) + if err := resp.Complete(req); err != nil { + wh.log.Error(err, "unable to encode response") + return Errored(http.StatusInternalServerError, errUnableToEncodeResponse) + } + + return resp +} + +// InjectScheme injects a scheme into the webhook, in order to construct a Decoder. +func (wh *Webhook) InjectScheme(s *runtime.Scheme) error { + // TODO(directxman12): we should have a better way to pass this down + + var err error + wh.decoder, err = NewDecoder(s) + if err != nil { + return err + } + + // inject the decoder here too, just in case the order of calling this is not + // scheme first, then inject func + if wh.Handler != nil { + if _, err := InjectDecoderInto(wh.GetDecoder(), wh.Handler); err != nil { + return err + } + } + + return nil +} + +// GetDecoder returns a decoder to decode the objects embedded in admission requests. +// It may be nil if we haven't received a scheme to use to determine object types yet. +func (wh *Webhook) GetDecoder() *Decoder { + return wh.decoder +} + +// InjectFunc injects the field setter into the webhook. +func (wh *Webhook) InjectFunc(f inject.Func) error { + // inject directly into the handlers. It would be more correct + // to do this in a sync.Once in Handle (since we don't have some + // other start/finalize-type method), but it's more efficient to + // do it here, presumably. + + // also inject a decoder, and wrap this so that we get a setFields + // that injects a decoder (hopefully things don't ignore the duplicate + // InjectorInto call). + + var setFields inject.Func + setFields = func(target interface{}) error { + if err := f(target); err != nil { + return err + } + + if _, err := inject.InjectorInto(setFields, target); err != nil { + return err + } + + if _, err := InjectDecoderInto(wh.GetDecoder(), target); err != nil { + return err + } + + return nil + } + + return setFields(wh.Handler) +} + +// StandaloneOptions let you configure a StandaloneWebhook. +type StandaloneOptions struct { + // Scheme is the scheme used to resolve runtime.Objects to GroupVersionKinds / Resources + // Defaults to the kubernetes/client-go scheme.Scheme, but it's almost always better + // idea to pass your own scheme in. See the documentation in pkg/scheme for more information. + Scheme *runtime.Scheme + // Logger to be used by the webhook. + // If none is set, it defaults to log.Log global logger. + Logger logr.Logger + // MetricsPath is used for labelling prometheus metrics + // by the path is served on. 
+ // If none is set, prometheus metrics will not be generated. + MetricsPath string +} + +// StandaloneWebhook prepares a webhook for use without a webhook.Server, +// passing in the information normally populated by webhook.Server +// and instrumenting the webhook with metrics. +// +// Use this to attach your webhook to an arbitrary HTTP server or mux. +// +// Note that you are responsible for terminating TLS if you use StandaloneWebhook +// in your own server/mux. In order to be accessed by a kubernetes cluster, +// all webhook servers require TLS. +func StandaloneWebhook(hook *Webhook, opts StandaloneOptions) (http.Handler, error) { + if opts.Scheme == nil { + opts.Scheme = scheme.Scheme + } + + if err := hook.InjectScheme(opts.Scheme); err != nil { + return nil, err + } + + if opts.Logger.GetSink() == nil { + opts.Logger = logf.RuntimeLog.WithName("webhook") + } + hook.log = opts.Logger + + if opts.MetricsPath == "" { + return hook, nil + } + return metrics.InstrumentedHook(opts.MetricsPath, hook), nil +} + +// requestContextKey is how we find the admission.Request in a context.Context. +type requestContextKey struct{} + +// RequestFromContext returns an admission.Request from ctx. +func RequestFromContext(ctx context.Context) (Request, error) { + if v, ok := ctx.Value(requestContextKey{}).(Request); ok { + return v, nil + } + + return Request{}, errors.New("admission.Request not found in context") +} + +// NewContextWithRequest returns a new Context, derived from ctx, which carries the +// provided admission.Request. +func NewContextWithRequest(ctx context.Context, req Request) context.Context { + return context.WithValue(ctx, requestContextKey{}, req) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/alias.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/alias.go new file mode 100644 index 000000000..293137db4 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/alias.go @@ -0,0 +1,79 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package webhook + +import ( + "gomodules.xyz/jsonpatch/v2" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// define some aliases for common bits of the webhook functionality + +// Defaulter defines functions for setting defaults on resources. +type Defaulter = admission.Defaulter + +// Validator defines functions for validating an operation. +type Validator = admission.Validator + +// CustomDefaulter defines functions for setting defaults on resources. +type CustomDefaulter = admission.CustomDefaulter + +// CustomValidator defines functions for validating an operation. +type CustomValidator = admission.CustomValidator + +// AdmissionRequest defines the input for an admission handler. +// It contains information to identify the object in +// question (group, version, kind, resource, subresource, +// name, namespace), as well as the operation in question +// (e.g. Get, Create, etc), and the object itself. 
+type AdmissionRequest = admission.Request + +// AdmissionResponse is the output of an admission handler. +// It contains a response indicating if a given +// operation is allowed, as well as a set of patches +// to mutate the object in the case of a mutating admission handler. +type AdmissionResponse = admission.Response + +// Admission is webhook suitable for registration with the server +// an admission webhook that validates API operations and potentially +// mutates their contents. +type Admission = admission.Webhook + +// AdmissionHandler knows how to process admission requests, validating them, +// and potentially mutating the objects they contain. +type AdmissionHandler = admission.Handler + +// AdmissionDecoder knows how to decode objects from admission requests. +type AdmissionDecoder = admission.Decoder + +// JSONPatchOp represents a single JSONPatch patch operation. +type JSONPatchOp = jsonpatch.Operation + +var ( + // Allowed indicates that the admission request should be allowed for the given reason. + Allowed = admission.Allowed + + // Denied indicates that the admission request should be denied for the given reason. + Denied = admission.Denied + + // Patched indicates that the admission request should be allowed for the given reason, + // and that the contained object should be mutated using the given patches. + Patched = admission.Patched + + // Errored indicates that an error occurred in the admission request. + Errored = admission.Errored +) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/conversion.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/conversion.go new file mode 100644 index 000000000..a5b7a282c --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/conversion.go @@ -0,0 +1,345 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package conversion provides implementation for CRD conversion webhook that implements handler for version conversion requests for types that are convertible. + +See pkg/conversion for interface definitions required to ensure an API Type is convertible. +*/ +package conversion + +import ( + "encoding/json" + "fmt" + "net/http" + + apix "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/conversion" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +var ( + log = logf.Log.WithName("conversion-webhook") +) + +// Webhook implements a CRD conversion webhook HTTP handler. +type Webhook struct { + scheme *runtime.Scheme + decoder *Decoder +} + +// InjectScheme injects a scheme into the webhook, in order to construct a Decoder. 
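For completeness, a sketch of serving an admission webhook outside the manager's built-in server via StandaloneWebhook; the path, port, and scheme wiring are illustrative assumptions, and TLS termination is left to the caller:

package main

import (
	"net/http"

	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

// serveStandalone mounts a webhook on a plain ServeMux.
func serveStandalone(hook *admission.Webhook, scheme *runtime.Scheme) error {
	handler, err := admission.StandaloneWebhook(hook, admission.StandaloneOptions{
		Scheme:      scheme,
		MetricsPath: "/validate-pods",
	})
	if err != nil {
		return err
	}
	mux := http.NewServeMux()
	mux.Handle("/validate-pods", handler)
	// A real deployment must serve TLS (e.g. ListenAndServeTLS with the
	// webhook's serving certificate); plain HTTP is shown only for brevity.
	return http.ListenAndServe(":9443", mux)
}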
+func (wh *Webhook) InjectScheme(s *runtime.Scheme) error { + var err error + wh.scheme = s + wh.decoder, err = NewDecoder(s) + if err != nil { + return err + } + + return nil +} + +// ensure Webhook implements http.Handler +var _ http.Handler = &Webhook{} + +func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { + convertReview := &apix.ConversionReview{} + err := json.NewDecoder(r.Body).Decode(convertReview) + if err != nil { + log.Error(err, "failed to read conversion request") + w.WriteHeader(http.StatusBadRequest) + return + } + + // TODO(droot): may be move the conversion logic to a separate module to + // decouple it from the http layer ? + resp, err := wh.handleConvertRequest(convertReview.Request) + if err != nil { + log.Error(err, "failed to convert", "request", convertReview.Request.UID) + convertReview.Response = errored(err) + } else { + convertReview.Response = resp + } + convertReview.Response.UID = convertReview.Request.UID + convertReview.Request = nil + + err = json.NewEncoder(w).Encode(convertReview) + if err != nil { + log.Error(err, "failed to write response") + return + } +} + +// handles a version conversion request. +func (wh *Webhook) handleConvertRequest(req *apix.ConversionRequest) (*apix.ConversionResponse, error) { + if req == nil { + return nil, fmt.Errorf("conversion request is nil") + } + var objects []runtime.RawExtension + + for _, obj := range req.Objects { + src, gvk, err := wh.decoder.Decode(obj.Raw) + if err != nil { + return nil, err + } + dst, err := wh.allocateDstObject(req.DesiredAPIVersion, gvk.Kind) + if err != nil { + return nil, err + } + err = wh.convertObject(src, dst) + if err != nil { + return nil, err + } + objects = append(objects, runtime.RawExtension{Object: dst}) + } + return &apix.ConversionResponse{ + UID: req.UID, + ConvertedObjects: objects, + Result: metav1.Status{ + Status: metav1.StatusSuccess, + }, + }, nil +} + +// convertObject will convert given a src object to dst object. 
+// Note(droot): couldn't find a way to reduce the cyclomatic complexity under 10 +// without compromising readability, so disabling gocyclo linter +func (wh *Webhook) convertObject(src, dst runtime.Object) error { + srcGVK := src.GetObjectKind().GroupVersionKind() + dstGVK := dst.GetObjectKind().GroupVersionKind() + + if srcGVK.GroupKind() != dstGVK.GroupKind() { + return fmt.Errorf("src %T and dst %T does not belong to same API Group", src, dst) + } + + if srcGVK == dstGVK { + return fmt.Errorf("conversion is not allowed between same type %T", src) + } + + srcIsHub, dstIsHub := isHub(src), isHub(dst) + srcIsConvertible, dstIsConvertible := isConvertible(src), isConvertible(dst) + + switch { + case srcIsHub && dstIsConvertible: + return dst.(conversion.Convertible).ConvertFrom(src.(conversion.Hub)) + case dstIsHub && srcIsConvertible: + return src.(conversion.Convertible).ConvertTo(dst.(conversion.Hub)) + case srcIsConvertible && dstIsConvertible: + return wh.convertViaHub(src.(conversion.Convertible), dst.(conversion.Convertible)) + default: + return fmt.Errorf("%T is not convertible to %T", src, dst) + } +} + +func (wh *Webhook) convertViaHub(src, dst conversion.Convertible) error { + hub, err := wh.getHub(src) + if err != nil { + return err + } + + if hub == nil { + return fmt.Errorf("%s does not have any Hub defined", src) + } + + err = src.ConvertTo(hub) + if err != nil { + return fmt.Errorf("%T failed to convert to hub version %T : %w", src, hub, err) + } + + err = dst.ConvertFrom(hub) + if err != nil { + return fmt.Errorf("%T failed to convert from hub version %T : %w", dst, hub, err) + } + + return nil +} + +// getHub returns an instance of the Hub for passed-in object's group/kind. +func (wh *Webhook) getHub(obj runtime.Object) (conversion.Hub, error) { + gvks, err := objectGVKs(wh.scheme, obj) + if err != nil { + return nil, err + } + if len(gvks) == 0 { + return nil, fmt.Errorf("error retrieving gvks for object : %v", obj) + } + + var hub conversion.Hub + var hubFoundAlready bool + for _, gvk := range gvks { + instance, err := wh.scheme.New(gvk) + if err != nil { + return nil, fmt.Errorf("failed to allocate an instance for gvk %v: %w", gvk, err) + } + if val, isHub := instance.(conversion.Hub); isHub { + if hubFoundAlready { + return nil, fmt.Errorf("multiple hub version defined for %T", obj) + } + hubFoundAlready = true + hub = val + } + } + return hub, nil +} + +// allocateDstObject returns an instance for a given GVK. +func (wh *Webhook) allocateDstObject(apiVersion, kind string) (runtime.Object, error) { + gvk := schema.FromAPIVersionAndKind(apiVersion, kind) + + obj, err := wh.scheme.New(gvk) + if err != nil { + return obj, err + } + + t, err := meta.TypeAccessor(obj) + if err != nil { + return obj, err + } + + t.SetAPIVersion(apiVersion) + t.SetKind(kind) + + return obj, nil +} + +// IsConvertible determines if given type is convertible or not. For a type +// to be convertible, the group-kind needs to have a Hub type defined and all +// non-hub types must be able to convert to/from Hub. 
+func IsConvertible(scheme *runtime.Scheme, obj runtime.Object) (bool, error) { + var hubs, spokes, nonSpokes []runtime.Object + + gvks, err := objectGVKs(scheme, obj) + if err != nil { + return false, err + } + if len(gvks) == 0 { + return false, fmt.Errorf("error retrieving gvks for object : %v", obj) + } + + for _, gvk := range gvks { + instance, err := scheme.New(gvk) + if err != nil { + return false, fmt.Errorf("failed to allocate an instance for gvk %v: %w", gvk, err) + } + + if isHub(instance) { + hubs = append(hubs, instance) + continue + } + + if !isConvertible(instance) { + nonSpokes = append(nonSpokes, instance) + continue + } + + spokes = append(spokes, instance) + } + + if len(gvks) == 1 { + return false, nil // single version + } + + if len(hubs) == 0 && len(spokes) == 0 { + // multiple version detected with no conversion implementation. This is + // true for multi-version built-in types. + return false, nil + } + + if len(hubs) == 1 && len(nonSpokes) == 0 { // convertible + return true, nil + } + + return false, PartialImplementationError{ + hubs: hubs, + nonSpokes: nonSpokes, + spokes: spokes, + } +} + +// objectGVKs returns all (Group,Version,Kind) for the Group/Kind of given object. +func objectGVKs(scheme *runtime.Scheme, obj runtime.Object) ([]schema.GroupVersionKind, error) { + // NB: we should not use `obj.GetObjectKind().GroupVersionKind()` to get the + // GVK here, since it is parsed from apiVersion and kind fields and it may + // return empty GVK if obj is an uninitialized object. + objGVKs, _, err := scheme.ObjectKinds(obj) + if err != nil { + return nil, err + } + if len(objGVKs) != 1 { + return nil, fmt.Errorf("expect to get only one GVK for %v", obj) + } + objGVK := objGVKs[0] + knownTypes := scheme.AllKnownTypes() + + var gvks []schema.GroupVersionKind + for gvk := range knownTypes { + if objGVK.GroupKind() == gvk.GroupKind() { + gvks = append(gvks, gvk) + } + } + return gvks, nil +} + +// PartialImplementationError represents an error due to partial conversion +// implementation such as hub without spokes, multiple hubs or spokes without hub. +type PartialImplementationError struct { + gvk schema.GroupVersionKind + hubs []runtime.Object + nonSpokes []runtime.Object + spokes []runtime.Object +} + +func (e PartialImplementationError) Error() string { + if len(e.hubs) == 0 { + return fmt.Sprintf("no hub defined for gvk %s", e.gvk) + } + if len(e.hubs) > 1 { + return fmt.Sprintf("multiple(%d) hubs defined for group-kind '%s' ", + len(e.hubs), e.gvk.GroupKind()) + } + if len(e.nonSpokes) > 0 { + return fmt.Sprintf("%d inconvertible types detected for group-kind '%s'", + len(e.nonSpokes), e.gvk.GroupKind()) + } + return "" +} + +// isHub determines if passed-in object is a Hub or not. +func isHub(obj runtime.Object) bool { + _, yes := obj.(conversion.Hub) + return yes +} + +// isConvertible determines if passed-in object is a convertible. +func isConvertible(obj runtime.Object) bool { + _, yes := obj.(conversion.Convertible) + return yes +} + +// helper to construct error response. 
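For context on the hub-and-spoke routing implemented by convertObject and convertViaHub above, here is a minimal sketch of what the API types themselves have to provide. The WidgetV1 (spoke) and WidgetV2 (hub) types are hypothetical; in a real project each version lives in its own package under the same Group/Kind, and the deep-copy methods are generated by controller-gen rather than hand-written.

```go
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/conversion"
)

// WidgetV2 is the hub version: every other version converts to and from it.
type WidgetV2 struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	Replicas          int32 `json:"replicas"`
}

// Hub marks WidgetV2 as the hub; isHub() in the webhook detects this interface.
func (*WidgetV2) Hub() {}

// WidgetV1 is a spoke version that knows how to convert to and from the hub.
type WidgetV1 struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	Size              int32 `json:"size"`
}

// ConvertTo converts this WidgetV1 to the hub version.
func (src *WidgetV1) ConvertTo(dstRaw conversion.Hub) error {
	dst := dstRaw.(*WidgetV2)
	dst.ObjectMeta = src.ObjectMeta
	dst.Replicas = src.Size
	return nil
}

// ConvertFrom converts from the hub version into this WidgetV1.
func (dst *WidgetV1) ConvertFrom(srcRaw conversion.Hub) error {
	src := srcRaw.(*WidgetV2)
	dst.ObjectMeta = src.ObjectMeta
	dst.Size = src.Replicas
	return nil
}

// Shallow copies, just enough to satisfy runtime.Object for this sketch;
// real projects generate proper DeepCopyObject implementations.
func (in *WidgetV1) DeepCopyObject() runtime.Object { out := *in; return &out }
func (in *WidgetV2) DeepCopyObject() runtime.Object { out := *in; return &out }

var (
	_ conversion.Convertible = &WidgetV1{}
	_ conversion.Hub         = &WidgetV2{}
)
```

With both versions registered in the scheme, IsConvertible reports true for the group-kind and handleConvertRequest can convert either direction through the hub.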
+func errored(err error) *apix.ConversionResponse { + return &apix.ConversionResponse{ + Result: metav1.Status{ + Status: metav1.StatusFailure, + Message: err.Error(), + }, + } +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/decoder.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/decoder.go new file mode 100644 index 000000000..6a9e9c236 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/decoder.go @@ -0,0 +1,47 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package conversion + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" +) + +// Decoder knows how to decode the contents of a CRD version conversion +// request into a concrete object. +// TODO(droot): consider reusing decoder from admission pkg for this. +type Decoder struct { + codecs serializer.CodecFactory +} + +// NewDecoder creates a Decoder given the runtime.Scheme +func NewDecoder(scheme *runtime.Scheme) (*Decoder, error) { + return &Decoder{codecs: serializer.NewCodecFactory(scheme)}, nil +} + +// Decode decodes the inlined object. +func (d *Decoder) Decode(content []byte) (runtime.Object, *schema.GroupVersionKind, error) { + deserializer := d.codecs.UniversalDeserializer() + return deserializer.Decode(content, nil, nil) +} + +// DecodeInto decodes the inlined object in the into the passed-in runtime.Object. +func (d *Decoder) DecodeInto(content []byte, into runtime.Object) error { + deserializer := d.codecs.UniversalDeserializer() + return runtime.DecodeInto(deserializer, content, into) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/doc.go new file mode 100644 index 000000000..2c93f0d99 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/doc.go @@ -0,0 +1,28 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package webhook provides methods to build and bootstrap a webhook server. + +Currently, it only supports admission webhooks. It will support CRD conversion webhooks in the near future. 
+*/ +package webhook + +import ( + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" +) + +var log = logf.RuntimeLog.WithName("webhook") diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics/metrics.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics/metrics.go new file mode 100644 index 000000000..557004908 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics/metrics.go @@ -0,0 +1,85 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "net/http" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + + "sigs.k8s.io/controller-runtime/pkg/metrics" +) + +var ( + // RequestLatency is a prometheus metric which is a histogram of the latency + // of processing admission requests. + RequestLatency = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "controller_runtime_webhook_latency_seconds", + Help: "Histogram of the latency of processing admission requests", + }, + []string{"webhook"}, + ) + + // RequestTotal is a prometheus metric which is a counter of the total processed admission requests. + RequestTotal = func() *prometheus.CounterVec { + return prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "controller_runtime_webhook_requests_total", + Help: "Total number of admission requests by HTTP status code.", + }, + []string{"webhook", "code"}, + ) + }() + + // RequestInFlight is a prometheus metric which is a gauge of the in-flight admission requests. + RequestInFlight = func() *prometheus.GaugeVec { + return prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "controller_runtime_webhook_requests_in_flight", + Help: "Current number of admission requests being served.", + }, + []string{"webhook"}, + ) + }() +) + +func init() { + metrics.Registry.MustRegister(RequestLatency, RequestTotal, RequestInFlight) +} + +// InstrumentedHook adds some instrumentation on top of the given webhook. +func InstrumentedHook(path string, hookRaw http.Handler) http.Handler { + lbl := prometheus.Labels{"webhook": path} + + lat := RequestLatency.MustCurryWith(lbl) + cnt := RequestTotal.MustCurryWith(lbl) + gge := RequestInFlight.With(lbl) + + // Initialize the most likely HTTP status codes. + cnt.WithLabelValues("200") + cnt.WithLabelValues("500") + + return promhttp.InstrumentHandlerDuration( + lat, + promhttp.InstrumentHandlerCounter( + cnt, + promhttp.InstrumentHandlerInFlight(gge, hookRaw), + ), + ) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go new file mode 100644 index 000000000..fa56be5d5 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go @@ -0,0 +1,345 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package webhook + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "net" + "net/http" + "os" + "path/filepath" + "strconv" + "sync" + "time" + + "k8s.io/apimachinery/pkg/runtime" + kscheme "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/certwatcher" + "sigs.k8s.io/controller-runtime/pkg/healthz" + "sigs.k8s.io/controller-runtime/pkg/internal/httpserver" + "sigs.k8s.io/controller-runtime/pkg/runtime/inject" + "sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics" +) + +// DefaultPort is the default port that the webhook server serves. +var DefaultPort = 9443 + +// Server is an admission webhook server that can serve traffic and +// generates related k8s resources for deploying. +// +// TLS is required for a webhook to be accessed by kubernetes, so +// you must provide a CertName and KeyName or have valid cert/key +// at the default locations (tls.crt and tls.key). If you do not +// want to configure TLS (i.e for testing purposes) run an +// admission.StandaloneWebhook in your own server. +type Server struct { + // Host is the address that the server will listen on. + // Defaults to "" - all addresses. + Host string + + // Port is the port number that the server will serve. + // It will be defaulted to 9443 if unspecified. + Port int + + // CertDir is the directory that contains the server key and certificate. The + // server key and certificate. + CertDir string + + // CertName is the server certificate name. Defaults to tls.crt. + CertName string + + // KeyName is the server key name. Defaults to tls.key. + KeyName string + + // ClientCAName is the CA certificate name which server used to verify remote(client)'s certificate. + // Defaults to "", which means server does not verify client's certificate. + ClientCAName string + + // TLSVersion is the minimum version of TLS supported. Accepts + // "", "1.0", "1.1", "1.2" and "1.3" only ("" is equivalent to "1.0" for backwards compatibility) + TLSMinVersion string + + // TLSOpts is used to allow configuring the TLS config used for the server + TLSOpts []func(*tls.Config) + + // WebhookMux is the multiplexer that handles different webhooks. + WebhookMux *http.ServeMux + + // webhooks keep track of all registered webhooks for dependency injection, + // and to provide better panic messages on duplicate webhook registration. + webhooks map[string]http.Handler + + // setFields allows injecting dependencies from an external source + setFields inject.Func + + // defaultingOnce ensures that the default fields are only ever set once. + defaultingOnce sync.Once + + // started is set to true immediately before the server is started + // and thus can be used to check if the server has been started + started bool + + // mu protects access to the webhook map & setFields for Start, Register, etc + mu sync.Mutex +} + +// setDefaults does defaulting for the Server. 
+func (s *Server) setDefaults() { + s.webhooks = map[string]http.Handler{} + if s.WebhookMux == nil { + s.WebhookMux = http.NewServeMux() + } + + if s.Port <= 0 { + s.Port = DefaultPort + } + + if len(s.CertDir) == 0 { + s.CertDir = filepath.Join(os.TempDir(), "k8s-webhook-server", "serving-certs") + } + + if len(s.CertName) == 0 { + s.CertName = "tls.crt" + } + + if len(s.KeyName) == 0 { + s.KeyName = "tls.key" + } +} + +// NeedLeaderElection implements the LeaderElectionRunnable interface, which indicates +// the webhook server doesn't need leader election. +func (*Server) NeedLeaderElection() bool { + return false +} + +// Register marks the given webhook as being served at the given path. +// It panics if two hooks are registered on the same path. +func (s *Server) Register(path string, hook http.Handler) { + s.mu.Lock() + defer s.mu.Unlock() + + s.defaultingOnce.Do(s.setDefaults) + if _, found := s.webhooks[path]; found { + panic(fmt.Errorf("can't register duplicate path: %v", path)) + } + // TODO(directxman12): call setfields if we've already started the server + s.webhooks[path] = hook + s.WebhookMux.Handle(path, metrics.InstrumentedHook(path, hook)) + + regLog := log.WithValues("path", path) + regLog.Info("Registering webhook") + + // we've already been "started", inject dependencies here. + // Otherwise, InjectFunc will do this for us later. + if s.setFields != nil { + if err := s.setFields(hook); err != nil { + // TODO(directxman12): swallowing this error isn't great, but we'd have to + // change the signature to fix that + regLog.Error(err, "unable to inject fields into webhook during registration") + } + + baseHookLog := log.WithName("webhooks") + + // NB(directxman12): we don't propagate this further by wrapping setFields because it's + // unclear if this is how we want to deal with log propagation. In this specific instance, + // we want to be able to pass a logger to webhooks because they don't know their own path. + if _, err := inject.LoggerInto(baseHookLog.WithValues("webhook", path), hook); err != nil { + regLog.Error(err, "unable to logger into webhook during registration") + } + } +} + +// StartStandalone runs a webhook server without +// a controller manager. +func (s *Server) StartStandalone(ctx context.Context, scheme *runtime.Scheme) error { + // Use the Kubernetes client-go scheme if none is specified + if scheme == nil { + scheme = kscheme.Scheme + } + + if err := s.InjectFunc(func(i interface{}) error { + if _, err := inject.SchemeInto(scheme, i); err != nil { + return err + } + return nil + }); err != nil { + return err + } + + return s.Start(ctx) +} + +// tlsVersion converts from human-readable TLS version (for example "1.1") +// to the values accepted by tls.Config (for example 0x301). +func tlsVersion(version string) (uint16, error) { + switch version { + // default is previous behaviour + case "": + return tls.VersionTLS10, nil + case "1.0": + return tls.VersionTLS10, nil + case "1.1": + return tls.VersionTLS11, nil + case "1.2": + return tls.VersionTLS12, nil + case "1.3": + return tls.VersionTLS13, nil + default: + return 0, fmt.Errorf("invalid TLSMinVersion %v: expects 1.0, 1.1, 1.2, 1.3 or empty", version) + } +} + +// Start runs the server. +// It will install the webhook related resources depend on the server configuration. 
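The defaulting, registration, and standalone-start logic above translates into very little user-facing code. A minimal sketch of standing up the server outside a controller manager follows; the /validate-widgets path and the inline handler are hypothetical, and the values shown as "default" come from setDefaults and DefaultPort.

```go
package main

import (
	"context"
	"net/http"

	"sigs.k8s.io/controller-runtime/pkg/webhook"
)

func main() {
	srv := &webhook.Server{
		Port:          9443,                                    // DefaultPort
		CertDir:       "/tmp/k8s-webhook-server/serving-certs", // default location
		CertName:      "tls.crt",                               // default
		KeyName:       "tls.key",                               // default
		TLSMinVersion: "1.2",                                   // "" falls back to TLS 1.0
	}

	// Register panics on a duplicate path and wraps the handler with the
	// Prometheus instrumentation from the internal metrics package.
	srv.Register("/validate-widgets", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	}))

	// StartStandalone runs the server without a controller manager; passing a
	// nil scheme makes it fall back to the client-go scheme.
	if err := srv.StartStandalone(context.Background(), nil); err != nil {
		panic(err)
	}
}
```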
+func (s *Server) Start(ctx context.Context) error { + s.defaultingOnce.Do(s.setDefaults) + + baseHookLog := log.WithName("webhooks") + baseHookLog.Info("Starting webhook server") + + certPath := filepath.Join(s.CertDir, s.CertName) + keyPath := filepath.Join(s.CertDir, s.KeyName) + + certWatcher, err := certwatcher.New(certPath, keyPath) + if err != nil { + return err + } + + go func() { + if err := certWatcher.Start(ctx); err != nil { + log.Error(err, "certificate watcher error") + } + }() + + tlsMinVersion, err := tlsVersion(s.TLSMinVersion) + if err != nil { + return err + } + + cfg := &tls.Config{ //nolint:gosec + NextProtos: []string{"h2"}, + GetCertificate: certWatcher.GetCertificate, + MinVersion: tlsMinVersion, + } + + // load CA to verify client certificate + if s.ClientCAName != "" { + certPool := x509.NewCertPool() + clientCABytes, err := os.ReadFile(filepath.Join(s.CertDir, s.ClientCAName)) + if err != nil { + return fmt.Errorf("failed to read client CA cert: %w", err) + } + + ok := certPool.AppendCertsFromPEM(clientCABytes) + if !ok { + return fmt.Errorf("failed to append client CA cert to CA pool") + } + + cfg.ClientCAs = certPool + cfg.ClientAuth = tls.RequireAndVerifyClientCert + } + + // fallback TLS config ready, will now mutate if passer wants full control over it + for _, op := range s.TLSOpts { + op(cfg) + } + + listener, err := tls.Listen("tcp", net.JoinHostPort(s.Host, strconv.Itoa(s.Port)), cfg) + if err != nil { + return err + } + + log.Info("Serving webhook server", "host", s.Host, "port", s.Port) + + srv := httpserver.New(s.WebhookMux) + + idleConnsClosed := make(chan struct{}) + go func() { + <-ctx.Done() + log.Info("shutting down webhook server") + + // TODO: use a context with reasonable timeout + if err := srv.Shutdown(context.Background()); err != nil { + // Error from closing listeners, or context timeout + log.Error(err, "error shutting down the HTTP server") + } + close(idleConnsClosed) + }() + + s.mu.Lock() + s.started = true + s.mu.Unlock() + if err := srv.Serve(listener); err != nil && err != http.ErrServerClosed { + return err + } + + <-idleConnsClosed + return nil +} + +// StartedChecker returns an healthz.Checker which is healthy after the +// server has been started. +func (s *Server) StartedChecker() healthz.Checker { + config := &tls.Config{ + InsecureSkipVerify: true, // nolint:gosec // config is used to connect to our own webhook port. + } + return func(req *http.Request) error { + s.mu.Lock() + defer s.mu.Unlock() + + if !s.started { + return fmt.Errorf("webhook server has not been started yet") + } + + d := &net.Dialer{Timeout: 10 * time.Second} + conn, err := tls.DialWithDialer(d, "tcp", net.JoinHostPort(s.Host, strconv.Itoa(s.Port)), config) + if err != nil { + return fmt.Errorf("webhook server is not reachable: %w", err) + } + + if err := conn.Close(); err != nil { + return fmt.Errorf("webhook server is not reachable: closing connection: %w", err) + } + + return nil + } +} + +// InjectFunc injects the field setter into the server. +func (s *Server) InjectFunc(f inject.Func) error { + s.setFields = f + + // inject fields here that weren't injected in Register because we didn't have setFields yet. + baseHookLog := log.WithName("webhooks") + for hookPath, webhook := range s.webhooks { + if err := s.setFields(webhook); err != nil { + return err + } + + // NB(directxman12): we don't propagate this further by wrapping setFields because it's + // unclear if this is how we want to deal with log propagation. 
In this specific instance, + // we want to be able to pass a logger to webhooks because they don't know their own path. + if _, err := inject.LoggerInto(baseHookLog.WithValues("webhook", hookPath), webhook); err != nil { + return err + } + } + return nil +} diff --git a/vendor/sigs.k8s.io/yaml/.gitignore b/vendor/sigs.k8s.io/yaml/.gitignore index e256a31e0..2dc92904e 100644 --- a/vendor/sigs.k8s.io/yaml/.gitignore +++ b/vendor/sigs.k8s.io/yaml/.gitignore @@ -6,6 +6,10 @@ .project .settings/** +# Idea files +.idea/** +.idea/ + # Emacs save files *~ diff --git a/vendor/sigs.k8s.io/yaml/.travis.yml b/vendor/sigs.k8s.io/yaml/.travis.yml index d20e23eff..54ed8f9cb 100644 --- a/vendor/sigs.k8s.io/yaml/.travis.yml +++ b/vendor/sigs.k8s.io/yaml/.travis.yml @@ -1,8 +1,7 @@ language: go -dist: xenial -go: - - 1.12.x - - 1.13.x +arch: arm64 +dist: focal +go: 1.15.x script: - diff -u <(echo -n) <(gofmt -d *.go) - diff -u <(echo -n) <(golint $(go list -e ./...) | grep -v YAMLToJSON) diff --git a/vendor/sigs.k8s.io/yaml/README.md b/vendor/sigs.k8s.io/yaml/README.md index 5a651d916..e81cc426b 100644 --- a/vendor/sigs.k8s.io/yaml/README.md +++ b/vendor/sigs.k8s.io/yaml/README.md @@ -107,8 +107,8 @@ func main() { } fmt.Println(string(y)) /* Output: - name: John age: 30 + name: John */ j2, err := yaml.YAMLToJSON(y) if err != nil {
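Returning to the webhook server above: StartedChecker is the piece that lets callers gate readiness on the TLS listener actually accepting connections. A minimal sketch that polls the returned checker directly is shown below; the waitForWebhook helper is hypothetical, and in a manager-based setup the same checker would normally be registered with the manager's readyz endpoint instead.

```go
package main

import (
	"context"
	"fmt"
	"time"

	"sigs.k8s.io/controller-runtime/pkg/webhook"
)

// waitForWebhook blocks until the webhook server reports itself reachable,
// or until the context is cancelled.
func waitForWebhook(ctx context.Context, srv *webhook.Server) error {
	check := srv.StartedChecker() // healthz.Checker: func(*http.Request) error
	for {
		// The checker dials the TLS port itself, so a nil request is enough here.
		if err := check(nil); err == nil {
			return nil
		}
		select {
		case <-ctx.Done():
			return fmt.Errorf("webhook server never became ready: %w", ctx.Err())
		case <-time.After(time.Second):
		}
	}
}
```

Typical usage would be to run srv.Start(ctx) in a goroutine and call waitForWebhook(ctx, srv) before reporting the process as ready.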