diff --git a/go.mod b/go.mod
index b1e30de25643..3dff3accda71 100644
--- a/go.mod
+++ b/go.mod
@@ -15,11 +15,11 @@ require (
 	github.com/ghodss/yaml v1.0.0
 	github.com/go-bindata/go-bindata v3.1.2+incompatible
 	github.com/go-ozzo/ozzo-validation v0.0.0-00010101000000-000000000000 // indirect
-	github.com/golang/protobuf v1.4.2
-	github.com/google/go-cmp v0.4.0
+	github.com/golang/protobuf v1.4.3
+	github.com/google/go-cmp v0.5.2
 	github.com/google/go-github v17.0.0+incompatible
 	github.com/google/go-querystring v0.0.0-00010101000000-000000000000 // indirect
-	github.com/google/uuid v1.1.1
+	github.com/google/uuid v1.1.2
 	github.com/gorilla/context v0.0.0-00010101000000-000000000000 // indirect
 	github.com/heketi/tests v0.0.0-00010101000000-000000000000 // indirect
 	github.com/lestrrat-go/jspointer v0.0.0-20181205001929-82fadba7561c // indirect
@@ -33,44 +33,47 @@ require (
 	github.com/mohae/deepcopy v0.0.0-00010101000000-000000000000 // indirect
 	github.com/onsi/ginkgo v4.5.0-origin.1+incompatible
 	github.com/onsi/gomega v1.7.0
-	github.com/opencontainers/go-digest v1.0.0-rc1
-	github.com/openshift/api v0.0.0-20201102162614-9252afb032e1
-	github.com/openshift/apiserver-library-go v0.0.0-20201113074933-3e1a513af676
+	github.com/opencontainers/go-digest v1.0.0
+	github.com/openshift/api v0.0.0-20201214114959-164a2fb63b5f
+	github.com/openshift/apiserver-library-go v0.0.0-20201214145556-6f1013f42f98
 	github.com/openshift/build-machinery-go v0.0.0-20200917070002-f171684f77ab
-	github.com/openshift/client-go v0.0.0-20201020082437-7737f16e53fc
-	github.com/openshift/library-go v0.0.0-20201102091359-c4fa0f5b3a08
+	github.com/openshift/client-go v0.0.0-20201214125552-e615e336eb49
+	github.com/openshift/library-go v0.0.0-20201214135256-d265f469e75b
 	github.com/pborman/uuid v1.2.0
 	github.com/pquerna/cachecontrol v0.0.0-00010101000000-000000000000 // indirect
 	github.com/prometheus/client_golang v1.7.1
 	github.com/prometheus/client_model v0.2.0
 	github.com/prometheus/common v0.10.0
-	github.com/spf13/cobra v1.0.0
+	github.com/spf13/cobra v1.1.1
 	github.com/spf13/pflag v1.0.5
-	github.com/spf13/viper v1.4.0
+	github.com/spf13/viper v1.7.0
 	github.com/stretchr/objx v0.2.0
-	github.com/stretchr/testify v1.4.0
+	github.com/stretchr/testify v1.6.1
 	github.com/urfave/negroni v0.0.0-00010101000000-000000000000 // indirect
 	github.com/xeipuuv/gojsonschema v1.2.0 // indirect
-	go.etcd.io/etcd v3.3.17+incompatible
-	golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9
-	golang.org/x/net v0.0.0-20200707034311-ab3426394381
-	golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6
+	go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489
+	golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0
+	golang.org/x/net v0.0.0-20201110031124-69a78807bb2b
+	golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
 	google.golang.org/grpc v1.27.1
 	gopkg.in/src-d/go-git.v4 v4.13.1
 	gopkg.in/yaml.v2 v2.3.0
-	k8s.io/api v0.19.2
-	k8s.io/apiextensions-apiserver v0.19.2
-	k8s.io/apimachinery v0.19.2
-	k8s.io/apiserver v0.19.2
-	k8s.io/cli-runtime v0.19.2
-	k8s.io/client-go v0.19.2
-	k8s.io/component-base v0.19.2
+	k8s.io/api v0.20.0
+	k8s.io/apiextensions-apiserver v0.20.0
+	k8s.io/apimachinery v0.20.0
+	k8s.io/apiserver v0.20.0
+	k8s.io/cli-runtime v0.20.0
+	k8s.io/client-go v0.20.0
+	k8s.io/component-base v0.20.0
+	k8s.io/component-helpers v0.20.0
+	k8s.io/controller-manager v0.20.0 // indirect
 	k8s.io/klog v1.0.0
-	k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6
-	k8s.io/kubectl v0.19.2
-	k8s.io/kubelet v0.19.2
-	k8s.io/kubernetes v1.19.2
-	k8s.io/legacy-cloud-providers v0.19.2
+	k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd
+	k8s.io/kubectl v0.20.0
+	k8s.io/kubelet v0.20.0
+	k8s.io/kubernetes v1.20.0
+	k8s.io/legacy-cloud-providers v0.20.0
+	k8s.io/mount-utils v0.20.0 // indirect
 	sigs.k8s.io/yaml v1.2.0
 )
@@ -100,7 +103,6 @@ replace (
 	github.com/armon/consul-api => github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6
 	github.com/asaskevich/govalidator => github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a
 	github.com/auth0/go-jwt-middleware => github.com/auth0/go-jwt-middleware v0.0.0-20170425171159-5493cabe49f7
-	github.com/aws/aws-sdk-go => github.com/aws/aws-sdk-go v1.28.2
 	github.com/bazelbuild/bazel-gazelle => github.com/bazelbuild/bazel-gazelle v0.19.1-0.20191105222053-70208cbdc798
 	github.com/bazelbuild/buildtools => github.com/bazelbuild/buildtools v0.0.0-20190917191645-69366ca98f89
 	github.com/bazelbuild/rules_go => github.com/bazelbuild/rules_go v0.0.0-20190719190356-6dae44dc5cab
@@ -138,11 +140,6 @@ replace (
 	github.com/daviddengcn/go-colortext => github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd
 	github.com/dgrijalva/jwt-go => github.com/dgrijalva/jwt-go v3.2.0+incompatible
 	github.com/dnaeon/go-vcr => github.com/dnaeon/go-vcr v1.0.1
-	github.com/docker/distribution => github.com/docker/distribution v2.7.1+incompatible
-	github.com/docker/docker => github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0
-	github.com/docker/go-connections => github.com/docker/go-connections v0.3.0
-	github.com/docker/go-units => github.com/docker/go-units v0.4.0
-	github.com/docker/spdystream => github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96
 	github.com/dustin/go-humanize => github.com/dustin/go-humanize v1.0.0
 	github.com/elazarl/goproxy => github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 // 947c36da3153 is the SHA for git tag v1.11
 	github.com/emicklei/go-restful => github.com/emicklei/go-restful v2.9.5+incompatible
@@ -318,7 +315,7 @@ replace (
 	github.com/pquerna/cachecontrol => github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021
 	github.com/pquerna/ffjson => github.com/pquerna/ffjson v0.0.0-20180717144149-af8b230fcd20
 	github.com/quasilyte/go-consistent => github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c
-	github.com/quobyte/api => github.com/quobyte/api v0.1.2
+	github.com/quobyte/api => github.com/quobyte/api v0.1.8
 	github.com/remyoudompheng/bigfft => github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446
 	github.com/robfig/cron => github.com/robfig/cron v1.1.0
 	github.com/rogpeppe/fastuuid => github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af
@@ -344,7 +341,7 @@ replace (
 	github.com/spf13/jwalterweatherman => github.com/spf13/jwalterweatherman v1.1.0
 	github.com/spf13/pflag => github.com/spf13/pflag v1.0.5
 	github.com/spf13/viper => github.com/spf13/viper v1.3.2
-	github.com/storageos/go-api => github.com/storageos/go-api v0.0.0-20180912212459-343b3eff91fc
+	github.com/storageos/go-api => github.com/storageos/go-api v2.2.0+incompatible
 	github.com/stretchr/objx => github.com/stretchr/objx v0.2.0
 	github.com/stretchr/testify => github.com/stretchr/testify v1.4.0
 	github.com/syndtr/gocapability => github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2
@@ -368,7 +365,6 @@ replace (
 	github.com/xlab/handysort => github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1
 	github.com/xordataexchange/crypt => github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77
 	go.etcd.io/bbolt => go.etcd.io/bbolt v1.3.3
-	go.etcd.io/etcd => go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738 // 3cf2f69b5738 is the SHA for git tag v3.4.3
 	go.mongodb.org/mongo-driver => go.mongodb.org/mongo-driver v1.1.2
 	go.opencensus.io => go.opencensus.io v0.21.0
 	go.uber.org/atomic => go.uber.org/atomic v1.3.2
@@ -382,7 +378,6 @@ replace (
 	golang.org/x/lint => golang.org/x/lint v0.0.0-20190409202823-959b441ac422
 	golang.org/x/mobile => golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6
 	golang.org/x/mod => golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e
-	golang.org/x/net => golang.org/x/net v0.0.0-20191004110552-13f9640d40b9
 	golang.org/x/oauth2 => golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45
 	golang.org/x/perf => golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852
 	golang.org/x/sync => golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e
@@ -396,7 +391,7 @@ replace (
 	gonum.org/v1/plot => gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b
 	google.golang.org/appengine => google.golang.org/appengine v1.5.0
 	google.golang.org/genproto => google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55
-	google.golang.org/grpc => google.golang.org/grpc v1.26.0
+	google.golang.org/grpc => google.golang.org/grpc v1.27.1
 	gopkg.in/airbrake/gobrake.v2 => gopkg.in/airbrake/gobrake.v2 v2.0.9
 	gopkg.in/alecthomas/kingpin.v2 => gopkg.in/alecthomas/kingpin.v2 v2.2.6
 	gopkg.in/check.v1 => gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127
@@ -417,36 +412,38 @@ replace (
 	gotest.tools/gotestsum => gotest.tools/gotestsum v0.3.5
 	grpc.go4.org => grpc.go4.org v0.0.0-20170609214715-11d0a25b4919
 	honnef.co/go/tools => honnef.co/go/tools v0.0.1-2019.2.2
-	k8s.io/api => k8s.io/api v0.19.2
-	k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.19.2
-	k8s.io/apimachinery => k8s.io/apimachinery v0.19.2
-	k8s.io/apiserver => github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20201119152602-89f5d4548df7
-	k8s.io/cli-runtime => k8s.io/cli-runtime v0.19.2
-	k8s.io/client-go => github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20201119152602-89f5d4548df7
-	k8s.io/cloud-provider => github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20201119152602-89f5d4548df7
-	k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.19.2
-	k8s.io/code-generator => k8s.io/code-generator v0.19.2
-	k8s.io/component-base => k8s.io/component-base v0.19.2
-	k8s.io/cri-api => k8s.io/cri-api v0.19.2
-	k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.19.2
+	k8s.io/api => k8s.io/api v0.20.0
+	k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.20.0
+	k8s.io/apimachinery => k8s.io/apimachinery v0.20.0
+	k8s.io/apiserver => github.com/marun/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20201215040514-b7c5d629b30f
+	k8s.io/cli-runtime => k8s.io/cli-runtime v0.20.0
+	k8s.io/client-go => github.com/marun/kubernetes/staging/src/k8s.io/client-go v0.0.0-20201215040514-b7c5d629b30f
+	k8s.io/cloud-provider => github.com/marun/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20201215040514-b7c5d629b30f
+	k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.20.0
+	k8s.io/code-generator => k8s.io/code-generator v0.20.0
+	k8s.io/component-base => k8s.io/component-base v0.20.0
+	k8s.io/component-helpers => k8s.io/component-helpers v0.20.0
+	k8s.io/controller-manager => github.com/marun/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20201215040514-b7c5d629b30f
+	k8s.io/cri-api => k8s.io/cri-api v0.20.0
+	k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.20.0
 	k8s.io/gengo => k8s.io/gengo v0.0.0-20200114144118-36b2048a9120
 	k8s.io/heapster => k8s.io/heapster v1.2.0-beta.1
 	k8s.io/klog => k8s.io/klog v1.0.0
-	k8s.io/kube-aggregator => github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20201119152602-89f5d4548df7
-	k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.19.2
-	k8s.io/kube-proxy => k8s.io/kube-proxy v0.19.2
-	k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.19.2
-	k8s.io/kubectl => k8s.io/kubectl v0.19.2
-	k8s.io/kubelet => k8s.io/kubelet v0.19.2
-	k8s.io/kubernetes => github.com/openshift/kubernetes v1.20.0-alpha.0.0.20201119152602-89f5d4548df7
-	k8s.io/legacy-cloud-providers => github.com/openshift/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20201119152602-89f5d4548df7
-	k8s.io/metrics => k8s.io/metrics v0.19.2
+	k8s.io/kube-aggregator => github.com/marun/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20201215040514-b7c5d629b30f
+	k8s.io/kube-controller-manager => github.com/marun/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20201215040514-b7c5d629b30f
+	k8s.io/kube-proxy => k8s.io/kube-proxy v0.20.0
+	k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.20.0
+	k8s.io/kubectl => k8s.io/kubectl v0.20.0
+	k8s.io/kubelet => k8s.io/kubelet v0.20.0
+	k8s.io/kubernetes => github.com/marun/kubernetes v0.0.0-20201215040514-b7c5d629b30f
+	k8s.io/legacy-cloud-providers => github.com/marun/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20201215040514-b7c5d629b30f
+	k8s.io/metrics => k8s.io/metrics v0.20.0
+	k8s.io/mount-utils => github.com/marun/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20201215040514-b7c5d629b30f
 	k8s.io/repo-infra => k8s.io/repo-infra v0.0.1-alpha.1
-	k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.19.2
-	k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.19.2
-	k8s.io/sample-controller => k8s.io/sample-controller v0.19.2
+	k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.20.0
+	k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.20.0
+	k8s.io/sample-controller => k8s.io/sample-controller v0.20.0
 	k8s.io/system-validators => k8s.io/system-validators v1.0.4
-	k8s.io/utils => k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89
 	modernc.org/cc => modernc.org/cc v1.0.0
 	modernc.org/golex => modernc.org/golex v1.0.0
 	modernc.org/mathutil => modernc.org/mathutil v1.0.0
@@ -460,6 +457,5 @@ replace (
 	sigs.k8s.io/kustomize => sigs.k8s.io/kustomize v2.0.3+incompatible
 	sigs.k8s.io/structured-merge-diff/v3 => sigs.k8s.io/structured-merge-diff/v3 v3.0.0
 	sigs.k8s.io/yaml => sigs.k8s.io/yaml v1.2.0
-	sourcegraph.com/sqs/pbtypes => sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4
 	vbom.ml/util => vbom.ml/util v0.0.0-20160121211510-db5cfe13f5cc
 )
diff --git a/go.sum b/go.sum
index 15eaf7d8780c..38fe7624b36e 100644
--- a/go.sum
+++ b/go.sum
@@ -6,27 +6,26 @@ github.com/Azure/azure-sdk-for-go v43.0.0+incompatible h1:/wSNCu0e6EsHFR4Qa3vBEB
 github.com/Azure/azure-sdk-for-go v43.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
 github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
 github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
-github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
-github.com/Azure/go-autorest/autorest v0.9.6 h1:5YWtOnckcudzIw8lPPBcWOnmIFWMtHci1ZWAZulMSx0=
-github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=
-github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
-github.com/Azure/go-autorest/autorest/adal v0.8.2 h1:O1X4oexUxnZCaEUGsvMnr8ZGj8HI37tNezwY4npRqA0=
-github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
-github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
-github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM=
-github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
-github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
-github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
-github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc=
-github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
+github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
+github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest/autorest v0.11.1 h1:eVvIXUKiTgv++6YnWb42DUA1YL7qDugnKP0HljexdnQ=
+github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
+github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
+github.com/Azure/go-autorest/autorest/adal v0.9.5 h1:Y3bBUV4rTuxenJJs41HU3qmqsb+auo+a3Lz+PlJPpL0=
+github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
+github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
+github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
+github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk=
+github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
 github.com/Azure/go-autorest/autorest/to v0.2.0 h1:nQOZzFCudTh+TvquAtCRjM01VEYx85e9qbwt5ncW4L8=
 github.com/Azure/go-autorest/autorest/to v0.2.0/go.mod h1:GunWKJp1AEqgMaGLV+iocmRAJWqST1wQYhyyjXJ3SJc=
 github.com/Azure/go-autorest/autorest/validation v0.1.0 h1:ISSNzGUh+ZSzizJWOWzs8bwpXIePbGLW4z/AmUFGH5A=
 github.com/Azure/go-autorest/autorest/validation v0.1.0/go.mod h1:Ha3z/SqBeaalWQvokg3NZAlQTalVMtOIAs1aGK7G6u8=
-github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=
-github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
-github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
-github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
+github.com/Azure/go-autorest/logger v0.2.0 h1:e4RVHVZKC5p6UANLJHkM4OfR1UKZPj8Wt8Pcx+3oqrE=
+github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
+github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
+github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
 github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
@@ -68,8 +67,9 @@ github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4
 github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
 github.com/auth0/go-jwt-middleware v0.0.0-20170425171159-5493cabe49f7 h1:irR1cO6eek3n5uquIVaRAsQmZnlsfPuHNz31cXo4eyk=
 github.com/auth0/go-jwt-middleware v0.0.0-20170425171159-5493cabe49f7/go.mod h1:LWMyo4iOLWXHGdBki7NIht1kHru/0wM179h+d3g8ATM=
-github.com/aws/aws-sdk-go v1.28.2 h1:j5IXG9CdyLfcVfICqo1PXVv+rua+QQHbkXuvuU/JF+8=
-github.com/aws/aws-sdk-go v1.28.2/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.6.10/go.mod h1:ZRmQr0FajVIyZ4ZzBYKG5P3ZqPz9IHG41ZoMu1ADI3k=
+github.com/aws/aws-sdk-go v1.35.24 h1:U3GNTg8+7xSM6OAJ8zksiSM4bRqxBWmVwwehvOSNG3A=
+github.com/aws/aws-sdk-go v1.35.24/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k=
 github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
 github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY=
@@ -89,6 +89,8 @@ github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL
 github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw=
 github.com/checkpoint-restore/go-criu/v4 v4.0.2 h1:jt+rnBIhFtPw0fhtpYGcUOilh4aO9Hj7r+YLEtf30uA=
 github.com/checkpoint-restore/go-criu/v4 v4.0.2/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
+github.com/checkpoint-restore/go-criu/v4 v4.1.0 h1:WW2B2uxx9KWF6bGlHqhm8Okiafwwx7Y2kcpn8lCpjgo=
+github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
 github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ=
 github.com/cilium/ebpf v0.0.0-20191025125908-95b36a581eed h1:/UgmF+cZTm9kp4uJ122y/9cVhczNJCgAgAeH2FfzPeg=
 github.com/cilium/ebpf v0.0.0-20191025125908-95b36a581eed/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
@@ -141,12 +143,19 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumC
 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
 github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY=
 github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
+github.com/docker/distribution v0.0.0-20180920194744-16128bbac47f/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
 github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0 h1:w3NnFcKR5241cfmQU5ZZAsf0xcpId6mWOupTvJlUX2U=
 github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v1.4.2-0.20191101170500-ac7306503d23/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v17.12.0-ce-rc1.0.20200916142827-bd33bbf0497b+incompatible h1:SiUATuP//KecDjpOK2tvZJgeScYAklvyjfK8JZlU6fo=
+github.com/docker/docker v17.12.0-ce-rc1.0.20200916142827-bd33bbf0497b+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/go-connections v0.3.0 h1:3lOnM9cSzgGwx8VfK/NGOW5fLQ0GjIlCkaktF+n1M6o=
 github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
+github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
+github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
 github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
 github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
 github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
@@ -178,11 +187,14 @@ github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5Kwzbycv
 github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ=
 github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
 github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
+github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk=
+github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
 github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/fsouza/go-dockerclient v0.0.0-20171004212419-da3951ba2e9e/go.mod h1:KpcjM623fQYE9MZiTGzKhjfxXAV9wbyX2C1cyRHfhl0=
 github.com/fsouza/go-dockerclient v1.6.5 h1:vuFDnPcds3LvTWGYb9h0Rty14FLgkjHZdwLDROCdgsw=
 github.com/fsouza/go-dockerclient v1.6.5/go.mod h1:GOdftxWLWIbIWKbIMDroKFJzPdg6Iw7r+jX1DDZdVsA=
+github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0=
 github.com/getsentry/raven-go v0.0.0-20190513200303-c977f96e1095 h1:F2m41rgyxoveZKD+Z6xwyAbtdNeVvhpi9BpQLvt5oRU=
 github.com/getsentry/raven-go v0.0.0-20190513200303-c977f96e1095/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
 github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
@@ -364,6 +376,23 @@ github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czP
 github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM=
 github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
 github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk=
+github.com/marun/kubernetes v0.0.0-20201215040514-b7c5d629b30f h1:/YB2nQE7od2sZLngB+4QS4ZRb4eWQBDzGJS2IiF/DoE=
+github.com/marun/kubernetes v0.0.0-20201215040514-b7c5d629b30f/go.mod h1:WYL8/qQDpjRt1Rh9DNGRWe3V2ovnZnRJMffzC0gC1uc=
+github.com/marun/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20201215040514-b7c5d629b30f h1:M6pTfJ7QnbpkXbg1ZR2vnDBe8IsLpJ+RmBkSOqRX5iM=
+github.com/marun/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20201215040514-b7c5d629b30f/go.mod h1:ED6HfkyjucTiWY56MfTsOW48qtYewvR5IODc4JRMc6k=
+github.com/marun/kubernetes/staging/src/k8s.io/client-go v0.0.0-20201215040514-b7c5d629b30f h1:/wT2kuOUVL9G42zw5uUx3P5C1T0r8K4kpAkArf5Zjmc=
+github.com/marun/kubernetes/staging/src/k8s.io/client-go v0.0.0-20201215040514-b7c5d629b30f/go.mod h1:MfFvfPRc3qZnRjJYRnD8KZGtlN0XC+zUZu5wu5yfUz0=
+github.com/marun/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20201215040514-b7c5d629b30f h1:Hj1Emc4EIL79Gj+8SrPmSn7DwB01MVCWR1iRPcagHxE=
+github.com/marun/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20201215040514-b7c5d629b30f/go.mod h1:UbIReMP6onJAgmEZQmjsi9PwQZnuy6mN6cA+NX63KtI=
+github.com/marun/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20201215040514-b7c5d629b30f h1:goA0rgCBs86THf0WGv+vq8r21Bv72sGkJhpj4Pax/z4=
+github.com/marun/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20201215040514-b7c5d629b30f/go.mod h1:C4aXxyoP3zkEJSMu98FCHF7lNDSNcBxMe13/JNk3WX8=
+github.com/marun/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20201215040514-b7c5d629b30f h1:WUp1HqqrC3g22sIs+SlN00oR3/AkngFhfRZYLUeUjug=
+github.com/marun/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20201215040514-b7c5d629b30f/go.mod h1:1q1yp9zAz75t3BOwP11lXWu9bKOEVr2D+Uuq/0tgaTc=
+github.com/marun/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20201215040514-b7c5d629b30f/go.mod h1:G43SCvMMH8h7lJxYjXgy4Ik5i/xLkA5MQm1PHsuKQxc=
+github.com/marun/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20201215040514-b7c5d629b30f h1:SYb3jdz+G43Oy1WoVUpsN8ceLsvjt3sC8d/9OlvalfA=
+github.com/marun/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20201215040514-b7c5d629b30f/go.mod h1:SZzO0CnWBCwbiYLkYwThH9vvD4d0Q1plgf0r2WY1S+w=
+github.com/marun/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20201215040514-b7c5d629b30f h1:4iQs1G1jG/kHYdN35nGTmNlF8mxn4urm/U7W212jKxM=
+github.com/marun/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20201215040514-b7c5d629b30f/go.mod h1:mFeSjsWvLj55xUpwltalvolz49izW7J0N5RfoIHrKKY=
 github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
 github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
 github.com/mattn/go-runewidth v0.0.2 h1:UnlwIPBGaTZfPQ6T1IGzPI0EkYAQmT9fAEJ/poFC63o=
@@ -413,45 +442,36 @@ github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
 github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
 github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
 github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
 github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
 github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
 github.com/opencontainers/runc v0.0.0-20191031171055-b133feaeeb2e/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
 github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
 github.com/opencontainers/runc v1.0.0-rc90.0.20200616040943-82d2fa4eb069/go.mod h1:3Sm6Dt7OT8z88EbdQqqcRN2oCT54jbi72tT/HqgflT8=
-github.com/opencontainers/runc v1.0.0-rc91.0.20200707015106-819fcc687efb h1:LcPVE5u4oaqw8ffPbJew0lUxZC7faM5t52PgU4px1xY=
-github.com/opencontainers/runc v1.0.0-rc91.0.20200707015106-819fcc687efb/go.mod h1:ZuXhqlr4EiRYgDrBDNfSbE4+n9JX4+V107NwAmF7sZA=
+github.com/opencontainers/runc v1.0.0-rc92 h1:+IczUKCRzDzFDnw99O/PAqrcBBCoRp9xN3cB1SYSNS4=
+github.com/opencontainers/runc v1.0.0-rc92/go.mod h1:X1zlU4p7wOlX4+WRCz+hvlRv8phdL7UqbYD+vQwNMmE=
 github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2 h1:9mv9SC7GWmRWE0J/+oD8w3GsN2KYGKtg6uwLN7hfP5E=
 github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.3-0.20200728170252-4d89ac9fbff6 h1:NhsM2gc769rVWDqJvapK37r+7+CBXI8xHhnfnt8uQsg=
+github.com/opencontainers/runtime-spec v1.0.3-0.20200728170252-4d89ac9fbff6/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
 github.com/opencontainers/selinux v1.5.1/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
 github.com/opencontainers/selinux v1.5.2 h1:F6DgIsjgBIcDksLW4D5RG9bXok6oqZ3nvMwj4ZoFu/Q=
 github.com/opencontainers/selinux v1.5.2/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
-github.com/openshift/api v0.0.0-20201019163320-c6a5ec25f267/go.mod h1:RDvBcRQMGLa3aNuDuejVBbTEQj/2i14NXdpOLqbNBvM=
-github.com/openshift/api v0.0.0-20201102162614-9252afb032e1 h1:qzfwaYZFE2t+O+6K8+JiPoPkor0shm7FrQSkVNhzRjQ=
-github.com/openshift/api v0.0.0-20201102162614-9252afb032e1/go.mod h1:RDvBcRQMGLa3aNuDuejVBbTEQj/2i14NXdpOLqbNBvM=
-github.com/openshift/apiserver-library-go v0.0.0-20201113074933-3e1a513af676 h1:RgCGtnzLXmeFAnoCDDY+axGGHIzQYXYqZxH0mgnaX6U=
-github.com/openshift/apiserver-library-go v0.0.0-20201113074933-3e1a513af676/go.mod h1:fGp6VNAiZ3Uzcyxgj5CahXv/+BrxyaAhXXRWWffgxwc=
+github.com/opencontainers/selinux v1.6.0 h1:+bIAS/Za3q5FTwWym4fTB0vObnfCf3G/NC7K6Jx62mY=
+github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
+github.com/openshift/api v0.0.0-20201214114959-164a2fb63b5f h1:MhuCP7+M9hmUnZaz6EwOh3+W6FQp+BezIXbL99Q4xq4=
+github.com/openshift/api v0.0.0-20201214114959-164a2fb63b5f/go.mod h1:aqU5Cq+kqKKPbDMqxo9FojgDeSpNJI7iuskjXjtojDg=
+github.com/openshift/apiserver-library-go v0.0.0-20201214145556-6f1013f42f98 h1:JUz5O4PiBoEFhf/ZvwRary38hejR6E0LDEtyNro01TM=
+github.com/openshift/apiserver-library-go v0.0.0-20201214145556-6f1013f42f98/go.mod h1:bMWTKd7ZOYGyx1hVLipuA9LrIJw62V7Se99cZ/Volj8=
 github.com/openshift/build-machinery-go v0.0.0-20200917070002-f171684f77ab h1:lBrojddP6C9C2p67EMs2vcdpC8eF+H0DDom+fgI2IF0=
 github.com/openshift/build-machinery-go v0.0.0-20200917070002-f171684f77ab/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE=
-github.com/openshift/client-go v0.0.0-20201020074620-f8fd44879f7c/go.mod h1:yZ3u8vgWC19I9gbDMRk8//9JwG/0Sth6v7C+m6R8HXs=
-github.com/openshift/client-go v0.0.0-20201020082437-7737f16e53fc h1:aeVqf+NL5rNq1uM2jvIfSQiWodd5OJ5o90ayju+vdeM=
-github.com/openshift/client-go v0.0.0-20201020082437-7737f16e53fc/go.mod h1:yZ3u8vgWC19I9gbDMRk8//9JwG/0Sth6v7C+m6R8HXs=
+github.com/openshift/client-go v0.0.0-20201214125552-e615e336eb49 h1:7NmjUkJtGHpMTE/n8ia6itbCdZ7eYuTCXKc/zsA7OSM=
+github.com/openshift/client-go v0.0.0-20201214125552-e615e336eb49/go.mod h1:9/jG4I6sh+5QublJpZZ4Zs/P4/QCXMsQQ/K/058bSB8=
 github.com/openshift/golang-glog v0.0.0-20160126235308-23def4e6c14b h1:oqIifWYYfTppdeF3mx7YnH6W5nvAMekv9P4kZI4iwYo=
 github.com/openshift/golang-glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:3sa6LKKRDnR1xy4Kn8htvPwqIOVwXh8fIU3LRY22q3U=
-github.com/openshift/kubernetes v1.20.0-alpha.0.0.20201119152602-89f5d4548df7 h1:YxfmGc9Pr3mHCCogW44DP4TamlerloUIveXLK3Bb7hM=
-github.com/openshift/kubernetes v1.20.0-alpha.0.0.20201119152602-89f5d4548df7/go.mod h1:bgJpBd7PMVOqxOeG/034vAbGMyldSnabFHoKHGzTe/M=
-github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20201119152602-89f5d4548df7 h1:T0lC7kB6ug5cFaXJbL912u/yeFBiXMWXfBasj3f9Chs=
-github.com/openshift/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20201119152602-89f5d4548df7/go.mod h1:IBEy4XVIq1fv1YYiHNzUh0jp0L4FhdTz4p/ywMhe1ac=
-github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20201119152602-89f5d4548df7 h1:5ELxduegCB3TX2PqTcwqaoPpvFEhH+NUEGuLlzYKmBQ=
-github.com/openshift/kubernetes/staging/src/k8s.io/client-go v0.0.0-20201119152602-89f5d4548df7/go.mod h1:W3EejS0WCW4yhUWmCF3dKHOoPvXS48jUte1rr0BZ5hk=
-github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20201119152602-89f5d4548df7 h1:I+WaSSYTVRF11QBQR/iJYHWAE131cDHbzoLd1JHH6Q0=
-github.com/openshift/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20201119152602-89f5d4548df7/go.mod h1:tJXVBfmOkegQN9PNkU5S/KuFESUwYK/w2cINFSmuw1s=
-github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20201119152602-89f5d4548df7 h1:GzvcBM1PoBC4C6x/mXJpVujGeJtQeQ0hkQtJQuYrH4o=
-github.com/openshift/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20201119152602-89f5d4548df7/go.mod h1:Sanb51xlsAx5eIBDLF18hx6zW499rELc68UZPkhSsVc=
-github.com/openshift/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20201119152602-89f5d4548df7 h1:cBWbuzjHsnRZFDi4fHU3Is+wIi5MNiYMrMHldvB4eHw=
-github.com/openshift/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20201119152602-89f5d4548df7/go.mod h1:dPm5A8LMYiCgYHHl7lvSYckvtMRo4eGPTyq91TX+/iw=
-github.com/openshift/library-go v0.0.0-20201020083322-646ad9742a1e/go.mod h1:qbwvTwCy4btqEcqU3oI59CopNgcRgZUPXG4Y2jc+B4E=
-github.com/openshift/library-go v0.0.0-20201102091359-c4fa0f5b3a08 h1:Z+8t3ooTH2T+J/GoCZbgaOk5WqNZgPuHlUAKMfG1FEk=
-github.com/openshift/library-go v0.0.0-20201102091359-c4fa0f5b3a08/go.mod h1:1xYaYQcQsn+AyCRsvOU+Qn5z6GGiCmcblXkT/RZLVfo=
+github.com/openshift/library-go v0.0.0-20201214135256-d265f469e75b h1:zKZCcZgL3jf02/jf/ldUg8TvudOFcPGIdf8kFkWh5MI=
+github.com/openshift/library-go v0.0.0-20201214135256-d265f469e75b/go.mod h1:udseDnqxn5ON8i+NBjDp00fBTK0JRu1/6Y6tf6EivDE=
 github.com/openshift/onsi-ginkgo v4.5.0-origin.1+incompatible h1:GtzyDU5vBFU40hz4GWd1qU5FJByNljWdgkM2LtdelGk=
 github.com/openshift/onsi-ginkgo v4.5.0-origin.1+incompatible/go.mod h1:azqkkH4Vpp9A579CC26hicol/wViXag9rOwElif6v9E=
 github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g=
@@ -491,8 +511,10 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT
 github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
 github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8=
 github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/quobyte/api v0.1.2 h1:lPHLsuvtjFyk8WhC4uHoHRkScijIHcffTWBBP+YpzYo=
-github.com/quobyte/api v0.1.2/go.mod h1:jL7lIHrmqQ7yh05OJ+eEEdHr0u/kmT1Ff9iHd+4H6VI=
+github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULUx4=
+github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/quobyte/api v0.1.8 h1:+sOX1gIlC/OaLipqVZWrHgly9Kh9Qo8OygeS0mWAg30=
+github.com/quobyte/api v0.1.8/go.mod h1:jL7lIHrmqQ7yh05OJ+eEEdHr0u/kmT1Ff9iHd+4H6VI=
 github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M=
 github.com/robfig/cron v1.1.0 h1:jk4/Hud3TTdcrJgUOBgsqrZBarcxl6ADIjSC2iniwLY=
 github.com/robfig/cron v1.1.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k=
@@ -532,8 +554,8 @@ github.com/spf13/viper v1.3.2 h1:VUFqw5KcqRf7i70GOzW7N+Q7+gxVBkSSqiXB12+JQ4M=
 github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
 github.com/src-d/gcfg v1.4.0 h1:xXbNR5AlLSA315x2UO+fTSSAXCDf+Ar38/6oyGbDKQ4=
 github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI=
-github.com/storageos/go-api v0.0.0-20180912212459-343b3eff91fc h1:n+WYaU0kQ6WIiuEyWSgbXqkBx16irO69kYCtwVYoO5s=
-github.com/storageos/go-api v0.0.0-20180912212459-343b3eff91fc/go.mod h1:ZrLn+e0ZuF3Y65PNF6dIwbJPZqfmtCXxFm9ckv0agOY=
+github.com/storageos/go-api v2.2.0+incompatible h1:U0SablXoZIg06gvSlg8BCdzq1C/SkHVygOVX95Z2MU0=
+github.com/storageos/go-api v2.2.0+incompatible/go.mod h1:ZrLn+e0ZuF3Y65PNF6dIwbJPZqfmtCXxFm9ckv0agOY=
 github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
 github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
 github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
@@ -561,6 +583,8 @@ github.com/vishvananda/netns v0.0.0-20171111001504-be1fbeda1936 h1:J9gO8RJCAFlln
 github.com/vishvananda/netns v0.0.0-20171111001504-be1fbeda1936/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
 github.com/vmware/govmomi v0.20.3 h1:gpw/0Ku+6RgF3jsi7fnCLmlcikBHfKBCUcu1qgc16OU=
 github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU=
+github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243 h1:R43TdZy32XXSXjJn7M/HhALJ9imq6ztLnChfYJpVDnM=
+github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
 github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70=
 github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4=
 github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c=
@@ -575,8 +599,8 @@ github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1/go.mod h1:QcJo0QPSf
 github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
 go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk=
 go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738 h1:VcrIfasaLFkyjk6KNlXQSzO+B0fZcnECiDrKJsfxka0=
-go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
+go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489 h1:1JFLBqwIgdyHN1ZtgjTBwO+blA6gVOmZurpiMEsETKo=
+go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
 go.mongodb.org/mongo-driver v1.1.2 h1:jxcFYjlkl8xaERsgLo+RNquI0epW6zuy/ZRQs6jnrFA=
 go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
 go.opencensus.io v0.21.0 h1:mU6zScU4U1YAFPHEHYk+3JC4SY7JxgkqS10ZOSyksNg=
@@ -594,8 +618,26 @@ golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMx
 golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
 golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
 golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
-golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI=
-golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190328230028-74de082e2cca/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=
@@ -613,15 +655,16 @@ gonum.org/v1/gonum v0.6.2/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU=
 gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ=
 gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc=
 google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
-google.golang.org/api v0.15.1 h1:5mMS6mYvK5LVB8+ujVBC33Y8gltBo/kT6HBm6kU80G4=
-google.golang.org/api v0.15.1/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.20.0 h1:jz2KixHX7EcCPiQrySzPdnYT7DbINAypCqKZ1Z7GM40=
+google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
 google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c=
 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE=
 google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg=
-google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk=
+google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
 google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
 gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM=
@@ -664,50 +707,50 @@ gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81
 gotest.tools/v3 v3.0.2 h1:kG1BFyqVHuQoVQiR1bWGnfz/fmHvvuiSPIV7rvl360E=
 gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
 honnef.co/go/tools v0.0.1-2019.2.2/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-k8s.io/api v0.19.2 h1:q+/krnHWKsL7OBZg/rxnycsl9569Pud76UJ77MvKXms=
-k8s.io/api v0.19.2/go.mod h1:IQpK0zFQ1xc5iNIQPqzgoOwuFugaYHK4iCknlAQP9nI=
-k8s.io/apiextensions-apiserver v0.19.2 h1:oG84UwiDsVDu7dlsGQs5GySmQHCzMhknfhFExJMz9tA=
-k8s.io/apiextensions-apiserver v0.19.2/go.mod h1:EYNjpqIAvNZe+svXVx9j4uBaVhTB4C94HkY3w058qcg=
-k8s.io/apimachinery v0.19.2 h1:5Gy9vQpAGTKHPVOh5c4plE274X8D/6cuEiTO2zve7tc=
-k8s.io/apimachinery v0.19.2/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA=
-k8s.io/cli-runtime v0.19.2 h1:d4uOtKhy3ImdaKqZJ8yQgLrdtUwsJLfP4Dw7L/kVPOo=
-k8s.io/cli-runtime v0.19.2/go.mod h1:CMynmJM4Yf02TlkbhKxoSzi4Zf518PukJ5xep/NaNeY=
-k8s.io/cluster-bootstrap v0.19.2 h1:6/LI5EnKCcB0QiDKIsTxoCOdKZtsSwr8Xm/tEhiMv78=
-k8s.io/cluster-bootstrap v0.19.2/go.mod h1:bzngsppPfdt9vAHUnDIEoMNsxD2b6XArVVH/W9PDDFk=
-k8s.io/code-generator v0.19.2/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk=
-k8s.io/component-base v0.19.2 h1:jW5Y9RcZTb79liEhW3XDVTW7MuvEGP0tQZnfSX6/+gs=
-k8s.io/component-base v0.19.2/go.mod h1:g5LrsiTiabMLZ40AR6Hl45f088DevyGY+cCE2agEIVo=
-k8s.io/cri-api v0.19.2 h1:ob5dyrRsZgPhfZYmerau7zL1h8HsFzqlBUfi5UxaxLs=
-k8s.io/cri-api v0.19.2/go.mod h1:UN/iU9Ua0iYdDREBXNE9vqCJ7MIh/FW3VIL0d8pw7Fw=
-k8s.io/csi-translation-lib v0.19.2 h1:NBfsKyTnXj00wp6fo/WmW3SWTjYoksbOiht6gzFv2bo=
-k8s.io/csi-translation-lib v0.19.2/go.mod h1:9V3DYlJ6reyKD+/S/JrFyk/j0N1i8FCQYukch0JbNew=
+k8s.io/api v0.20.0 h1:WwrYoZNM1W1aQEbyl8HNG+oWGzLpZQBlcerS9BQw9yI=
+k8s.io/api v0.20.0/go.mod h1:HyLC5l5eoS/ygQYl1BXBgFzWNlkHiAuyNAbevIn+FKg=
+k8s.io/apiextensions-apiserver v0.20.0 h1:HmeP9mLET/HlIQ5gjP+1c20tgJrlshY5nUyIand3AVg=
+k8s.io/apiextensions-apiserver v0.20.0/go.mod h1:ZH+C33L2Bh1LY1+HphoRmN1IQVLTShVcTojivK3N9xg=
+k8s.io/apimachinery v0.20.0 h1:jjzbTJRXk0unNS71L7h3lxGDH/2HPxMPaQY+MjECKL8=
+k8s.io/apimachinery v0.20.0/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
+k8s.io/cli-runtime v0.20.0 h1:UfTR9vGUWshJpwuekl7MqRmWumNs5tvqPj20qnmOns8=
+k8s.io/cli-runtime v0.20.0/go.mod h1:C5tewU1SC1t09D7pmkk83FT4lMAw+bvMDuRxA7f0t2s=
+k8s.io/cluster-bootstrap v0.20.0 h1:E3QnxUHhaCV5efsKSW0IXR5XFJTsve5oGq7BQgPey9I=
+k8s.io/cluster-bootstrap v0.20.0/go.mod h1:6WZaNIBvcvL7MkPzSRKrZDIr4u+ePW2oIWoRsEFMjmE=
+k8s.io/code-generator v0.20.0/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg=
+k8s.io/component-base v0.20.0 h1:BXGL8iitIQD+0NgW49UsM7MraNUUGDU3FBmrfUAtmVQ=
+k8s.io/component-base v0.20.0/go.mod h1:wKPj+RHnAr8LW2EIBIK7AxOHPde4gme2lzXwVSoRXeA=
+k8s.io/component-helpers v0.20.0 h1:7Zi1fcb5nV0h03d9eeZGk71+ZWYvAN4Be+xMOZyFerc=
+k8s.io/component-helpers v0.20.0/go.mod h1:nx6NOtfSfGOxnSZsDJxpGbnsVuUA1UXpwDvZIrtigNk=
+k8s.io/cri-api v0.20.0 h1:NIbGU0wmC2d2Evuec8rDjOVel3sOi371fiGKW+QfOdI=
+k8s.io/cri-api v0.20.0/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
+k8s.io/csi-translation-lib v0.20.0 h1:I4wsA4/nW2low2auCUIq/ZCF58/FlCADr1OddBh0AOQ=
+k8s.io/csi-translation-lib v0.20.0/go.mod h1:M4CdD66GxEI6ev8aTtsA2NkK9kIF9K5VZQMcw/SsoLs=
 k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
 k8s.io/heapster v1.2.0-beta.1/go.mod h1:h1uhptVXMwC8xtZBYsPXKVi8fpdlYkTs6k949KozGrM=
 k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
 k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
 k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
-k8s.io/klog/v2 v2.2.0 h1:XRvcwJozkgZ1UQJmfMGpvRthQHOvihEhYtDfAaxMz/A=
-k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
-k8s.io/klog/v2 v2.3.0 h1:WmkrnW7fdrm0/DMClc+HIxtftvxVIPAhlVwMQo5yLco=
-k8s.io/klog/v2 v2.3.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
-k8s.io/kube-controller-manager v0.19.2/go.mod h1:29rfd3qO+zmDKPWyBYYEBw5487tHD4T22Hp4oNqiA18=
-k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6 h1:+WnxoVtG8TMiudHBSEtrVL1egv36TkkJm+bA8AxicmQ=
-k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=
-k8s.io/kube-proxy v0.19.2 h1:cDmPhs1drhA4Vpx6TOjJrr+nmFmOAhM0OEqWIpzqnYQ=
-k8s.io/kube-proxy v0.19.2/go.mod h1:/07XChnL0EkYAQyZ7noQuyPYX5QOUBQECa9dsm9ScyY=
-k8s.io/kube-scheduler v0.19.2 h1:xV2Yj76g62n4+g6IR7/8Nba5VfCI53CCrD6qnQ4yaH0=
-k8s.io/kube-scheduler v0.19.2/go.mod h1:Mh/QNfmP0eqt7JtNUyIAsGhU2zO4j1EWel8TFizxZKo=
-k8s.io/kubectl v0.19.2 h1:/Dxz9u7S0GnchLA6Avqi5k1qhZH4Fusgecj8dHsSnbk=
-k8s.io/kubectl v0.19.2/go.mod h1:4ib3oj5ma6gF95QukTvC7ZBMxp60+UEAhDPjLuBIrV4=
-k8s.io/kubelet v0.19.2 h1:jSo57ceStXcmx/6hzEayl+lNYLt79lPT2YexU/9LZx4=
-k8s.io/kubelet v0.19.2/go.mod h1:FHHoByVWzh6kNaarXaDPAa751Oz6REcOVRyFT84L1Is=
-k8s.io/metrics v0.19.2 h1:rpfp7VDWvc6hnF9keM23+3NIkqTlgG0qF2/Xhp3q2DA=
-k8s.io/metrics v0.19.2/go.mod h1:IlLaAGXN0q7yrtB+SV0q3JIraf6VtlDr+iuTcX21fCU=
-k8s.io/sample-apiserver v0.19.2 h1:paiwWeP21s78DwV+Yv1L1Y3YVGhRyBXswHmOh0lE6ks=
-k8s.io/sample-apiserver v0.19.2/go.mod h1:XZxHsGPvUkjzos8B/lhbM4REthLt4tRMfkhN2CRXCGs=
+k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ=
+k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
+k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd h1:sOHNzJIkytDF6qadMNKhhDRpc6ODik8lVC6nOur7B2c=
+k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
+k8s.io/kube-proxy v0.20.0 h1:Y/INM9li3azE0H7RtDmjQMuYFDTEL0bv6VQakgOJgvE=
+k8s.io/kube-proxy v0.20.0/go.mod h1:R97oobM6zSh3ZqFMXi5DzCH/qJXNzua/UzcDmuQRexM=
+k8s.io/kube-scheduler v0.20.0 h1:tcGAaUWyGfYVJOXo6sl2OJLVUWURmymeIRaE3UagNJk=
+k8s.io/kube-scheduler v0.20.0/go.mod h1:cRTGsJU3TfQvbMJBmpoPgq9rBF5cQLpLKoOafKwdZnI=
+k8s.io/kubectl v0.20.0 h1:q6HH6jILYi2lkzFqBhs63M4bKLxYlM0HpFJ///MgARA=
+k8s.io/kubectl v0.20.0/go.mod h1:8x5GzQkgikz7M2eFGGuu6yOfrenwnw5g4RXOUgbjR1M=
+k8s.io/kubelet v0.20.0 h1:vtQMgZ7B79vCUyEpoij2Oy6UmcJx0/D2lNXfViVTBMg=
+k8s.io/kubelet v0.20.0/go.mod h1:lMdjO1NA+JZXSYtxb48pQmNERmC+vVIXIYkJIugVhl0=
+k8s.io/metrics v0.20.0 h1:mu95gdtxR+bHkFOGQsKR5P7aZuDo3tE4F7UHT4eGm1w=
+k8s.io/metrics v0.20.0/go.mod h1:9yiRhfr8K8sjdj2EthQQE9WvpYDvsXIV3CjN4Ruq4Jw=
+k8s.io/sample-apiserver v0.20.0 h1:gF5SoH4sKkEjLs7YMl0kXLQilx5LfkNNlhf2QODsH+Y=
+k8s.io/sample-apiserver v0.20.0/go.mod h1:tScvbz/BcUG46IOsu2YLt4EjBP7XeUuMzMbQt2tQYWw=
 k8s.io/system-validators v1.0.4/go.mod h1:HgSgTg4NAGNoYYjKsUyk52gdNi2PVDswQ9Iyn66R7NI=
-k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 h1:d4vVOjXm687F1iLSP2q3lyPPuyvTUt3aVoBpi2DqRsU=
-k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
+k8s.io/utils v0.0.0-20200414100711-2df71ebbae66/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw=
+k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
 modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
 modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
 modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=
@@ -720,8 +763,8 @@ sigs.k8s.io/controller-tools v0.2.8/go.mod h1:9VKHPszmf2DHz/QmHkcfZoewO6BL7pPs9u
 sigs.k8s.io/kube-storage-version-migrator v0.0.3/go.mod h1:mXfSLkx9xbJHQsgNDDUZK/iQTs2tMbx/hsJlWe6Fthw=
 sigs.k8s.io/kustomize v2.0.3+incompatible h1:JUufWFNlI44MdtnjUqVnvh29rR37PQFzPbLXqhyOyX0=
 sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU=
-sigs.k8s.io/structured-merge-diff/v4 v4.0.1 h1:YXTMot5Qz/X1iBRJhAt+vI+HVttY0WkSqqhKxQ0xVbA=
-sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
+sigs.k8s.io/structured-merge-diff/v4 v4.0.2 h1:YHQV7Dajm86OuqnIR6zAelnDWBRjo+YhYV9PmGrh1s8=
+sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
 sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
 sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
 vbom.ml/util v0.0.0-20160121211510-db5cfe13f5cc/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI=
diff --git a/test/extended/authorization/rbac/groups_default_rules.go b/test/extended/authorization/rbac/groups_default_rules.go
index 51c8795bb8e3..df8abc9af862 100644
--- a/test/extended/authorization/rbac/groups_default_rules.go
+++ b/test/extended/authorization/rbac/groups_default_rules.go
@@ -15,6 +15,7 @@ import (
 	kuser "k8s.io/apiserver/pkg/authentication/user"
 	"k8s.io/client-go/informers"
 	"k8s.io/client-go/tools/cache"
+	rbacvalidation "k8s.io/component-helpers/auth/rbac/validation"
 	kauthenticationapi "k8s.io/kubernetes/pkg/apis/authentication"
 	kauthorizationapi "k8s.io/kubernetes/pkg/apis/authorization"
 	"k8s.io/kubernetes/pkg/apis/rbac"
@@ -227,12 +228,12 @@ func testGroupRules(ruleResolver validation.AuthorizationRuleResolver, group, na
 	actualRules, err := ruleResolver.RulesFor(&kuser.DefaultInfo{Groups: []string{group}}, namespace)
 	o.Expect(err).NotTo(o.HaveOccurred()) // our default RBAC policy should never have rule resolution errors
-	if cover, missing := validation.Covers(expectedRules, actualRules); !cover {
+	if cover, missing := rbacvalidation.Covers(expectedRules, actualRules); !cover {
 		e2e.Failf("%s has extra permissions in namespace %q:\n%s", group, namespace, rulesToString(missing))
 	}

 	// force test data to be cleaned up every so often but allow extra rules to not deadlock new changes
-	if cover, missing := validation.Covers(actualRules, expectedRules); !cover {
+	if cover, missing := rbacvalidation.Covers(actualRules, expectedRules); !cover {
 		log := e2e.Logf
 		if len(missing) > 15 {
 			log = e2e.Failf
diff --git a/test/extended/authorization/scopes.go b/test/extended/authorization/scopes.go
index 5fad72533ca6..766355e34eb0 100644
--- a/test/extended/authorization/scopes.go
+++ b/test/extended/authorization/scopes.go
@@ -19,7 +19,7 @@ import (
 	kclientset "k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/rest"
 	"k8s.io/client-go/transport"
-	rbacvalidation "k8s.io/kubernetes/pkg/registry/rbac/validation"
+	rbacvalidation "k8s.io/component-helpers/auth/rbac/validation"

 	authorizationv1 "github.com/openshift/api/authorization/v1"
 	buildv1 "github.com/openshift/api/build/v1"
diff --git a/test/extended/imageapis/quota_admission.go b/test/extended/imageapis/quota_admission.go
index b20a9a901690..18bbea498a3f 100644
--- a/test/extended/imageapis/quota_admission.go
+++ b/test/extended/imageapis/quota_admission.go
@@ -12,8 +12,8 @@ import (
 	"k8s.io/apimachinery/pkg/fields"
 	kutilerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/apimachinery/pkg/util/wait"
+	quota "k8s.io/apiserver/pkg/quota/v1"
 	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
-	quota "k8s.io/kubernetes/pkg/quota/v1"
 	e2e "k8s.io/kubernetes/test/e2e/framework"

 	g "github.com/onsi/ginkgo"
diff --git a/test/extended/networking/endpoint_admission.go b/test/extended/networking/endpoint_admission.go
index b45ed82d5e1d..020d41ebc1f2 100644
--- a/test/extended/networking/endpoint_admission.go
+++ b/test/extended/networking/endpoint_admission.go
@@ -6,7 +6,6 @@ import (
 	"time"

 	"k8s.io/client-go/rest"
-	"k8s.io/kubernetes/pkg/serviceaccount"

 	"github.com/apparentlymart/go-cidr/cidr"
 	g "github.com/onsi/ginkgo"
@@ -17,6 +16,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/apiserver/pkg/authentication/serviceaccount"
 	"k8s.io/client-go/kubernetes"
 )
diff --git a/test/extended/topology_manager/resourcealign.go b/test/extended/topology_manager/resourcealign.go
index 58f81a06beea..5e09925a7ca5 100644
--- a/test/extended/topology_manager/resourcealign.go
+++ b/test/extended/topology_manager/resourcealign.go
@@ -56,7 +56,7 @@ var _ = g.Describe("[Serial][sig-node][Feature:TopologyManager] Configured clust
 			g.Skip("found no worker nodes with the MCD running")
 		}

-		topoMgrNodes = filterNodeWithTopologyManagerPolicy(mcdNodes, client, oc, kubeletconfigv1beta1.SingleNumaNodeTopologyManager)
+		topoMgrNodes = filterNodeWithTopologyManagerPolicy(mcdNodes, client, oc, kubeletconfigv1beta1.SingleNumaNodeTopologyManagerPolicy)
 		deviceResourceName = getValueFromEnv(resourceNameEnvVar, defaultResourceName, "resource name")
 		// we don't handle yet an uneven device amount on worker nodes. IOW, we expect the same amount of devices on each node
diff --git a/test/extended/util/annotate/generated/zz_generated.annotations.go b/test/extended/util/annotate/generated/zz_generated.annotations.go
index 6feda8036106..fc11e5a84a51 100644
--- a/test/extended/util/annotate/generated/zz_generated.annotations.go
+++ b/test/extended/util/annotate/generated/zz_generated.annotations.go
@@ -145,6 +145,8 @@ var annotations = map[string]string{
 	"[Top Level] [k8s.io] Pods should have their auto-restart back-off timer reset on image update [Slow][NodeConformance]": "should have their auto-restart back-off timer reset on image update [Slow][NodeConformance] [sig-node] [Suite:k8s]",

+	"[Top Level] [k8s.io] Pods should run through the lifecycle of Pods and PodStatus [Conformance]": "should run through the lifecycle of Pods and PodStatus [Conformance] [sig-node] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+
 	"[Top Level] [k8s.io] Pods should support pod readiness gates [NodeFeature:PodReadinessGate]": "should support pod readiness gates [NodeFeature:PodReadinessGate] [sig-node] [Suite:openshift/conformance/parallel] [Suite:k8s]",

 	"[Top Level] [k8s.io] Pods should support remote command execution over websockets [NodeConformance] [Conformance]": "should support remote command execution over websockets [NodeConformance] [Conformance] [sig-node] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",

@@ -153,6 +155,8 @@ var annotations = map[string]string{
 	"[Top Level] [k8s.io] PrivilegedPod [NodeConformance] should enable privileged commands [LinuxOnly]": "should enable privileged commands [LinuxOnly] [sig-node] [Suite:openshift/conformance/parallel] [Suite:k8s]",

+	"[Top Level] [k8s.io] Probing container should *not* be restarted by liveness probe because startup probe delays it": "should *not* be restarted by liveness probe because startup probe delays it [sig-node] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
 	"[Top Level] [k8s.io] Probing container should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance]": "should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance] [sig-node] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",

 	"[Top Level] [k8s.io] Probing container should *not* be restarted with a exec \"cat /tmp/health\" liveness probe [NodeConformance] [Conformance]": "should *not* be restarted with a exec \"cat /tmp/health\" liveness probe [NodeConformance] [Conformance] [sig-node] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",

@@ -161,6 +165,10 @@ var annotations = map[string]string{
 	"[Top Level] [k8s.io] Probing container should *not* be restarted with a tcp:8080 liveness probe [NodeConformance] [Conformance]": "should *not* be restarted with a tcp:8080 liveness probe [NodeConformance] [Conformance] [sig-node] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",

+	"[Top Level] [k8s.io] Probing container should be restarted by liveness probe after startup probe enables it": "should be restarted by liveness probe after startup probe enables it [sig-node] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+	"[Top Level] [k8s.io] Probing container should be restarted startup probe fails": "should be restarted startup probe fails [sig-node] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
 	"[Top Level] [k8s.io] Probing container should be restarted with a /healthz http liveness probe [NodeConformance] [Conformance]": "should be restarted with a /healthz http liveness probe [NodeConformance] [Conformance] [sig-node] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
liveness probe [NodeConformance] [Conformance] [sig-node] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", "[Top Level] [k8s.io] Probing container should be restarted with a docker exec liveness probe with timeout ": "should be restarted with a docker exec liveness probe with timeout [sig-node] [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -171,6 +179,10 @@ var annotations = map[string]string{ "[Top Level] [k8s.io] Probing container should have monotonically increasing restart count [NodeConformance] [Conformance]": "should have monotonically increasing restart count [NodeConformance] [Conformance] [sig-node] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "[Top Level] [k8s.io] Probing container should not be ready until startupProbe succeeds": "should not be ready until startupProbe succeeds [sig-node] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [k8s.io] Probing container should not be ready with a docker exec readiness probe timeout ": "should not be ready with a docker exec readiness probe timeout [sig-node] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [k8s.io] Probing container with readiness probe should not be ready before initial delay and never restart [NodeConformance] [Conformance]": "with readiness probe should not be ready before initial delay and never restart [NodeConformance] [Conformance] [sig-node] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", "[Top Level] [k8s.io] Probing container with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance]": "with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance] [sig-node] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", @@ -239,14 +251,6 @@ var annotations = map[string]string{ "[Top Level] [k8s.io] [sig-node] Events should be sent by kubelets and the scheduler about pods scheduling and running [Conformance]": "should be sent by kubelets and the scheduler about pods scheduling and running [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", - "[Top Level] [k8s.io] [sig-node] Hostname of Pod [Feature:SetHostnameAsFQDN] a pod with subdomain field has FQDN, hostname is shortname [Feature:SetHostnameAsFQDN]": "a pod with subdomain field has FQDN, hostname is shortname [Feature:SetHostnameAsFQDN] [Disabled:Alpha] [Suite:k8s]", - - "[Top Level] [k8s.io] [sig-node] Hostname of Pod [Feature:SetHostnameAsFQDN] a pod with subdomain field has FQDN, when setHostnameAsFQDN is set to true, the FQDN is set as hostname [Feature:SetHostnameAsFQDN]": "a pod with subdomain field has FQDN, when setHostnameAsFQDN is set to true, the FQDN is set as hostname [Feature:SetHostnameAsFQDN] [Disabled:Alpha] [Suite:k8s]", - - "[Top Level] [k8s.io] [sig-node] Hostname of Pod [Feature:SetHostnameAsFQDN] a pod without FQDN is not affected by SetHostnameAsFQDN field [Feature:SetHostnameAsFQDN]": "a pod without FQDN is not affected by SetHostnameAsFQDN field [Feature:SetHostnameAsFQDN] [Disabled:Alpha] [Suite:k8s]", - - "[Top Level] [k8s.io] [sig-node] Hostname of Pod [Feature:SetHostnameAsFQDN] a pod without subdomain field does not have FQDN [Feature:SetHostnameAsFQDN]": "a pod without subdomain field does not have FQDN [Feature:SetHostnameAsFQDN] [Disabled:Alpha] [Suite:k8s]", - "[Top Level] [k8s.io] [sig-node] Kubelet [Serial] [Slow] [k8s.io] [sig-node] experimental resource usage tracking [Feature:ExperimentalResourceUsageTracking] resource 
tracking for 100 pods per node": "resource tracking for 100 pods per node [Suite:k8s]", "[Top Level] [k8s.io] [sig-node] Kubelet [Serial] [Slow] [k8s.io] [sig-node] regular resource usage tracking [Feature:RegularResourceUsageTracking] resource tracking for 0 pods per node": "resource tracking for 0 pods per node [Suite:k8s]", @@ -275,9 +279,11 @@ var annotations = map[string]string{ "[Top Level] [k8s.io] [sig-node] Pods Extended [k8s.io] Pod Container Status should never report success for a pending container": "should never report success for a pending container [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [k8s.io] [sig-node] Pods Extended [k8s.io] Pod Container lifecycle should not create extra sandbox if all containers are done": "should not create extra sandbox if all containers are done [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [k8s.io] [sig-node] Pods Extended [k8s.io] Pods Set QOS Class should be set on Pods with matching resource requests and limits for memory and cpu [Conformance]": "should be set on Pods with matching resource requests and limits for memory and cpu [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", - "[Top Level] [k8s.io] [sig-node] PreStop graceful pod terminated should wait until preStop hook completes the process [Flaky]": "graceful pod terminated should wait until preStop hook completes the process [Flaky] [Suite:k8s]", + "[Top Level] [k8s.io] [sig-node] PreStop graceful pod terminated should wait until preStop hook completes the process": "graceful pod terminated should wait until preStop hook completes the process [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [k8s.io] [sig-node] PreStop should call prestop when killing a pod [Conformance]": "should call prestop when killing a pod [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", @@ -293,13 +299,13 @@ var annotations = map[string]string{ "[Top Level] [k8s.io] [sig-node] Security Context should support pod.Spec.SecurityContext.SupplementalGroups [LinuxOnly]": "should support pod.Spec.SecurityContext.SupplementalGroups [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [k8s.io] [sig-node] Security Context should support seccomp alpha runtime/default annotation [Feature:Seccomp] [LinuxOnly]": "should support seccomp alpha runtime/default annotation [Feature:Seccomp] [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [k8s.io] [sig-node] Security Context should support seccomp default which is unconfined [LinuxOnly]": "should support seccomp default which is unconfined [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [k8s.io] [sig-node] Security Context should support seccomp alpha unconfined annotation on the container [Feature:Seccomp] [LinuxOnly]": "should support seccomp alpha unconfined annotation on the container [Feature:Seccomp] [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [k8s.io] [sig-node] Security Context should support seccomp runtime/default [LinuxOnly]": "should support seccomp runtime/default [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [k8s.io] [sig-node] Security Context should support seccomp alpha unconfined annotation on the pod [Feature:Seccomp] [LinuxOnly]": "should support seccomp alpha unconfined annotation on the pod [Feature:Seccomp] [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] 
[k8s.io] [sig-node] Security Context should support seccomp unconfined on the container [LinuxOnly]": "should support seccomp unconfined on the container [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [k8s.io] [sig-node] Security Context should support seccomp default which is unconfined [Feature:Seccomp] [LinuxOnly]": "should support seccomp default which is unconfined [Feature:Seccomp] [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [k8s.io] [sig-node] Security Context should support seccomp unconfined on the pod [LinuxOnly]": "should support seccomp unconfined on the pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [k8s.io] [sig-node] Security Context should support volume SELinux relabeling [Flaky] [LinuxOnly]": "should support volume SELinux relabeling [Flaky] [LinuxOnly] [Suite:k8s]", @@ -317,6 +323,12 @@ var annotations = map[string]string{ "[Top Level] [sig-api-machinery] API data in etcd should be stored at the correct location and version for all resources [Serial]": "should be stored at the correct location and version for all resources [Serial] [Suite:openshift/conformance/serial]", + "[Top Level] [sig-api-machinery] API priority and fairness should ensure that requests can be classified by testing flow-schemas/priority-levels": "should ensure that requests can be classified by testing flow-schemas/priority-levels [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-api-machinery] API priority and fairness should ensure that requests can't be drowned out (fairness)": "should ensure that requests can't be drowned out (fairness) [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-api-machinery] API priority and fairness should ensure that requests can't be drowned out (priority)": "should ensure that requests can't be drowned out (priority) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-api-machinery] APIServer CR fields validation additionalCORSAllowedOrigins": "additionalCORSAllowedOrigins [Suite:openshift/conformance/parallel]", "[Top Level] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] listing mutating webhooks should work [Conformance]": "listing mutating webhooks should work [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", @@ -579,6 +591,8 @@ var annotations = map[string]string{ "[Top Level] [sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for user.openshift.io/v1, Resource=users": "should work for user.openshift.io/v1, Resource=users [Suite:openshift/conformance/parallel]", + "[Top Level] [sig-apps] CronJob should be able to schedule after more than 100 missed schedule": "should be able to schedule after more than 100 missed schedule [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-apps] CronJob should delete failed finished jobs with limit of one job": "should delete failed finished jobs with limit of one job [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-apps] CronJob should delete successful finished jobs with limit of one successful job": "should delete successful finished jobs with limit of one successful job [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -633,6 +647,8 @@ var annotations = map[string]string{ "[Top Level] [sig-apps] Deployment should not disrupt a cloud load-balancer's connectivity during rollout": "should not disrupt a cloud load-balancer's connectivity during 
rollout [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-apps] Deployment should run the lifecycle of a Deployment [Conformance]": "should run the lifecycle of a Deployment [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "[Top Level] [sig-apps] Deployment test Deployment ReplicaSet orphaning and adoption regarding controllerRef": "test Deployment ReplicaSet orphaning and adoption regarding controllerRef [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-apps] DisruptionController Listing PodDisruptionBudgets for all namespaces should list and delete a collection of PodDisruptionBudgets": "should list and delete a collection of PodDisruptionBudgets [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -697,8 +713,6 @@ var annotations = map[string]string{ "[Top Level] [sig-apps] ReplicaSet should surface a failure condition on a common issue like exceeded quota": "should surface a failure condition on a common issue like exceeded quota [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-apps] ReplicationController [Flaky] should test the lifecycle of a ReplicationController": "[Flaky] should test the lifecycle of a ReplicationController [Suite:k8s]", - "[Top Level] [sig-apps] ReplicationController should adopt matching pods on creation [Conformance]": "should adopt matching pods on creation [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", "[Top Level] [sig-apps] ReplicationController should release no longer matching pods [Conformance]": "should release no longer matching pods [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", @@ -709,6 +723,8 @@ var annotations = map[string]string{ "[Top Level] [sig-apps] ReplicationController should surface a failure condition on a common issue like exceeded quota [Conformance]": "should surface a failure condition on a common issue like exceeded quota [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "[Top Level] [sig-apps] ReplicationController should test the lifecycle of a ReplicationController [Conformance]": "should test the lifecycle of a ReplicationController [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "[Top Level] [sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance]": "Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] [Suite:k8s]", "[Top Level] [sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Slow] [Conformance]": "Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Slow] [Conformance] [Suite:k8s]", @@ -851,15 +867,19 @@ var annotations = map[string]string{ "[Top Level] [sig-auth] ServiceAccounts should ensure a single API token exists": "should ensure a single API token exists [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-auth] ServiceAccounts should guarantee kube-root-ca.crt exist in any namespace": "should guarantee kube-root-ca.crt exist in any namespace [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-auth] ServiceAccounts should mount an API token into pods [Conformance]": "should mount an API token into pods [Conformance] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-auth] ServiceAccounts should mount 
projected service account token [Conformance]": "should mount projected service account token [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "[Top Level] [sig-auth] ServiceAccounts should run through the lifecycle of a ServiceAccount [Conformance]": "should run through the lifecycle of a ServiceAccount [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", - "[Top Level] [sig-auth] ServiceAccounts should set ownership and permission when RunAsUser or FsGroup is present [LinuxOnly] [NodeFeature:FSGroup] [Feature:TokenRequestProjection]": "should set ownership and permission when RunAsUser or FsGroup is present [LinuxOnly] [NodeFeature:FSGroup] [Feature:TokenRequestProjection] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-auth] ServiceAccounts should set ownership and permission when RunAsUser or FsGroup is present [LinuxOnly] [NodeFeature:FSGroup]": "should set ownership and permission when RunAsUser or FsGroup is present [LinuxOnly] [NodeFeature:FSGroup] [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] [Feature:TokenRequestProjection]": "should support InClusterConfig with token rotation [Slow] [Feature:TokenRequestProjection] [Suite:k8s]", + "[Top Level] [sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow]": "should support InClusterConfig with token rotation [Slow] [Suite:k8s]", - "[Top Level] [sig-auth] ServiceAccounts should support OIDC discovery of service account issuer [Feature:ServiceAccountIssuerDiscovery]": "should support OIDC discovery of service account issuer [Feature:ServiceAccountIssuerDiscovery] [Disabled:Alpha] [Suite:k8s]", + "[Top Level] [sig-auth] ServiceAccounts should support OIDC discovery of service account issuer [Feature:ServiceAccountIssuerDiscovery]": "should support OIDC discovery of service account issuer [Feature:ServiceAccountIssuerDiscovery] [Disabled:Broken] [Suite:k8s]", "[Top Level] [sig-auth] [Feature:NodeAuthenticator] The kubelet can delegate ServiceAccount tokens to the API server": "The kubelet can delegate ServiceAccount tokens to the API server [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -1383,7 +1403,7 @@ var annotations = map[string]string{ "[Top Level] [sig-cli] Kubectl client Kubectl cluster-info dump should check if cluster-info dump succeeds": "should check if cluster-info dump succeeds [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-cli] Kubectl client Kubectl cluster-info should check if Kubernetes master services is included in cluster-info [Conformance]": "should check if Kubernetes master services is included in cluster-info [Conformance] [Disabled:SpecialConfig] [Suite:k8s]", + "[Top Level] [sig-cli] Kubectl client Kubectl cluster-info should check if Kubernetes control plane services is included in cluster-info [Conformance]": "should check if Kubernetes control plane services is included in cluster-info [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", "[Top Level] [sig-cli] Kubectl client Kubectl copy should copy a file from a running Pod": "should copy a file from a running Pod [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -1809,14 +1829,6 @@ var annotations = map[string]string{ "[Top Level] [sig-imageregistry][Serial][Suite:openshift/registry/serial] Image signature workflow can push a signed image to openshift registry and verify 
it": "can push a signed image to openshift registry and verify it [Suite:openshift/conformance/serial]", - "[Top Level] [sig-instrumentation] Cluster level logging implemented by Stackdriver [Feature:StackdriverLogging] [Soak] should ingest logs from applications running for a prolonged amount of time": "should ingest logs from applications running for a prolonged amount of time [Disabled:Unimplemented] [Suite:k8s]", - - "[Top Level] [sig-instrumentation] Cluster level logging implemented by Stackdriver should ingest events [Feature:StackdriverLogging]": "should ingest events [Feature:StackdriverLogging] [Disabled:Unimplemented] [Suite:k8s]", - - "[Top Level] [sig-instrumentation] Cluster level logging implemented by Stackdriver should ingest logs [Feature:StackdriverLogging]": "should ingest logs [Feature:StackdriverLogging] [Disabled:Unimplemented] [Suite:k8s]", - - "[Top Level] [sig-instrumentation] Cluster level logging implemented by Stackdriver should ingest system logs from all nodes [Feature:StackdriverLogging]": "should ingest system logs from all nodes [Feature:StackdriverLogging] [Disabled:Unimplemented] [Suite:k8s]", - "[Top Level] [sig-instrumentation] Cluster level logging using Elasticsearch [Feature:Elasticsearch] should check that logs from containers are ingested into Elasticsearch": "should check that logs from containers are ingested into Elasticsearch [Disabled:Unimplemented] [Suite:k8s]", "[Top Level] [sig-instrumentation] Events API should delete a collection of events [Conformance]": "should delete a collection of events [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", @@ -1827,13 +1839,13 @@ var annotations = map[string]string{ "[Top Level] [sig-instrumentation] Logging soak [Performance] [Slow] [Disruptive] should survive logging 1KB every 1s seconds, for a duration of 2m0s": "should survive logging 1KB every 1s seconds, for a duration of 2m0s [Serial] [Suite:k8s]", - "[Top Level] [sig-instrumentation] MetricsGrabber should grab all metrics from API server.": "should grab all metrics from API server. [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-instrumentation] MetricsGrabber should grab all metrics from API server.": "should grab all metrics from API server. [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-instrumentation] MetricsGrabber should grab all metrics from a ControllerManager.": "should grab all metrics from a ControllerManager. [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-instrumentation] MetricsGrabber should grab all metrics from a ControllerManager.": "should grab all metrics from a ControllerManager. [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-instrumentation] MetricsGrabber should grab all metrics from a Kubelet.": "should grab all metrics from a Kubelet. [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-instrumentation] MetricsGrabber should grab all metrics from a Kubelet.": "should grab all metrics from a Kubelet. [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-instrumentation] MetricsGrabber should grab all metrics from a Scheduler.": "should grab all metrics from a Scheduler. [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-instrumentation] MetricsGrabber should grab all metrics from a Scheduler.": "should grab all metrics from a Scheduler. 
[Disabled:Broken] [Suite:k8s]", "[Top Level] [sig-instrumentation] Prometheus when installed on the cluster should have a AlertmanagerReceiversNotConfigured alert in firing state": "should have a AlertmanagerReceiversNotConfigured alert in firing state [Suite:openshift/conformance/parallel]", @@ -1903,8 +1915,6 @@ var annotations = map[string]string{ "[Top Level] [sig-network] Conntrack should be able to preserve UDP traffic when server pod cycles for a NodePort service": "should be able to preserve UDP traffic when server pod cycles for a NodePort service [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-network] DNS configMap federations [Feature:Federation] should be able to change federation configuration [Slow][Serial]": "should be able to change federation configuration [Slow][Serial] [Disabled:SpecialConfig] [Suite:k8s]", - "[Top Level] [sig-network] DNS configMap nameserver Change stubDomain should be able to change stubDomain configuration [Slow][Serial]": "should be able to change stubDomain configuration [Slow][Serial] [Disabled:SpecialConfig] [Suite:k8s]", "[Top Level] [sig-network] DNS configMap nameserver Forward PTR lookup should forward PTR records lookup to upstream nameserver [Slow][Serial]": "should forward PTR records lookup to upstream nameserver [Slow][Serial] [Disabled:SpecialConfig] [Suite:k8s]", @@ -1933,7 +1943,7 @@ var annotations = map[string]string{ "[Top Level] [sig-network] DNS should support configurable pod resolv.conf": "should support configurable pod resolv.conf [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-network] ESIPP [Slow] should handle updates to ExternalTrafficPolicy field": "should handle updates to ExternalTrafficPolicy field [Suite:k8s]", + "[Top Level] [sig-network] ESIPP [Slow] should handle updates to ExternalTrafficPolicy field [DisabledForLargeClusters]": "should handle updates to ExternalTrafficPolicy field [DisabledForLargeClusters] [Suite:k8s]", "[Top Level] [sig-network] ESIPP [Slow] should only target nodes with endpoints": "should only target nodes with endpoints [Suite:k8s]", @@ -1967,13 +1977,13 @@ var annotations = map[string]string{ "[Top Level] [sig-network] Internal connectivity for TCP and UDP on ports 9000-9999 is allowed": "for TCP and UDP on ports 9000-9999 is allowed [Suite:openshift/conformance/parallel]", - "[Top Level] [sig-network] Loadbalancing: L7 GCE [Slow] [Feature:Ingress] multicluster ingress should get instance group annotation": "multicluster ingress should get instance group annotation [Suite:k8s]", + "[Top Level] [sig-network] KubeProxy should resolve connection reset issue #74839 [Slow]": "should resolve connection reset issue #74839 [Slow] [Suite:k8s]", - "[Top Level] [sig-network] Loadbalancing: L7 GCE [Slow] [Feature:Ingress] should conform to Ingress spec": "should conform to Ingress spec [Suite:k8s]", + "[Top Level] [sig-network] KubeProxy should set TCP CLOSE_WAIT timeout [Privileged]": "should set TCP CLOSE_WAIT timeout [Privileged] [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-network] Loadbalancing: L7 GCE [Slow] [Feature:Ingress] should create ingress with pre-shared certificate": "should create ingress with pre-shared certificate [Suite:k8s]", + "[Top Level] [sig-network] Loadbalancing: L7 GCE [Slow] [Feature:Ingress] multicluster ingress should get instance group annotation": "multicluster ingress should get instance group annotation [Suite:k8s]", - "[Top Level] [sig-network] Loadbalancing: L7 GCE [Slow] [Feature:Ingress] should support 
multiple TLS certs": "should support multiple TLS certs [Suite:k8s]", + "[Top Level] [sig-network] Loadbalancing: L7 GCE [Slow] [Feature:Ingress] should conform to Ingress spec": "should conform to Ingress spec [Suite:k8s]", "[Top Level] [sig-network] Loadbalancing: L7 GCE [Slow] [Feature:NEG] rolling update backend pods should not cause service disruption": "rolling update backend pods should not cause service disruption [Suite:k8s]", @@ -2005,9 +2015,13 @@ var annotations = map[string]string{ "[Top Level] [sig-network] Loadbalancing: L7 [Slow] Nginx should conform to Ingress spec": "should conform to Ingress spec [Suite:k8s]", - "[Top Level] [sig-network] Network should resolve connection reset issue #74839 [Slow]": "should resolve connection reset issue #74839 [Slow] [Suite:k8s]", + "[Top Level] [sig-network] NetworkPolicy API should support creating NetworkPolicy API operations": "should support creating NetworkPolicy API operations [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-network] NetworkPolicy [Feature:SCTPConnectivity][LinuxOnly][Disruptive] NetworkPolicy between server and client using SCTP should enforce policy based on Ports [Feature:NetworkPolicy]": "should enforce policy based on Ports [Feature:NetworkPolicy] [Disabled:Alpha] [Serial] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", - "[Top Level] [sig-network] Network should set TCP CLOSE_WAIT timeout [Privileged]": "should set TCP CLOSE_WAIT timeout [Privileged] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-network] NetworkPolicy [Feature:SCTPConnectivity][LinuxOnly][Disruptive] NetworkPolicy between server and client using SCTP should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]": "should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy] [Disabled:Alpha] [Serial] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + + "[Top Level] [sig-network] NetworkPolicy [Feature:SCTPConnectivity][LinuxOnly][Disruptive] NetworkPolicy between server and client using SCTP should support a 'default-deny' policy [Feature:NetworkPolicy]": "should support a 'default-deny' policy [Feature:NetworkPolicy] [Disabled:Alpha] [Serial] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", "[Top Level] [sig-network] NetworkPolicy [LinuxOnly] NetworkPolicy between server and client should allow egress access on one named port [Feature:NetworkPolicy]": "should allow egress access on one named port [Feature:NetworkPolicy] [Skipped:Network/OVNKubernetes] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -2069,10 +2083,14 @@ var annotations = map[string]string{ "[Top Level] [sig-network] Networking Granular Checks: Pods should function for intra-pod communication: http [NodeConformance] [Conformance]": "should function for intra-pod communication: http [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "[Top Level] [sig-network] Networking Granular Checks: Pods should function for intra-pod communication: sctp [LinuxOnly][Feature:SCTPConnectivity][Disruptive]": "should function for intra-pod communication: sctp [LinuxOnly][Feature:SCTPConnectivity][Disruptive] [Disabled:Alpha] [Serial] [Suite:k8s]", + "[Top Level] [sig-network] Networking Granular Checks: Pods should function for intra-pod 
communication: udp [NodeConformance] [Conformance]": "should function for intra-pod communication: udp [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", "[Top Level] [sig-network] Networking Granular Checks: Pods should function for node-pod communication: http [LinuxOnly] [NodeConformance] [Conformance]": "should function for node-pod communication: http [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "[Top Level] [sig-network] Networking Granular Checks: Pods should function for node-pod communication: sctp [LinuxOnly][Feature:SCTPConnectivity][Disruptive]": "should function for node-pod communication: sctp [LinuxOnly][Feature:SCTPConnectivity][Disruptive] [Disabled:Alpha] [Serial] [Suite:k8s]", + "[Top Level] [sig-network] Networking Granular Checks: Pods should function for node-pod communication: udp [LinuxOnly] [NodeConformance] [Conformance]": "should function for node-pod communication: udp [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", "[Top Level] [sig-network] Networking Granular Checks: Services should be able to handle large requests: http": "should be able to handle large requests: http [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -2085,18 +2103,30 @@ var annotations = map[string]string{ "[Top Level] [sig-network] Networking Granular Checks: Services should function for endpoint-Service: http": "should function for endpoint-Service: http [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-network] Networking Granular Checks: Services should function for endpoint-Service: sctp [Feature:SCTPConnectivity][Disruptive]": "should function for endpoint-Service: sctp [Feature:SCTPConnectivity][Disruptive] [Disabled:Alpha] [Serial] [Suite:k8s]", + "[Top Level] [sig-network] Networking Granular Checks: Services should function for endpoint-Service: udp": "should function for endpoint-Service: udp [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-network] Networking Granular Checks: Services should function for multiple endpoint-Services with same selector": "should function for multiple endpoint-Services with same selector [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-network] Networking Granular Checks: Services should function for node-Service: http": "should function for node-Service: http [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-network] Networking Granular Checks: Services should function for node-Service: sctp [Feature:SCTPConnectivity][Disruptive]": "should function for node-Service: sctp [Feature:SCTPConnectivity][Disruptive] [Disabled:Alpha] [Serial] [Suite:k8s]", + "[Top Level] [sig-network] Networking Granular Checks: Services should function for node-Service: udp": "should function for node-Service: udp [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-network] Networking Granular Checks: Services should function for pod-Service(hostNetwork): udp": "should function for pod-Service(hostNetwork): udp [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-network] Networking Granular Checks: Services should function for pod-Service: http": "should function for pod-Service: http [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-network] Networking Granular Checks: Services should function for pod-Service: sctp [Feature:SCTPConnectivity]": "should 
function for pod-Service: sctp [Feature:SCTPConnectivity] [Disabled:Alpha] [Suite:k8s]", + "[Top Level] [sig-network] Networking Granular Checks: Services should function for pod-Service: sctp [Feature:SCTPConnectivity][Disruptive]": "should function for pod-Service: sctp [Feature:SCTPConnectivity][Disruptive] [Disabled:Alpha] [Serial] [Suite:k8s]", "[Top Level] [sig-network] Networking Granular Checks: Services should function for pod-Service: udp": "should function for pod-Service: udp [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-network] Networking Granular Checks: Services should function for service endpoints using hostNetwork": "should function for service endpoints using hostNetwork [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-network] Networking Granular Checks: Services should support basic nodePort: udp functionality": "should support basic nodePort: udp functionality [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-network] Networking Granular Checks: Services should update endpoints: http": "should update endpoints: http [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-network] Networking Granular Checks: Services should update endpoints: udp": "should update endpoints: udp [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -2105,14 +2135,10 @@ var annotations = map[string]string{ "[Top Level] [sig-network] Networking Granular Checks: Services should update nodePort: udp [Slow]": "should update nodePort: udp [Slow] [Suite:k8s]", - "[Top Level] [sig-network] Networking IPerf IPv4 [Experimental] [Feature:Networking-IPv4] [Slow] [Feature:Networking-Performance] should transfer ~ 1GB onto the service endpoint 1 servers (maximum of 1 clients)": "should transfer ~ 1GB onto the service endpoint 1 servers (maximum of 1 clients) [Suite:k8s]", - - "[Top Level] [sig-network] Networking IPerf IPv6 [Experimental] [Feature:Networking-IPv6] [Slow] [Feature:Networking-Performance] [LinuxOnly] should transfer ~ 1GB onto the service endpoint 1 servers (maximum of 1 clients)": "should transfer ~ 1GB onto the service endpoint 1 servers (maximum of 1 clients) [Disabled:Unimplemented] [Suite:k8s]", + "[Top Level] [sig-network] Networking IPerf [Experimental] [Slow] [Feature:Networking-Performance] should transfer ~ 1GB onto the service endpoint 1 servers (maximum of 1 clients)": "should transfer ~ 1GB onto the service endpoint 1 servers (maximum of 1 clients) [Suite:k8s]", "[Top Level] [sig-network] Networking should check kube-proxy urls": "should check kube-proxy urls [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-network] Networking should function for pod-pod: sctp [Feature:SCTPConnectivity]": "should function for pod-pod: sctp [Feature:SCTPConnectivity] [Disabled:Alpha] [Suite:k8s]", - "[Top Level] [sig-network] Networking should provide Internet connection for containers [Feature:Networking-IPv4]": "should provide Internet connection for containers [Feature:Networking-IPv4] [Skipped:azure] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-network] Networking should provide Internet connection for containers [Feature:Networking-IPv6][Experimental][LinuxOnly]": "should provide Internet connection for containers [Feature:Networking-IPv6][Experimental][LinuxOnly] [Disabled:Unimplemented] [Skipped:azure] [Suite:k8s]", @@ -2215,16 +2241,46 @@ var annotations = map[string]string{ "[Top Level] [sig-network] Services should work after restarting kube-proxy 
[Disruptive]": "should work after restarting kube-proxy [Disruptive] [Serial] [Suite:k8s]", + "[Top Level] [sig-network] [Feature:IPv6DualStackAlphaFeature] [LinuxOnly] Granular Checks: Services Secondary IP Family should be able to handle large requests: http": "should be able to handle large requests: http [Disabled:Alpha] [Suite:k8s]", + + "[Top Level] [sig-network] [Feature:IPv6DualStackAlphaFeature] [LinuxOnly] Granular Checks: Services Secondary IP Family should be able to handle large requests: udp": "should be able to handle large requests: udp [Disabled:Alpha] [Suite:k8s]", + + "[Top Level] [sig-network] [Feature:IPv6DualStackAlphaFeature] [LinuxOnly] Granular Checks: Services Secondary IP Family should function for client IP based session affinity: http [LinuxOnly]": "should function for client IP based session affinity: http [LinuxOnly] [Disabled:Alpha] [Suite:k8s]", + + "[Top Level] [sig-network] [Feature:IPv6DualStackAlphaFeature] [LinuxOnly] Granular Checks: Services Secondary IP Family should function for client IP based session affinity: udp [LinuxOnly]": "should function for client IP based session affinity: udp [LinuxOnly] [Disabled:Alpha] [Suite:k8s]", + + "[Top Level] [sig-network] [Feature:IPv6DualStackAlphaFeature] [LinuxOnly] Granular Checks: Services Secondary IP Family should function for endpoint-Service: http": "should function for endpoint-Service: http [Disabled:Alpha] [Suite:k8s]", + + "[Top Level] [sig-network] [Feature:IPv6DualStackAlphaFeature] [LinuxOnly] Granular Checks: Services Secondary IP Family should function for endpoint-Service: udp": "should function for endpoint-Service: udp [Disabled:Alpha] [Suite:k8s]", + + "[Top Level] [sig-network] [Feature:IPv6DualStackAlphaFeature] [LinuxOnly] Granular Checks: Services Secondary IP Family should function for node-Service: http": "should function for node-Service: http [Disabled:Alpha] [Suite:k8s]", + + "[Top Level] [sig-network] [Feature:IPv6DualStackAlphaFeature] [LinuxOnly] Granular Checks: Services Secondary IP Family should function for node-Service: udp": "should function for node-Service: udp [Disabled:Alpha] [Suite:k8s]", + + "[Top Level] [sig-network] [Feature:IPv6DualStackAlphaFeature] [LinuxOnly] Granular Checks: Services Secondary IP Family should function for pod-Service: http": "should function for pod-Service: http [Disabled:Alpha] [Suite:k8s]", + + "[Top Level] [sig-network] [Feature:IPv6DualStackAlphaFeature] [LinuxOnly] Granular Checks: Services Secondary IP Family should function for pod-Service: sctp [Feature:SCTPConnectivity][Disruptive]": "should function for pod-Service: sctp [Feature:SCTPConnectivity][Disruptive] [Disabled:Alpha] [Serial] [Suite:k8s]", + + "[Top Level] [sig-network] [Feature:IPv6DualStackAlphaFeature] [LinuxOnly] Granular Checks: Services Secondary IP Family should function for pod-Service: udp": "should function for pod-Service: udp [Disabled:Alpha] [Suite:k8s]", + + "[Top Level] [sig-network] [Feature:IPv6DualStackAlphaFeature] [LinuxOnly] Granular Checks: Services Secondary IP Family should update endpoints: http": "should update endpoints: http [Disabled:Alpha] [Suite:k8s]", + + "[Top Level] [sig-network] [Feature:IPv6DualStackAlphaFeature] [LinuxOnly] Granular Checks: Services Secondary IP Family should update endpoints: udp": "should update endpoints: udp [Disabled:Alpha] [Suite:k8s]", + "[Top Level] [sig-network] [Feature:IPv6DualStackAlphaFeature] [LinuxOnly] should be able to reach pod on ipv4 and ipv6 ip [Feature:IPv6DualStackAlphaFeature:Phase2]": "should 
be able to reach pod on ipv4 and ipv6 ip [Feature:IPv6DualStackAlphaFeature:Phase2] [Disabled:Alpha] [Suite:k8s]", - "[Top Level] [sig-network] [Feature:IPv6DualStackAlphaFeature] [LinuxOnly] should create pod, add ipv6 and ipv4 ip to pod ips": "should create pod, add ipv6 and ipv4 ip to pod ips [Disabled:Alpha] [Suite:k8s]", + "[Top Level] [sig-network] [Feature:IPv6DualStackAlphaFeature] [LinuxOnly] should create a single stack service with cluster ip from primary service range [Feature:IPv6DualStackAlphaFeature:Phase2]": "should create a single stack service with cluster ip from primary service range [Feature:IPv6DualStackAlphaFeature:Phase2] [Disabled:Alpha] [Suite:k8s]", - "[Top Level] [sig-network] [Feature:IPv6DualStackAlphaFeature] [LinuxOnly] should create service with cluster ip from primary service range [Feature:IPv6DualStackAlphaFeature:Phase2]": "should create service with cluster ip from primary service range [Feature:IPv6DualStackAlphaFeature:Phase2] [Disabled:Alpha] [Suite:k8s]", + "[Top Level] [sig-network] [Feature:IPv6DualStackAlphaFeature] [LinuxOnly] should create pod, add ipv6 and ipv4 ip to pod ips": "should create pod, add ipv6 and ipv4 ip to pod ips [Disabled:Alpha] [Suite:k8s]", "[Top Level] [sig-network] [Feature:IPv6DualStackAlphaFeature] [LinuxOnly] should create service with ipv4 cluster ip [Feature:IPv6DualStackAlphaFeature:Phase2]": "should create service with ipv4 cluster ip [Feature:IPv6DualStackAlphaFeature:Phase2] [Disabled:Alpha] [Suite:k8s]", + "[Top Level] [sig-network] [Feature:IPv6DualStackAlphaFeature] [LinuxOnly] should create service with ipv4,v6 cluster ip [Feature:IPv6DualStackAlphaFeature:Phase2]": "should create service with ipv4,v6 cluster ip [Feature:IPv6DualStackAlphaFeature:Phase2] [Disabled:Alpha] [Suite:k8s]", + "[Top Level] [sig-network] [Feature:IPv6DualStackAlphaFeature] [LinuxOnly] should create service with ipv6 cluster ip [Feature:IPv6DualStackAlphaFeature:Phase2]": "should create service with ipv6 cluster ip [Feature:IPv6DualStackAlphaFeature:Phase2] [Disabled:Alpha] [Suite:k8s]", + "[Top Level] [sig-network] [Feature:IPv6DualStackAlphaFeature] [LinuxOnly] should create service with ipv6,v4 cluster ip [Feature:IPv6DualStackAlphaFeature:Phase2]": "should create service with ipv6,v4 cluster ip [Feature:IPv6DualStackAlphaFeature:Phase2] [Disabled:Alpha] [Suite:k8s]", + "[Top Level] [sig-network] [Feature:IPv6DualStackAlphaFeature] [LinuxOnly] should have ipv4 and ipv6 internal node ip": "should have ipv4 and ipv6 internal node ip [Disabled:Alpha] [Suite:k8s]", "[Top Level] [sig-network] [Feature:IPv6DualStackAlphaFeature] [LinuxOnly] should have ipv4 and ipv6 node podCIDRs": "should have ipv4 and ipv6 node podCIDRs [Disabled:Alpha] [Suite:k8s]", @@ -2333,17 +2389,21 @@ var annotations = map[string]string{ "[Top Level] [sig-node] PodTemplates should run the lifecycle of PodTemplates [Conformance]": "should run the lifecycle of PodTemplates [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", - "[Top Level] [sig-node] RuntimeClass should reject a Pod requesting a RuntimeClass with an unconfigured handler": "should reject a Pod requesting a RuntimeClass with an unconfigured handler [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-node] RuntimeClass should support RuntimeClasses API operations [Conformance]": " should support RuntimeClasses API operations [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[Top Level] [sig-node] RuntimeClass should reject a Pod requesting a 
RuntimeClass with an unconfigured handler [NodeFeature:RuntimeHandler]": "should reject a Pod requesting a RuntimeClass with an unconfigured handler [NodeFeature:RuntimeHandler] [Disabled:Broken] [Suite:k8s]", "[Top Level] [sig-node] RuntimeClass should reject a Pod requesting a RuntimeClass with conflicting node selector": "should reject a Pod requesting a RuntimeClass with conflicting node selector [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-node] RuntimeClass should reject a Pod requesting a deleted RuntimeClass": "should reject a Pod requesting a deleted RuntimeClass [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-node] RuntimeClass should reject a Pod requesting a deleted RuntimeClass [NodeFeature:RuntimeHandler]": "should reject a Pod requesting a deleted RuntimeClass [NodeFeature:RuntimeHandler] [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-node] RuntimeClass should reject a Pod requesting a non-existent RuntimeClass": "should reject a Pod requesting a non-existent RuntimeClass [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-node] RuntimeClass should reject a Pod requesting a non-existent RuntimeClass [NodeFeature:RuntimeHandler]": "should reject a Pod requesting a non-existent RuntimeClass [NodeFeature:RuntimeHandler] [Disabled:Broken] [Suite:k8s]", "[Top Level] [sig-node] RuntimeClass should run a Pod requesting a RuntimeClass with a configured handler [NodeFeature:RuntimeHandler]": "should run a Pod requesting a RuntimeClass with a configured handler [NodeFeature:RuntimeHandler] [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-node] RuntimeClass should run a Pod requesting a RuntimeClass with scheduling [NodeFeature:RuntimeHandler] [Disruptive] ": "should run a Pod requesting a RuntimeClass with scheduling [NodeFeature:RuntimeHandler] [Disruptive] [Disabled:Broken] [Serial] [Suite:k8s]", + "[Top Level] [sig-node] RuntimeClass should run a Pod requesting a RuntimeClass with scheduling with taints [Serial] ": "should run a Pod requesting a RuntimeClass with scheduling with taints [Serial] [Disabled:Broken] [Suite:k8s]", + + "[Top Level] [sig-node] RuntimeClass should run a Pod requesting a RuntimeClass with scheduling without taints ": "should run a Pod requesting a RuntimeClass with scheduling without taints [Disabled:Broken] [Suite:k8s]", "[Top Level] [sig-node] supplemental groups Ensure supplemental groups propagate to docker should propagate requested groups to the container [Local]": "should propagate requested groups to the container [Local]", @@ -2417,6 +2477,8 @@ var annotations = map[string]string{ "[Top Level] [sig-scheduling] SchedulerPreemption [Serial] PreemptionExecutionPath runs ReplicaSets to verify preemption running path [Conformance]": "runs ReplicaSets to verify preemption running path [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "[Top Level] [sig-scheduling] SchedulerPreemption [Serial] PriorityClass endpoints verify PriorityClass endpoints can be operated with different HTTP methods [Conformance]": "verify PriorityClass endpoints can be operated with different HTTP methods [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "[Top Level] [sig-scheduling] SchedulerPreemption [Serial] validates basic preemption works [Conformance]": "validates basic preemption works [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", "[Top Level] [sig-scheduling] SchedulerPreemption [Serial] validates lower priority pod preemption by critical pod [Conformance]": "validates lower 
priority pod preemption by critical pod [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", @@ -2431,10 +2493,6 @@ var annotations = map[string]string{ "[Top Level] [sig-scheduling] [Feature:GPUDevicePlugin] run Nvidia GPU Device Plugin tests": "run Nvidia GPU Device Plugin tests [Disabled:SpecialConfig] [Suite:k8s]", - "[Top Level] [sig-service-catalog] [Feature:PodPreset] PodPreset should create a pod preset": "should create a pod preset [Suite:openshift/conformance/parallel] [Suite:k8s]", - - "[Top Level] [sig-service-catalog] [Feature:PodPreset] PodPreset should not modify the pod on conflict": "should not modify the pod on conflict [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should create read-only inline ephemeral volume": "should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should create read/write inline ephemeral volume": "should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -2447,25 +2505,25 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same 
node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -2473,15 +2531,15 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", - - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a 
pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -2489,17 +2547,27 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new 
pod with same fsgroup skips ownership changes to the volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": "should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", - - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]", @@ -2537,7 +2605,9 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, 
validate content [Slow] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -2555,41 +2625,45 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to 
two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": "should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", 
"[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": "should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": "should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand Verify if offline PVC expansion works": "Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source in parallel [Slow]": "should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should fail if non-existent subpath is outside the volume 
[Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]", @@ -2625,6 +2699,8 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)][sig-windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -2633,17 +2709,13 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)][Slow] volumes should store data": "should store data [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable[Feature:VolumeSnapshotDataSource] volume snapshot controller should create snapshot objects correctly": "should create snapshot objects correctly [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable-stress[Feature:VolumeSnapshotDataSource] should support snapshotting of many volumes repeatedly [Slow] [Serial]": "should support snapshotting of many volumes repeatedly [Slow] [Serial] [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable[Feature:VolumeSnapshotDataSource] volume snapshot controller should delete the VolumeSnapshotContent according to its deletion policy": "should delete the VolumeSnapshotContent according to its deletion policy [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable[Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion": "should check snapshot fields, check restore correctly works after modifying source data, check deletion [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable[Feature:VolumeSnapshotDataSource] volume snapshot controller should restore from snapshot with saved data after modifying source data": "should restore from snapshot with saved data after modifying source data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable-stress[Feature:VolumeSnapshotDataSource] should support snapshotting of many volumes 
repeatedly [Slow] [Serial]": "should support snapshotting of many volumes repeatedly [Slow] [Serial] [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable[Feature:VolumeSnapshotDataSource] volume snapshot controller should create snapshot objects correctly": "should create snapshot objects correctly [Suite:openshift/conformance/parallel] [Suite:k8s]", - - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable[Feature:VolumeSnapshotDataSource] volume snapshot controller should delete the VolumeSnapshotContent according to its deletion policy": "should delete the VolumeSnapshotContent according to its deletion policy [Suite:openshift/conformance/parallel] [Suite:k8s]", - - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable[Feature:VolumeSnapshotDataSource] volume snapshot controller should restore from snapshot with saved data after modifying source data": "should restore from snapshot with saved data after modifying source data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable[Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion": "should check snapshot fields, check restore correctly works after modifying source data, check deletion [Disabled:Broken] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) [Feature:GenericEphemeralVolume] (immediate-binding)] ephemeral should create read-only inline ephemeral volume": "should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -2661,7 +2733,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) [Feature:GenericEphemeralVolume] (late-binding)] ephemeral should support two pods which share the same volume": "should support two pods which share the same volume [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]", @@ -2697,7 +2769,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should 
verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -2719,33 +2791,33 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (xfs)][Slow] volumes should store data": "should store data [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should 
concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]", @@ -2781,7 +2853,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content 
[Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -2795,25 +2867,25 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode 
and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (ntfs)][sig-windows] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -2823,6 +2895,10 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (xfs)][Slow] volumes should store data": "should store data [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned Snapshot (delete 
policy)] snapshottable[Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion": "should check snapshot fields, check restore correctly works after modifying source data, check deletion [Disabled:Broken] [Suite:k8s]", + + "[Top Level] [sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned Snapshot (retain policy)] snapshottable[Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion": "should check snapshot fields, check restore correctly works after modifying source data, check deletion [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should create read-only inline ephemeral volume": "should create read-only inline ephemeral volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should create read/write inline ephemeral volume": "should create read/write inline ephemeral volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", @@ -2835,25 +2911,25 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "[Top 
Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Skipped:gce] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Skipped:gce] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Skipped:gce] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": "should provision storage with mount options [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", @@ -2861,15 +2937,15 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", - - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (block volmode)] stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Skipped:gce] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (block volmode)] volume-expand should not 
allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (block volmode)] volumes should store data": "should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", @@ -2877,17 +2953,27 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", 
+ + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": "should provision storage with mount options [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": "should provision storage with pvc data source in parallel [Slow] [Skipped:gce] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (default fs)] stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Skipped:gce] [Suite:k8s]", - - "[Top Level] [sig-storage] CSI Volumes [Driver: 
pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Skipped:gce] [Suite:k8s]", @@ -2925,7 +3011,9 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Skipped:gce] [Suite:k8s]", + + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Skipped:gce] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", @@ -2943,41 +3031,45 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (ext4)] volumes should store data": "should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. 
[Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Skipped:gce] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": 
"should concurrently access the single read-only volume from pods on the same node [Skipped:gce] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Skipped:gce] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": "should support volume limits [Serial] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": "should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": "should provision a volume and schedule a pod with AllowedTopologies [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand Verify if offline PVC expansion works": "Verify if offline PVC expansion works [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand should resize volume when PVC is edited while pod is using it": 
"should resize volume when PVC is edited while pod is using it [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with mount options": "should provision storage with mount options [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source in parallel [Slow]": "should provision storage with pvc data source in parallel [Slow] [Skipped:gce] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Skipped:gce] [Suite:k8s]", @@ -3013,6 +3105,8 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: 
pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should store data": "should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", @@ -3021,17 +3115,13 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic PV (xfs)][Slow] volumes should store data": "should store data [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable[Feature:VolumeSnapshotDataSource] volume snapshot controller should create snapshot objects correctly": "should create snapshot objects correctly [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", - - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable[Feature:VolumeSnapshotDataSource] volume snapshot controller should delete the VolumeSnapshotContent according to its deletion policy": "should delete the VolumeSnapshotContent according to its deletion policy [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable-stress[Feature:VolumeSnapshotDataSource] should support snapshotting of many volumes repeatedly [Slow] [Serial]": "should support snapshotting of many volumes repeatedly [Slow] [Serial] [Disabled:Broken] [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable[Feature:VolumeSnapshotDataSource] volume snapshot controller should restore from snapshot with saved data after modifying source data": "should restore from snapshot with saved data after modifying source data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable[Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion": "should check snapshot fields, check restore correctly works after modifying source data, check deletion [Disabled:Broken] [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable[Feature:VolumeSnapshotDataSource] volume snapshot controller should create snapshot objects correctly": "should create snapshot objects correctly [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable-stress[Feature:VolumeSnapshotDataSource] should support snapshotting of many volumes repeatedly [Slow] [Serial]": "should support snapshotting of many volumes repeatedly [Slow] [Serial] [Disabled:Broken] [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: 
pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable[Feature:VolumeSnapshotDataSource] volume snapshot controller should delete the VolumeSnapshotContent according to its deletion policy": "should delete the VolumeSnapshotContent according to its deletion policy [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", - - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable[Feature:VolumeSnapshotDataSource] volume snapshot controller should restore from snapshot with saved data after modifying source data": "should restore from snapshot with saved data after modifying source data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable[Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion": "should check snapshot fields, check restore correctly works after modifying source data, check deletion [Disabled:Broken] [Skipped:gce] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Generic Ephemeral-volume (default fs) [Feature:GenericEphemeralVolume] (immediate-binding)] ephemeral should create read-only inline ephemeral volume": "should create read-only inline ephemeral volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", @@ -3049,7 +3139,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Generic Ephemeral-volume (default fs) [Feature:GenericEphemeralVolume] (late-binding)] ephemeral should support two pods which share the same volume": "should support two pods which share the same volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Skipped:gce] [Suite:k8s]", @@ -3085,7 +3175,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, 
verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Skipped:gce] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", @@ -3107,33 +3197,33 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Inline-volume (xfs)][Slow] volumes should store data": "should store data [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. 
[Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Skipped:gce] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume 
from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Skipped:gce] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Skipped:gce] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": "should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Skipped:gce] [Suite:k8s]", @@ -3169,7 +3259,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to 
subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Skipped:gce] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", @@ -3183,25 +3273,25 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": "should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. 
[Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Skipped:gce] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should 
concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Skipped:gce] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Skipped:gce] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (ntfs)][sig-windows] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", @@ -3211,6 +3301,18 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned PV (xfs)][Slow] volumes should store data": "should store data [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned Snapshot (delete policy)] snapshottable[Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion": "should check snapshot fields, check restore correctly works after modifying source data, check deletion [Disabled:Broken] [Skipped:gce] [Suite:k8s]", + + "[Top Level] [sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io][Serial] [Testpattern: Pre-provisioned Snapshot (retain policy)] snapshottable[Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion": "should check snapshot fields, check restore correctly works 
after modifying source data, check deletion [Disabled:Broken] [Skipped:gce] [Suite:k8s]", + + "[Top Level] [sig-storage] CSI mock volume CSI CSIDriver deployment after pod creation using non-attachable mock driver should bringup pod after deploying CSIDriver attach=false [Slow]": "should bringup pod after deploying CSIDriver attach=false [Slow] [Suite:k8s]", + + "[Top Level] [sig-storage] CSI mock volume CSI FSGroupPolicy [LinuxOnly] should modify fsGroup if fsGroupPolicy=File": "should modify fsGroup if fsGroupPolicy=File [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] CSI mock volume CSI FSGroupPolicy [LinuxOnly] should modify fsGroup if fsGroupPolicy=default": "should modify fsGroup if fsGroupPolicy=default [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] CSI mock volume CSI FSGroupPolicy [LinuxOnly] should not modify fsGroup if fsGroupPolicy=None": "should not modify fsGroup if fsGroupPolicy=None [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] CSI mock volume CSI NodeStage error cases [Slow] should call NodeUnstage after NodeStage ephemeral error": "should call NodeUnstage after NodeStage ephemeral error [Suite:k8s]", "[Top Level] [sig-storage] CSI mock volume CSI NodeStage error cases [Slow] should call NodeUnstage after NodeStage success": "should call NodeUnstage after NodeStage success [Suite:k8s]", @@ -3221,6 +3323,8 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] CSI mock volume CSI NodeStage error cases [Slow] should retry NodeStage after NodeStage final error": "should retry NodeStage after NodeStage final error [Suite:k8s]", + "[Top Level] [sig-storage] CSI mock volume CSI Volume Snapshots [Feature:VolumeSnapshotDataSource] volumesnapshotcontent and pvc in Bound state with deletion timestamp set should not get deleted while snapshot finalizer exists": "volumesnapshotcontent and pvc in Bound state with deletion timestamp set should not get deleted while snapshot finalizer exists [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] CSI mock volume CSI Volume expansion should expand volume by restarting pod if attach=off, nodeExpansion=on": "should expand volume by restarting pod if attach=off, nodeExpansion=on [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] CSI mock volume CSI Volume expansion should expand volume by restarting pod if attach=on, nodeExpansion=on": "should expand volume by restarting pod if attach=on, nodeExpansion=on [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -3251,6 +3355,12 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] CSI mock volume CSI workload information using mock driver should not be passed when podInfoOnMount=nil": "should not be passed when podInfoOnMount=nil [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] CSI mock volume CSIServiceAccountToken [Feature:CSIServiceAccountToken] token should be plumbed down when csiServiceAccountTokenEnabled=true": "token should be plumbed down when csiServiceAccountTokenEnabled=true [Disabled:Alpha] [Suite:k8s]", + + "[Top Level] [sig-storage] CSI mock volume CSIServiceAccountToken [Feature:CSIServiceAccountToken] token should not be plumbed down when CSIDriver is not deployed": "token should not be plumbed down when CSIDriver is not deployed [Disabled:Alpha] [Suite:k8s]", + + "[Top Level] [sig-storage] CSI mock volume CSIServiceAccountToken [Feature:CSIServiceAccountToken] token should not 
be plumbed down when csiServiceAccountTokenEnabled=false": "token should not be plumbed down when csiServiceAccountTokenEnabled=false [Disabled:Alpha] [Suite:k8s]", + "[Top Level] [sig-storage] CSI mock volume CSIStorageCapacity [Feature:CSIStorageCapacity] CSIStorageCapacity disabled": "CSIStorageCapacity disabled [Disabled:Alpha] [Suite:k8s]", "[Top Level] [sig-storage] CSI mock volume CSIStorageCapacity [Feature:CSIStorageCapacity] CSIStorageCapacity unused": "CSIStorageCapacity unused [Disabled:Alpha] [Suite:k8s]", @@ -3349,6 +3459,8 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] Dynamic Provisioning [k8s.io] GlusterDynamicProvisioner should create and delete persistent volumes [fast]": "should create and delete persistent volumes [fast] [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] EmptyDir volumes pod should support memory backed volumes of specified size": "pod should support memory backed volumes of specified size [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] EmptyDir volumes pod should support shared volumes between containers [Conformance]": "pod should support shared volumes between containers [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", "[Top Level] [sig-storage] EmptyDir volumes should support (non-root,0644,default) [LinuxOnly] [NodeConformance] [Conformance]": "should support (non-root,0644,default) [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", @@ -3499,25 +3611,25 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data 
across pod recreation on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -3525,15 +3637,15 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", - - "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod 
with mismatched mode [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (block volmode)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -3541,17 +3653,27 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV 
(default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
 "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]",

 "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": "should provision storage with pvc data source in parallel [Slow] [Suite:k8s]",

 "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]",
-
- "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",

 "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]",

@@ -3589,7 +3711,9 @@ var annotations = map[string]string{

 "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]",

 "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]",

@@ -3607,41 +3731,45 @@ var annotations = map[string]string{

 "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ext4)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",

 "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]",

 "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]",

 "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": "should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]",

 "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",

 "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": "should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]",

 "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": "should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]",

+ "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand Verify if offline PVC expansion works": "Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
 "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]",

 "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source in parallel [Slow]": "should provision storage with pvc data source in parallel [Slow] [Suite:k8s]",

 "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",

 "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]",

@@ -3677,6 +3805,8 @@ var annotations = map[string]string{

 "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]",

+ "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)][sig-windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
 "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]",

 "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]",
@@ -3685,7 +3815,7 @@ var annotations = map[string]string{

 "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Dynamic PV (xfs)][Slow] volumes should store data": "should store data [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",

 "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]",

@@ -3721,7 +3851,7 @@ var annotations = map[string]string{

 "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]",

 "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]",

@@ -3743,33 +3873,33 @@ var annotations = map[string]string{

 "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Inline-volume (xfs)][Slow] volumes should store data": "should store data [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",

 "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]",

 "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]",

 "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",

 "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",

 "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]",

@@ -3805,7 +3935,7 @@ var annotations = map[string]string{

 "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]",

 "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]",

@@ -3819,25 +3949,25 @@ var annotations = map[string]string{

 "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",

 "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]",

 "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]",

 "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",

 "[Top Level] [sig-storage] In-tree Volumes [Driver: aws] [Testpattern: Pre-provisioned PV (ntfs)][sig-windows] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]",

@@ -3851,25 +3981,25 @@ var annotations = map[string]string{

 "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",

 "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]",

 "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]",

 "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]",

@@ -3877,15 +4007,15 @@ var annotations = map[string]string{

 "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]",
-
- "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]",

 "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]",

+ "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]",
+
 "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",

 "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]",

@@ -3893,17 +4023,27 @@ var annotations = map[string]string{

 "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]",

+ "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
 "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]",

 "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": "should provision storage with pvc data source in parallel [Slow] [Suite:k8s]",

 "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]",
-
- "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",

 "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]",

@@ -3941,7 +4081,9 @@ var annotations = map[string]string{

 "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]",

 "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]",

@@ -3959,41 +4101,45 @@ var annotations = map[string]string{

 "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",

 "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]",

 "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]",

 "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": "should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]",

 "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",

 "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": "should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]",

 "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": "should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]",

+ "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand Verify if offline PVC expansion works": "Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
 "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]",

 "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source in parallel [Slow]": "should provision storage with pvc data source in parallel [Slow] [Suite:k8s]",

 "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",

 "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]",

@@ -4029,6 +4175,8 @@ var annotations = map[string]string{

 "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]",

+ "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)][sig-windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
 "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]",

 "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]",
@@ -4037,7 +4185,7 @@ var annotations = map[string]string{

 "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)][Slow] volumes should store data": "should store data [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",

 "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]",

@@ -4073,7 +4221,7 @@ var annotations = map[string]string{

 "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]",

 "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]",

@@ -4095,33 +4243,33 @@ var annotations = map[string]string{

 "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (xfs)][Slow] volumes should store data": "should store data [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should
access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", 
"[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]", @@ -4157,7 +4305,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -4171,25 +4319,25 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: 
Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (ntfs)][sig-windows] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -4203,25 +4351,25 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. 
[Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should 
access to two volumes with the same volume mode and retain data across pod recreation on different node [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": "should provision storage with mount options [Disabled:Unsupported] [Suite:k8s]", @@ -4229,17 +4377,17 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data 
source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Unsupported] [Suite:k8s]", - - "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow]": "should fail in binding dynamic provisioned PV to PVC [Slow] [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Disabled:Unsupported] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow][LinuxOnly]": "should fail in binding dynamic provisioned PV to PVC [Slow][LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] volumes should store data": "should store data [Disabled:Unsupported] [Suite:k8s]", @@ -4247,17 +4395,27 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it 
[Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Disabled:Unsupported] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Disabled:Unsupported] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents [Disabled:Unsupported] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Disabled:Unsupported] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Disabled:Unsupported] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": "should provision storage with mount options [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": "should provision storage with pvc data source in parallel [Slow] [Disabled:Unsupported] [Suite:k8s]", "[Top Level] 
[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Disabled:Unsupported] [Suite:k8s]", - - "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", @@ -4295,7 +4453,9 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Disabled:Unsupported] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content 
[Slow][LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Disabled:Unsupported] [Suite:k8s]", @@ -4313,41 +4473,45 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (ext4)] volumes should store data": "should store data [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] 
should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": "should support volume limits [Serial] [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod 
[LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": "should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": "should provision a volume and schedule a pod with AllowedTopologies [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand Verify if offline PVC expansion works": "Verify if offline PVC expansion works [Disabled:Unsupported] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with mount options": "should provision storage with mount options [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source in parallel [Slow]": "should provision storage with pvc data source in parallel [Slow] [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory 
is deleted [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", @@ -4383,6 +4547,8 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should store data": "should store data [Disabled:Unsupported] [Suite:k8s]", @@ -4391,7 +4557,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Dynamic PV (xfs)][Slow] volumes should store data": "should store data [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", @@ -4427,7 +4593,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: 
ceph][Feature:Volumes][Serial] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Disabled:Unsupported] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Disabled:Unsupported] [Suite:k8s]",
@@ -4449,35 +4615,35 @@ var annotations = map[string]string{
"[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Inline-volume (xfs)][Slow] volumes should store data": "should store data [Disabled:Unsupported] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Disabled:Unsupported] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Disabled:Unsupported] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Disabled:Unsupported] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Disabled:Unsupported] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Disabled:Unsupported] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Disabled:Unsupported] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Disabled:Unsupported] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Disabled:Unsupported] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Disabled:Unsupported] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Disabled:Unsupported] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Disabled:Unsupported] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Disabled:Unsupported] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Disabled:Unsupported] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow]": "should fail to create pod by failing to mount volume [Slow] [Disabled:Unsupported] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Disabled:Unsupported] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Disabled:Unsupported] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": "should store data [Disabled:Unsupported] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Disabled:Unsupported] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Disabled:Unsupported] [Suite:k8s]",
@@ -4513,7 +4679,7 @@ var annotations = map[string]string{
"[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Disabled:Unsupported] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Disabled:Unsupported] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Disabled:Unsupported] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Disabled:Unsupported] [Suite:k8s]",
@@ -4527,25 +4693,25 @@ var annotations = map[string]string{
"[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": "should store data [Disabled:Unsupported] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Disabled:Unsupported] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Disabled:Unsupported] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Disabled:Unsupported] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Disabled:Unsupported] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Disabled:Unsupported] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Disabled:Unsupported] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Disabled:Unsupported] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Disabled:Unsupported] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Disabled:Unsupported] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Disabled:Unsupported] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Disabled:Unsupported] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (ntfs)][sig-windows] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Disabled:Unsupported] [Suite:k8s]",
@@ -4559,25 +4725,25 @@ var annotations = map[string]string{
"[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]",
@@ -4585,15 +4751,15 @@ var annotations = map[string]string{
"[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]",
-
- "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]",

+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]",
+
"[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]",
@@ -4601,17 +4767,27 @@ var annotations = map[string]string{
"[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]",

+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
"[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": "should provision storage with pvc data source in parallel [Slow] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]",
-
- "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]",
@@ -4649,7 +4825,9 @@ var annotations = map[string]string{
"[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]",
@@ -4667,41 +4845,45 @@ var annotations = map[string]string{
"[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": "should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": "should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": "should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]",

+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand Verify if offline PVC expansion works": "Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
"[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source in parallel [Slow]": "should provision storage with pvc data source in parallel [Slow] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]",
@@ -4737,6 +4919,8 @@ var annotations = map[string]string{
"[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]",

+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)][sig-windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
"[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]",
@@ -4745,7 +4929,7 @@ var annotations = map[string]string{
"[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)][Slow] volumes should store data": "should store data [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]",
@@ -4781,7 +4965,7 @@ var annotations = map[string]string{
"[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]",
@@ -4803,33 +4987,33 @@ var annotations = map[string]string{
"[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (xfs)][Slow] volumes should store data": "should store data [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]",
@@ -4865,7 +5049,7 @@ var annotations = map[string]string{
"[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]",
@@ -4879,25 +5063,25 @@ var annotations = map[string]string{
"[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (ntfs)][sig-windows] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]",
@@ -4911,25 +5095,25 @@ var annotations = map[string]string{
"[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently
access the single volume from pods on different node [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -4937,17 +5121,17 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", - - "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow]": "should fail in binding dynamic provisioned PV to PVC [Slow] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow][LinuxOnly]": "should fail in binding dynamic provisioned PV to PVC [Slow][LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a 
pod with mismatched mode [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -4955,17 +5139,27 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] 
In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": "should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", - - "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]", @@ -5003,7 +5197,9 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] 
In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -5021,41 +5217,45 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume 
from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": "should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": "should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": "should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand Verify if offline PVC expansion works": "Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source in parallel [Slow]": "should provision storage with pvc 
data source in parallel [Slow] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]", @@ -5091,6 +5291,8 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)][sig-windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -5099,7 +5301,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)][Slow] volumes should store data": "should store data [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]", @@ -5135,7 +5337,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -5157,35 +5359,35 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (xfs)][Slow] volumes should store data": "should store data [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes 
with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow]": "should fail to create pod by failing to mount volume [Slow] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] 
subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]", @@ -5221,7 +5423,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -5235,25 +5437,25 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV 
(filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (ntfs)][sig-windows] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -5267,25 +5469,25 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes 
with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]",
@@ -5293,15 +5495,15 @@ var annotations = map[string]string{

"[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]",
-
- "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (block volmode)] stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]",

+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]",
+
"[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (block volmode)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]",
@@ -5309,17 +5511,27 @@ var annotations = map[string]string{

"[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]",

+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
"[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": "should provision storage with pvc data source in parallel [Slow] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]",
-
- "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (default fs)] stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]",
@@ -5357,7 +5569,9 @@ var annotations = map[string]string{

"[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]",
@@ -5375,41 +5589,45 @@ var annotations = map[string]string{

"[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (ext4)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": "should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": "should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": "should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]",

+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand Verify if offline PVC expansion works": "Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
"[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source in parallel [Slow]": "should provision storage with pvc data source in parallel [Slow] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]",
@@ -5445,6 +5663,8 @@ var annotations = map[string]string{

"[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]",

+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
"[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]",
@@ -5453,7 +5673,7 @@ var annotations = map[string]string{

"[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Dynamic PV (xfs)][Slow] volumes should store data": "should store data [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]",
@@ -5489,7 +5709,7 @@ var annotations = map[string]string{

"[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]",
@@ -5511,33 +5731,33 @@ var annotations = map[string]string{

"[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Inline-volume (xfs)][Slow] volumes should store data": "should store data [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]",
@@ -5573,7 +5793,7 @@ var annotations = map[string]string{

"[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]",
@@ -5587,25 +5807,25 @@ var annotations = map[string]string{

"[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: gcepd] [Testpattern: Pre-provisioned PV (ntfs)][sig-windows] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]",
@@ -5619,25 +5839,25 @@ var annotations = map[string]string{

"[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Skipped:ibmcloud] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Skipped:ibmcloud] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Skipped:ibmcloud] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Skipped:ibmcloud] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Skipped:ibmcloud] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Skipped:ibmcloud] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:ibmcloud] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Skipped:ibmcloud] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:ibmcloud] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Skipped:ibmcloud] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:ibmcloud] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Skipped:ibmcloud] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:ibmcloud] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Skipped:ibmcloud] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Skipped:ibmcloud] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Skipped:ibmcloud] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Skipped:ibmcloud] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Skipped:ibmcloud] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": "should provision storage with mount options [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]",
@@ -5645,17 +5865,17 @@ var annotations = map[string]string{

"[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]",
-
- "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (block volmode)] stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Skipped:ibmcloud] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Skipped:ibmcloud] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow]": "should fail in binding dynamic provisioned PV to PVC [Slow] [Skipped:ibmcloud] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Skipped:ibmcloud] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow][LinuxOnly]": "should fail in binding dynamic provisioned PV to PVC [Slow][LinuxOnly] [Skipped:ibmcloud] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Skipped:ibmcloud] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (block volmode)] volumes should store data": "should store data [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]",
@@ -5663,17 +5883,27 @@ var annotations = map[string]string{

"[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]",

+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
"[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": "should provision storage with mount options [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": "should provision storage with pvc data source in parallel [Slow] [Skipped:ibmcloud] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Skipped:ibmcloud] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (default fs)] stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Skipped:ibmcloud] [Suite:k8s]",
-
- "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Skipped:ibmcloud] [Suite:k8s]",
@@ -5711,7 +5941,9 @@ var annotations = map[string]string{

"[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Skipped:ibmcloud] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Skipped:ibmcloud] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Skipped:ibmcloud] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]",
@@ -5729,41 +5961,45 @@ var annotations = map[string]string{

"[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (ext4)] volumes should store data": "should store data [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Skipped:ibmcloud] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Skipped:ibmcloud] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:ibmcloud] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Skipped:ibmcloud] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:ibmcloud] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Skipped:ibmcloud] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:ibmcloud] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Skipped:ibmcloud] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:ibmcloud] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Skipped:ibmcloud] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Skipped:ibmcloud] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Skipped:ibmcloud] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Skipped:ibmcloud] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Skipped:ibmcloud] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": "should support volume limits [Serial] [Skipped:ibmcloud] [Suite:openshift/conformance/serial] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Skipped:ibmcloud] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": "should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": "should provision a volume and schedule a pod with AllowedTopologies [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]",

+ "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand Verify if offline PVC expansion works": "Verify if offline PVC expansion works [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: 
gluster] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with mount options": "should provision storage with mount options [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source in parallel [Slow]": "should provision storage with pvc data source in parallel [Slow] [Skipped:ibmcloud] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Skipped:ibmcloud] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Skipped:ibmcloud] [Suite:k8s]", @@ -5799,6 +6035,8 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Skipped:ibmcloud] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (ntfs)][sig-windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] 
[sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should store data": "should store data [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -5807,7 +6045,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Dynamic PV (xfs)][Slow] volumes should store data": "should store data [Skipped:ibmcloud] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Skipped:ibmcloud] [Suite:k8s]", @@ -5843,7 +6081,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Skipped:ibmcloud] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Skipped:ibmcloud] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Skipped:ibmcloud] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -5865,35 +6103,35 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Inline-volume (xfs)][Slow] volumes should store data": "should store data [Skipped:ibmcloud] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Skipped:ibmcloud] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Skipped:ibmcloud] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Skipped:ibmcloud] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Skipped:ibmcloud] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Skipped:ibmcloud] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Skipped:ibmcloud] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:ibmcloud] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Skipped:ibmcloud] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:ibmcloud] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Skipped:ibmcloud] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:ibmcloud] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Skipped:ibmcloud] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:ibmcloud] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Skipped:ibmcloud] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node 
[Skipped:ibmcloud] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Skipped:ibmcloud] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Skipped:ibmcloud] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Skipped:ibmcloud] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow]": "should fail to create pod by failing to mount volume [Slow] [Skipped:ibmcloud] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Skipped:ibmcloud] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": "should store data [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Skipped:ibmcloud] [Suite:k8s]", @@ -5929,7 +6167,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (default fs)] subPath should 
verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Skipped:ibmcloud] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Skipped:ibmcloud] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Skipped:ibmcloud] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -5943,25 +6181,25 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": "should store data [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Skipped:ibmcloud] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Skipped:ibmcloud] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:ibmcloud] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Skipped:ibmcloud] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:ibmcloud] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Skipped:ibmcloud] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:ibmcloud] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Skipped:ibmcloud] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:ibmcloud] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Skipped:ibmcloud] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the 
single read-only volume from pods on the same node [Skipped:ibmcloud] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Skipped:ibmcloud] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Skipped:ibmcloud] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Skipped:ibmcloud] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Skipped:ibmcloud] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: gluster] [Testpattern: Pre-provisioned PV (ntfs)][sig-windows] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Skipped:ibmcloud] [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -5975,25 +6213,25 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the 
same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -6001,17 +6239,17 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", - - "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] 
provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow]": "should fail in binding dynamic provisioned PV to PVC [Slow] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow][LinuxOnly]": "should fail in binding dynamic provisioned PV to PVC [Slow][LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -6019,17 +6257,27 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents": "(Always)[LinuxOnly], pod created with an initial 
fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": "should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", - - "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple 
pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]", @@ -6067,7 +6315,9 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -6085,41 +6335,45 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV 
(filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": "should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": "should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": "should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]",

+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand Verify if offline PVC expansion works": "Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source in parallel [Slow]": "should provision storage with pvc data source in parallel [Slow] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]",

@@ -6155,6 +6409,8 @@ var annotations = map[string]string{
"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]",

+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)][sig-windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]",

@@ -6163,7 +6419,7 @@ var annotations = map[string]string{
"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)][Slow] volumes should store data": "should store data [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]",

@@ -6199,7 +6455,7 @@ var annotations = map[string]string{
"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]",

@@ -6221,35 +6477,35 @@ var annotations = map[string]string{
"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (xfs)][Slow] volumes should store data": "should store data [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow]": "should fail to create pod by failing to mount volume [Slow] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]",

@@ -6285,7 +6541,7 @@ var annotations = map[string]string{
"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]",

@@ -6299,25 +6555,25 @@ var annotations = map[string]string{
"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (ntfs)][sig-windows] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]",

@@ -6331,25 +6587,25 @@ var annotations = map[string]string{
"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]",

@@ -6357,17 +6613,17 @@ var annotations = map[string]string{
"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]",
-
- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow]": "should fail in binding dynamic provisioned PV to PVC [Slow] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow][LinuxOnly]": "should fail in binding dynamic provisioned PV to PVC [Slow][LinuxOnly] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]",

@@ -6375,17 +6631,27 @@ var annotations = map[string]string{
"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]",

+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": "should provision storage with pvc data source in parallel [Slow] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]",
-
- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]",

@@ -6423,7 +6689,9 @@ var annotations = map[string]string{
"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]",

@@ -6441,41 +6709,45 @@ var annotations = map[string]string{
"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": "should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": "should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": "should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]",

+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand Verify if offline PVC expansion works": "Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source in parallel [Slow]": "should provision storage with pvc data source in parallel [Slow] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]",

@@ -6511,6 +6783,8 @@ var annotations = map[string]string{
"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]",

+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)][sig-windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]",

@@ -6519,7 +6793,7 @@ var annotations = map[string]string{
"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)][Slow] volumes should store data": "should store data [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]",

@@ -6555,7 +6829,7 @@ var annotations = map[string]string{
"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]",

@@ -6577,35 +6851,35 @@ var annotations = map[string]string{
"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (xfs)][Slow] volumes should store data": "should store data [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow]": "should fail to create pod by failing to mount volume [Slow] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]",

@@ -6641,7 +6915,7 @@ var annotations = map[string]string{
"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]",

@@ -6655,25 +6929,25 @@ var annotations = map[string]string{
"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",

- "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]",

"[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV
(filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (ntfs)][sig-windows] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -6687,25 +6961,25 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Disabled:Broken] [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Disabled:Broken] [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Disabled:Broken] [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Disabled:Broken] [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Disabled:Broken] [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Disabled:Broken] [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod 
recreation on different node [LinuxOnly] [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Disabled:Broken] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Disabled:Broken] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Disabled:Broken] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": "should provision storage with mount options [Disabled:Broken] [Suite:k8s]", @@ -6715,13 +6989,13 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] 
[Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Disabled:Broken] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] volumes should store data": "should store data [Disabled:Broken] [Suite:k8s]", @@ -6729,6 +7003,18 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Disabled:Broken] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Disabled:Broken] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents [Disabled:Broken] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Disabled:Broken] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy 
(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Disabled:Broken] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": "should provision storage with mount options [Disabled:Broken] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": "should provision storage with pvc data source in parallel [Slow] [Disabled:Broken] [Suite:k8s]", @@ -6737,9 +7023,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (default fs)] stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Disabled:Broken] [Suite:k8s]", - - "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Disabled:Broken] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Disabled:Broken] [Suite:k8s]", @@ -6777,7 +7061,9 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] 
[Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Disabled:Broken] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Disabled:Broken] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Disabled:Broken] [Suite:k8s]", @@ -6795,32 +7081,36 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (ext4)] volumes should store data": "should store data [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Disabled:Broken] [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. 
[Disabled:Broken] [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Disabled:Broken] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on 
the same node": "should concurrently access the single read-only volume from pods on the same node [Disabled:Broken] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Disabled:Broken] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": "should support volume limits [Serial] [Disabled:Broken] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Disabled:Broken] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": "should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Disabled:Broken] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": "should provision a volume and schedule a pod with AllowedTopologies [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand Verify if offline PVC expansion works": "Verify if offline PVC expansion works [Disabled:Broken] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV 
(ntfs)][sig-windows] provisioning should provision storage with mount options": "should provision storage with mount options [Disabled:Broken] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source in parallel [Slow]": "should provision storage with pvc data source in parallel [Slow] [Disabled:Broken] [Suite:k8s]", @@ -6829,7 +7119,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Disabled:Broken] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Disabled:Broken] [Suite:k8s]", @@ -6865,6 +7155,8 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (ntfs)][sig-windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Disabled:Broken] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should store data": "should store data [Disabled:Broken] [Suite:k8s]", @@ -6873,7 +7165,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Dynamic PV (xfs)][Slow] volumes should store data": "should store data [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] 
[Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Disabled:Broken] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Disabled:Broken] [Suite:k8s]", @@ -6909,7 +7201,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Disabled:Broken] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Disabled:Broken] [Suite:k8s]", @@ -6931,33 +7223,33 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Inline-volume (xfs)][Slow] volumes should store data": "should store data [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Disabled:Broken] [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Disabled:Broken] [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Disabled:Broken] [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Disabled:Broken] [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Disabled:Broken] [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Disabled:Broken] [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access 
to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Disabled:Broken] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Disabled:Broken] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Disabled:Broken] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Disabled:Broken] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": "should store data [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the 
subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Disabled:Broken] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Disabled:Broken] [Suite:k8s]", @@ -6993,7 +7285,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Disabled:Broken] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Disabled:Broken] [Suite:k8s]", @@ -7007,25 +7299,25 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": "should store data [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Disabled:Broken] [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. 
[Disabled:Broken] [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Disabled:Broken] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume 
[Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Disabled:Broken] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Disabled:Broken] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Disabled:Broken] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: iscsi][Feature:Volumes] [Testpattern: Pre-provisioned PV (ntfs)][sig-windows] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Disabled:Broken] [Suite:k8s]", @@ -7039,25 +7331,25 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes 
[Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -7065,15 +7357,15 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", - - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] 
stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -7081,17 +7373,27 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": "should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source 
[Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", - - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]", @@ -7129,7 +7431,9 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -7147,41 +7451,45 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet 
restart is readable after restart. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain 
data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": "should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": "should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": "should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand Verify if offline PVC expansion works": "Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] 
In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source in parallel [Slow]": "should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]", @@ -7217,6 +7525,8 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)][sig-windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -7225,7 +7535,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)][Slow] volumes should store data": "should store data [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]", @@ -7261,7 +7571,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -7283,33 +7593,33 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Inline-volume (xfs)][Slow] volumes should store data": "should store data [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv 
used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume 
from pods on the same node [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]", @@ -7345,7 +7655,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: 
local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -7359,25 +7669,25 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should 
concurrently access the single read-only volume from pods on the same node [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: block] [Testpattern: Pre-provisioned PV (ntfs)][sig-windows] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -7391,25 +7701,25 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] 
[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -7417,17 +7727,17 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", - - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: 
blockfs] [Testpattern: Dynamic PV (block volmode)] stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow]": "should fail in binding dynamic provisioned PV to PVC [Slow] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow][LinuxOnly]": "should fail in binding dynamic provisioned PV to PVC [Slow][LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -7435,19 +7745,29 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": "should provision storage with mount options 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": "should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an 
initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": "should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow][LinuxOnly]": "should fail if subpath directory is outside the volume [Slow][LinuxOnly] [Suite:k8s]", @@ -7483,7 +7803,9 @@ 
var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -7501,41 +7823,45 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from 
pods on the same node [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": "should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": "should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": "should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand Verify if offline PVC expansion works": "Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] 
In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source in parallel [Slow]": "should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]", @@ -7571,6 +7897,8 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top 
Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -7579,7 +7907,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)][Slow] volumes should store data": "should store data [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]", @@ -7615,7 +7943,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -7637,35 +7965,35 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Inline-volume (xfs)][Slow] volumes should store data": "should store data [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the 
single read-only volume from pods on the same node [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow]": "should fail to create pod by failing to mount volume [Slow] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]", @@ -7701,7 +8029,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree 
Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -7715,25 +8043,25 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same 
node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (ntfs)][sig-windows] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -7747,25 +8075,25 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across 
pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -7773,17 +8101,17 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source 
[Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", - - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow]": "should fail in binding dynamic provisioned PV to PVC [Slow] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow][LinuxOnly]": "should fail in binding dynamic provisioned PV to PVC [Slow][LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -7791,17 +8119,27 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage 
with pvc data source in parallel [Slow]": "should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", - - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]", @@ -7839,7 +8177,9 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: 
dir-bindmounted] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -7857,41 +8197,45 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different 
node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": "should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: 
dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": "should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": "should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand Verify if offline PVC expansion works": "Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source in parallel [Slow]": "should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]", @@ -7927,6 +8271,8 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)][sig-windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -7935,7 +8281,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)][Slow] volumes should store data": "should store data [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]", @@ -7971,7 +8317,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath 
readonly volumes [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -7993,35 +8339,35 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (xfs)][Slow] volumes should store data": "should store data [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node 
[LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow]": "should fail to create pod by failing to mount volume [Slow] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to 
unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]", @@ -8057,7 +8403,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -8071,25 +8417,25 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] 
should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (ntfs)][sig-windows] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -8103,25 +8449,25 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same 
node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -8129,17 +8475,17 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", - - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow]": "should fail in binding dynamic provisioned PV to PVC [Slow] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: 
dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow][LinuxOnly]": "should fail in binding dynamic provisioned PV to PVC [Slow][LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -8147,17 +8493,27 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same 
fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": "should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods 
should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", - - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]", @@ -8195,7 +8551,9 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -8213,41 +8571,45 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation 
on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": "should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": "should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": "should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: 
local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand Verify if offline PVC expansion works": "Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source in parallel [Slow]": "should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]", @@ -8283,6 +8645,8 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should verify container cannot write to 
subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)][sig-windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -8291,7 +8655,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)][Slow] volumes should store data": "should store data [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]", @@ -8327,7 +8691,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume 
[Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -8349,35 +8713,35 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (xfs)][Slow] volumes should store data": "should store data [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] 
should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow]": "should fail to create pod by failing to mount volume [Slow] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath 
should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]", @@ -8413,7 +8777,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -8427,25 +8791,25 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV 
(filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (ntfs)][sig-windows] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -8459,25 +8823,25 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", 
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -8485,17 +8849,17 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", - - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow]": "should fail in binding dynamic provisioned PV to PVC [Slow] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + + "[Top 
Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow][LinuxOnly]": "should fail in binding dynamic provisioned PV to PVC [Slow][LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -8503,17 +8867,27 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": 
"(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": "should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", - - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: 
local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]", @@ -8551,7 +8925,9 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -8569,41 +8945,45 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]",
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]",
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": "should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]",
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": "should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]",
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": "should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand Verify if offline PVC expansion works": "Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]",
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]",
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source in parallel [Slow]": "should provision storage with pvc data source in parallel [Slow] [Suite:k8s]",
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]",
@@ -8639,6 +9019,8 @@ var annotations = map[string]string{
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)][sig-windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]",
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]",
@@ -8647,7 +9029,7 @@ var annotations = map[string]string{
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)][Slow] volumes should store data": "should store data [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]",
@@ -8683,7 +9065,7 @@ var annotations = map[string]string{
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]",
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]",
@@ -8705,35 +9087,35 @@ var annotations = map[string]string{
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Inline-volume (xfs)][Slow] volumes should store data": "should store data [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]",
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]",
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow]": "should fail to create pod by failing to mount volume [Slow] [Suite:k8s]",
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]",
@@ -8769,7 +9151,7 @@ var annotations = map[string]string{
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]",
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]",
@@ -8783,25 +9165,25 @@ var annotations = map[string]string{
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]",
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]",
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (ntfs)][sig-windows] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]",
@@ -8815,25 +9197,25 @@ var annotations = map[string]string{
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]",
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]",
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]",
@@ -8841,17 +9223,17 @@ var annotations = map[string]string{
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]",
-
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]",
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow]": "should fail in binding dynamic provisioned PV to PVC [Slow] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow][LinuxOnly]": "should fail in binding dynamic provisioned PV to PVC [Slow][LinuxOnly] [Suite:k8s]",
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]",
@@ -8859,17 +9241,27 @@ var annotations = map[string]string{
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]",
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": "should provision storage with pvc data source in parallel [Slow] [Suite:k8s]",
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]",
-
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]",
@@ -8907,7 +9299,9 @@ var annotations = map[string]string{
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]",
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]",
@@ -8925,41 +9319,45 @@ var annotations = map[string]string{
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]",
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]",
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": "should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]",
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": "should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]",
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": "should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand Verify if offline PVC expansion works": "Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]",
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]",
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source in parallel [Slow]": "should provision storage with pvc data source in parallel [Slow] [Suite:k8s]",
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]",
@@ -8995,6 +9393,8 @@ var annotations = map[string]string{
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)][sig-windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]",
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]",
@@ -9003,7 +9403,7 @@ var annotations = map[string]string{
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)][Slow] volumes should store data": "should store data [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]",
@@ -9039,7 +9439,7 @@ var annotations = map[string]string{
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]",
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]",
@@ -9061,35 +9461,35 @@ var annotations = map[string]string{
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Inline-volume (xfs)][Slow] volumes should store data": "should store data [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",
"[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the
same node [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow]": "should fail to create pod by failing to mount volume [Slow] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]", @@ -9125,7 +9525,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV 
(default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -9139,25 +9539,25 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access 
the single read-only volume from pods on the same node [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (ntfs)][sig-windows] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -9171,25 +9571,25 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. 
[Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] 
multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Skipped:gce] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Skipped:gce] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Skipped:gce] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": "should provision storage with mount options [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", @@ -9197,17 +9597,17 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should 
provision storage with pvc data source": "should provision storage with pvc data source [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", - - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Skipped:gce] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow]": "should fail in binding dynamic provisioned PV to PVC [Slow] [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Skipped:gce] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow][LinuxOnly]": "should fail in binding dynamic provisioned PV to PVC [Slow][LinuxOnly] [Skipped:gce] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes 
in a pod [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] volumes should store data": "should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", @@ -9215,17 +9615,27 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] 
fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": "should provision storage with mount options [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": "should provision storage with pvc data source in parallel [Slow] [Skipped:gce] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", - - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the 
volume [Slow][LinuxOnly] [Skipped:gce] [Suite:k8s]", @@ -9263,7 +9673,9 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Skipped:gce] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Skipped:gce] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", @@ -9281,41 +9693,45 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] volumes should store data": "should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. 
[Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Skipped:gce] [Suite:k8s]", "[Top 
Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Skipped:gce] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Skipped:gce] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": "should support volume limits [Serial] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": "should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": "should provision a volume and schedule a pod with AllowedTopologies [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "[Top Level] 
[sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand Verify if offline PVC expansion works": "Verify if offline PVC expansion works [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with mount options": "should provision storage with mount options [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source in parallel [Slow]": "should provision storage with pvc data source in parallel [Slow] [Skipped:gce] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Skipped:gce] 
[Suite:k8s]", @@ -9351,6 +9767,8 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should store data": "should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", @@ -9359,7 +9777,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)][Slow] volumes should store data": "should store data [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Skipped:gce] [Suite:k8s]", @@ -9395,7 +9813,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Skipped:gce] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Skipped:gce] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: 
gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Skipped:gce] [Suite:k8s]",
 
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]",
@@ -9417,35 +9835,35 @@ var annotations = map[string]string{
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (xfs)][Slow] volumes should store data": "should store data [Skipped:gce] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Skipped:gce] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Skipped:gce] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Skipped:gce] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Skipped:gce] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Skipped:gce] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Skipped:gce] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Skipped:gce] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Skipped:gce] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Skipped:gce] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Skipped:gce] [Suite:k8s]",
 
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Skipped:gce] [Suite:k8s]",
 
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Skipped:gce] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Skipped:gce] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Skipped:gce] [Suite:k8s]",
 
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow]": "should fail to create pod by failing to mount volume [Slow] [Skipped:gce] [Suite:k8s]",
 
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Skipped:gce] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]",
 
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": "should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]",
 
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Skipped:gce] [Suite:k8s]",
@@ -9481,7 +9899,7 @@ var annotations = map[string]string{
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Skipped:gce] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Skipped:gce] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Skipped:gce] [Suite:k8s]",
 
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]",
@@ -9495,25 +9913,25 @@ var annotations = map[string]string{
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": "should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Skipped:gce] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Skipped:gce] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Skipped:gce] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Skipped:gce] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Skipped:gce] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Skipped:gce] [Suite:k8s]",
 
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Skipped:gce] [Suite:k8s]",
 
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Skipped:gce] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Skipped:gce] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Skipped:gce] [Suite:k8s]",
 
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Skipped:gce] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]",
 
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (ntfs)][sig-windows] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]",
@@ -9527,25 +9945,25 @@ var annotations = map[string]string{
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",
 
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]",
 
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]",
 
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]",
@@ -9553,17 +9971,17 @@ var annotations = map[string]string{
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]",
-
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]",
 
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow]": "should fail in binding dynamic provisioned PV to PVC [Slow] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow][LinuxOnly]": "should fail in binding dynamic provisioned PV to PVC [Slow][LinuxOnly] [Suite:k8s]",
 
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",
 
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]",
@@ -9571,17 +9989,27 @@ var annotations = map[string]string{
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]",
 
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]",
 
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": "should provision storage with pvc data source in parallel [Slow] [Suite:k8s]",
 
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]",
-
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",
 
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]",
@@ -9619,7 +10047,9 @@ var annotations = map[string]string{
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]",
 
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]",
@@ -9637,41 +10067,45 @@ var annotations = map[string]string{
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",
 
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]",
 
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]",
 
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": "should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]",
 
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",
 
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": "should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]",
 
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": "should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]",
 
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand Verify if offline PVC expansion works": "Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]",
 
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source in parallel [Slow]": "should provision storage with pvc data source in parallel [Slow] [Suite:k8s]",
 
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",
 
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]",
@@ -9707,6 +10141,8 @@ var annotations = map[string]string{
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]",
 
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]",
+
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]",
 
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]",
@@ -9715,7 +10151,7 @@ var annotations = map[string]string{
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)][Slow] volumes should store data": "should store data [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",
 
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]",
@@ -9751,7 +10187,7 @@ var annotations = map[string]string{
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]",
 
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]",
@@ -9773,35 +10209,35 @@ var annotations = map[string]string{
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Inline-volume (xfs)][Slow] volumes should store data": "should store data [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",
 
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]",
 
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]",
 
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow]": "should fail to create pod by failing to mount volume [Slow] [Suite:k8s]",
 
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",
 
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",
 
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]",
@@ -9837,7 +10273,7 @@ var annotations = map[string]string{
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]",
 
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]",
@@ -9851,25 +10287,25 @@ var annotations = map[string]string{
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]",
 
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]",
 
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]",
 
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]",
 
 "[Top Level] [sig-storage] In-tree Volumes [Driver: local][LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (ntfs)][sig-windows] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]",
@@ -9883,25 +10319,25 @@ var annotations = map[string]string{
 "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]",
 
- "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]",
+ "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.
[Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes 
with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -9909,17 +10345,17 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", - - "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", "[Top Level] [sig-storage] 
In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow]": "should fail in binding dynamic provisioned PV to PVC [Slow] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow][LinuxOnly]": "should fail in binding dynamic provisioned PV to PVC [Slow][LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -9927,17 +10363,27 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership 
changed in first pod, new pod with same fsgroup applied to the volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": "should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", - - "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV 
(default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Disabled:Broken] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Disabled:Broken] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]", @@ -9975,7 +10421,9 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -9993,41 +10441,45 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently 
access the single volume from pods on different node [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": "should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": "should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": "should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand Verify if offline PVC expansion works": "Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source in parallel [Slow]": "should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] 
[Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]", @@ -10063,6 +10515,8 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -10071,7 +10525,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)][Slow] volumes should store data": "should store data [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath 
directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]", @@ -10107,7 +10561,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -10129,35 +10583,35 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (xfs)][Slow] volumes should store data": "should store data [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node 
[LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow]": "should fail to create pod by failing to mount volume [Slow] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]", @@ 
-10193,7 +10647,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -10207,25 +10661,25 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should 
concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (ntfs)][sig-windows] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -10239,25 +10693,25 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two 
volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": "should provision storage with mount options [Disabled:Unsupported] [Suite:k8s]", @@ -10265,15 +10719,15 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Unsupported] [Suite:k8s]", - - "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] 
[Serial] [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (block volmode)] volumes should store data": "should store data [Disabled:Unsupported] [Suite:k8s]", @@ -10281,17 +10735,27 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Disabled:Unsupported] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Disabled:Unsupported] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] 
fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents [Disabled:Unsupported] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Disabled:Unsupported] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Disabled:Unsupported] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": "should provision storage with mount options [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": "should provision storage with pvc data source in parallel [Slow] [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Unsupported] [Suite:k8s]", - - "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with 
snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", @@ -10329,7 +10793,9 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Disabled:Unsupported] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Disabled:Unsupported] [Suite:k8s]", @@ -10347,41 +10813,45 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (ext4)] volumes should store data": "should store data [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. 
[Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same 
volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": "should support volume limits [Serial] [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": "should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": "should provision a volume and schedule a pod with AllowedTopologies [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV 
(ntfs)(allowExpansion)][sig-windows] volume-expand Verify if offline PVC expansion works": "Verify if offline PVC expansion works [Disabled:Unsupported] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with mount options": "should provision storage with mount options [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source in parallel [Slow]": "should provision storage with pvc data source in parallel [Slow] [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", @@ -10417,6 +10887,8 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] 
volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should store data": "should store data [Disabled:Unsupported] [Suite:k8s]", @@ -10425,7 +10897,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Dynamic PV (xfs)][Slow] volumes should store data": "should store data [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", @@ -10461,7 +10933,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Disabled:Unsupported] [Suite:k8s]", @@ -10483,33 +10955,33 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Inline-volume (xfs)][Slow] volumes should store data": "should store data [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] 
[Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. 
[Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: 
Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": "should store data [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", @@ -10545,7 +11017,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV 
(default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Disabled:Unsupported] [Suite:k8s]", @@ -10559,25 +11031,25 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": "should store data [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. 
[Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: 
rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Disabled:Unsupported] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Disabled:Unsupported] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Disabled:Unsupported] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: rbd][Feature:Volumes][Serial] [Testpattern: Pre-provisioned PV (ntfs)][sig-windows] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Disabled:Unsupported] [Suite:k8s]", @@ -10591,25 +11063,25 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the 
single volume from pods on different node [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -10617,17 +11089,17 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", - - "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow]": "should fail in binding dynamic provisioned PV to PVC [Slow] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow][LinuxOnly]": "should fail in binding dynamic provisioned PV to PVC [Slow][LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched 
mode [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -10635,17 +11107,27 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] 
[Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": "should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", - - "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]", @@ -10683,7 +11165,9 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: 
Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -10701,41 +11185,45 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods 
on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": "should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": "should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": "should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand Verify if offline PVC expansion works": "Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source in parallel [Slow]": "should provision storage with pvc data source in parallel 
[Slow] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]", @@ -10771,6 +11259,8 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)][sig-windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -10779,7 +11269,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)][Slow] volumes should store data": "should store data [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] 
In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]", @@ -10815,7 +11305,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -10837,35 +11327,35 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (xfs)][Slow] volumes should store data": "should store data [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod 
recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow]": "should fail to create pod by failing to mount volume [Slow] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted": "should be able to unmount after the subpath directory is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if 
non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]", @@ -10901,7 +11391,7 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]": "should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -10915,25 +11405,25 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node": "should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem 
volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node": "should concurrently access the single volume from pods on the same node [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", - "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod": "should not mount / map unused volumes in a pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (ntfs)][sig-windows] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -10943,6 +11433,380 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (xfs)][Slow] volumes should store data": "should store data [Suite:k8s]", + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works": "Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow]": "should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage 
with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow][LinuxOnly]": "should fail in binding dynamic provisioned PV to PVC [Slow][LinuxOnly] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (block volmode)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works": "Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different 
fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents": "(Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents": "(OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow]": "should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow][LinuxOnly]": "should fail if subpath directory is outside the volume [Slow][LinuxOnly] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow][LinuxOnly]": "should fail if subpath file is outside the volume [Slow][LinuxOnly] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow][LinuxOnly]": "should fail if subpath with backstepping is outside the volume [Slow][LinuxOnly] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": "should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": "should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory": "should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly]": "should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly]": "should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path": "should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount": "should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": "should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (default fs)] subPath should support 
restarting containers using directory as subpath [Slow]": "should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow][LinuxOnly]": "should support restarting containers using file as subpath [Slow][LinuxOnly] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive][Slow][LinuxOnly]": "should unmount if pod is force deleted while kubelet is down [Disruptive][Slow][LinuxOnly] [Serial] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow][LinuxOnly]": "should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow][LinuxOnly] [Serial] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial]": "multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (default fs)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": "should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies": "should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (ext3)] volumes should 
allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (ext3)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (ext4)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node 
[LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial]": "should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies": "should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies": "should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand Verify if offline PVC expansion works": "Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (ntfs)(allowExpansion)][sig-windows] volume-expand should resize volume when PVC is edited while pod is using it": "should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with mount options": "should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source in parallel [Slow]": "should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with pvc data source": "should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]": "should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should be able to unmount 
after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should fail if subpath directory is outside the volume [Slow][LinuxOnly]": "should fail if subpath directory is outside the volume [Slow][LinuxOnly] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should fail if subpath file is outside the volume [Slow][LinuxOnly]": "should fail if subpath file is outside the volume [Slow][LinuxOnly] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should fail if subpath with backstepping is outside the volume [Slow][LinuxOnly]": "should fail if subpath with backstepping is outside the volume [Slow][LinuxOnly] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support creating multiple subpath from same volumes [Slow]": "should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support existing directories when readOnly specified in the volumeSource": "should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support existing directory": "should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support existing single file [LinuxOnly]": "should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support file as subpath [LinuxOnly]": "should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support non-existent path": "should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support readOnly directory specified in the volumeMount": "should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": "should support readOnly file specified in the 
volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support restarting containers using directory as subpath [Slow]": "should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should support restarting containers using file as subpath [Slow][LinuxOnly]": "should support restarting containers using file as subpath [Slow][LinuxOnly] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive][Slow][LinuxOnly]": "should unmount if pod is force deleted while kubelet is down [Disruptive][Slow][LinuxOnly] [Serial] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow][LinuxOnly]": "should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow][LinuxOnly] [Serial] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property": "should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (ntfs)][sig-windows] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (xfs)][Slow] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Dynamic PV (xfs)][Slow] volumes should store data": "should store data [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Inline-volume (default fs)] 
subPath should fail if subpath directory is outside the volume [Slow][LinuxOnly]": "should fail if subpath directory is outside the volume [Slow][LinuxOnly] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow][LinuxOnly]": "should fail if subpath file is outside the volume [Slow][LinuxOnly] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow][LinuxOnly]": "should fail if subpath with backstepping is outside the volume [Slow][LinuxOnly] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": "should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": "should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Inline-volume (default fs)] subPath should support existing directory": "should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly]": "should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly]": "should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path": "should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount": "should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": "should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]": "should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow][LinuxOnly]": "should support restarting containers using file as subpath [Slow][LinuxOnly] 
[Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive][Slow][LinuxOnly]": "should unmount if pod is force deleted while kubelet is down [Disruptive][Slow][LinuxOnly] [Serial] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow][LinuxOnly]": "should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow][LinuxOnly] [Serial] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Inline-volume (default fs)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Inline-volume (ext3)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Inline-volume (ext4)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Inline-volume (ntfs)][sig-windows] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Inline-volume (ntfs)][sig-windows] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Inline-volume (xfs)][Slow] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Inline-volume (xfs)][Slow] volumes should store data": "should store data [Suite:k8s]", + + "[Top 
Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.": "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (block volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access 
the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow]": "should fail to create pod by failing to mount volume [Slow] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly]": "should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]": "should fail if non-existent subpath is outside the volume [Slow][LinuxOnly] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow][LinuxOnly]": "should fail if subpath directory is outside the volume [Slow][LinuxOnly] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow][LinuxOnly]": "should fail if subpath file is outside the volume [Slow][LinuxOnly] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow][LinuxOnly]": "should fail if subpath with backstepping is outside the volume [Slow][LinuxOnly] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]": "should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource": "should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory": "should support existing directory 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly]": "should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly]": "should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path": "should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount": "should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly]": "should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]": "should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow][LinuxOnly]": "should support restarting containers using file as subpath [Slow][LinuxOnly] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive][Slow][LinuxOnly]": "should unmount if pod is force deleted while kubelet is down [Disruptive][Slow][LinuxOnly] [Serial] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow][LinuxOnly]": "should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow][LinuxOnly] [Serial] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow]": "should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow][LinuxOnly]": "should write files of various sizes, verify size, validate content [Slow][LinuxOnly] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume": "should allow exec of files on the volume 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive[Disruptive][LinuxOnly] Should test that pv written before kubelet restart is readable after restart.": "Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with different volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on different node [LinuxOnly] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly]": "should access to two volumes with the same volume mode and retain data across pod recreation on the same node [LinuxOnly] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node": "should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree 
Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node": "should concurrently access the single volume from pods on different node [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [LinuxOnly]": "should concurrently access the single volume from pods on the same node [LinuxOnly] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow]": "should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly]": "should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (ntfs)][sig-windows] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (ntfs)][sig-windows] volumes should store data": "should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (xfs)][Slow] volumes should allow exec of files on the volume": "should allow exec of files on the volume [Suite:k8s]", + + "[Top Level] [sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (xfs)][Slow] volumes should store data": "should store data [Suite:k8s]", + "[Top Level] [sig-storage] Managed cluster should have no crashlooping recycler pods over four minutes": "have no crashlooping recycler pods over four minutes [Suite:openshift/conformance/parallel]", "[Top Level] [sig-storage] Mounted flexvolume volume expand [Slow] [Feature:ExpandInUsePersistentVolumes] should be resizable when mounted": "should be resizable when mounted [Suite:k8s]", @@ -11431,6 +12295,8 @@ var annotations = map[string]string{ "[Top Level] [sig-storage] [Serial] Volume metrics PVController should create none metrics for pvc controller before creating any PV or PVC": "should create none metrics for pvc controller before creating any PV or PVC [Suite:openshift/conformance/serial] [Suite:k8s]", + "[Top Level] [sig-storage] [Serial] Volume metrics PVController should create total pv count metrics for with plugin and volume mode labels after creating pv": "should create total pv count metrics for with plugin and volume mode labels after creating pv [Suite:openshift/conformance/serial] [Suite:k8s]", + "[Top Level] [sig-storage] [Serial] Volume metrics PVController should create unbound pv count metrics for pvc controller after creating pv only": "should create unbound pv count metrics for pvc controller after creating pv only [Suite:openshift/conformance/serial] [Suite:k8s]", "[Top Level] [sig-storage] [Serial] Volume metrics PVController should create unbound pvc count metrics for pvc controller after creating pvc only": 
"should create unbound pvc count metrics for pvc controller after creating pvc only [Suite:openshift/conformance/serial] [Suite:k8s]", diff --git a/test/extended/util/framework.go b/test/extended/util/framework.go index bb21f97a9a8e..98b9bcfac7cb 100644 --- a/test/extended/util/framework.go +++ b/test/extended/util/framework.go @@ -33,10 +33,10 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/wait" + quotav1 "k8s.io/apiserver/pkg/quota/v1" "k8s.io/client-go/kubernetes" batchv1client "k8s.io/client-go/kubernetes/typed/batch/v1" corev1client "k8s.io/client-go/kubernetes/typed/core/v1" - quotav1 "k8s.io/kubernetes/pkg/quota/v1" e2e "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/framework/statefulset" diff --git a/test/extended/util/image/image.go b/test/extended/util/image/image.go index 02657b109edc..c1aca7f577f8 100644 --- a/test/extended/util/image/image.go +++ b/test/extended/util/image/image.go @@ -38,7 +38,7 @@ func init() { // allowed upstream kube images - index and value must match upstream or // tests will fail - "k8s.gcr.io/e2e-test-images/agnhost:2.20": 1, + "k8s.gcr.io/e2e-test-images/agnhost:2.21": 1, "docker.io/library/nginx:1.14-alpine": 23, "docker.io/library/nginx:1.15-alpine": 24, "docker.io/library/redis:5.0.5-alpine": 31, diff --git a/test/extended/util/image/zz_generated.txt b/test/extended/util/image/zz_generated.txt index 90387950142c..716387c4f6d9 100644 --- a/test/extended/util/image/zz_generated.txt +++ b/test/extended/util/image/zz_generated.txt @@ -28,8 +28,8 @@ gcr.io/kubernetes-e2e-test-images/volume/gluster:1.0 quay.io/openshift/community gcr.io/kubernetes-e2e-test-images/volume/iscsi:2.0 quay.io/openshift/community-e2e-images:e2e-36-gcr-io-kubernetes-e2e-test-images-volume-iscsi-2-0-RuKnuHHE9bwOcqDI gcr.io/kubernetes-e2e-test-images/volume/nfs:1.0 quay.io/openshift/community-e2e-images:e2e-35-gcr-io-kubernetes-e2e-test-images-volume-nfs-1-0-AW-qOVYbJYyLsrEa gcr.io/kubernetes-e2e-test-images/volume/rbd:1.0.1 quay.io/openshift/community-e2e-images:e2e-38-gcr-io-kubernetes-e2e-test-images-volume-rbd-1-0-1-ijh6-o3bvnWWIAFM -k8s.gcr.io/build-image/debian-iptables:v12.1.2 quay.io/openshift/community-e2e-images:e2e-11-k8s-gcr-io-build-image-debian-iptables-v12-1-2-KNXT7NtTs4Px1rYf -k8s.gcr.io/e2e-test-images/agnhost:2.20 quay.io/openshift/community-e2e-images:e2e-1-k8s-gcr-io-e2e-test-images-agnhost-2-20-1f8SSDq8Y3dr2Xfm +k8s.gcr.io/build-image/debian-iptables:buster-v1.3.0 quay.io/openshift/community-e2e-images:e2e-11-k8s-gcr-io-build-image-debian-iptables-buster-v1-3-0-2IwrNjgvp777TKmc +k8s.gcr.io/e2e-test-images/agnhost:2.21 quay.io/openshift/community-e2e-images:e2e-1-k8s-gcr-io-e2e-test-images-agnhost-2-21-v0OxZ-MhmEqtUyE9 k8s.gcr.io/etcd:3.4.13-0 quay.io/openshift/community-e2e-images:e2e-13-k8s-gcr-io-etcd-3-4-13-0-VKSltRW2wd8-QWft k8s.gcr.io/pause:3.2 quay.io/openshift/community-e2e-images:e2e-27-k8s-gcr-io-pause-3-2-pZ7kB8CESdFpJJRs k8s.gcr.io/prometheus-dummy-exporter:v0.1.0 quay.io/openshift/community-e2e-images:e2e-29-k8s-gcr-io-prometheus-dummy-exporter-v0-1-0-fNZIUrKnAwcvKTvF diff --git a/vendor/github.com/Azure/go-autorest/.gitignore b/vendor/github.com/Azure/go-autorest/.gitignore new file mode 100644 index 000000000000..3350aaf70648 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/.gitignore @@ -0,0 +1,32 @@ +# The standard Go .gitignore file follows. 
(Sourced from: github.com/github/gitignore/master/Go.gitignore) +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test +.DS_Store +.idea/ +.vscode/ + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +# go-autorest specific +vendor/ +autorest/azure/example/example diff --git a/vendor/github.com/Azure/go-autorest/CHANGELOG.md b/vendor/github.com/Azure/go-autorest/CHANGELOG.md new file mode 100644 index 000000000000..d1f596bfc9b9 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/CHANGELOG.md @@ -0,0 +1,1004 @@ +# CHANGELOG + +## v14.2.0 + +- Added package comment to make `github.com/Azure/go-autorest` importable. + +## v14.1.1 + +### Bug Fixes + +- Change `x-ms-authorization-auxiliary` header value separator to comma. + +## v14.1.0 + +### New Features + +- Added `azure.SetEnvironment()` that will update the global environments map with the specified values. + +## v14.0.1 + +### Bug Fixes + +- Fix race condition when refreshing token. +- Fixed some tests to work with Go 1.14. + +## v14.0.0 + +## Breaking Changes + +- By default, the `DoRetryForStatusCodes` functions will no longer infinitely retry a request when the response returns an HTTP status code of 429 (StatusTooManyRequests). To opt in to the old behavior set `autorest.Count429AsRetry` to `false`. + +## New Features + +- Variable `autorest.Max429Delay` can be used to control the maximum delay between retries when a 429 is received with no `Retry-After` header. The default is zero which means there is no cap. + +## v13.4.0 + +## New Features + +- Added field `SendDecorators` to the `Client` type. This can be used to specify a custom chain of SendDecorators per client. +- Added method `Client.Send()` which includes logic for selecting the preferred chain of SendDecorators. + +## v13.3.3 + +### Bug Fixes + +- Fixed connection leak when retrying requests. +- Enabled exponential back-off with a 2-minute cap when retrying on 429. +- Fixed some cases where errors were inadvertently dropped. + +## v13.3.2 + +### Bug Fixes + +- Updated `autorest.AsStringSlice()` to convert slice elements to their string representation. + +## v13.3.1 + +- Updated external dependencies. + +### Bug Fixes + +## v13.3.0 + +### New Features + +- Added support for shared key and shared access signature token authorization. + - `autorest.NewSharedKeyAuthorizer()` and dependent types. + - `autorest.NewSASTokenAuthorizer()` and dependent types. +- Added `ServicePrincipalToken.SetCustomRefresh()` so a custom refresh function can be invoked when a token has expired. + +### Bug Fixes + +- Fixed `cli.AccessTokensPath()` to respect `AZURE_CONFIG_DIR` when set. +- Support parsing error messages in XML responses. + +## v13.2.0 + +### New Features + +- Added the following functions to replace their versions that don't take a context. + - `adal.InitiateDeviceAuthWithContext()` + - `adal.CheckForUserCompletionWithContext()` + - `adal.WaitForUserCompletionWithContext()` + +## v13.1.0 + +### New Features + +- Added support for MSI authentication on Azure App Service and Azure Functions. + +## v13.0.2 + +### Bug Fixes + +- Always retry a request even if the sender returns a non-nil error. + +## v13.0.1 + +## Bug Fixes + +- Fixed `autorest.WithQueryParameters()` so that it properly encodes multi-value query parameters. 
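Editor's note: the v14.0.0 entry above changes 429 handling, and both knobs it names are package-level variables, so a consumer's opt-out is a one-line assignment. A minimal sketch, assuming only the `autorest.Count429AsRetry` and `autorest.Max429Delay` variables named in that entry; the surrounding program is illustrative:

```go
package main

import (
	"time"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	// Opt back in to the pre-v14.0.0 behavior: 429 responses do not
	// count against the retry cap (i.e. retry indefinitely).
	autorest.Count429AsRetry = false

	// Independently, cap the delay between retries when a 429 arrives
	// with no Retry-After header; the default of zero means no cap.
	autorest.Max429Delay = 2 * time.Minute
}
```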
+ +## v13.0.0 + +## Breaking Changes + +The `tracing` package has been rewritten to provide a common interface for consumers to wire in the tracing package of their choice. +What this means is that by default no tracing provider will be compiled into your program and setting the `AZURE_SDK_TRACING_ENABLED` +environment variable will have no effect. To enable this previous behavior you must now add the following import to your source file. +```go + import _ "github.com/Azure/go-autorest/tracing/opencensus" +``` +The APIs required by autorest-generated code have remained but some APIs have been removed and new ones added. +The following APIs and variables have been removed (the majority of them were moved to the `opencensus` package). +- tracing.Transport +- tracing.Enable() +- tracing.EnableWithAIForwarding() +- tracing.Disable() + +The following APIs and types have been added +- tracing.Tracer +- tracing.Register() + +To hook up a tracer simply call `tracing.Register()` passing in a type that satisfies the `tracing.Tracer` interface. + +## v12.4.3 + +### Bug Fixes + +- `autorest.MultiTenantServicePrincipalTokenAuthorizer` will now properly add its auxiliary bearer tokens. + +## v12.4.2 + +### Bug Fixes + +- Improvements to the fixes made in v12.4.1. + - Remove `override` stanza from Gopkg.toml and `replace` directive from go.mod as they don't apply when being consumed as a dependency. + - Switched to latest version of `ocagent` that still depends on protobuf v1.2. + - Add indirect dependencies to the `required` clause with matching `constraint` stanzas so that `dep` dependencies match go.sum. + +## v12.4.1 + +### Bug Fixes + +- Updated OpenCensus and OCAgent versions to versions that don't depend on v1.3+ of protobuf as it was breaking kubernetes. +- Pinned opencensus-proto to a version that's compatible with our versions of OpenCensus and OCAgent. + +## v12.4.0 + +### New Features + +- Added `autorest.WithPrepareDecorators` and `autorest.GetPrepareDecorators` for adding and retrieving a custom chain of PrepareDecorators to the provided context. + +## v12.3.0 + +### New Features + +- Support for multi-tenant via x-ms-authorization-auxiliary header has been added for client credentials with + secret scenario; this basically bundles multiple OAuthConfig and ServicePrincipalToken types into corresponding + MultiTenant* types along with a new authorizer that adds the primary and auxiliary token headers to the request. + The authentication helpers have been updated to support this scenario; if environment var AZURE_AUXILIARY_TENANT_IDS + is set with a semicolon-delimited list of tenants the multi-tenant codepath will kick in to create the appropriate authorizer. + See `adal.NewMultiTenantOAuthConfig`, `adal.NewMultiTenantServicePrincipalToken` and `autorest.NewMultiTenantServicePrincipalTokenAuthorizer` + along with their supporting types and methods. +- Added `autorest.WithSendDecorators` and `autorest.GetSendDecorators` for adding and retrieving a custom chain of SendDecorators to the provided context. +- Added `autorest.DoRetryForStatusCodesWithCap` and `autorest.DelayForBackoffWithCap` to enforce an upper bound on the duration between retries. + +## v12.2.0 + +### New Features + +- Added `autorest.WithXML`, `autorest.AsMerge`, `autorest.WithBytes` preparer decorators. +- Added `autorest.ByUnmarshallingBytes` response decorator. +- Added `Response.IsHTTPStatus` and `Response.HasHTTPStatus` helper methods for inspecting HTTP status code in `autorest.Response` types.
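Editor's note: to make the v13.0.0 tracing rewrite above concrete, here is a minimal sketch of hooking up a tracer via `tracing.Register()`. The method set shown for `tracing.Tracer` is inferred from the tracing package that entry describes; treat the no-op implementation as illustrative, not as the package's own code:

```go
package main

import (
	"context"
	"net/http"

	"github.com/Azure/go-autorest/tracing"
)

// noopTracer satisfies tracing.Tracer but records nothing; a real
// implementation would delegate to OpenCensus, OpenTelemetry, etc.
type noopTracer struct{}

func (noopTracer) NewTransport(base *http.Transport) http.RoundTripper        { return base }
func (noopTracer) StartSpan(ctx context.Context, name string) context.Context { return ctx }
func (noopTracer) EndSpan(ctx context.Context, httpStatusCode int, err error) {}

func main() {
	// Once registered, autorest-generated clients route their HTTP and
	// API-call instrumentation through this tracer.
	tracing.Register(noopTracer{})
}
```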
+ +### Bug Fixes + +- `autorest.DelayWithRetryAfter` now supports HTTP-Dates in the `Retry-After` header and is not limited to just 429 status codes. + +## v12.1.0 + +### New Features + +- Added `to.ByteSlicePtr()`. +- Added blob/queue storage resource ID to `azure.ResourceIdentifier`. + +## v12.0.0 + +### Breaking Changes + +In preparation for modules the following deprecated content has been removed. + + - async.NewFuture() + - async.Future.Done() + - async.Future.WaitForCompletion() + - async.DoPollForAsynchronous() + - The `utils` package + - validation.NewErrorWithValidationError() + - The `version` package + +## v11.9.0 + +### New Features + +- Add `ResourceIdentifiers` field to `azure.Environment` containing resource IDs for public and sovereign clouds. + +## v11.8.0 + +### New Features + +- Added `autorest.NewClientWithOptions()` to support endpoints that require free renegotiation. + +## v11.7.1 + +### Bug Fixes + +- Fix missing support for http(s) proxy when using the default sender. + +## v11.7.0 + +### New Features + +- Added methods to obtain a ServicePrincipalToken on the various credential configuration types in the `auth` package. + +## v11.6.1 + +### Bug Fixes + +- Fix ACR DNS endpoint for government clouds. +- Add Cosmos DB DNS endpoints. +- Update dependencies to resolve build breaks in OpenCensus. + +## v11.6.0 + +### New Features + +- Added type `autorest.BasicAuthorizer` to support Basic authentication. + +## v11.5.2 + +### Bug Fixes + +- Fixed `GetTokenFromCLI` so that it works with zsh. + +## v11.5.1 + +### Bug Fixes + +- In `Client.sender()` set the minimum TLS version on HTTP clients to 1.2. + +## v11.5.0 + +### New Features + +- The `auth` package has been refactored so that the environment and file settings are now available. +- The methods used in `auth.NewAuthorizerFromEnvironment()` are now exported so that custom authorization chains can be created. +- Added support for certificate authorization for file-based config. + +## v11.4.0 + +### New Features + +- Added `adal.AddToUserAgent()` so callers can append custom data to the user-agent header used for ADAL requests. +- Exported `adal.UserAgent()` for parity with `autorest.Client`. + +## v11.3.2 + +### Bug Fixes + +- In `Future.WaitForCompletionRef()` if the provided context has a deadline don't add the default deadline. + +## v11.3.1 + +### Bug Fixes + +- For an LRO PUT operation the final GET URL was incorrectly set to the Location polling header in some cases. + +## v11.3.0 + +### New Features + +- Added method `ServicePrincipalToken()` to `DeviceFlowConfig` type. + +## v11.2.8 + +### Bug Fixes + +- Deprecate content in the `version` package. The functionality has been superseded by content in the `autorest` package. + +## v11.2.7 + +### Bug Fixes + +- Fix environment variable name for enabling tracing from `AZURE_SDK_TRACING_ENABELD` to `AZURE_SDK_TRACING_ENABLED`. + Note that for backward compatibility reasons, both will work until the next major version release of the package. + +## v11.2.6 + +### Bug Fixes + +- If zero bytes are read from a polling response body don't attempt to unmarshal them. + +## v11.2.5 + +### Bug Fixes + +- Removed race condition in `autorest.DoRetryForStatusCodes`. + +## v11.2.4 + +### Bug Fixes + +- Function `cli.ProfilePath` now respects environment `AZURE_CONFIG_DIR` if available. + +## v11.2.1 + +NOTE: Versions of Go prior to 1.10 have been removed from CI as they no +longer work with golint.
+ +### Bug Fixes + +- Method `MSIConfig.Authorizer` now supports user-assigned identities. +- The adal package now reports its own user-agent string. + +## v11.2.0 + +### New Features + +- Added `tracing` package that enables instrumentation of HTTP and API calls. + Setting the env variable `AZURE_SDK_TRACING_ENABLED` or calling `tracing.Enable` + will start instrumenting the code for metrics and traces. + Additionally, setting the env variable `OCAGENT_TRACE_EXPORTER_ENDPOINT` or + calling `tracing.EnableWithAIForwarding` will start the instrumentation and connect to an + App Insights Local Forwarder that needs to be running. Note that if the + AI Local Forwarder is not running tracing will still be enabled. + By default, instrumentation is disabled. Once enabled, instrumentation can also + be programmatically disabled by calling `Disable`. +- Added `DoneWithContext` call for checking LRO status. `Done` has been deprecated. + +### Bug Fixes + +- Don't use the initial request's context for LRO polling. +- Don't override the `refreshLock` and the `http.Client` when unmarshalling `ServicePrincipalToken` if + it is already set. + +## v11.1.1 + +### Bug Fixes + +- When creating a future always include the polling tracker even if there's a failure; this allows the underlying response to be obtained by the caller. + +## v11.1.0 + +### New Features + +- Added `auth.NewAuthorizerFromCLI` to create an authorizer configured from the Azure 2.0 CLI. +- Added `adal.NewOAuthConfigWithAPIVersion` to create an OAuthConfig with the specified API version. + +## v11.0.1 + +### New Features + +- Added `x5c` header to client assertion for certificate Issuer+Subject Name authentication. + +## v11.0.0 + +### Breaking Changes + +- To handle differences between ADFS and AAD the following fields have had their types changed from `string` to `json.Number` + - ExpiresIn + - ExpiresOn + - NotBefore + +### New Features + +- Added `auth.NewAuthorizerFromFileWithResource` to create an authorizer from the config file with the specified resource. +- Setting a client's `PollingDuration` to zero will use the provided context to control an LRO's polling duration. + +## v10.15.5 + +### Bug Fixes + +- In `DoRetryForStatusCodes`, if a request's context is cancelled return the last response. + +## v10.15.4 + +### Bug Fixes + +- If a polling operation returns a failure status code return the associated error. + +## v10.15.3 + +### Bug Fixes + +- Initialize the polling URL and method for an LRO tracker on each iteration, favoring the Azure-AsyncOperation header. + +## v10.15.2 + +### Bug Fixes + +- Use fmt.Fprint when printing request/response so that any escape sequences aren't treated as format specifiers. + +## v10.15.1 + +### Bug Fixes + +- If an LRO API returns a `Failed` provisioning state in the initial response return an error at that point so the caller doesn't have to poll. +- For failed LROs without an OData v4 error include the response body in the error's `AdditionalInfo` field to aid in diagnosing the failure. + +## v10.15.0 + +### New Features + +- Add initial support for request/response logging via setting environment variables. + Setting `AZURE_GO_SDK_LOG_LEVEL` to `LogInfo` will log request/response + without their bodies. To include the bodies set the log level to `LogDebug`. + By default the logger writes to stderr, however it can also write to stdout or a file + if specified in `AZURE_GO_SDK_LOG_FILE`. Note that if the specified file + already exists it will be truncated.
+ IMPORTANT: by default the logger will redact the Authorization and Ocp-Apim-Subscription-Key + headers. Any other secrets will _not_ be redacted. + +## v10.14.0 + +### New Features + +- Added package version that contains version constants and user-agent data. + +### Bug Fixes + +- Add the user-agent to token requests. + +## v10.13.0 + +- Added support for additionalInfo in ServiceError type. + +## v10.12.0 + +### New Features + +- Added field ServicePrincipalToken.MaxMSIRefreshAttempts to configure the maximum number of attempts to refresh an MSI token. + +## v10.11.4 + +### Bug Fixes + +- If an LRO returns http.StatusOK on the initial response with no async headers return the response body from Future.GetResult(). +- If there is no "final GET URL" return an error from Future.GetResult(). + +## v10.11.3 + +### Bug Fixes + +- In IMDS retry logic, if we don't receive a response don't retry. + - Renamed the retry function so it's clear it's meant for IMDS only. +- For error response bodies that aren't OData-v4 compliant stick the raw JSON in the ServiceError.Details field so the information isn't lost. + - Also add the raw HTTP response to the DetailedResponse. +- Removed superfluous wrapping of response error in azure.DoRetryWithRegistration(). + +## v10.11.2 + +### Bug Fixes + +- Validation for integers handles int and int64 types. + +## v10.11.1 + +### Bug Fixes + +- Adding User information to authorization config as parsed from CLI cache. + +## v10.11.0 + +### New Features + +- Added NewServicePrincipalTokenFromManualTokenSecret for creating a new SPT using a manual token and secret +- Added method ServicePrincipalToken.MarshalTokenJSON() to marshal the inner Token + +## v10.10.0 + +### New Features + +- Most ServicePrincipalTokens can now be marshalled/unmarshalled to/from JSON (ServicePrincipalCertificateSecret and ServicePrincipalMSISecret are not supported). +- Added method ServicePrincipalToken.SetRefreshCallbacks(). + +## v10.9.2 + +### Bug Fixes + +- Refreshing a refresh token obtained from a web app authorization code now works. + +## v10.9.1 + +### Bug Fixes + +- The retry logic for MSI token requests now uses exponential backoff per the guidelines. +- IsTemporaryNetworkError() will return true for errors that don't implement the net.Error interface. + +## v10.9.0 + +### Deprecated Methods + +| Old Method | New Method | +| -------------------------: | :---------------------------: | +| azure.NewFuture() | azure.NewFutureFromResponse() | +| Future.WaitForCompletion() | Future.WaitForCompletionRef() | + +### New Features + +- Added azure.NewFutureFromResponse() for creating a Future from the initial response from an async operation. +- Added Future.GetResult() for making the final GET call to retrieve the result from an async operation. + +### Bug Fixes + +- Some futures failed to return their results; this should now be fixed. + +## v10.8.2 + +### Bug Fixes + +- Add nil-guard to token retry logic. + +## v10.8.1 + +### Bug Fixes + +- Return a TokenRefreshError if the sender fails on the initial request. +- Don't retry on non-temporary network errors. + +## v10.8.0 + +- Added NewAuthorizerFromEnvironmentWithResource() helper function. + +## v10.7.0 + +### New Features + +- Added \*WithContext() methods to ADAL token refresh operations. + +## v10.6.2 + +- Fixed a bug on device authentication. + +## v10.6.1 + +- Added retries to MSI token get request. + +## v10.6.0 + +- Changed MSI token implementation. Now, the token endpoint is the IMDS endpoint.
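Editor's note: the v10.15.0 logging entry above is configuration-by-environment; a short sketch of exercising it. The variable names and values come from the entry itself, the log-file path is hypothetical, and setting the variables in-process with `os.Setenv` only works if done before the logger initializes, so in practice they are usually exported in the shell:

```go
package main

import "os"

func main() {
	// Log requests/responses without bodies; "LogDebug" would include bodies.
	os.Setenv("AZURE_GO_SDK_LOG_LEVEL", "LogInfo")

	// Default output is stderr; send it to a file instead. Per the entry
	// above, an existing file at this path is truncated.
	os.Setenv("AZURE_GO_SDK_LOG_FILE", "/tmp/autorest-trace.log")

	// ... construct and use a go-autorest based client here ...
}
```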
+ +## v10.5.1 + +### Bug Fixes + +- `DeviceFlowConfig.Authorizer()` now prints the device code message when running `go test`. `-v` flag is required. + +## v10.5.0 + +### New Features + +- Added NewPollingRequestWithContext() for use with polling asynchronous operations. + +### Bug Fixes + +- Make retry logic use the request's context instead of the deprecated Cancel object. + +## v10.4.0 + +### New Features + +- Added helper for parsing Azure Resource ID's. +- Added deprecation message to utils.GetEnvVarOrExit() + +## v10.3.0 + +### New Features + +- Added EnvironmentFromURL method to load an Environment from a given URL. This function is particularly useful in the private and hybrid Cloud model, where one may define their own endpoints +- Added TokenAudience endpoint to Environment structure. This is useful in private and hybrid cloud models where TokenAudience endpoint can be different from ResourceManagerEndpoint + +## v10.2.0 + +### New Features + +- Added endpoints for batch management. + +## v10.1.3 + +### Bug Fixes + +- In Client.Do() invoke WithInspection() last so that it will inspect WithAuthorization(). +- Fixed authorization methods to invoke p.Prepare() first, aligning them with the other preparers. + +## v10.1.2 + +- Corrected comment for auth.NewAuthorizerFromFile() function. + +## v10.1.1 + +- Updated version number to match current release. + +## v10.1.0 + +### New Features + +- Expose the polling URL for futures. + +### Bug Fixes + +- Add validation.NewErrorWithValidationError back to prevent breaking changes (it is deprecated). + +## v10.0.0 + +### New Features + +- Added target and innererror fields to ServiceError to comply with OData v4 spec. +- The Done() method on futures will now return a ServiceError object when available (it used to return a partial value of such errors). +- Added helper methods for obtaining authorizers. +- Expose the polling URL for futures. + +### Bug Fixes + +- Switched from glide to dep for dependency management. +- Fixed unmarshaling of ServiceError for JSON bodies that don't conform to the OData spec. +- Fixed a race condition in token refresh. + +### Breaking Changes + +- The ServiceError.Details field type has been changed to match the OData v4 spec. +- Go v1.7 has been dropped from CI. +- API parameter validation failures will now return a unique error type validation.Error. +- The adal.Token type has been decomposed from adal.ServicePrincipalToken (this was necessary in order to fix the token refresh race). + +## v9.10.0 + +- Fix the Service Bus suffix in Azure public env +- Add Service Bus Endpoint (AAD ResourceURI) for use in [Azure Service Bus RBAC Preview](https://docs.microsoft.com/en-us/azure/service-bus-messaging/service-bus-role-based-access-control) + +## v9.9.0 + +### New Features + +- Added EventGridKeyAuthorizer for key authorization with event grid topics. + +### Bug Fixes + +- Fixed race condition when auto-refreshing service principal tokens. + +## v9.8.1 + +### Bug Fixes + +- Added http.StatusNoContent (204) to the list of expected status codes for long-running operations. +- Updated runtime version info so it's current. + +## v9.8.0 + +### New Features + +- Added type azure.AsyncOpIncompleteError to be returned from a future's Result() method when the operation has not completed. + +## v9.7.1 + +### Bug Fixes + +- Use correct AAD and Graph endpoints for US Gov environment. + +## v9.7.0 + +### New Features + +- Added support for application/octet-stream MIME types. 
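Editor's note: the v10.4.0 entry above mentions a helper for parsing Azure resource IDs without naming it; assuming it refers to `azure.ParseResourceID` from `autorest/azure`, a minimal sketch with placeholder IDs:

```go
package main

import (
	"fmt"
	"log"

	"github.com/Azure/go-autorest/autorest/azure"
)

func main() {
	id := "/subscriptions/00000000-0000-0000-0000-000000000000" +
		"/resourceGroups/myGroup/providers/Microsoft.Storage/storageAccounts/myAccount"

	// ParseResourceID splits an ARM resource ID into its components.
	r, err := azure.ParseResourceID(id)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(r.SubscriptionID, r.ResourceGroup, r.Provider, r.ResourceName)
}
```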
+
+## v9.6.1
+
+### Bug Fixes
+
+- Ensure the Authorization header is added to the request when polling for registration status.
+
+## v9.6.0
+
+### New Features
+
+- Added support for acquiring tokens via MSI with a user assigned identity.
+
+## v9.5.3
+
+### Bug Fixes
+
+- Don't remove encoding of existing URL query parameters when calling autorest.WithQueryParameters.
+- Set the correct Content-Type when using autorest.WithFormData.
+
+## v9.5.2
+
+### Bug Fixes
+
+- Check for a nil \*http.Response before dereferencing it.
+
+## v9.5.1
+
+### Bug Fixes
+
+- Don't count http.StatusTooManyRequests (429) against the retry cap.
+- Use retry logic when SkipResourceProviderRegistration is set to true.
+
+## v9.5.0
+
+### New Features
+
+- Added support for username + password, API key, authorization code, and cognitive services authentication.
+- Added field SkipResourceProviderRegistration to clients to provide a way to skip auto-registration of RPs.
+- Added utility function AsStringSlice() to convert its parameters to a string slice.
+
+### Bug Fixes
+
+- When checking for authentication failures, look at the error type rather than the status code, as the latter can vary.
+
+## v9.4.2
+
+### Bug Fixes
+
+- Validate parameters when creating credentials.
+- Don't retry requests if the returned status is a 401 (http.StatusUnauthorized) as it will never succeed.
+
+## v9.4.1
+
+### Bug Fixes
+
+- Updated AccessTokensPath() to read the access tokens path from AZURE_ACCESS_TOKEN_FILE. If this
+  environment variable is not set, it falls back to the default path set by Azure CLI.
+- Use case-insensitive string comparison for polling states.
+
+## v9.4.0
+
+### New Features
+
+- Added WaitForCompletion() to Future as a default polling implementation.
+
+### Bug Fixes
+
+- Method Future.Done() shouldn't update polling status for unexpected HTTP status codes.
+
+## v9.3.1
+
+### Bug Fixes
+
+- DoRetryForStatusCodes will retry if sender.Do returns a non-nil error.
+
+## v9.3.0
+
+### New Features
+
+- Added PollingMethod() to Future so callers know what kind of polling mechanism is used.
+- Added azure.ChangeToGet() which transforms an http.Request into a GET (to be used with LROs).
+
+## v9.2.0
+
+### New Features
+
+- Added support for custom Azure Stack endpoints.
+- Added type azure.Future used to track the status of long-running operations.
+
+### Bug Fixes
+
+- Preserve the original error in DoRetryWithRegistration when registration fails.
+
+## v9.1.1
+
+- Fixes a bug regarding the cookie jar on `autorest.Client.Sender`.
+
+## v9.1.0
+
+### New Features
+
+- In cases where there is a non-empty error from the service, attempt to unmarshal it instead of uniformly calling it an "Unknown" error.
+- Support for loading Azure CLI authentication files.
+- Automatically register your subscription with the Azure Resource Provider if it hasn't been registered previously.
+
+### Bug Fixes
+
+- RetriableRequest can now tolerate a ReadSeekable body being read but not reset.
+- Added missing Apache headers.
+
+## v9.0.0
+
+> **IMPORTANT:** This release was initially labeled incorrectly as `v8.4.0`. From the time it was released, it should have been marked `v9.0.0` because it contains breaking changes to the MSI packages. We apologize for any inconvenience this causes.
+
+Adding MSI endpoint support and CLI token rehydration.
+
+## v8.3.1
+
+Pick up bug fix in adal for MSI support.
+
+## v8.3.0
+
+Updates to Error string formats for clarity. Also, adding a copy of the http.Response to errors for an improved debugging experience.
+
+## v8.2.0
+
+### New Features
+
+- Add support for bearer authentication callbacks
+- Support 429 response codes that include a "Retry-After" header
+- Support validation constraint "Pattern" for map keys
+
+### Bug Fixes
+
+- Make RetriableRequest work with multiple versions of Go
+
+## v8.1.1
+
+Updates the RetriableRequest to take advantage of GetBody() added in Go 1.8.
+
+## v8.1.0
+
+Adds RetriableRequest type for more efficient handling of retrying HTTP requests.
+
+## v8.0.0
+
+ADAL refactored into its own package.
+Support for UNIX time.
+
+## v7.3.1
+
+- Version testing now removed from production bits that are shipped with the library.
+
+## v7.3.0
+
+- Exposing new `RespondDecorator`, `ByDiscardingBody`. This allows operations
+  to acknowledge that they do not need either the entire response body or a
+  trailing portion of it. In doing so, Go's http library can reuse HTTP
+  connections more readily.
+- Adding `PrepareDecorator` to target custom BaseURLs.
+- Adding ACR suffix to public cloud environment.
+- Updating Glide dependencies.
+
+## v7.2.5
+
+- Fixed the Active Directory endpoint for the China cloud.
+- Removes UTF-8 BOM if present in response payload.
+- Added telemetry.
+
+## v7.2.3
+
+- Fixing bug in calls to `DelayForBackoff` that caused doubling of delay
+  duration.
+
+## v7.2.2
+
+- autorest/azure: added ASM and ARM VM DNS suffixes.
+
+## v7.2.1
+
+- Fixed parsing of UTC times that are not RFC3339 conformant.
+
+## v7.2.0
+
+- autorest/validation: Reformat validation error for better error message.
+
+## v7.1.0
+
+- preparer: Added support for multipart formdata - WithMultiPartFormdata()
+- preparer: Added support for sending a file in the request body - WithFile
+- client: Added RetryDuration parameter.
+- autorest/validation: new package for validation code for the Azure Go SDK.
+
+## v7.0.7
+
+- Add trailing / to endpoint
+- azure: add EnvironmentFromName
+
+## v7.0.6
+
+- Add retry logic for 408, 500, 502, 503 and 504 status codes.
+- Change URL path and query encoding logic.
+- Fix DelayForBackoff for proper exponential delay.
+- Add CookieJar in Client.
+
+## v7.0.5
+
+- Add check to start polling only when status is in [200,201,202].
+- Refactoring for unchecked errors.
+- azure/persist changes.
+- Fix 'file in use' issue in renewing token in deviceflow.
+- Store header RetryAfter for subsequent requests in polling.
+- Add attribute details in service error.
+
+## v7.0.4
+
+- Better error messages for long running operation failures
+
+## v7.0.3
+
+- Corrected DoPollForAsynchronous to properly handle the initial response
+
+## v7.0.2
+
+- Corrected DoPollForAsynchronous to continue using the polling method first discovered
+
+## v7.0.1
+
+- Fixed empty JSON input error in ByUnmarshallingJSON
+- Fixed polling support for GET calls
+- Changed format name from TimeRfc1123 to TimeRFC1123
+
+## v7.0.0
+
+- Added ByCopying responder with supporting TeeReadCloser
+- Rewrote Azure asynchronous handling
+- Reverted to only unmarshalling JSON
+- Corrected handling of RFC3339 time strings and added support for Rfc1123 time format
+
+The `json.Decoder` does not catch bad data as thoroughly as `json.Unmarshal`. Since
+`encoding/json` successfully deserializes all core types, and extended types normally provide
+their own custom JSON serialization handlers, the code has been reverted to using
+`json.Unmarshal`. The original change to use `json.Decoder` was made to reduce duplicated
+code; reverting loses no functionality and gains accuracy.
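+
+For example, given a payload with trailing garbage (a minimal, standard-library-only sketch of the difference):
+
+```go
+package main
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+)
+
+func main() {
+	// a valid JSON object followed by trailing garbage
+	data := []byte(`{"name":"widget"} trailing-garbage`)
+	var v struct{ Name string }
+
+	// json.Unmarshal validates the entire input and reports the bad trailing data
+	fmt.Println(json.Unmarshal(data, &v)) // invalid character 't' after top-level value
+
+	// json.Decoder stops after the first value and silently ignores what follows
+	dec := json.NewDecoder(bytes.NewReader(data))
+	fmt.Println(dec.Decode(&v)) // <nil>
+}
+```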
+
+Additionally, Azure services indicate requests to be polled by multiple means. The existing code
+only checked for one of those (that is, the presence of the `Azure-AsyncOperation` header).
+The new code correctly covers all cases and aligns with the other Azure SDKs.
+
+## v6.1.0
+
+- Introduced `date.ByUnmarshallingJSONDate` and `date.ByUnmarshallingJSONTime` to enable JSON encoded values.
+
+## v6.0.0
+
+- Completely reworked the handling of polled and asynchronous requests
+- Removed unnecessary routines
+- Reworked `mocks.Sender` to replay a series of `http.Response` objects
+- Added `PrepareDecorators` for primitive types (e.g., bool, int32)
+
+Handling polled and asynchronous requests is no longer part of `Client#Send`. Instead, new
+`SendDecorators` implement different styles of polled behavior. See `autorest.DoPollForStatusCodes`
+and `azure.DoPollForAsynchronous` for examples.
+
+## v5.0.0
+
+- Added new RespondDecorators unmarshalling primitive types
+- Corrected application of inspection and authorization PrependDecorators
+
+## v4.0.0
+
+- Added support for Azure long-running operations.
+- Added cancelation support to all decorators and functions that may delay.
+- Breaking: `DelayForBackoff` now accepts a channel, which may be nil.
+
+## v3.1.0
+
+- Add support for OAuth Device Flow authorization.
+- Add support for ServicePrincipalTokens that are backed by an existing token, rather than other secret material.
+- Add helpers for persisting and restoring Tokens.
+- Increased code coverage in the github.com/Azure/autorest/azure package
+
+## v3.0.0
+
+- Breaking: `NewErrorWithError` no longer takes `statusCode int`.
+- Breaking: `NewErrorWithStatusCode` is replaced with `NewErrorWithResponse`.
+- Breaking: `Client#Send()` no longer takes `codes ...int` argument.
+- Add: XML unmarshaling support with `ByUnmarshallingXML()`
+- Stopped vending dependencies locally and switched to [Glide](https://github.com/Masterminds/glide).
+  Applications using this library should either use Glide or vendor dependencies locally some other way.
+- Add: `azure.WithErrorUnlessStatusCode()` decorator to handle Azure errors.
+- Fix: use `net/http.DefaultClient` as base client.
+- Fix: Missing inspection for polling responses added.
+- Add: CopyAndDecode helpers.
+- Improved `./autorest/to` with `[]string` helpers.
+- Removed golint suppressions in .travis.yml.
+
+## v2.1.0
+
+- Added `StatusCode` to `Error` for more easily obtaining the HTTP Response StatusCode (if any)
+
+## v2.0.0
+
+- Changed `to.StringMapPtr` method signature to return a pointer
+- Changed `ServicePrincipalCertificateSecret` and `NewServicePrincipalTokenFromCertificate` to support generic certificates and private keys
+
+## v1.0.0
+
+- Added Logging inspectors to trace http.Request / Response
+- Added support for User-Agent header
+- Changed WithHeader PrepareDecorator to use set vs.
add +- Added JSON to error when unmarshalling fails +- Added Client#Send method +- Corrected case of "Azure" in package paths +- Added "to" helpers, Azure helpers, and improved ease-of-use +- Corrected golint issues + +## v1.0.1 + +- Added CHANGELOG.md + +## v1.1.0 + +- Added mechanism to retrieve a ServicePrincipalToken using a certificate-signed JWT +- Added an example of creating a certificate-based ServicePrincipal and retrieving an OAuth token using the certificate + +## v1.1.1 + +- Introduce godeps and vendor dependencies introduced in v1.1.1 diff --git a/vendor/github.com/Azure/go-autorest/GNUmakefile b/vendor/github.com/Azure/go-autorest/GNUmakefile new file mode 100644 index 000000000000..a434e73ac49d --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/GNUmakefile @@ -0,0 +1,23 @@ +DIR?=./autorest/ + +default: build + +build: fmt + go install $(DIR) + +test: + go test $(DIR) || exit 1 + +vet: + @echo "go vet ." + @go vet $(DIR)... ; if [ $$? -eq 1 ]; then \ + echo ""; \ + echo "Vet found suspicious constructs. Please check the reported constructs"; \ + echo "and fix them if necessary before submitting the code for review."; \ + exit 1; \ + fi + +fmt: + gofmt -w $(DIR) + +.PHONY: build test vet fmt diff --git a/vendor/github.com/Azure/go-autorest/Gopkg.lock b/vendor/github.com/Azure/go-autorest/Gopkg.lock new file mode 100644 index 000000000000..dc6e3e633e6d --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/Gopkg.lock @@ -0,0 +1,324 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + digest = "1:892e39e5c083d0943f1e80ab8351690f183c6a5ab24e1d280adcad424c26255e" + name = "contrib.go.opencensus.io/exporter/ocagent" + packages = ["."] + pruneopts = "UT" + revision = "a8a6f458bbc1d5042322ad1f9b65eeb0b69be9ea" + version = "v0.6.0" + +[[projects]] + digest = "1:8f5acd4d4462b5136af644d25101f0968a7a94ee90fcb2059cec5b7cc42e0b20" + name = "github.com/census-instrumentation/opencensus-proto" + packages = [ + "gen-go/agent/common/v1", + "gen-go/agent/metrics/v1", + "gen-go/agent/trace/v1", + "gen-go/metrics/v1", + "gen-go/resource/v1", + "gen-go/trace/v1", + ] + pruneopts = "UT" + revision = "d89fa54de508111353cb0b06403c00569be780d8" + version = "v0.2.1" + +[[projects]] + digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" + name = "github.com/davecgh/go-spew" + packages = ["spew"] + pruneopts = "UT" + revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" + version = "v1.1.1" + +[[projects]] + digest = "1:76dc72490af7174349349838f2fe118996381b31ea83243812a97e5a0fd5ed55" + name = "github.com/dgrijalva/jwt-go" + packages = ["."] + pruneopts = "UT" + revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e" + version = "v3.2.0" + +[[projects]] + digest = "1:cf0d2e435fd4ce45b789e93ef24b5f08e86be0e9807a16beb3694e2d8c9af965" + name = "github.com/dimchansky/utfbom" + packages = ["."] + pruneopts = "UT" + revision = "d2133a1ce379ef6fa992b0514a77146c60db9d1c" + version = "v1.1.0" + +[[projects]] + branch = "master" + digest = "1:b7cb6054d3dff43b38ad2e92492f220f57ae6087ee797dca298139776749ace8" + name = "github.com/golang/groupcache" + packages = ["lru"] + pruneopts = "UT" + revision = "611e8accdfc92c4187d399e95ce826046d4c8d73" + +[[projects]] + digest = "1:e3839df32927e8d3403cd5aa7253d966e8ff80fc8f10e2e35d146461cd83fcfa" + name = "github.com/golang/protobuf" + packages = [ + "descriptor", + "jsonpb", + "proto", + "protoc-gen-go/descriptor", + "ptypes", + "ptypes/any", + "ptypes/duration", + "ptypes/struct", + 
"ptypes/timestamp", + "ptypes/wrappers", + ] + pruneopts = "UT" + revision = "6c65a5562fc06764971b7c5d05c76c75e84bdbf7" + version = "v1.3.2" + +[[projects]] + digest = "1:c560cd79300fac84f124b96225181a637a70b60155919a3c36db50b7cca6b806" + name = "github.com/grpc-ecosystem/grpc-gateway" + packages = [ + "internal", + "runtime", + "utilities", + ] + pruneopts = "UT" + revision = "f7120437bb4f6c71f7f5076ad65a45310de2c009" + version = "v1.12.1" + +[[projects]] + digest = "1:5d231480e1c64a726869bc4142d270184c419749d34f167646baa21008eb0a79" + name = "github.com/mitchellh/go-homedir" + packages = ["."] + pruneopts = "UT" + revision = "af06845cf3004701891bf4fdb884bfe4920b3727" + version = "v1.1.0" + +[[projects]] + digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe" + name = "github.com/pmezard/go-difflib" + packages = ["difflib"] + pruneopts = "UT" + revision = "792786c7400a136282c1664665ae0a8db921c6c2" + version = "v1.0.0" + +[[projects]] + digest = "1:99d32780e5238c2621fff621123997c3e3cca96db8be13179013aea77dfab551" + name = "github.com/stretchr/testify" + packages = [ + "assert", + "require", + ] + pruneopts = "UT" + revision = "221dbe5ed46703ee255b1da0dec05086f5035f62" + version = "v1.4.0" + +[[projects]] + digest = "1:7c5e00383399fe13de0b4b65c9fdde16275407ce8ac02d867eafeaa916edcc71" + name = "go.opencensus.io" + packages = [ + ".", + "internal", + "internal/tagencoding", + "metric/metricdata", + "metric/metricproducer", + "plugin/ocgrpc", + "plugin/ochttp", + "plugin/ochttp/propagation/b3", + "plugin/ochttp/propagation/tracecontext", + "resource", + "stats", + "stats/internal", + "stats/view", + "tag", + "trace", + "trace/internal", + "trace/propagation", + "trace/tracestate", + ] + pruneopts = "UT" + revision = "aad2c527c5defcf89b5afab7f37274304195a6b2" + version = "v0.22.2" + +[[projects]] + branch = "master" + digest = "1:f604f5e2ee721b6757d962dfe7bab4f28aae50c456e39cfb2f3819762a44a6ae" + name = "golang.org/x/crypto" + packages = [ + "pkcs12", + "pkcs12/internal/rc2", + ] + pruneopts = "UT" + revision = "e9b2fee46413994441b28dfca259d911d963dfed" + +[[projects]] + branch = "master" + digest = "1:334b27eac455cb6567ea28cd424230b07b1a64334a2f861a8075ac26ce10af43" + name = "golang.org/x/lint" + packages = [ + ".", + "golint", + ] + pruneopts = "UT" + revision = "fdd1cda4f05fd1fd86124f0ef9ce31a0b72c8448" + +[[projects]] + branch = "master" + digest = "1:257a75d024975428ab9192bfc334c3490882f8cb21322ea5784ca8eca000a910" + name = "golang.org/x/net" + packages = [ + "http/httpguts", + "http2", + "http2/hpack", + "idna", + "internal/timeseries", + "trace", + ] + pruneopts = "UT" + revision = "1ddd1de85cb0337b623b740a609d35817d516a8d" + +[[projects]] + branch = "master" + digest = "1:382bb5a7fb4034db3b6a2d19e5a4a6bcf52f4750530603c01ca18a172fa3089b" + name = "golang.org/x/sync" + packages = ["semaphore"] + pruneopts = "UT" + revision = "cd5d95a43a6e21273425c7ae415d3df9ea832eeb" + +[[projects]] + branch = "master" + digest = "1:4da420ceda5f68e8d748aa2169d0ed44ffadb1bbd6537cf778a49563104189b8" + name = "golang.org/x/sys" + packages = ["unix"] + pruneopts = "UT" + revision = "ce4227a45e2eb77e5c847278dcc6a626742e2945" + +[[projects]] + digest = "1:8d8faad6b12a3a4c819a3f9618cb6ee1fa1cfc33253abeeea8b55336721e3405" + name = "golang.org/x/text" + packages = [ + "collate", + "collate/build", + "internal/colltab", + "internal/gen", + "internal/language", + "internal/language/compact", + "internal/tag", + "internal/triegen", + "internal/ucd", + "language", + "secure/bidirule", + 
"transform", + "unicode/bidi", + "unicode/cldr", + "unicode/norm", + "unicode/rangetable", + ] + pruneopts = "UT" + revision = "342b2e1fbaa52c93f31447ad2c6abc048c63e475" + version = "v0.3.2" + +[[projects]] + branch = "master" + digest = "1:4eb5ea8395fb60212dd58b92c9db80bab59d5e99c7435f9a6a0a528c373b60e7" + name = "golang.org/x/tools" + packages = [ + "go/ast/astutil", + "go/gcexportdata", + "go/internal/gcimporter", + "go/types/typeutil", + ] + pruneopts = "UT" + revision = "259af5ff87bdcd4abf2ecda8edc3f13f04f26a42" + +[[projects]] + digest = "1:964bb30febc27fabfbec4759fa530c6ec35e77a7c85fed90b9317ea39a054877" + name = "google.golang.org/api" + packages = ["support/bundler"] + pruneopts = "UT" + revision = "8a410c21381766a810817fd6200fce8838ecb277" + version = "v0.14.0" + +[[projects]] + branch = "master" + digest = "1:a8d5c2c6e746b3485e36908ab2a9e3d77b86b81f8156d88403c7d2b462431dfd" + name = "google.golang.org/genproto" + packages = [ + "googleapis/api/httpbody", + "googleapis/rpc/status", + "protobuf/field_mask", + ] + pruneopts = "UT" + revision = "51378566eb590fa106d1025ea12835a4416dda84" + +[[projects]] + digest = "1:b59ce3ddb11daeeccccc9cb3183b58ebf8e9a779f1c853308cd91612e817a301" + name = "google.golang.org/grpc" + packages = [ + ".", + "backoff", + "balancer", + "balancer/base", + "balancer/roundrobin", + "binarylog/grpc_binarylog_v1", + "codes", + "connectivity", + "credentials", + "credentials/internal", + "encoding", + "encoding/proto", + "grpclog", + "internal", + "internal/backoff", + "internal/balancerload", + "internal/binarylog", + "internal/buffer", + "internal/channelz", + "internal/envconfig", + "internal/grpcrand", + "internal/grpcsync", + "internal/resolver/dns", + "internal/resolver/passthrough", + "internal/syscall", + "internal/transport", + "keepalive", + "metadata", + "naming", + "peer", + "resolver", + "serviceconfig", + "stats", + "status", + "tap", + ] + pruneopts = "UT" + revision = "1a3960e4bd028ac0cec0a2afd27d7d8e67c11514" + version = "v1.25.1" + +[[projects]] + digest = "1:b75b3deb2bce8bc079e16bb2aecfe01eb80098f5650f9e93e5643ca8b7b73737" + name = "gopkg.in/yaml.v2" + packages = ["."] + pruneopts = "UT" + revision = "1f64d6156d11335c3f22d9330b0ad14fc1e789ce" + version = "v2.2.7" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + input-imports = [ + "contrib.go.opencensus.io/exporter/ocagent", + "github.com/dgrijalva/jwt-go", + "github.com/dimchansky/utfbom", + "github.com/mitchellh/go-homedir", + "github.com/stretchr/testify/require", + "go.opencensus.io/plugin/ochttp", + "go.opencensus.io/plugin/ochttp/propagation/tracecontext", + "go.opencensus.io/stats/view", + "go.opencensus.io/trace", + "golang.org/x/crypto/pkcs12", + "golang.org/x/lint/golint", + ] + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/Azure/go-autorest/Gopkg.toml b/vendor/github.com/Azure/go-autorest/Gopkg.toml new file mode 100644 index 000000000000..1fc28659696c --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/Gopkg.toml @@ -0,0 +1,59 @@ +# Gopkg.toml example +# +# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html +# for detailed Gopkg.toml documentation. 
+# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" +# +# [prune] +# non-go = false +# go-tests = true +# unused-packages = true + +required = ["golang.org/x/lint/golint"] + +[prune] + go-tests = true + unused-packages = true + +[[constraint]] + name = "contrib.go.opencensus.io/exporter/ocagent" + version = "0.6.0" + +[[constraint]] + name = "github.com/dgrijalva/jwt-go" + version = "3.2.0" + +[[constraint]] + name = "github.com/dimchansky/utfbom" + version = "1.1.0" + +[[constraint]] + name = "github.com/mitchellh/go-homedir" + version = "1.1.0" + +[[constraint]] + name = "github.com/stretchr/testify" + version = "1.3.0" + +[[constraint]] + name = "go.opencensus.io" + version = "0.22.0" + +[[constraint]] + branch = "master" + name = "golang.org/x/crypto" diff --git a/vendor/github.com/Azure/go-autorest/LICENSE b/vendor/github.com/Azure/go-autorest/LICENSE new file mode 100644 index 000000000000..b9d6a27ea92e --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/README.md b/vendor/github.com/Azure/go-autorest/README.md new file mode 100644 index 000000000000..de1e19a44df9 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/README.md @@ -0,0 +1,165 @@ +# go-autorest + +[![GoDoc](https://godoc.org/github.com/Azure/go-autorest/autorest?status.png)](https://godoc.org/github.com/Azure/go-autorest/autorest) +[![Build Status](https://dev.azure.com/azure-sdk/public/_apis/build/status/go/Azure.go-autorest?branchName=master)](https://dev.azure.com/azure-sdk/public/_build/latest?definitionId=625&branchName=master) +[![Go Report Card](https://goreportcard.com/badge/Azure/go-autorest)](https://goreportcard.com/report/Azure/go-autorest) + +Package go-autorest provides an HTTP request client for use with [Autorest](https://github.com/Azure/autorest.go)-generated API client packages. + +An authentication client tested with Azure Active Directory (AAD) is also +provided in this repo in the package +`github.com/Azure/go-autorest/autorest/adal`. Despite its name, this package +is maintained only as part of the Azure Go SDK and is not related to other +"ADAL" libraries in [github.com/AzureAD](https://github.com/AzureAD). + +## Overview + +Package go-autorest implements an HTTP request pipeline suitable for use across +multiple goroutines and provides the shared routines used by packages generated +by [Autorest](https://github.com/Azure/autorest.go). + +The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending, +and Responding. 
A typical pattern is:
+
+```go
+  req, err := Prepare(&http.Request{},
+    token.WithAuthorization())
+
+  resp, err := Send(req,
+    WithLogging(logger),
+    DoErrorIfStatusCode(http.StatusInternalServerError),
+    DoCloseIfError(),
+    DoRetryForAttempts(5, time.Second))
+
+  err = Respond(resp,
+    ByDiscardingBody(),
+    ByClosing())
+```
+
+Each phase relies on decorators to modify and/or manage processing. Decorators may first modify
+and then pass the data along, pass the data first and then modify the result, or wrap themselves
+around passing the data (such as a logger might do). Decorators run in the order provided. For
+example, the following:
+
+```go
+  req, err := Prepare(&http.Request{},
+    WithBaseURL("https://microsoft.com/"),
+    WithPath("a"),
+    WithPath("b"),
+    WithPath("c"))
+```
+
+will set the URL to:
+
+```
+  https://microsoft.com/a/b/c
+```
+
+Preparers and Responders may be shared and re-used (assuming the underlying decorators support
+sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders
+shared among multiple goroutines, and a single Sender shared among multiple sending goroutines,
+all bound together by means of input/output channels.
+
+Decorators hold their passed state within a closure (such as the path components in the example
+above). Be careful to share Preparers and Responders only in a context where such held state
+applies. For example, it may not make sense to share a Preparer that applies a query string from a
+fixed set of values. Similarly, sharing a Responder that reads the response body into a passed
+struct (e.g., `ByUnmarshallingJSON`) is likely incorrect.
+
+Errors raised by autorest objects and methods will conform to the `autorest.Error` interface.
+
+See the included examples for more detail. For details on the suggested use of this package by
+generated clients, see the Client described below.
+
+## Helpers
+
+### Handling Swagger Dates
+
+The Swagger specification (https://swagger.io) that drives AutoRest
+(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The
+github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure correct
+parsing and formatting. A short usage sketch appears at the end of this README.
+
+### Handling Empty Values
+
+In JSON, missing values have different semantics than empty values. This is especially true for
+services using the HTTP PATCH verb. The JSON submitted with a PATCH request generally contains
+only those values to modify. Missing values are to be left unchanged. Developers, then, require a
+means to both specify an empty value and to leave the value out of the submitted JSON.
+
+The Go JSON package (`encoding/json`) supports the `omitempty` tag. When specified, it omits
+empty values from the rendered JSON. Since Go defines default values for all base types (such as ""
+for string and 0 for int) and provides no means to mark a value as actually empty, the JSON package
+treats default values as meaning empty, omitting them from the rendered JSON. This means that, using
+the Go base types encoded through the default JSON package, it is not possible to create JSON to
+clear a value at the server.
+
+The workaround within the Go community is to use pointers to base types in lieu of base types within
+structures that map to JSON. For example, instead of a value of type `string`, the workaround uses
+`*string`.
While this enables distinguishing empty values from those to be unchanged, creating
+pointers to a base type (notably constant, in-line values) requires additional variables. This, for
+example,
+
+```go
+  s := struct {
+    S *string
+  }{ S: &"foo" }
+```
+fails, while this
+
+```go
+  v := "foo"
+  s := struct {
+    S *string
+  }{ S: &v }
+```
+succeeds.
+
+To ease using pointers, the subpackage `to` contains helpers that convert to and from pointers for
+Go base types which have Swagger analogs. It also provides a helper that converts between
+`map[string]string` and `map[string]*string`, enabling the JSON to specify that the value
+associated with a key should be cleared. With the helpers, the previous example becomes
+
+```go
+  s := struct {
+    S *string
+  }{ S: to.StringPtr("foo") }
+```
+
+## Install
+
+```bash
+go get github.com/Azure/go-autorest/autorest
+go get github.com/Azure/go-autorest/autorest/azure
+go get github.com/Azure/go-autorest/autorest/date
+go get github.com/Azure/go-autorest/autorest/to
+```
+
+### Using with Go Modules
+
+In [v12.0.1](https://github.com/Azure/go-autorest/pull/386), this repository introduced the following modules.
+
+- autorest/adal
+- autorest/azure/auth
+- autorest/azure/cli
+- autorest/date
+- autorest/mocks
+- autorest/to
+- autorest/validation
+- autorest
+- logger
+- tracing
+
+Tagging cumulative SDK releases as a whole (e.g. `v12.3.0`) is still enabled to support consumers of this repo that have not yet migrated to modules.
+
+## License
+
+See LICENSE file.
+
+-----
+
+This project has adopted the [Microsoft Open Source Code of
+Conduct](https://opensource.microsoft.com/codeofconduct/). For more information
+see the [Code of Conduct
+FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact
+[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional
+questions or comments.
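+
+### Appendix: Swagger date sketch
+
+As referenced in "Handling Swagger Dates" above, a minimal sketch of the `date` package in use;
+the `widget` type and its fields are illustrative, not part of the library:
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"time"
+
+	"github.com/Azure/go-autorest/autorest/date"
+)
+
+// widget is a hypothetical model with one Swagger "date" and one "date-time" field.
+type widget struct {
+	Created  date.Date `json:"created"`  // serialized as 2006-01-02
+	Modified date.Time `json:"modified"` // serialized as RFC3339
+}
+
+func main() {
+	w := widget{
+		Created:  date.Date{Time: time.Date(2020, 12, 14, 0, 0, 0, 0, time.UTC)},
+		Modified: date.Time{Time: time.Date(2020, 12, 14, 10, 30, 0, 0, time.UTC)},
+	}
+	b, err := json.Marshal(w)
+	if err != nil {
+		fmt.Println(err)
+		return
+	}
+	fmt.Println(string(b)) // {"created":"2020-12-14","modified":"2020-12-14T10:30:00Z"}
+}
+```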
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go index 914f8af5e4ea..9daa4b58b881 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go @@ -222,6 +222,10 @@ func CheckForUserCompletionWithContext(ctx context.Context, sender Sender, code case "code_expired": return nil, ErrDeviceCodeExpired default: + // return a more meaningful error message if available + if token.ErrorDescription != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, *token.Error, *token.ErrorDescription) + } return nil, ErrDeviceGeneric } } diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod b/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod index a030eb42da8b..abcc27d4cc90 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod @@ -3,10 +3,10 @@ module github.com/Azure/go-autorest/autorest/adal go 1.12 require ( - github.com/Azure/go-autorest/autorest v0.9.0 - github.com/Azure/go-autorest/autorest/date v0.2.0 - github.com/Azure/go-autorest/autorest/mocks v0.3.0 - github.com/Azure/go-autorest/tracing v0.5.0 - github.com/dgrijalva/jwt-go v3.2.0+incompatible - golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 + github.com/Azure/go-autorest v14.2.0+incompatible + github.com/Azure/go-autorest/autorest/date v0.3.0 + github.com/Azure/go-autorest/autorest/mocks v0.4.1 + github.com/Azure/go-autorest/tracing v0.6.0 + github.com/form3tech-oss/jwt-go v3.2.2+incompatible + golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 ) diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum b/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum index e43cf6498d0d..9d55b0f59611 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum @@ -1,26 +1,17 @@ -github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs= -github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM= -github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= -github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc= -github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= -github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= 
-github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= -github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go index 28a4bfc4c437..7551b79235de 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go @@ -16,9 +16,9 @@ package adal // See the License for the specific language governing permissions and // limitations under the License. -// This file, and the github.com/Azure/go-autorest/autorest import, won't actually become part of +// This file, and the github.com/Azure/go-autorest import, won't actually become part of // the resultant binary. // Necessary for safely adding multi-module repo. 
 // See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
-import _ "github.com/Azure/go-autorest/autorest"
+import _ "github.com/Azure/go-autorest"
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go
index 9e15f2751f27..2a974a39b3cd 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go
@@ -15,11 +15,24 @@ package adal
 // limitations under the License.
 
 import (
+	"crypto/rsa"
+	"crypto/x509"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io/ioutil"
 	"os"
 	"path/filepath"
+
+	"golang.org/x/crypto/pkcs12"
+)
+
+var (
+	// ErrMissingCertificate is returned when no local certificate is found in the provided PFX data.
+	ErrMissingCertificate = errors.New("adal: certificate missing")
+
+	// ErrMissingPrivateKey is returned when no private key is found in the provided PFX data.
+	ErrMissingPrivateKey = errors.New("adal: private key missing")
 )
 
 // LoadToken restores a Token object from a file located at 'path'.
@@ -71,3 +84,52 @@ func SaveToken(path string, mode os.FileMode, token Token) error {
 	}
 	return nil
 }
+
+// DecodePfxCertificateData extracts the x509 certificate and RSA private key from the provided PFX data.
+// The PFX data must contain a private key along with a certificate whose public key matches that of the
+// private key, or an error is returned.
+// If the private key is not password protected, pass the empty string for password.
+func DecodePfxCertificateData(pfxData []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) {
+	blocks, err := pkcs12.ToPEM(pfxData, password)
+	if err != nil {
+		return nil, nil, err
+	}
+	// first extract the private key
+	var priv *rsa.PrivateKey
+	for _, block := range blocks {
+		if block.Type == "PRIVATE KEY" {
+			priv, err = x509.ParsePKCS1PrivateKey(block.Bytes)
+			if err != nil {
+				return nil, nil, err
+			}
+			break
+		}
+	}
+	if priv == nil {
+		return nil, nil, ErrMissingPrivateKey
+	}
+	// now find the certificate whose public key matches that of our private key
+	var cert *x509.Certificate
+	for _, block := range blocks {
+		if block.Type == "CERTIFICATE" {
+			pcert, err := x509.ParseCertificate(block.Bytes)
+			if err != nil {
+				return nil, nil, err
+			}
+			certKey, ok := pcert.PublicKey.(*rsa.PublicKey)
+			if !ok {
+				// keep looking
+				continue
+			}
+			if priv.E == certKey.E && priv.N.Cmp(certKey.N) == 0 {
+				// found a match
+				cert = pcert
+				break
+			}
+		}
+	}
+	if cert == nil {
+		return nil, nil, ErrMissingCertificate
+	}
+	return cert, priv, nil
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go
index b65b2c8b206a..b83f16a49a1f 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go
@@ -35,7 +35,7 @@ import (
 	"time"
 
 	"github.com/Azure/go-autorest/autorest/date"
-	"github.com/dgrijalva/jwt-go"
+	"github.com/form3tech-oss/jwt-go"
 )
 
 const (
@@ -62,6 +62,9 @@ const (
 	// msiEndpoint is the well known endpoint for getting MSI authentications tokens
 	msiEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token"
 
+	// the API version to use for the MSI endpoint
+	msiAPIVersion = "2018-02-01"
+
 	// the default number of attempts to refresh an MSI authentication token
 	defaultMaxMSIRefreshAttempts = 5
 
@@ -70,6 +73,9 @@ const (
 	// asMSISecretEnv is the environment variable used to store the request secret on App Service and Functions
 	asMSISecretEnv = "MSI_SECRET"
+
+	// the API version to use for the App Service MSI endpoint
+	appServiceAPIVersion = "2017-09-01"
 )
 
 // OAuthTokenProvider is an interface which should be implemented by an access token retriever
@@ -354,6 +360,7 @@ type ServicePrincipalToken struct {
 	customRefreshFunc TokenRefresh
 	refreshCallbacks  []TokenRefreshCallback
 	// MaxMSIRefreshAttempts is the maximum number of attempts to refresh an MSI token.
+	// Setting this to a value less than 1 will use the default value.
 	MaxMSIRefreshAttempts int
 }
 
@@ -650,6 +657,8 @@ func GetMSIVMEndpoint() (string, error) {
 	return msiEndpoint, nil
 }
 
+// NOTE: this only indicates if the ASE environment credentials have been set,
+// which does not necessarily mean that the caller is authenticating via ASE!
 func isAppService() bool {
 	_, asMSIEndpointEnvExists := os.LookupEnv(asMSIEndpointEnv)
 	_, asMSISecretEnvExists := os.LookupEnv(asMSISecretEnv)
@@ -678,16 +687,22 @@ func GetMSIEndpoint() (string, error) {
 // NewServicePrincipalTokenFromMSI creates a ServicePrincipalToken via the MSI VM Extension.
 // It will use the system assigned identity when creating the token.
 func NewServicePrincipalTokenFromMSI(msiEndpoint, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
-	return newServicePrincipalTokenFromMSI(msiEndpoint, resource, nil, callbacks...)
+	return newServicePrincipalTokenFromMSI(msiEndpoint, resource, nil, nil, callbacks...)
 }
 
 // NewServicePrincipalTokenFromMSIWithUserAssignedID creates a ServicePrincipalToken via the MSI VM Extension.
-// It will use the specified user assigned identity when creating the token.
+// It will use the clientID of the specified user assigned identity when creating the token.
 func NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, resource string, userAssignedID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
-	return newServicePrincipalTokenFromMSI(msiEndpoint, resource, &userAssignedID, callbacks...)
+	return newServicePrincipalTokenFromMSI(msiEndpoint, resource, &userAssignedID, nil, callbacks...)
+}
+
+// NewServicePrincipalTokenFromMSIWithIdentityResourceID creates a ServicePrincipalToken via the MSI VM Extension.
+// It will use the Azure resource ID of the user assigned identity when creating the token.
+func NewServicePrincipalTokenFromMSIWithIdentityResourceID(msiEndpoint, resource string, identityResourceID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	return newServicePrincipalTokenFromMSI(msiEndpoint, resource, nil, &identityResourceID, callbacks...)
} -func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedID *string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { +func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedID *string, identityResourceID *string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { if err := validateStringParam(msiEndpoint, "msiEndpoint"); err != nil { return nil, err } @@ -699,6 +714,11 @@ func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedI return nil, err } } + if identityResourceID != nil { + if err := validateStringParam(*identityResourceID, "identityResourceID"); err != nil { + return nil, err + } + } // We set the oauth config token endpoint to be MSI's endpoint msiEndpointURL, err := url.Parse(msiEndpoint) if err != nil { @@ -709,13 +729,16 @@ func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedI v.Set("resource", resource) // App Service MSI currently only supports token API version 2017-09-01 if isAppService() { - v.Set("api-version", "2017-09-01") + v.Set("api-version", appServiceAPIVersion) } else { - v.Set("api-version", "2018-02-01") + v.Set("api-version", msiAPIVersion) } if userAssignedID != nil { v.Set("client_id", *userAssignedID) } + if identityResourceID != nil { + v.Set("mi_res_id", *identityResourceID) + } msiEndpointURL.RawQuery = v.Encode() spt := &ServicePrincipalToken{ @@ -771,8 +794,9 @@ func (spt *ServicePrincipalToken) EnsureFresh() error { // EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by // RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. func (spt *ServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error { - if spt.inner.AutoRefresh && spt.inner.Token.WillExpireIn(spt.inner.RefreshWithin) { - // take the write lock then check to see if the token was already refreshed + // must take the read lock when initially checking the token's expiration + if spt.inner.AutoRefresh && spt.Token().WillExpireIn(spt.inner.RefreshWithin) { + // take the write lock then check again to see if the token was already refreshed spt.refreshLock.Lock() defer spt.refreshLock.Unlock() if spt.inner.Token.WillExpireIn(spt.inner.RefreshWithin) { @@ -835,11 +859,28 @@ func (spt *ServicePrincipalToken) getGrantType() string { } func isIMDS(u url.URL) bool { - imds, err := url.Parse(msiEndpoint) + return isMSIEndpoint(u) == true || isASEEndpoint(u) == true +} + +func isMSIEndpoint(endpoint url.URL) bool { + msi, err := url.Parse(msiEndpoint) if err != nil { return false } - return (u.Host == imds.Host && u.Path == imds.Path) || isAppService() + return endpoint.Host == msi.Host && endpoint.Path == msi.Path +} + +func isASEEndpoint(endpoint url.URL) bool { + aseEndpoint, err := GetMSIAppServiceEndpoint() + if err != nil { + // app service environment isn't enabled + return false + } + ase, err := url.Parse(aseEndpoint) + if err != nil { + return false + } + return endpoint.Host == ase.Host && endpoint.Path == ase.Path } func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource string) error { @@ -858,7 +899,7 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource } req.Header.Add("User-Agent", UserAgent()) // Add header when runtime is on App Service or Functions - if isAppService() { + if isASEEndpoint(spt.inner.OauthConfig.TokenEndpoint) { asMSISecret, _ := os.LookupEnv(asMSISecretEnv) req.Header.Add("Secret", 
asMSISecret) } @@ -900,6 +941,14 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource } var resp *http.Response + if isMSIEndpoint(spt.inner.OauthConfig.TokenEndpoint) { + resp, err = getMSIEndpoint(ctx, spt.sender) + if err != nil { + // return a TokenRefreshError here so that we don't keep retrying + return newTokenRefreshError(fmt.Sprintf("the MSI endpoint is not available. Failed HTTP request to MSI endpoint: %v", err), nil) + } + resp.Body.Close() + } if isIMDS(spt.inner.OauthConfig.TokenEndpoint) { resp, err = retryForIMDS(spt.sender, req, spt.MaxMSIRefreshAttempts) } else { @@ -972,6 +1021,11 @@ func retryForIMDS(sender Sender, req *http.Request, maxAttempts int) (resp *http attempt := 0 delay := time.Duration(0) + // maxAttempts is user-specified, ensure that its value is greater than zero else no request will be made + if maxAttempts < 1 { + maxAttempts = defaultMaxMSIRefreshAttempts + } + for attempt < maxAttempts { if resp != nil && resp.Body != nil { io.Copy(ioutil.Discard, resp.Body) @@ -1133,3 +1187,12 @@ func NewMultiTenantServicePrincipalToken(multiTenantCfg MultiTenantOAuthConfig, } return &m, nil } + +// MSIAvailable returns true if the MSI endpoint is available for authentication. +func MSIAvailable(ctx context.Context, sender Sender) bool { + resp, err := getMSIEndpoint(ctx, sender) + if err == nil { + resp.Body.Close() + } + return err == nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token_1.13.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token_1.13.go new file mode 100644 index 000000000000..45e01a7eee8d --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token_1.13.go @@ -0,0 +1,36 @@ +// +build go1.13 + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package adal + +import ( + "context" + "net/http" + "time" +) + +func getMSIEndpoint(ctx context.Context, sender Sender) (*http.Response, error) { + // this cannot fail, the return sig is due to legacy reasons + msiEndpoint, _ := GetMSIVMEndpoint() + tempCtx, cancel := context.WithTimeout(ctx, 500*time.Millisecond) + defer cancel() + // http.NewRequestWithContext() was added in Go 1.13 + req, _ := http.NewRequestWithContext(tempCtx, http.MethodGet, msiEndpoint, nil) + q := req.URL.Query() + q.Add("api-version", msiAPIVersion) + req.URL.RawQuery = q.Encode() + return sender.Do(req) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token_legacy.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token_legacy.go new file mode 100644 index 000000000000..6f7ad8078c15 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token_legacy.go @@ -0,0 +1,36 @@ +// +build !go1.13 + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package adal + +import ( + "context" + "net/http" + "time" +) + +func getMSIEndpoint(ctx context.Context, sender Sender) (*http.Response, error) { + // this cannot fail, the return sig is due to legacy reasons + msiEndpoint, _ := GetMSIVMEndpoint() + tempCtx, cancel := context.WithTimeout(ctx, 500*time.Millisecond) + defer cancel() + req, _ := http.NewRequest(http.MethodGet, msiEndpoint, nil) + req = req.WithContext(tempCtx) + q := req.URL.Query() + q.Add("api-version", msiAPIVersion) + req.URL.RawQuery = q.Encode() + return sender.Do(req) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization.go b/vendor/github.com/Azure/go-autorest/autorest/authorization.go index f43e1a6ed5a1..15138b642f2e 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/authorization.go +++ b/vendor/github.com/Azure/go-autorest/autorest/authorization.go @@ -138,6 +138,11 @@ func (ba *BearerAuthorizer) WithAuthorization() PrepareDecorator { } } +// TokenProvider returns OAuthTokenProvider so that it can be used for authorization outside the REST. +func (ba *BearerAuthorizer) TokenProvider() adal.OAuthTokenProvider { + return ba.tokenProvider +} + // BearerAuthorizerCallbackFunc is the authentication callback signature. type BearerAuthorizerCallbackFunc func(tenantID, resource string) (*BearerAuthorizer, error) @@ -331,7 +336,7 @@ func (mt multiTenantSPTAuthorizer) WithAuthorization() PrepareDecorator { for i := range auxTokens { auxTokens[i] = fmt.Sprintf("Bearer %s", auxTokens[i]) } - return Prepare(r, WithHeader(headerAuxAuthorization, strings.Join(auxTokens, "; "))) + return Prepare(r, WithHeader(headerAuxAuthorization, strings.Join(auxTokens, ", "))) }) } } diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go index c5fc511f67c0..5326f1fd3b9b 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go @@ -167,7 +167,13 @@ func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Clien cancelCtx, cancel = context.WithTimeout(ctx, d) defer cancel() } - + // if the initial response has a Retry-After, sleep for the specified amount of time before starting to poll + if delay, ok := f.GetPollingDelay(); ok { + if delayElapsed := autorest.DelayForBackoff(delay, 0, cancelCtx.Done()); !delayElapsed { + err = cancelCtx.Err() + return + } + } done, err := f.DoneWithContext(ctx, client) for attempts := 0; !done; done, err = f.DoneWithContext(ctx, client) { if attempts >= client.RetryAttempts { diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go index 6c20b8179ab0..faff93275991 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go @@ -127,7 +127,7 @@ var ( KeyVaultDNSSuffix: "vault.usgovcloudapi.net", ServiceBusEndpointSuffix: "servicebus.usgovcloudapi.net", ServiceManagementVMDNSSuffix: "usgovcloudapp.net", - 
ResourceManagerVMDNSSuffix: "cloudapp.windowsazure.us", + ResourceManagerVMDNSSuffix: "cloudapp.usgovcloudapi.net", ContainerRegistryDNSSuffix: "azurecr.us", CosmosDBDNSSuffix: "documents.azure.us", TokenAudience: "https://management.usgovcloudapi.net/", @@ -160,7 +160,7 @@ var ( KeyVaultDNSSuffix: "vault.azure.cn", ServiceBusEndpointSuffix: "servicebus.chinacloudapi.cn", ServiceManagementVMDNSSuffix: "chinacloudapp.cn", - ResourceManagerVMDNSSuffix: "cloudapp.azure.cn", + ResourceManagerVMDNSSuffix: "cloudapp.chinacloudapi.cn", ContainerRegistryDNSSuffix: "azurecr.cn", CosmosDBDNSSuffix: "documents.azure.cn", TokenAudience: "https://management.chinacloudapi.cn/", @@ -242,3 +242,8 @@ func EnvironmentFromFile(location string) (unmarshaled Environment, err error) { return } + +// SetEnvironment updates the environment map with the specified values. +func SetEnvironment(name string, env Environment) { + environments[strings.ToUpper(name)] = env +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/go.mod b/vendor/github.com/Azure/go-autorest/autorest/date/go.mod index 3adc4804c3d2..f88ecc4022d6 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/date/go.mod +++ b/vendor/github.com/Azure/go-autorest/autorest/date/go.mod @@ -2,4 +2,4 @@ module github.com/Azure/go-autorest/autorest/date go 1.12 -require github.com/Azure/go-autorest/autorest v0.9.0 +require github.com/Azure/go-autorest v14.2.0+incompatible diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/go.sum b/vendor/github.com/Azure/go-autorest/autorest/date/go.sum index 9e2ee7a94844..1fc56a962ee4 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/date/go.sum +++ b/vendor/github.com/Azure/go-autorest/autorest/date/go.sum @@ -1,16 +1,2 @@ -github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs= -github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU= -github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= -github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod 
h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go index 55adf930f4ab..4e0543207171 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go +++ b/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go @@ -16,9 +16,9 @@ package date // See the License for the specific language governing permissions and // limitations under the License. -// This file, and the github.com/Azure/go-autorest/autorest import, won't actually become part of +// This file, and the github.com/Azure/go-autorest import, won't actually become part of // the resultant binary. // Necessary for safely adding multi-module repo. // See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository -import _ "github.com/Azure/go-autorest/autorest" +import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/go.mod b/vendor/github.com/Azure/go-autorest/autorest/go.mod index 499c56de48ae..b66c78da2ccf 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/go.mod +++ b/vendor/github.com/Azure/go-autorest/autorest/go.mod @@ -3,9 +3,10 @@ module github.com/Azure/go-autorest/autorest go 1.12 require ( - github.com/Azure/go-autorest/autorest/adal v0.8.2 - github.com/Azure/go-autorest/autorest/mocks v0.3.0 - github.com/Azure/go-autorest/logger v0.1.0 - github.com/Azure/go-autorest/tracing v0.5.0 - golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 + github.com/Azure/go-autorest v14.2.0+incompatible + github.com/Azure/go-autorest/autorest/adal v0.9.0 + github.com/Azure/go-autorest/autorest/mocks v0.4.0 + github.com/Azure/go-autorest/logger v0.2.0 + github.com/Azure/go-autorest/tracing v0.6.0 + golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 ) diff --git a/vendor/github.com/Azure/go-autorest/autorest/go.sum b/vendor/github.com/Azure/go-autorest/autorest/go.sum index 37398d1d48a8..96d2ad0fcd86 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/go.sum +++ b/vendor/github.com/Azure/go-autorest/autorest/go.sum @@ -1,28 +1,21 @@ -github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU= -github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/adal v0.8.2 h1:O1X4oexUxnZCaEUGsvMnr8ZGj8HI37tNezwY4npRqA0= -github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= -github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM= -github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= -github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= 
-github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc= -github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= -github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest/adal v0.9.0 h1:SigMbuFNuKgc1xcGhaeapbh+8fgsu+GxgDRFyg7f5lM= +github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= +github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.4.0 h1:z20OWOSG5aCye0HEkDp6TPmP17ZcfeMxPi6HnSALa8c= +github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/logger v0.2.0 h1:e4RVHVZKC5p6UANLJHkM4OfR1UKZPj8Wt8Pcx+3oqrE= +github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/Azure/go-autorest/autorest/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/go_mod_tidy_hack.go new file mode 100644 index 000000000000..da65e1041eb0 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. +// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/mocks/go.mod b/vendor/github.com/Azure/go-autorest/autorest/mocks/go.mod index f1dfdbeb52b9..c3f6b7e866de 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/mocks/go.mod +++ b/vendor/github.com/Azure/go-autorest/autorest/mocks/go.mod @@ -2,4 +2,4 @@ module github.com/Azure/go-autorest/autorest/mocks go 1.12 -require github.com/Azure/go-autorest/autorest v0.9.0 +require github.com/Azure/go-autorest v14.2.0+incompatible diff --git a/vendor/github.com/Azure/go-autorest/autorest/mocks/go.sum b/vendor/github.com/Azure/go-autorest/autorest/mocks/go.sum index 7d96b7ed87d5..1fc56a962ee4 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/mocks/go.sum +++ b/vendor/github.com/Azure/go-autorest/autorest/mocks/go.sum @@ -1,16 +1,2 @@ -github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs= -github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU= -github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= -github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= diff --git a/vendor/github.com/Azure/go-autorest/autorest/mocks/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/mocks/go_mod_tidy_hack.go index 
b828eed70ee8..85a4baf1009b 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/mocks/go_mod_tidy_hack.go +++ b/vendor/github.com/Azure/go-autorest/autorest/mocks/go_mod_tidy_hack.go @@ -16,9 +16,9 @@ package mocks // See the License for the specific language governing permissions and // limitations under the License. -// This file, and the github.com/Azure/go-autorest/autorest import, won't actually become part of +// This file, and the github.com/Azure/go-autorest import, won't actually become part of // the resultant binary. // Necessary for safely adding multi-module repo. // See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository -import _ "github.com/Azure/go-autorest/autorest" +import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/mocks/mocks.go b/vendor/github.com/Azure/go-autorest/autorest/mocks/mocks.go index a00a27dd82b1..18847ba0f55d 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/mocks/mocks.go +++ b/vendor/github.com/Azure/go-autorest/autorest/mocks/mocks.go @@ -131,7 +131,13 @@ func (c *Sender) Do(r *http.Request) (resp *http.Response, err error) { } else { err = c.responses[0].e } - time.Sleep(c.responses[0].d) + select { + case <-time.After(c.responses[0].d): + // do nothing + case <-r.Context().Done(): + err = r.Context().Err() + return + } c.repeatResponse[0]-- if c.repeatResponse[0] == 0 { c.responses = c.responses[1:] diff --git a/vendor/github.com/Azure/go-autorest/autorest/sender.go b/vendor/github.com/Azure/go-autorest/autorest/sender.go index a07670b8cea6..704f3e55e084 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/sender.go +++ b/vendor/github.com/Azure/go-autorest/autorest/sender.go @@ -257,6 +257,12 @@ func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator { } } +// Count429AsRetry indicates that a 429 response should be included as a retry attempt. +var Count429AsRetry = true + +// Max429Delay is the maximum duration to wait between retries on a 429 if no Retry-After header was received. +var Max429Delay time.Duration + // DoRetryForStatusCodes returns a SendDecorator that retries for specified statusCodes for up to the specified // number of attempts, exponentially backing off between requests using the supplied backoff // time.Duration (which may be zero). Retrying may be canceled by cancelling the context on the http.Request. @@ -264,7 +270,7 @@ func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator { func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) SendDecorator { return func(s Sender) Sender { return SenderFunc(func(r *http.Request) (*http.Response, error) { - return doRetryForStatusCodesImpl(s, r, false, attempts, backoff, 0, codes...) + return doRetryForStatusCodesImpl(s, r, Count429AsRetry, attempts, backoff, 0, codes...) }) } } @@ -276,7 +282,7 @@ func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) Se func DoRetryForStatusCodesWithCap(attempts int, backoff, cap time.Duration, codes ...int) SendDecorator { return func(s Sender) Sender { return SenderFunc(func(r *http.Request) (*http.Response, error) { - return doRetryForStatusCodesImpl(s, r, true, attempts, backoff, cap, codes...) + return doRetryForStatusCodesImpl(s, r, Count429AsRetry, attempts, backoff, cap, codes...) 
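The retry decorators above now consult the package-level Count429AsRetry and Max429Delay variables instead of hard-coded behavior. A sketch of tuning them once at process start; the chosen values are illustrative assumptions:

```go
package main

import (
	"time"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	// Do not count 429 responses against the retry-attempt budget...
	autorest.Count429AsRetry = false
	// ...but cap the wait between 429 retries at two minutes when the
	// service sends no Retry-After header (restoring the old ceiling).
	autorest.Max429Delay = 2 * time.Minute
}
```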
}) } } @@ -297,11 +303,10 @@ func doRetryForStatusCodesImpl(s Sender, r *http.Request, count429 bool, attempt return resp, err } delayed := DelayWithRetryAfter(resp, r.Context().Done()) - // enforce a 2 minute cap between requests when 429 status codes are - // not going to be counted as an attempt and when the cap is 0. - // this should only happen in the absence of a retry-after header. - if !count429 && cap == 0 { - cap = 2 * time.Minute + // if this was a 429 set the delay cap as specified. + // applicable only in the absence of a retry-after header. + if resp != nil && resp.StatusCode == http.StatusTooManyRequests { + cap = Max429Delay } if !delayed && !DelayForBackoffWithCap(backoff, cap, delayCount, r.Context().Done()) { return resp, r.Context().Err() diff --git a/vendor/github.com/Azure/go-autorest/autorest/version.go b/vendor/github.com/Azure/go-autorest/autorest/version.go index a42f705d3765..713e23581d92 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/version.go +++ b/vendor/github.com/Azure/go-autorest/autorest/version.go @@ -19,7 +19,7 @@ import ( "runtime" ) -const number = "v13.4.0" +const number = "v14.2.1" var ( userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s", diff --git a/vendor/github.com/Azure/go-autorest/azure-pipelines.yml b/vendor/github.com/Azure/go-autorest/azure-pipelines.yml new file mode 100644 index 000000000000..6fb8404fd01d --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/azure-pipelines.yml @@ -0,0 +1,105 @@ +variables: + GOPATH: '$(system.defaultWorkingDirectory)/work' + sdkPath: '$(GOPATH)/src/github.com/$(build.repository.name)' + +jobs: + - job: 'goautorest' + displayName: 'Run go-autorest CI Checks' + + strategy: + matrix: + Linux_Go113: + vm.image: 'ubuntu-18.04' + go.version: '1.13' + Linux_Go114: + vm.image: 'ubuntu-18.04' + go.version: '1.14' + + pool: + vmImage: '$(vm.image)' + + steps: + - task: GoTool@0 + inputs: + version: '$(go.version)' + displayName: "Select Go Version" + + - script: | + set -e + mkdir -p '$(GOPATH)/bin' + mkdir -p '$(sdkPath)' + shopt -s extglob + mv !(work) '$(sdkPath)' + echo '##vso[task.prependpath]$(GOPATH)/bin' + displayName: 'Create Go Workspace' + + - script: | + set -e + curl -sSL https://raw.githubusercontent.com/golang/dep/master/install.sh | sh + dep ensure -v + go install ./vendor/golang.org/x/lint/golint + go get github.com/jstemmer/go-junit-report + go get github.com/axw/gocov/gocov + go get github.com/AlekSi/gocov-xml + go get -u github.com/matm/gocov-html + workingDirectory: '$(sdkPath)' + displayName: 'Install Dependencies' + + - script: | + go vet ./autorest/... + go vet ./logger/... + go vet ./tracing/... + workingDirectory: '$(sdkPath)' + displayName: 'Vet' + + - script: | + go build -v ./autorest/... + go build -v ./logger/... + go build -v ./tracing/... + workingDirectory: '$(sdkPath)' + displayName: 'Build' + + - script: | + set -e + go test -race -v -coverprofile=coverage.txt -covermode atomic ./autorest/... ./logger/... ./tracing/... 2>&1 | go-junit-report > report.xml + gocov convert coverage.txt > coverage.json + gocov-xml < coverage.json > coverage.xml + gocov-html < coverage.json > coverage.html + workingDirectory: '$(sdkPath)' + displayName: 'Run Tests' + + - script: grep -L -r --include *.go --exclude-dir vendor -P "Copyright (\d{4}|\(c\)) Microsoft" ./ | tee >&2 + workingDirectory: '$(sdkPath)' + displayName: 'Copyright Header Check' + failOnStderr: true + condition: succeededOrFailed() + + - script: | + gofmt -s -l -w ./autorest/. >&2 + gofmt -s -l -w ./logger/. 
>&2 + gofmt -s -l -w ./tracing/. >&2 + workingDirectory: '$(sdkPath)' + displayName: 'Format Check' + failOnStderr: true + condition: succeededOrFailed() + + - script: | + golint ./autorest/... >&2 + golint ./logger/... >&2 + golint ./tracing/... >&2 + workingDirectory: '$(sdkPath)' + displayName: 'Linter Check' + failOnStderr: true + condition: succeededOrFailed() + + - task: PublishTestResults@2 + inputs: + testRunner: JUnit + testResultsFiles: $(sdkPath)/report.xml + failTaskOnFailedTests: true + + - task: PublishCodeCoverageResults@1 + inputs: + codeCoverageTool: Cobertura + summaryFileLocation: $(sdkPath)/coverage.xml + additionalCodeCoverageFiles: $(sdkPath)/coverage.html diff --git a/vendor/github.com/Azure/go-autorest/doc.go b/vendor/github.com/Azure/go-autorest/doc.go new file mode 100644 index 000000000000..99ae6ca988a4 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/doc.go @@ -0,0 +1,18 @@ +/* +Package go-autorest provides an HTTP request client for use with Autorest-generated API client packages. +*/ +package go_autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/logger/go.mod b/vendor/github.com/Azure/go-autorest/logger/go.mod index f22ed56bcde4..bedeaee039e0 100644 --- a/vendor/github.com/Azure/go-autorest/logger/go.mod +++ b/vendor/github.com/Azure/go-autorest/logger/go.mod @@ -1,3 +1,5 @@ module github.com/Azure/go-autorest/logger go 1.12 + +require github.com/Azure/go-autorest v14.2.0+incompatible diff --git a/vendor/github.com/Azure/go-autorest/logger/go.sum b/vendor/github.com/Azure/go-autorest/logger/go.sum new file mode 100644 index 000000000000..1fc56a962ee4 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/logger/go.sum @@ -0,0 +1,2 @@ +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= diff --git a/vendor/github.com/Azure/go-autorest/logger/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/logger/go_mod_tidy_hack.go new file mode 100644 index 000000000000..0aa27680db9b --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/logger/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package logger + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// This file, and the github.com/Azure/go-autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. +// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/tracing/go.mod b/vendor/github.com/Azure/go-autorest/tracing/go.mod index 25c34c1085a7..a2cdec78c810 100644 --- a/vendor/github.com/Azure/go-autorest/tracing/go.mod +++ b/vendor/github.com/Azure/go-autorest/tracing/go.mod @@ -1,3 +1,5 @@ module github.com/Azure/go-autorest/tracing go 1.12 + +require github.com/Azure/go-autorest v14.2.0+incompatible diff --git a/vendor/github.com/Azure/go-autorest/tracing/go.sum b/vendor/github.com/Azure/go-autorest/tracing/go.sum new file mode 100644 index 000000000000..1fc56a962ee4 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/tracing/go.sum @@ -0,0 +1,2 @@ +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= diff --git a/vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go new file mode 100644 index 000000000000..e163975cd4e1 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package tracing + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. +// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go b/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go new file mode 100644 index 000000000000..916950c02336 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go @@ -0,0 +1,51 @@ +package osversion + +import ( + "fmt" + + "golang.org/x/sys/windows" +) + +// OSVersion is a wrapper for Windows version information +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx +type OSVersion struct { + Version uint32 + MajorVersion uint8 + MinorVersion uint8 + Build uint16 +} + +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx +type osVersionInfoEx struct { + OSVersionInfoSize uint32 + MajorVersion uint32 + MinorVersion uint32 + BuildNumber uint32 + PlatformID uint32 + CSDVersion [128]uint16 + ServicePackMajor uint16 + ServicePackMinor uint16 + SuiteMask uint16 + ProductType byte + Reserve byte +} + +// Get gets the operating system version on Windows. 
+// The calling application must be manifested to get the correct version information. +func Get() OSVersion { + var err error + osv := OSVersion{} + osv.Version, err = windows.GetVersion() + if err != nil { + // GetVersion never fails. + panic(err) + } + osv.MajorVersion = uint8(osv.Version & 0xFF) + osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF) + osv.Build = uint16(osv.Version >> 16) + return osv +} + +func (osv OSVersion) ToString() string { + return fmt.Sprintf("%d.%d.%d", osv.MajorVersion, osv.MinorVersion, osv.Build) +} diff --git a/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go b/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go new file mode 100644 index 000000000000..c26d35ac5738 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go @@ -0,0 +1,23 @@ +package osversion + +const ( + // RS1 (version 1607, codename "Redstone 1") corresponds to Windows Server + // 2016 (ltsc2016) and Windows 10 (Anniversary Update). + RS1 = 14393 + + // RS2 (version 1703, codename "Redstone 2") was a client-only update, and + // corresponds to Windows 10 (Creators Update). + RS2 = 15063 + + // RS3 (version 1709, codename "Redstone 3") corresponds to Windows Server + // 1709 (Semi-Annual Channel (SAC)), and Windows 10 (Fall Creators Update). + RS3 = 16299 + + // RS4 (version 1803, codename "Redstone 4") corresponds to Windows Server + // 1809 (Semi-Annual Channel (SAC)), and Windows 10 (April 2018 Update). + RS4 = 17134 + + // RS5 (version 1809, codename "Redstone 5") corresponds to Windows Server + // 2019 (ltsc2019), and Windows 10 (October 2018 Update). + RS5 = 17763 +) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go index 2def23fa1d1c..3b809e8478c1 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -43,7 +43,7 @@ type Config struct { // An optional endpoint URL (hostname only or fully qualified URI) // that overrides the default generated endpoint for a client. Set this - // to `""` to use the default generated endpoint. + // to `nil` or the value to `""` to use the default generated endpoint. // // Note: You must still provide a `Region` value when specifying an // endpoint for a client. @@ -138,7 +138,7 @@ type Config struct { // `ExpectContinueTimeout` for information on adjusting the continue wait // timeout. https://golang.org/pkg/net/http/#Transport // - // You should use this flag to disble 100-Continue if you experience issues + // You should use this flag to disable 100-Continue if you experience issues // with proxies or third party S3 compatible services. S3Disable100Continue *bool @@ -183,7 +183,7 @@ type Config struct { // // Example: // sess := session.Must(session.NewSession(aws.NewConfig() - // .WithEC2MetadataDiableTimeoutOverride(true))) + // .WithEC2MetadataDisableTimeoutOverride(true))) // // svc := s3.New(sess) // @@ -194,7 +194,7 @@ type Config struct { // both IPv4 and IPv6 addressing. // // Setting this for a service which does not support dual stack will fail - // to make requets. It is not recommended to set this value on the session + // to make requests. It is not recommended to set this value on the session // as it will apply to all service clients created with the session. Even // services which don't support dual stack endpoints. 
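Since the comment above warns against enabling dual stack on a shared session, a sketch of scoping it to a single client instead; the region and the choice of S3 are assumptions:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	// Scope dual stack to this one client; other clients created from the
	// same session keep their regular endpoints.
	svc := s3.New(sess, aws.NewConfig().
		WithRegion("us-west-2").
		WithUseDualStack(true))
	fmt.Println(svc.Endpoint) // e.g. an s3.dualstack.us-west-2.amazonaws.com endpoint
}
```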
// @@ -238,6 +238,7 @@ type Config struct { // EnableEndpointDiscovery will allow for endpoint discovery on operations that // have the definition in its model. By default, endpoint discovery is off. + // To use EndpointDiscovery, Endpoint should be unset or set to an empty string. // // Example: // sess := session.Must(session.NewSession(&aws.Config{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go index 66c5945db15e..2f9446333a65 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go @@ -2,42 +2,8 @@ package aws -import "time" - -// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This is copied to -// provide a 1.6 and 1.5 safe version of context that is compatible with Go -// 1.7's Context. -// -// An emptyCtx is never canceled, has no values, and has no deadline. It is not -// struct{}, since vars of this type must have distinct addresses. -type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case backgroundCtx: - return "aws.BackgroundContext" - } - return "unknown empty Context" -} - -var ( - backgroundCtx = new(emptyCtx) +import ( + "github.com/aws/aws-sdk-go/internal/context" ) // BackgroundContext returns a context that will never be canceled, has no @@ -52,5 +18,5 @@ var ( // // See https://golang.org/pkg/context for more information on Contexts. func BackgroundContext() Context { - return backgroundCtx + return context.BackgroundCtx } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go index aa902d70837f..d95a5eb54080 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go @@ -225,6 +225,8 @@ var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointH if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" { r.Error = aws.ErrMissingRegion } else if r.ClientInfo.Endpoint == "" { + // Was any endpoint provided by the user, or one was derived by the + // SDK's endpoint resolver? r.Error = aws.ErrMissingEndpoint } }} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go new file mode 100644 index 000000000000..5852b2648703 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go @@ -0,0 +1,22 @@ +// +build !go1.7 + +package credentials + +import ( + "github.com/aws/aws-sdk-go/internal/context" +) + +// backgroundContext returns a context that will never be canceled, has no +// values, and no deadline. This context is used by the SDK to provide +// backwards compatibility with non-context API operations and functionality. +// +// Go 1.6 and before: +// This context function is equivalent to context.Background in the Go stdlib. +// +// Go 1.7 and later: +// The context returned will be the value returned by context.Background() +// +// See https://golang.org/pkg/context for more information on Contexts. 
+func backgroundContext() Context { + return context.BackgroundCtx +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go new file mode 100644 index 000000000000..388b2154182d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go @@ -0,0 +1,20 @@ +// +build go1.7 + +package credentials + +import "context" + +// backgroundContext returns a context that will never be canceled, has no +// values, and no deadline. This context is used by the SDK to provide +// backwards compatibility with non-context API operations and functionality. +// +// Go 1.6 and before: +// This context function is equivalent to context.Background in the Go stdlib. +// +// Go 1.7 and later: +// The context returned will be the value returned by context.Background() +// +// See https://golang.org/pkg/context for more information on Contexts. +func backgroundContext() Context { + return context.Background() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go new file mode 100644 index 000000000000..8152a864add6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go @@ -0,0 +1,39 @@ +// +build !go1.9 + +package credentials + +import "time" + +// Context is a copy of the Go v1.7 stdlib's context.Context interface. +// It is represented as an SDK interface to enable you to use the "WithContext" +// API methods with Go v1.6 and a Context type such as golang.org/x/net/context. +// +// This type, aws.Context, and context.Context are equivalent. +// +// See https://golang.org/pkg/context on how to use contexts. +type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. Deadline returns ok==false when no deadline is + // set. Successive calls to Deadline return the same results. + Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. + Done() <-chan struct{} + + // Err returns a non-nil error value after Done is closed. Err returns + // Canceled if the context was canceled or DeadlineExceeded if the + // context's deadline passed. No other values for Err are defined. + // After Done is closed, successive calls to Err return the same value. + Err() error + + // Value returns the value associated with this context for key, or nil + // if no value is associated with key. Successive calls to Value with + // the same key returns the same result. + // + // Use context values only for request-scoped data that transits + // processes and API boundaries, not for passing optional parameters to + // functions. + Value(key interface{}) interface{} +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go new file mode 100644 index 000000000000..4356edb3d5d3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go @@ -0,0 +1,13 @@ +// +build go1.9 + +package credentials + +import "context" + +// Context is an alias of the Go stdlib's context.Context interface. +// It can be used within the SDK's API operation "WithContext" methods.
+// +// This type, aws.Context, and context.Context are equivalent. +// +// See https://golang.org/pkg/context on how to use contexts. +type Context = context.Context diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go index 4af592158144..a880a3de8fe9 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go @@ -54,6 +54,7 @@ import ( "time" "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/internal/sync/singleflight" ) // AnonymousCredentials is an empty Credential object that can be used as @@ -106,6 +107,13 @@ type Provider interface { IsExpired() bool } +// ProviderWithContext is a Provider that can retrieve credentials with a Context +type ProviderWithContext interface { + Provider + + RetrieveWithContext(Context) (Value, error) +} + // An Expirer is an interface that Providers can implement to expose the expiration // time, if known. If the Provider cannot accurately provide this info, // it should not implement this interface. @@ -165,7 +173,9 @@ type Expiry struct { // the expiration time given to ensure no requests are made with expired // tokens. func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) { - e.expiration = expiration + // Passed in expirations should have the monotonic clock values stripped. + // This ensures time comparisons will be based on wall-time. + e.expiration = expiration.Round(0) if window > 0 { e.expiration = e.expiration.Add(-window) }
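A standalone sketch of what the Round(0) call above does: it strips the monotonic clock reading so that later comparisons against deserialized, wall-clock-only times behave predictably:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Now()
	// t carries a monotonic clock reading, shown as an "m=±..." suffix.
	fmt.Println(t)
	// Round(0) strips the monotonic reading; only the wall-clock part remains.
	fmt.Println(t.Round(0))
}
```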
@@ -197,24 +207,24 @@ func (e *Expiry) ExpiresAt() time.Time { // first instance of the credentials Value. All calls to Get() after that // will return the cached credentials Value until IsExpired() returns true. type Credentials struct { - creds Value - forceRefresh bool - - m sync.RWMutex + sf singleflight.Group + m sync.RWMutex + creds Value provider Provider } // NewCredentials returns a pointer to a new Credentials with the provider set. func NewCredentials(provider Provider) *Credentials { - return &Credentials{ - provider: provider, - forceRefresh: true, + c := &Credentials{ + provider: provider, } + return c } -// Get returns the credentials value, or error if the credentials Value failed -// to be retrieved. +// GetWithContext returns the credentials value, or error if the credentials +// Value failed to be retrieved. Will return early if the passed in context is +// canceled. // // Will return the cached credentials Value if it has not expired. If the // credentials Value has expired the Provider's Retrieve() will be called @@ -222,31 +232,70 @@ func NewCredentials(provider Provider) *Credentials { // // If Credentials.Expire() was called the credentials Value will be force // expired, and the next call to Get() will cause them to be refreshed. +// +// Passed in Context is equivalent to aws.Context, and context.Context. +func (c *Credentials) GetWithContext(ctx Context) (Value, error) { + // Check if credentials are cached, and not expired. + select { + case curCreds, ok := <-c.asyncIsExpired(): + // ok will only be true if the credentials were not expired. ok will + // be false and have no value if the credentials are expired. + if ok { + return curCreds, nil + } + case <-ctx.Done(): + return Value{}, awserr.New("RequestCanceled", + "request context canceled", ctx.Err()) + } + + // Cannot pass context down to the actual retrieve, because the first + // context would cancel the whole group when there is no direct + // association of items in the group. + resCh := c.sf.DoChan("", func() (interface{}, error) { + return c.singleRetrieve(&suppressedContext{ctx}) + }) + select { + case res := <-resCh: + return res.Val.(Value), res.Err + case <-ctx.Done(): + return Value{}, awserr.New("RequestCanceled", + "request context canceled", ctx.Err()) } - c.m.RUnlock() +} - // Credentials are expired need to retrieve the credentials taking the full - // lock. +func (c *Credentials) singleRetrieve(ctx Context) (interface{}, error) { c.m.Lock() defer c.m.Unlock() - if c.isExpired() { - creds, err := c.provider.Retrieve() - if err != nil { - return Value{}, err - } + if curCreds := c.creds; !c.isExpiredLocked(curCreds) { + return curCreds, nil + } + + var creds Value + var err error + if p, ok := c.provider.(ProviderWithContext); ok { + creds, err = p.RetrieveWithContext(ctx) + } else { + creds, err = c.provider.Retrieve() + } + if err == nil { c.creds = creds - c.forceRefresh = false } - return c.creds, nil + return creds, err +} + +// Get returns the credentials value, or error if the credentials Value failed +// to be retrieved. +// +// Will return the cached credentials Value if it has not expired. If the +// credentials Value has expired the Provider's Retrieve() will be called +// to refresh the credentials. +// +// If Credentials.Expire() was called the credentials Value will be force +// expired, and the next call to Get() will cause them to be refreshed. +func (c *Credentials) Get() (Value, error) { + return c.GetWithContext(backgroundContext()) } // Expire expires the credentials and forces them to be retrieved on the @@ -258,7 +307,7 @@ func (c *Credentials) Expire() { c.m.Lock() defer c.m.Unlock() - c.forceRefresh = true + c.creds = Value{} } // IsExpired returns if the credentials are no longer valid, and need @@ -270,12 +319,30 @@ func (c *Credentials) IsExpired() bool { c.m.RLock() defer c.m.RUnlock() - return c.isExpired() + return c.isExpiredLocked(c.creds) } -// isExpired helper method wrapping the definition of expired credentials. -func (c *Credentials) isExpired() bool { - return c.forceRefresh || c.provider.IsExpired() +// asyncIsExpired returns a channel of credentials Value. If the channel is +// closed without a value, the credentials are expired. +func (c *Credentials) asyncIsExpired() <-chan Value { + ch := make(chan Value, 1) + go func() { + c.m.RLock() + defer c.m.RUnlock() + + if curCreds := c.creds; !c.isExpiredLocked(curCreds) { + ch <- curCreds + } + + close(ch) + }() + + return ch +} +
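With the cache, singleflight group, and expiry helpers above in place, credential retrieval can now be bounded by a caller's context. A usage sketch; the EC2 role provider and the five-second timeout are assumptions:

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess := session.Must(session.NewSession())
	creds := credentials.NewCredentials(&ec2rolecreds.EC2RoleProvider{
		Client: ec2metadata.New(sess),
	})

	// Concurrent callers share a single in-flight Retrieve through the
	// singleflight group; each caller can still give up on its own context.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	v, err := creds.GetWithContext(ctx)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("credentials from:", v.ProviderName)
}
```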
+// isExpiredLocked helper method wrapping the definition of expired credentials. +func (c *Credentials) isExpiredLocked(creds interface{}) bool { + return creds == nil || creds.(Value) == Value{} || c.provider.IsExpired() } // ExpiresAt provides access to the functionality of the Expirer interface of @@ -288,12 +355,29 @@ func (c *Credentials) ExpiresAt() (time.Time, error) { expirer, ok := c.provider.(Expirer) if !ok { return time.Time{}, awserr.New("ProviderNotExpirer", - fmt.Sprintf("provider %s does not support ExpiresAt()", c.creds.ProviderName), + fmt.Sprintf("provider %s does not support ExpiresAt()", + c.creds.ProviderName), nil) } - if c.forceRefresh { + if c.creds == (Value{}) { // set expiration time to the distant past return time.Time{}, nil } return expirer.ExpiresAt(), nil } + +type suppressedContext struct { + Context +} + +func (s *suppressedContext) Deadline() (deadline time.Time, ok bool) { + return time.Time{}, false +} + +func (s *suppressedContext) Done() <-chan struct{} { + return nil +} + +func (s *suppressedContext) Err() error { + return nil } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go index 43d4ed386ab8..92af5b7250a8 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go @@ -7,6 +7,7 @@ import ( "strings" "time" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/credentials" @@ -87,7 +88,14 @@ func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(* // Error will be returned if the request fails, or unable to extract // the desired credentials. func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) { - credsList, err := requestCredList(m.Client) + return m.RetrieveWithContext(aws.BackgroundContext()) +} + +// RetrieveWithContext retrieves credentials from the EC2 service. +// An error will be returned if the request fails, or it is unable to extract +// the desired credentials. +func (m *EC2RoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { + credsList, err := requestCredList(ctx, m.Client) if err != nil { return credentials.Value{ProviderName: ProviderName}, err } @@ -97,7 +105,7 @@ func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) { } credsName := credsList[0] - roleCreds, err := requestCred(m.Client, credsName) + roleCreds, err := requestCred(ctx, m.Client, credsName) if err != nil { return credentials.Value{ProviderName: ProviderName}, err } @@ -130,8 +138,8 @@ const iamSecurityCredsPath = "iam/security-credentials/" // requestCredList requests a list of credentials from the EC2 service. // If there are no credentials, or there is an error making or receiving the request -func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) { - resp, err := client.GetMetadata(iamSecurityCredsPath) +func requestCredList(ctx aws.Context, client *ec2metadata.EC2Metadata) ([]string, error) { + resp, err := client.GetMetadataWithContext(ctx, iamSecurityCredsPath) if err != nil { return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err) }
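The provider updates in this revision all follow the same recipe: keep Retrieve for compatibility and delegate to a new RetrieveWithContext. A sketch of a third-party provider adopting the new ProviderWithContext interface the same way; the vault naming and placeholder values are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
)

// vaultProvider is a hypothetical provider backed by an external secret
// store; all values below are placeholders.
type vaultProvider struct{ expired bool }

func (p *vaultProvider) Retrieve() (credentials.Value, error) {
	// Legacy entry point: delegate to the context-aware variant.
	return p.RetrieveWithContext(aws.BackgroundContext())
}

func (p *vaultProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) {
	// A real implementation would pass ctx to its HTTP or RPC client here.
	return credentials.Value{
		AccessKeyID:     "AKID",   // placeholder
		SecretAccessKey: "SECRET", // placeholder
		ProviderName:    "VaultProvider",
	}, nil
}

func (p *vaultProvider) IsExpired() bool { return p.expired }

// Compile-time check that the provider satisfies the new interface.
var _ credentials.ProviderWithContext = (*vaultProvider)(nil)

func main() {
	creds := credentials.NewCredentials(&vaultProvider{})
	v, err := creds.Get() // routes through RetrieveWithContext
	fmt.Println(v.ProviderName, err)
}
```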
@@ -154,8 +162,8 @@ func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) { // // If the credentials cannot be found, or there is an error reading the response // an error will be returned. -func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) { - resp, err := client.GetMetadata(sdkuri.PathJoin(iamSecurityCredsPath, credsName)) +func requestCred(ctx aws.Context, client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) { + resp, err := client.GetMetadataWithContext(ctx, sdkuri.PathJoin(iamSecurityCredsPath, credsName)) if err != nil { return ec2RoleCredRespBody{}, awserr.New("EC2RoleRequestError", diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go index 1a7af53a4da1..785f30d8e6c3 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go @@ -116,7 +116,13 @@ func (p *Provider) IsExpired() bool { // Retrieve will attempt to request the credentials from the endpoint the Provider // was configured for. An error will be returned if the retrieval fails. func (p *Provider) Retrieve() (credentials.Value, error) { - resp, err := p.getCredentials() + return p.RetrieveWithContext(aws.BackgroundContext()) +} + +// RetrieveWithContext will attempt to request the credentials from the endpoint the Provider +// was configured for. An error will be returned if the retrieval fails. +func (p *Provider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { + resp, err := p.getCredentials(ctx) if err != nil { return credentials.Value{ProviderName: ProviderName}, awserr.New("CredentialsEndpointError", "failed to load credentials", err) @@ -148,7 +154,7 @@ type errorOutput struct { Message string `json:"message"` } -func (p *Provider) getCredentials() (*getCredentialsOutput, error) { +func (p *Provider) getCredentials(ctx aws.Context) (*getCredentialsOutput, error) { op := &request.Operation{ Name: "GetCredentials", HTTPMethod: "GET", @@ -156,6 +162,7 @@ func (p *Provider) getCredentials() (*getCredentialsOutput, error) { out := &getCredentialsOutput{} req := p.Client.NewRequest(op, nil, out) + req.SetContext(ctx) req.HTTPRequest.Header.Set("Accept", "application/json") if authToken := p.AuthorizationToken; len(authToken) != 0 { req.HTTPRequest.Header.Set("Authorization", authToken) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go index e1551495812a..22b5c5d9f322 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go @@ -17,8 +17,9 @@ var ( ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil) ) -// A SharedCredentialsProvider retrieves credentials from the current user's home -// directory, and keeps track if those credentials are expired. +// A SharedCredentialsProvider retrieves access key pair (access key ID, +// secret access key, and session token if present) credentials from the current +// user's home directory, and keeps track if those credentials are expired.
// // Profile ini file example: $HOME/.aws/credentials type SharedCredentialsProvider struct { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go index 531139e3971a..cbba1e3d560c 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go @@ -19,7 +19,9 @@ type StaticProvider struct { } // NewStaticCredentials returns a pointer to a new Credentials object -// wrapping a static credentials value provider. +// wrapping a static credentials value provider. Token is only required +// for temporary security credentials retrieved via STS, otherwise an empty +// string can be passed for this parameter. func NewStaticCredentials(id, secret, token string) *Credentials { return NewCredentials(&StaticProvider{Value: Value{ AccessKeyID: id, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go index 9f37f44bcfa3..6846ef6f8085 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go @@ -87,6 +87,7 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/internal/sdkrand" "github.com/aws/aws-sdk-go/service/sts" ) @@ -118,6 +119,10 @@ type AssumeRoler interface { AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) } +type assumeRolerWithContext interface { + AssumeRoleWithContext(aws.Context, *sts.AssumeRoleInput, ...request.Option) (*sts.AssumeRoleOutput, error) +} + // DefaultDuration is the default amount of time in minutes that the credentials // will be valid for. var DefaultDuration = time.Duration(15) * time.Minute @@ -164,6 +169,29 @@ type AssumeRoleProvider struct { // size. Policy *string + // The ARNs of IAM managed policies you want to use as managed session policies. + // The policies must exist in the same account as the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plain text that you use for both inline and managed session + // policies can't exceed 2,048 characters. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. You can use the role's temporary credentials + // in subsequent AWS API calls to access resources in the account that owns + // the role. You cannot use session policies to grant more permissions than + // those allowed by the identity-based policy of the role that is being assumed. + // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. 
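The PolicyArns documentation above belongs to the field declared just below; a sketch of passing a managed session policy when assuming a role, with placeholder ARNs:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	sess := session.Must(session.NewSession())
	creds := stscreds.NewCredentials(sess,
		"arn:aws:iam::123456789012:role/example", // placeholder role ARN
		func(p *stscreds.AssumeRoleProvider) {
			// Session permissions become the intersection of the role's
			// identity-based policy and this managed session policy.
			p.PolicyArns = []*sts.PolicyDescriptorType{
				{Arn: aws.String("arn:aws:iam::aws:policy/ReadOnlyAccess")},
			}
		})
	v, err := creds.Get() // performs the AssumeRole call
	fmt.Println(v.ProviderName, err)
}
```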
+	PolicyArns []*sts.PolicyDescriptorType
+
 	// The identification number of the MFA device that is associated with the user
 	// who is making the AssumeRole call. Specify this value if the trust policy
 	// of the role being assumed includes a condition that requires MFA authentication.
@@ -265,6 +293,11 @@ func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*
 
 // Retrieve generates a new set of temporary credentials using STS.
 func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {
+	return p.RetrieveWithContext(aws.BackgroundContext())
+}
+
+// RetrieveWithContext generates a new set of temporary credentials using STS.
+func (p *AssumeRoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) {
 	// Apply defaults where parameters are not set.
 	if p.RoleSessionName == "" {
 		// Try to work out a role name that will hopefully end up unique.
@@ -281,6 +314,7 @@ func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {
 		RoleSessionName:   aws.String(p.RoleSessionName),
 		ExternalId:        p.ExternalID,
 		Tags:              p.Tags,
+		PolicyArns:        p.PolicyArns,
 		TransitiveTagKeys: p.TransitiveTagKeys,
 	}
 	if p.Policy != nil {
@@ -304,7 +338,15 @@ func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {
 		}
 	}
 
-	roleOutput, err := p.Client.AssumeRole(input)
+	var roleOutput *sts.AssumeRoleOutput
+	var err error
+
+	if c, ok := p.Client.(assumeRolerWithContext); ok {
+		roleOutput, err = c.AssumeRoleWithContext(ctx, input)
+	} else {
+		roleOutput, err = p.Client.AssumeRole(input)
+	}
+
 	if err != nil {
 		return credentials.Value{ProviderName: ProviderName}, err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go
index b20b63394847..cefe2a76d4de 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go
@@ -28,15 +28,46 @@ const (
 // compare test values.
 var now = time.Now
 
+// TokenFetcher should return WebIdentity token bytes or an error
+type TokenFetcher interface {
+	FetchToken(credentials.Context) ([]byte, error)
+}
+
+// FetchTokenPath is a path to a WebIdentity token file
+type FetchTokenPath string
+
+// FetchToken returns a token by reading from the filesystem
+func (f FetchTokenPath) FetchToken(ctx credentials.Context) ([]byte, error) {
+	data, err := ioutil.ReadFile(string(f))
+	if err != nil {
+		errMsg := fmt.Sprintf("unable to read file at %s", f)
+		return nil, awserr.New(ErrCodeWebIdentity, errMsg, err)
+	}
+	return data, nil
+}
+
 // WebIdentityRoleProvider is used to retrieve credentials using
 // an OIDC token.
 type WebIdentityRoleProvider struct {
 	credentials.Expiry
+	PolicyArns []*sts.PolicyDescriptorType
+
+	// Duration the STS credentials will be valid for. Truncated to seconds.
+	// If unset, the assumed role will use AssumeRoleWithWebIdentity's default
+	// expiry duration. See
+	// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#STS.AssumeRoleWithWebIdentity
+	// for more information.
+	Duration time.Duration
 
-	client stsiface.STSAPI
+	// The amount of time the credentials will be refreshed before they expire.
+	// This is useful to refresh credentials before they expire, reducing the
+	// risk of using credentials as they expire. If unset, will default to no
+	// expiry window.
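A hedged sketch of the PolicyArns field and context-aware retrieval wired into AssumeRoleProvider above; the role and policy ARNs are placeholders, and the WebIdentityRoleProvider struct continues below:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	sess := session.Must(session.NewSession())

	p := &stscreds.AssumeRoleProvider{
		Client:   sts.New(sess),
		RoleARN:  "arn:aws:iam::123456789012:role/example", // placeholder
		Duration: 15 * time.Minute,
		// Managed policy ARNs used as session policies; the effective
		// permissions are the intersection of the role's identity-based
		// policy and these session policies.
		PolicyArns: []*sts.PolicyDescriptorType{
			{Arn: aws.String("arn:aws:iam::aws:policy/ReadOnlyAccess")},
		},
	}

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// *sts.STS implements AssumeRoleWithContext, so ctx propagates; clients
	// without it fall back to the plain AssumeRole path shown above.
	v, err := p.RetrieveWithContext(ctx)
	if err != nil {
		fmt.Println("assume role failed:", err)
		return
	}
	fmt.Println("session credentials via:", v.ProviderName)
}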
ExpiryWindow time.Duration - tokenFilePath string + client stsiface.STSAPI + + tokenFetcher TokenFetcher roleARN string roleSessionName string } @@ -52,9 +83,15 @@ func NewWebIdentityCredentials(c client.ConfigProvider, roleARN, roleSessionName // NewWebIdentityRoleProvider will return a new WebIdentityRoleProvider with the // provided stsiface.STSAPI func NewWebIdentityRoleProvider(svc stsiface.STSAPI, roleARN, roleSessionName, path string) *WebIdentityRoleProvider { + return NewWebIdentityRoleProviderWithToken(svc, roleARN, roleSessionName, FetchTokenPath(path)) +} + +// NewWebIdentityRoleProviderWithToken will return a new WebIdentityRoleProvider with the +// provided stsiface.STSAPI and a TokenFetcher +func NewWebIdentityRoleProviderWithToken(svc stsiface.STSAPI, roleARN, roleSessionName string, tokenFetcher TokenFetcher) *WebIdentityRoleProvider { return &WebIdentityRoleProvider{ client: svc, - tokenFilePath: path, + tokenFetcher: tokenFetcher, roleARN: roleARN, roleSessionName: roleSessionName, } @@ -64,10 +101,16 @@ func NewWebIdentityRoleProvider(svc stsiface.STSAPI, roleARN, roleSessionName, p // 'WebIdentityTokenFilePath' specified destination and if that is empty an // error will be returned. func (p *WebIdentityRoleProvider) Retrieve() (credentials.Value, error) { - b, err := ioutil.ReadFile(p.tokenFilePath) + return p.RetrieveWithContext(aws.BackgroundContext()) +} + +// RetrieveWithContext will attempt to assume a role from a token which is located at +// 'WebIdentityTokenFilePath' specified destination and if that is empty an +// error will be returned. +func (p *WebIdentityRoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { + b, err := p.tokenFetcher.FetchToken(ctx) if err != nil { - errMsg := fmt.Sprintf("unable to read file at %s", p.tokenFilePath) - return credentials.Value{}, awserr.New(ErrCodeWebIdentity, errMsg, err) + return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed fetching WebIdentity token: ", err) } sessionName := p.roleSessionName @@ -76,11 +119,22 @@ func (p *WebIdentityRoleProvider) Retrieve() (credentials.Value, error) { // uses unix time in nanoseconds to uniquely identify sessions. sessionName = strconv.FormatInt(now().UnixNano(), 10) } + + var duration *int64 + if p.Duration != 0 { + duration = aws.Int64(int64(p.Duration / time.Second)) + } + req, resp := p.client.AssumeRoleWithWebIdentityRequest(&sts.AssumeRoleWithWebIdentityInput{ + PolicyArns: p.PolicyArns, RoleArn: &p.roleARN, RoleSessionName: &sessionName, WebIdentityToken: aws.String(string(b)), + DurationSeconds: duration, }) + + req.SetContext(ctx) + // InvalidIdentityToken error is a temporary error that can occur // when assuming an Role with a JWT web identity token. req.RetryErrorCodes = append(req.RetryErrorCodes, sts.ErrCodeInvalidIdentityTokenException) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go index 12897eef6265..69fa63dc08f3 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go @@ -8,6 +8,7 @@ import ( "strings" "time" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/internal/sdkuri" @@ -15,15 +16,16 @@ import ( // getToken uses the duration to return a token for EC2 metadata service, // or an error if the request failed. 
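The TokenFetcher hook introduced above decouples token acquisition from the filesystem. A sketch of a custom fetcher, assuming a hypothetical WEB_IDENTITY_TOKEN environment variable and a placeholder role ARN (the getToken function continues below):

package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

// envTokenFetcher is a hypothetical TokenFetcher that reads the OIDC token
// from an environment variable instead of a file on disk.
type envTokenFetcher struct{ key string }

func (f envTokenFetcher) FetchToken(ctx credentials.Context) ([]byte, error) {
	tok := os.Getenv(f.key)
	if tok == "" {
		return nil, fmt.Errorf("no token in %s", f.key)
	}
	return []byte(tok), nil
}

func main() {
	sess := session.Must(session.NewSession())
	p := stscreds.NewWebIdentityRoleProviderWithToken(
		sts.New(sess),
		"arn:aws:iam::123456789012:role/web-identity", // placeholder
		"example-session",
		envTokenFetcher{key: "WEB_IDENTITY_TOKEN"},
	)
	creds := credentials.NewCredentials(p)
	_, _ = creds.Get() // triggers AssumeRoleWithWebIdentity
}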
-func (c *EC2Metadata) getToken(duration time.Duration) (tokenOutput, error) { +func (c *EC2Metadata) getToken(ctx aws.Context, duration time.Duration) (tokenOutput, error) { op := &request.Operation{ Name: "GetToken", HTTPMethod: "PUT", - HTTPPath: "/api/token", + HTTPPath: "/latest/api/token", } var output tokenOutput req := c.NewRequest(op, nil, &output) + req.SetContext(ctx) // remove the fetch token handler from the request handlers to avoid infinite recursion req.Handlers.Sign.RemoveByName(fetchTokenHandlerName) @@ -50,15 +52,24 @@ func (c *EC2Metadata) getToken(duration time.Duration) (tokenOutput, error) { // instance metadata service. The content will be returned as a string, or // error if the request failed. func (c *EC2Metadata) GetMetadata(p string) (string, error) { + return c.GetMetadataWithContext(aws.BackgroundContext(), p) +} + +// GetMetadataWithContext uses the path provided to request information from the EC2 +// instance metadata service. The content will be returned as a string, or +// error if the request failed. +func (c *EC2Metadata) GetMetadataWithContext(ctx aws.Context, p string) (string, error) { op := &request.Operation{ Name: "GetMetadata", HTTPMethod: "GET", - HTTPPath: sdkuri.PathJoin("/meta-data", p), + HTTPPath: sdkuri.PathJoin("/latest/meta-data", p), } output := &metadataOutput{} req := c.NewRequest(op, nil, output) + req.SetContext(ctx) + err := req.Send() return output.Content, err } @@ -67,14 +78,22 @@ func (c *EC2Metadata) GetMetadata(p string) (string, error) { // there is no user-data setup for the EC2 instance a "NotFoundError" error // code will be returned. func (c *EC2Metadata) GetUserData() (string, error) { + return c.GetUserDataWithContext(aws.BackgroundContext()) +} + +// GetUserDataWithContext returns the userdata that was configured for the service. If +// there is no user-data setup for the EC2 instance a "NotFoundError" error +// code will be returned. +func (c *EC2Metadata) GetUserDataWithContext(ctx aws.Context) (string, error) { op := &request.Operation{ Name: "GetUserData", HTTPMethod: "GET", - HTTPPath: "/user-data", + HTTPPath: "/latest/user-data", } output := &metadataOutput{} req := c.NewRequest(op, nil, output) + req.SetContext(ctx) err := req.Send() return output.Content, err @@ -84,14 +103,22 @@ func (c *EC2Metadata) GetUserData() (string, error) { // instance metadata service for dynamic data. The content will be returned // as a string, or error if the request failed. func (c *EC2Metadata) GetDynamicData(p string) (string, error) { + return c.GetDynamicDataWithContext(aws.BackgroundContext(), p) +} + +// GetDynamicDataWithContext uses the path provided to request information from the EC2 +// instance metadata service for dynamic data. The content will be returned +// as a string, or error if the request failed. +func (c *EC2Metadata) GetDynamicDataWithContext(ctx aws.Context, p string) (string, error) { op := &request.Operation{ Name: "GetDynamicData", HTTPMethod: "GET", - HTTPPath: sdkuri.PathJoin("/dynamic", p), + HTTPPath: sdkuri.PathJoin("/latest/dynamic", p), } output := &metadataOutput{} req := c.NewRequest(op, nil, output) + req.SetContext(ctx) err := req.Send() return output.Content, err @@ -101,7 +128,14 @@ func (c *EC2Metadata) GetDynamicData(p string) (string, error) { // instance. Error is returned if the request fails or is unable to parse // the response. 
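A usage sketch of the *WithContext metadata getters added above, letting callers fail fast off-EC2 instead of waiting on the link-local address (GetInstanceIdentityDocument continues below):

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess := session.Must(session.NewSession())
	meta := ec2metadata.New(sess)

	// Bound the IMDS calls with a short deadline.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	if !meta.AvailableWithContext(ctx) {
		fmt.Println("not running on EC2 (or IMDS unreachable)")
		return
	}
	region, err := meta.RegionWithContext(ctx)
	if err != nil {
		fmt.Println("region lookup failed:", err)
		return
	}
	id, _ := meta.GetMetadataWithContext(ctx, "instance-id")
	fmt.Println("instance", id, "in", region)
}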
func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) { - resp, err := c.GetDynamicData("instance-identity/document") + return c.GetInstanceIdentityDocumentWithContext(aws.BackgroundContext()) +} + +// GetInstanceIdentityDocumentWithContext retrieves an identity document describing an +// instance. Error is returned if the request fails or is unable to parse +// the response. +func (c *EC2Metadata) GetInstanceIdentityDocumentWithContext(ctx aws.Context) (EC2InstanceIdentityDocument, error) { + resp, err := c.GetDynamicDataWithContext(ctx, "instance-identity/document") if err != nil { return EC2InstanceIdentityDocument{}, awserr.New("EC2MetadataRequestError", @@ -120,7 +154,12 @@ func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument // IAMInfo retrieves IAM info from the metadata API func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) { - resp, err := c.GetMetadata("iam/info") + return c.IAMInfoWithContext(aws.BackgroundContext()) +} + +// IAMInfoWithContext retrieves IAM info from the metadata API +func (c *EC2Metadata) IAMInfoWithContext(ctx aws.Context) (EC2IAMInfo, error) { + resp, err := c.GetMetadataWithContext(ctx, "iam/info") if err != nil { return EC2IAMInfo{}, awserr.New("EC2MetadataRequestError", @@ -145,7 +184,12 @@ func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) { // Region returns the region the instance is running in. func (c *EC2Metadata) Region() (string, error) { - ec2InstanceIdentityDocument, err := c.GetInstanceIdentityDocument() + return c.RegionWithContext(aws.BackgroundContext()) +} + +// RegionWithContext returns the region the instance is running in. +func (c *EC2Metadata) RegionWithContext(ctx aws.Context) (string, error) { + ec2InstanceIdentityDocument, err := c.GetInstanceIdentityDocumentWithContext(ctx) if err != nil { return "", err } @@ -162,7 +206,14 @@ func (c *EC2Metadata) Region() (string, error) { // Can be used to determine if application is running within an EC2 Instance and // the metadata service is available. func (c *EC2Metadata) Available() bool { - if _, err := c.GetMetadata("instance-id"); err != nil { + return c.AvailableWithContext(aws.BackgroundContext()) +} + +// AvailableWithContext returns if the application has access to the EC2 Metadata service. +// Can be used to determine if application is running within an EC2 Instance and +// the metadata service is available. +func (c *EC2Metadata) AvailableWithContext(ctx aws.Context) bool { + if _, err := c.GetMetadataWithContext(ctx, "instance-id"); err != nil { return false } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go index b8b2940d7446..8f35b3464ba1 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go @@ -5,6 +5,10 @@ // variable "AWS_EC2_METADATA_DISABLED=true". This environment variable set to // true instructs the SDK to disable the EC2 Metadata client. The client cannot // be used while the environment variable is set to true, (case insensitive). +// +// The endpoint of the EC2 IMDS client can be configured via the environment +// variable, AWS_EC2_METADATA_SERVICE_ENDPOINT when creating the client with a +// Session. See aws/session#Options.EC2IMDSEndpoint for more details. 
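Per the package doc added above, the IMDS endpoint is now configurable; a sketch assuming the session.Options.EC2IMDSEndpoint field referenced there, with a placeholder endpoint URL (the package body continues below):

package main

import (
	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Equivalent to exporting AWS_EC2_METADATA_SERVICE_ENDPOINT. Any URL path
	// is stripped by ec2metadata.NewClient, since operations now carry the
	// /latest prefix themselves; the address below is a placeholder.
	sess := session.Must(session.NewSessionWithOptions(session.Options{
		EC2IMDSEndpoint: "http://[fd00:ec2::254]/latest", // path is removed
	}))
	meta := ec2metadata.New(sess)
	_ = meta // meta now talks to the configured endpoint
}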
package ec2metadata import ( @@ -12,6 +16,7 @@ import ( "errors" "io" "net/http" + "net/url" "os" "strconv" "strings" @@ -41,7 +46,7 @@ const ( enableTokenProviderHandlerName = "enableTokenProviderHandler" // TTL constants - defaultTTL = 21600 * time.Second + defaultTTL = 21600 * time.Second ttlExpirationWindow = 30 * time.Second ) @@ -69,6 +74,9 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata { // a client when not using a session. Generally using just New with a session // is preferred. // +// Will remove the URL path from the endpoint provided to ensure the EC2 IMDS +// client is able to communicate with the EC2 IMDS API. +// // If an unmodified HTTP client is provided from the stdlib default, or no client // the EC2RoleProvider's EC2Metadata HTTP client's timeout will be shortened. // To disable this set Config.EC2MetadataDisableTimeoutOverride to false. Enabled by default. @@ -86,6 +94,15 @@ func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio cfg.MaxRetries = aws.Int(2) } + if u, err := url.Parse(endpoint); err == nil { + // Remove path from the endpoint since it will be added by requests. + // This is an artifact of the SDK adding `/latest` to the endpoint for + // EC2 IMDS, but this is now moved to the operation definition. + u.Path = "" + u.RawPath = "" + endpoint = u.String() + } + svc := &EC2Metadata{ Client: client.New( cfg, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go index 663372a9154e..4b29f190bf94 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go @@ -46,7 +46,7 @@ func (t *tokenProvider) fetchTokenHandler(r *request.Request) { return } - output, err := t.client.getToken(t.configuredTTL) + output, err := t.client.getToken(r.Context(), t.configuredTTL) if err != nil { @@ -87,6 +87,7 @@ func (t *tokenProvider) enableTokenProviderHandler(r *request.Request) { // If the error code status is 401, we enable the token provider if e, ok := r.Error.(awserr.RequestFailure); ok && e != nil && e.StatusCode() == http.StatusUnauthorized { + t.token.Store(ec2Token{}) atomic.StoreUint32(&t.disabled, 0) } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go index 343a2106f81a..654fb1ad52d0 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go @@ -93,7 +93,7 @@ func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resol } func custAddS3DualStack(p *partition) { - if p.ID != "aws" { + if !(p.ID == "aws" || p.ID == "aws-cn" || p.ID == "aws-us-gov") { return } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 03f035702046..c303910847cd 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -17,6 +17,7 @@ const ( // AWS Standard partition's regions. const ( + AfSouth1RegionID = "af-south-1" // Africa (Cape Town). ApEast1RegionID = "ap-east-1" // Asia Pacific (Hong Kong). ApNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo). ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul). @@ -24,11 +25,12 @@ const ( ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore). 
ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney). CaCentral1RegionID = "ca-central-1" // Canada (Central). - EuCentral1RegionID = "eu-central-1" // EU (Frankfurt). - EuNorth1RegionID = "eu-north-1" // EU (Stockholm). - EuWest1RegionID = "eu-west-1" // EU (Ireland). - EuWest2RegionID = "eu-west-2" // EU (London). - EuWest3RegionID = "eu-west-3" // EU (Paris). + EuCentral1RegionID = "eu-central-1" // Europe (Frankfurt). + EuNorth1RegionID = "eu-north-1" // Europe (Stockholm). + EuSouth1RegionID = "eu-south-1" // Europe (Milan). + EuWest1RegionID = "eu-west-1" // Europe (Ireland). + EuWest2RegionID = "eu-west-2" // Europe (London). + EuWest3RegionID = "eu-west-3" // Europe (Paris). MeSouth1RegionID = "me-south-1" // Middle East (Bahrain). SaEast1RegionID = "sa-east-1" // South America (Sao Paulo). UsEast1RegionID = "us-east-1" // US East (N. Virginia). @@ -46,7 +48,7 @@ const ( // AWS GovCloud (US) partition's regions. const ( UsGovEast1RegionID = "us-gov-east-1" // AWS GovCloud (US-East). - UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US). + UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US-West). ) // AWS ISO (US) partition's regions. @@ -97,7 +99,7 @@ var awsPartition = partition{ DNSSuffix: "amazonaws.com", RegionRegex: regionRegex{ Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^(us|eu|ap|sa|ca|me)\\-\\w+\\-\\d+$") + reg, _ := regexp.Compile("^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$") return reg }(), }, @@ -107,6 +109,9 @@ var awsPartition = partition{ SignatureVersions: []string{"v4"}, }, Regions: regions{ + "af-south-1": region{ + Description: "Africa (Cape Town)", + }, "ap-east-1": region{ Description: "Asia Pacific (Hong Kong)", }, @@ -129,19 +134,22 @@ var awsPartition = partition{ Description: "Canada (Central)", }, "eu-central-1": region{ - Description: "EU (Frankfurt)", + Description: "Europe (Frankfurt)", }, "eu-north-1": region{ - Description: "EU (Stockholm)", + Description: "Europe (Stockholm)", + }, + "eu-south-1": region{ + Description: "Europe (Milan)", }, "eu-west-1": region{ - Description: "EU (Ireland)", + Description: "Europe (Ireland)", }, "eu-west-2": region{ - Description: "EU (London)", + Description: "Europe (London)", }, "eu-west-3": region{ - Description: "EU (Paris)", + Description: "Europe (Paris)", }, "me-south-1": region{ Description: "Middle East (Bahrain)", @@ -169,9 +177,65 @@ var awsPartition = partition{ "us-east-1": endpoint{}, }, }, + "access-analyzer": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "access-analyzer-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "access-analyzer-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "access-analyzer-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "access-analyzer-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "access-analyzer-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "acm": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -187,6 +251,7 @@ var awsPartition = partition{ }, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -227,6 +292,7 @@ var awsPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -236,6 +302,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -277,9 +344,42 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "api.detective": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "api.ecr": service{ Endpoints: endpoints{ + "af-south-1": endpoint{ + Hostname: "api.ecr.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, "ap-east-1": endpoint{ Hostname: "api.ecr.ap-east-1.amazonaws.com", CredentialScope: credentialScope{ @@ -334,6 +434,12 @@ var awsPartition = partition{ Region: "eu-north-1", }, }, + "eu-south-1": endpoint{ + Hostname: "api.ecr.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, "eu-west-1": endpoint{ Hostname: "api.ecr.eu-west-1.amazonaws.com", CredentialScope: credentialScope{ @@ -352,6 +458,54 @@ var awsPartition = partition{ Region: "eu-west-3", }, }, + "fips-dkr-us-east-1": endpoint{ + Hostname: "ecr-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-dkr-us-east-2": endpoint{ + Hostname: "ecr-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-dkr-us-west-1": endpoint{ + Hostname: "ecr-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-dkr-us-west-2": endpoint{ + Hostname: "ecr-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "ecr-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "ecr-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": 
endpoint{ + Hostname: "ecr-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "ecr-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, "me-south-1": endpoint{ Hostname: "api.ecr.me-south-1.amazonaws.com", CredentialScope: credentialScope{ @@ -390,6 +544,29 @@ var awsPartition = partition{ }, }, }, + "api.elastic-inference": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "api.elastic-inference.ap-northeast-1.amazonaws.com", + }, + "ap-northeast-2": endpoint{ + Hostname: "api.elastic-inference.ap-northeast-2.amazonaws.com", + }, + "eu-west-1": endpoint{ + Hostname: "api.elastic-inference.eu-west-1.amazonaws.com", + }, + "us-east-1": endpoint{ + Hostname: "api.elastic-inference.us-east-1.amazonaws.com", + }, + "us-east-2": endpoint{ + Hostname: "api.elastic-inference.us-east-2.amazonaws.com", + }, + "us-west-2": endpoint{ + Hostname: "api.elastic-inference.us-west-2.amazonaws.com", + }, + }, + }, "api.mediatailor": service{ Endpoints: endpoints{ @@ -416,6 +593,7 @@ var awsPartition = partition{ "api.sagemaker": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -425,6 +603,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -463,6 +642,7 @@ var awsPartition = partition{ "apigateway": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -472,6 +652,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -483,11 +664,32 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "appflow": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "application-autoscaling": service{ Defaults: endpoint{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -497,6 +699,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -511,6 +714,7 @@ var awsPartition = partition{ "appmesh": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -518,9 +722,13 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": 
endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -537,6 +745,7 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, @@ -559,11 +768,17 @@ var awsPartition = partition{ "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -579,6 +794,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -595,6 +811,7 @@ var awsPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -604,6 +821,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -620,6 +838,8 @@ var awsPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -627,8 +847,13 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -638,6 +863,7 @@ var awsPartition = partition{ "backup": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -647,6 +873,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -670,15 +897,40 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "fips.batch.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "fips.batch.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "fips.batch.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "fips.batch.us-west-2.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "budgets": service{ @@ -727,6 +979,7 @@ var awsPartition = partition{ "cloud9": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -735,10 +988,15 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -759,6 +1017,7 @@ var awsPartition = partition{ "cloudformation": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -768,15 +1027,40 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "cloudformation-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "cloudformation-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "cloudformation-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "cloudformation-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, "cloudfront": service{ @@ -815,6 +1099,7 @@ var awsPartition = partition{ }, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -824,6 +1109,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -853,6 +1139,7 @@ var awsPartition = partition{ "cloudtrail": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -862,37 +1149,78 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "codebuild": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - 
"eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "codebuild-fips.us-east-1.amazonaws.com", + "fips-us-east-1": endpoint{ + Hostname: "cloudtrail-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "cloudtrail-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "cloudtrail-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "cloudtrail-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codeartifact": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codebuild": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "codebuild-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, @@ -932,6 +1260,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -952,6 +1281,7 @@ var awsPartition = partition{ "codedeploy": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -961,6 +1291,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -1010,24 +1341,76 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "codepipeline-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "codepipeline-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "codepipeline-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "codepipeline-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + 
"fips-us-west-2": endpoint{ + Hostname: "codepipeline-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codestar": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "codestar": service{ + "codestar-connections": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -1046,9 +1429,27 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "cognito-identity-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "cognito-identity-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "cognito-identity-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "cognito-idp": service{ @@ -1063,9 +1464,27 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "cognito-idp-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "cognito-idp-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "cognito-idp-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "cognito-sync": service{ @@ -1089,15 +1508,36 @@ var awsPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "comprehend-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "comprehend-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + 
}, + }, + "fips-us-west-2": endpoint{ + Hostname: "comprehend-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "comprehendmedical": service{ @@ -1107,9 +1547,27 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "comprehendmedical-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "comprehendmedical-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "comprehendmedical-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "config": service{ @@ -1162,6 +1620,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-west-2": endpoint{}, }, @@ -1195,6 +1654,7 @@ var awsPartition = partition{ "datasync": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1204,9 +1664,16 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "datasync-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, "fips-us-east-1": endpoint{ Hostname: "datasync-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ @@ -1266,6 +1733,7 @@ var awsPartition = partition{ "directconnect": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1275,27 +1743,58 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "directconnect-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "directconnect-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "directconnect-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "directconnect-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "discovery": service{ Endpoints: endpoints{ - "eu-central-1": endpoint{}, - "us-west-2": endpoint{}, + "ap-northeast-1": 
endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "dms": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1303,17 +1802,24 @@ var awsPartition = partition{ "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "dms-fips": endpoint{ + Hostname: "dms-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "docdb": service{ @@ -1402,6 +1908,7 @@ var awsPartition = partition{ "ds": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1411,14 +1918,46 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "ds-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "ds-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "ds-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "ds-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "ds-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "dynamodb": service{ @@ -1426,6 +1965,7 @@ var awsPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1441,6 +1981,7 @@ var awsPartition = partition{ }, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -1483,11 +2024,67 @@ var awsPartition = partition{ }, }, }, + "ebs": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + 
"ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "ebs-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "ebs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "ebs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "ebs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "ebs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "ec2": service{ Defaults: endpoint{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1497,15 +2094,46 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "ec2-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "ec2-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "ec2-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "ec2-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "ec2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "ec2metadata": service{ @@ -1522,6 +2150,7 @@ var awsPartition = partition{ "ecs": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1531,20 +2160,48 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "ecs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "ecs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + 
}, + }, + "fips-us-west-1": endpoint{ + Hostname: "ecs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "ecs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "elasticache": service{ - + "eks": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1554,15 +2211,34 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "fips": endpoint{ - Hostname: "elasticache-fips.us-west-1.amazonaws.com", + "fips-us-east-1": endpoint{ + Hostname: "fips.eks.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "fips.eks.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "fips.eks.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, }, + "fips-us-west-2": endpoint{ + Hostname: "fips.eks.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, @@ -1571,9 +2247,10 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "elasticbeanstalk": service{ + "elasticache": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1583,20 +2260,28 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips": endpoint{ + Hostname: "elasticache-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "elasticfilesystem": service{ + "elasticbeanstalk": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1606,22 +2291,46 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "elasticbeanstalk-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "elasticbeanstalk-fips.us-east-2.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "elasticbeanstalk-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "elasticbeanstalk-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "elasticloadbalancing": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, + "elasticfilesystem": service{ + Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1631,16 +2340,188 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, + "fips-af-south-1": endpoint{ + Hostname: "elasticfilesystem-fips.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + "fips-ap-east-1": endpoint{ + Hostname: "elasticfilesystem-fips.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + "fips-ap-northeast-1": endpoint{ + Hostname: "elasticfilesystem-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "fips-ap-northeast-2": endpoint{ + Hostname: "elasticfilesystem-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "fips-ap-south-1": endpoint{ + Hostname: "elasticfilesystem-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "fips-ap-southeast-1": endpoint{ + Hostname: "elasticfilesystem-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "fips-ap-southeast-2": endpoint{ + Hostname: "elasticfilesystem-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "fips-ca-central-1": endpoint{ + Hostname: "elasticfilesystem-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-eu-central-1": endpoint{ + Hostname: "elasticfilesystem-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "fips-eu-north-1": endpoint{ + Hostname: "elasticfilesystem-fips.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "fips-eu-south-1": endpoint{ + Hostname: "elasticfilesystem-fips.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + "fips-eu-west-1": endpoint{ + Hostname: "elasticfilesystem-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "fips-eu-west-2": endpoint{ + Hostname: "elasticfilesystem-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "fips-eu-west-3": endpoint{ + Hostname: "elasticfilesystem-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, 
+ "fips-me-south-1": endpoint{ + Hostname: "elasticfilesystem-fips.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + "fips-sa-east-1": endpoint{ + Hostname: "elasticfilesystem-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "elasticfilesystem-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "elasticfilesystem-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "elasticfilesystem-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "elasticfilesystem-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "elasticloadbalancing-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "elasticloadbalancing-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "elasticloadbalancing-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "elasticloadbalancing-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, }, "elasticmapreduce": service{ Defaults: endpoint{ @@ -1648,6 +2529,7 @@ var awsPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1659,9 +2541,40 @@ var awsPartition = partition{ SSLCommonName: "{service}.{region}.{dnsSuffix}", }, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "elasticmapreduce-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "elasticmapreduce-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "elasticmapreduce-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: 
"elasticmapreduce-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "elasticmapreduce-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{ @@ -1709,6 +2622,7 @@ var awsPartition = partition{ "es": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1718,6 +2632,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -1738,6 +2653,7 @@ var awsPartition = partition{ "events": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1747,20 +2663,46 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "events-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "events-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "events-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "events-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "firehose": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1770,15 +2712,40 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "firehose-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "firehose-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "firehose-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "firehose-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "fms": service{ @@ -1786,6 +2753,8 @@ var 
awsPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1794,21 +2763,117 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-ap-northeast-1": endpoint{ + Hostname: "fms-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "fips-ap-northeast-2": endpoint{ + Hostname: "fms-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "fips-ap-south-1": endpoint{ + Hostname: "fms-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "fips-ap-southeast-1": endpoint{ + Hostname: "fms-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "fips-ap-southeast-2": endpoint{ + Hostname: "fms-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "fips-ca-central-1": endpoint{ + Hostname: "fms-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-eu-central-1": endpoint{ + Hostname: "fms-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "fips-eu-west-1": endpoint{ + Hostname: "fms-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "fips-eu-west-2": endpoint{ + Hostname: "fms-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "fips-eu-west-3": endpoint{ + Hostname: "fms-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "fips-sa-east-1": endpoint{ + Hostname: "fms-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "fms-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "fms-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "fms-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "fms-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "forecast": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1819,7 +2884,11 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, + 
"ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1829,13 +2898,19 @@ var awsPartition = partition{ "fsx": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -1866,6 +2941,7 @@ var awsPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1874,16 +2950,47 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "glacier-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "glacier-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "glacier-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "glacier-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "glacier-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "glue": service{ @@ -1898,15 +3005,40 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "glue-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "glue-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "glue-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "glue-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, 
}, "greengrass": service{ @@ -1931,7 +3063,22 @@ var awsPartition = partition{ "groundstation": service{ Endpoints: endpoints{ - "eu-north-1": endpoint{}, + "af-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "fips-us-east-2": endpoint{ + Hostname: "groundstation-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "groundstation-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, "me-south-1": endpoint{}, "us-east-2": endpoint{}, "us-west-2": endpoint{}, @@ -1943,6 +3090,7 @@ var awsPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1952,6 +3100,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -1990,7 +3139,18 @@ var awsPartition = partition{ "health": service{ Endpoints: endpoints{ - "us-east-1": endpoint{}, + "fips-us-east-2": endpoint{ + Hostname: "health-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + }, + }, + "honeycode": service{ + + Endpoints: endpoints{ + "us-west-2": endpoint{}, }, }, "iam": service{ @@ -2004,6 +3164,30 @@ var awsPartition = partition{ Region: "us-east-1", }, }, + "iam-fips": endpoint{ + Hostname: "iam-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "identitystore": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "importexport": service{ @@ -2032,10 +3216,34 @@ var awsPartition = partition{ "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "inspector-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "inspector-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "inspector-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "inspector-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "iot": service{ @@ -2069,6 +3277,7 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, @@ -2159,6 +3368,7 @@ var awsPartition = partition{ "iotsecuredtunneling": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, 
"ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2170,6 +3380,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -2204,9 +3415,11 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -2217,6 +3430,7 @@ var awsPartition = partition{ "kinesis": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -2226,20 +3440,46 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "kinesis-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "kinesis-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "kinesis-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "kinesis-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "kinesisanalytics": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2248,12 +3488,15 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -2280,6 +3523,7 @@ var awsPartition = partition{ "kms": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -2289,6 +3533,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -2300,9 +3545,106 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "lakeformation": service{ + "lakeformation": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": 
endpoint{ + Hostname: "lakeformation-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "lakeformation-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "lakeformation-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "lakeformation-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "lambda-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "lambda-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "lambda-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "lambda-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "license-manager": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2310,18 +3652,46 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "license-manager-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "license-manager-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "license-manager-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "license-manager-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "lambda": service{ + "lightsail": service{ Endpoints: endpoints{ - "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2329,21 
+3699,18 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, - "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "license-manager": service{ + "logs": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -2353,36 +3720,69 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "logs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "logs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "logs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "logs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "lightsail": service{ + "machinelearning": service{ Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, }, }, - "logs": service{ + "macie": service{ + + Endpoints: endpoints{ + "fips-us-east-1": endpoint{ + Hostname: "macie-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "macie-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "macie2": service{ Endpoints: endpoints{ "ap-east-1": endpoint{}, @@ -2397,19 +3797,46 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "macie2-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "macie2-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "macie2-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: 
"macie2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "machinelearning": service{ + "managedblockchain": service{ Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, }, }, "marketplacecommerceanalytics": service{ @@ -2421,6 +3848,7 @@ var awsPartition = partition{ "mediaconnect": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2448,14 +3876,45 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "mediaconvert-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "mediaconvert-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "mediaconvert-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "mediaconvert-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "mediaconvert-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "medialive": service{ @@ -2471,10 +3930,28 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "medialive-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "medialive-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "medialive-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "mediapackage": service{ @@ -2486,6 +3963,7 @@ var awsPartition = partition{ "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -2504,6 +3982,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-west-2": endpoint{}, }, @@ -2515,6 +3994,7 @@ var awsPartition = partition{ }, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, 
"ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -2524,6 +4004,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -2538,8 +4019,13 @@ var awsPartition = partition{ "mgh": service{ Endpoints: endpoints{ - "eu-central-1": endpoint{}, - "us-west-2": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "mobileanalytics": service{ @@ -2555,8 +4041,12 @@ var awsPartition = partition{ }, }, Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-west-2": endpoint{}, }, @@ -2566,6 +4056,7 @@ var awsPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -2575,20 +4066,46 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "monitoring-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "monitoring-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "monitoring-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "monitoring-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "mq": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2597,6 +4114,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -2624,11 +4142,12 @@ var awsPartition = partition{ Region: "us-west-2", }, }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "mturk-requester": service{ @@ -2644,6 +4163,12 @@ var awsPartition = partition{ "neptune": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{ + Hostname: "rds.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, "ap-northeast-1": endpoint{ Hostname: "rds.ap-northeast-1.amazonaws.com", CredentialScope: 
credentialScope{ @@ -2704,12 +4229,24 @@ var awsPartition = partition{ Region: "eu-west-2", }, }, + "eu-west-3": endpoint{ + Hostname: "rds.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, "me-south-1": endpoint{ Hostname: "rds.me-south-1.amazonaws.com", CredentialScope: credentialScope{ Region: "me-south-1", }, }, + "sa-east-1": endpoint{ + Hostname: "rds.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, "us-east-1": endpoint{ Hostname: "rds.us-east-1.amazonaws.com", CredentialScope: credentialScope{ @@ -2722,6 +4259,12 @@ var awsPartition = partition{ Region: "us-east-2", }, }, + "us-west-1": endpoint{ + Hostname: "rds.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, "us-west-2": endpoint{ Hostname: "rds.us-west-2.amazonaws.com", CredentialScope: credentialScope{ @@ -2733,6 +4276,24 @@ var awsPartition = partition{ "oidc": service{ Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "oidc.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "oidc.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{ + Hostname: "oidc.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, "ap-southeast-1": endpoint{ Hostname: "oidc.ap-southeast-1.amazonaws.com", CredentialScope: credentialScope{ @@ -2757,6 +4318,12 @@ var awsPartition = partition{ Region: "eu-central-1", }, }, + "eu-north-1": endpoint{ + Hostname: "oidc.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, "eu-west-1": endpoint{ Hostname: "oidc.eu-west-1.amazonaws.com", CredentialScope: credentialScope{ @@ -2834,20 +4401,67 @@ var awsPartition = partition{ Region: "us-east-1", }, }, + "fips-aws-global": endpoint{ + Hostname: "organizations-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, }, }, "outposts": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "outposts-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "outposts-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "outposts-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "outposts-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "outposts-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": 
endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "pinpoint": service{ @@ -2857,10 +4471,15 @@ var awsPartition = partition{ }, }, Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "fips-us-east-1": endpoint{ Hostname: "pinpoint-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ @@ -2890,6 +4509,7 @@ var awsPartition = partition{ "polly": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2901,11 +4521,36 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "polly-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "polly-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "polly-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "polly-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "portal.sso": service{ @@ -2996,6 +4641,7 @@ var awsPartition = partition{ "ram": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3005,6 +4651,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -3019,6 +4666,7 @@ var awsPartition = partition{ "rds": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3028,11 +4676,42 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, + "rds-fips.ca-central-1": endpoint{ + Hostname: "rds-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "rds-fips.us-east-1": endpoint{ + Hostname: "rds-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "rds-fips.us-east-2": endpoint{ + Hostname: "rds-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "rds-fips.us-west-1": endpoint{ + Hostname: "rds-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "rds-fips.us-west-2": endpoint{ + Hostname: "rds-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, "us-east-1": 
endpoint{ SSLCommonName: "{service}.{dnsSuffix}", }, @@ -3044,6 +4723,7 @@ var awsPartition = partition{ "redshift": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3053,15 +4733,46 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "redshift-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "redshift-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "redshift-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "redshift-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "redshift-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "rekognition": service{ @@ -3072,18 +4783,50 @@ var awsPartition = partition{ "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "rekognition-fips.ca-central-1": endpoint{ + Hostname: "rekognition-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "rekognition-fips.us-east-1": endpoint{ + Hostname: "rekognition-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "rekognition-fips.us-east-2": endpoint{ + Hostname: "rekognition-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "rekognition-fips.us-west-1": endpoint{ + Hostname: "rekognition-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "rekognition-fips.us-west-2": endpoint{ + Hostname: "rekognition-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "resource-groups": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3093,6 +4836,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -3151,6 +4895,12 @@ var awsPartition = partition{ Region: "us-east-1", }, }, + "fips-aws-global": endpoint{ + Hostname: "route53-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + 
}, + }, }, }, "route53domains": service{ @@ -3164,6 +4914,7 @@ var awsPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3173,9 +4924,12 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -3189,8 +4943,12 @@ var awsPartition = partition{ }, }, Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-west-2": endpoint{}, }, @@ -3198,6 +4956,7 @@ var awsPartition = partition{ "runtime.sagemaker": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3207,6 +4966,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -3253,7 +5013,8 @@ var awsPartition = partition{ DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", }, Endpoints: endpoints{ - "ap-east-1": endpoint{}, + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{ Hostname: "s3.ap-northeast-1.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, @@ -3278,6 +5039,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{ Hostname: "s3.eu-west-1.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, @@ -3362,6 +5124,13 @@ var awsPartition = partition{ Region: "ca-central-1", }, }, + "ca-central-1-fips": endpoint{ + Hostname: "s3-control-fips.ca-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, "eu-central-1": endpoint{ Hostname: "s3-control.eu-central-1.amazonaws.com", SignatureVersions: []string{"s3v4"}, @@ -3478,10 +5247,22 @@ var awsPartition = partition{ "schemas": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -3506,6 +5287,7 @@ var awsPartition = partition{ "secretsmanager": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3515,6 +5297,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -3553,6 +5336,7 @@ var awsPartition = partition{ "securityhub": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, 
"ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3562,15 +5346,40 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "securityhub-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "securityhub-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "securityhub-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "securityhub-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "serverlessrepo": service{ @@ -3637,6 +5446,8 @@ var awsPartition = partition{ "servicecatalog": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -3645,9 +5456,11 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-1-fips": endpoint{ @@ -3696,6 +5509,39 @@ var awsPartition = partition{ "eu-west-3": endpoint{}, "me-south-1": endpoint{}, "sa-east-1": endpoint{}, + "servicediscovery-fips": endpoint{ + Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "servicequotas": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -3717,18 +5563,31 @@ var awsPartition = partition{ }, }, "shield": service{ - IsRegionalized: boxedFalse, + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, Defaults: endpoint{ SSLCommonName: "shield.us-east-1.amazonaws.com", Protocols: []string{"https"}, }, Endpoints: endpoints{ - "us-east-1": endpoint{}, + "aws-global": endpoint{ + Hostname: "shield.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-aws-global": endpoint{ + Hostname: "shield-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"us-east-1", + }, + }, }, }, "sms": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3738,20 +5597,46 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "sms-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "sms-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "sms-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "sms-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "snowball": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -3759,14 +5644,112 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-ap-northeast-1": endpoint{ + Hostname: "snowball-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "fips-ap-northeast-2": endpoint{ + Hostname: "snowball-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "fips-ap-northeast-3": endpoint{ + Hostname: "snowball-fips.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, + "fips-ap-south-1": endpoint{ + Hostname: "snowball-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "fips-ap-southeast-1": endpoint{ + Hostname: "snowball-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "fips-ap-southeast-2": endpoint{ + Hostname: "snowball-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "fips-ca-central-1": endpoint{ + Hostname: "snowball-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-eu-central-1": endpoint{ + Hostname: "snowball-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "fips-eu-west-1": endpoint{ + Hostname: "snowball-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "fips-eu-west-2": endpoint{ + Hostname: "snowball-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "fips-eu-west-3": endpoint{ + 
Hostname: "snowball-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "fips-sa-east-1": endpoint{ + Hostname: "snowball-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "snowball-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "snowball-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "snowball-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "snowball-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "sns": service{ @@ -3774,6 +5757,7 @@ var awsPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3783,15 +5767,40 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "sns-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "sns-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "sns-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "sns-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "sqs": service{ @@ -3800,6 +5809,7 @@ var awsPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3809,6 +5819,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -3849,6 +5860,7 @@ var awsPartition = partition{ "ssm": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3858,20 +5870,52 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: 
"ssm-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "ssm-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "ssm-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "ssm-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "ssm-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "states": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3881,20 +5925,46 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "states-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "states-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "states-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "states-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "storagegateway": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3904,15 +5974,22 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips": endpoint{ + Hostname: "storagegateway-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "streams.dynamodb": service{ @@ -3983,6 +6060,7 @@ var awsPartition = partition{ PartitionEndpoint: "aws-global", Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3998,6 +6076,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": 
endpoint{}, @@ -4048,6 +6127,7 @@ var awsPartition = partition{ "swf": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -4057,20 +6137,46 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "swf-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "swf-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "swf-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "swf-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "tagging": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -4080,6 +6186,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -4107,12 +6214,36 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "fips.transcribe.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "fips.transcribe.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "fips.transcribe.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "fips.transcribe.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "transcribestreaming": service{ @@ -4120,7 +6251,9 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-2": endpoint{}, @@ -4140,11 +6273,41 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "transfer-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + 
Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "transfer-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "transfer-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "transfer-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "transfer-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "translate": service{ @@ -4171,55 +6334,285 @@ var awsPartition = partition{ Region: "us-east-1", }, }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "translate-fips.us-east-2.amazonaws.com", + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "translate-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "translate-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "waf": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-fips": endpoint{ + Hostname: "waf-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "aws-global": endpoint{ + Hostname: "waf.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "waf-regional": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{ + Hostname: "waf-regional.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + "ap-east-1": endpoint{ + Hostname: "waf-regional.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + "ap-northeast-1": endpoint{ + Hostname: "waf-regional.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "waf-regional.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{ + Hostname: "waf-regional.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "waf-regional.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "waf-regional.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "waf-regional.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "waf-regional.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-north-1": endpoint{ + Hostname: "waf-regional.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "eu-south-1": endpoint{ + Hostname: "waf-regional.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + "eu-west-1": endpoint{ + 
Hostname: "waf-regional.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "waf-regional.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "eu-west-3": endpoint{ + Hostname: "waf-regional.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "fips-af-south-1": endpoint{ + Hostname: "waf-regional-fips.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + "fips-ap-east-1": endpoint{ + Hostname: "waf-regional-fips.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + "fips-ap-northeast-1": endpoint{ + Hostname: "waf-regional-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "fips-ap-northeast-2": endpoint{ + Hostname: "waf-regional-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "fips-ap-south-1": endpoint{ + Hostname: "waf-regional-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "fips-ap-southeast-1": endpoint{ + Hostname: "waf-regional-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "fips-ap-southeast-2": endpoint{ + Hostname: "waf-regional-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "fips-ca-central-1": endpoint{ + Hostname: "waf-regional-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-eu-central-1": endpoint{ + Hostname: "waf-regional-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "fips-eu-north-1": endpoint{ + Hostname: "waf-regional-fips.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "fips-eu-south-1": endpoint{ + Hostname: "waf-regional-fips.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + "fips-eu-west-1": endpoint{ + Hostname: "waf-regional-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "fips-eu-west-2": endpoint{ + Hostname: "waf-regional-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "fips-eu-west-3": endpoint{ + Hostname: "waf-regional-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "fips-me-south-1": endpoint{ + Hostname: "waf-regional-fips.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + "fips-sa-east-1": endpoint{ + Hostname: "waf-regional-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "waf-regional-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "waf-regional-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, }, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "translate-fips.us-west-2.amazonaws.com", + "fips-us-west-1": endpoint{ + Hostname: "waf-regional-fips.us-west-1.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "waf-regional-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, }, - }, - }, - "waf": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "waf.amazonaws.com", + "me-south-1": endpoint{ + Hostname: "waf-regional.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + "sa-east-1": endpoint{ + Hostname: "waf-regional.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "waf-regional.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, }, - }, - }, - "waf-regional": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "us-east-2": endpoint{ + Hostname: "waf-regional.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{ + Hostname: "waf-regional.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{ + Hostname: "waf-regional.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, "workdocs": service{ @@ -4229,8 +6622,20 @@ var awsPartition = partition{ "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "workdocs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "workdocs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "workmail": service{ @@ -4254,14 +6659,27 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "workspaces-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "workspaces-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "xray": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -4271,6 +6689,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -4314,6 +6733,20 @@ var awscnPartition = partition{ }, }, Services: services{ + "access-analyzer": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + 
"cn-northwest-1": endpoint{}, + }, + }, + "acm": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "api.ecr": service{ Endpoints: endpoints{ @@ -4331,6 +6764,13 @@ var awscnPartition = partition{ }, }, }, + "api.sagemaker": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "apigateway": service{ Endpoints: endpoints{ @@ -4347,6 +6787,19 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "appsync": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "athena": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "autoscaling": service{ Defaults: endpoint{ Protocols: []string{"http", "https"}, @@ -4356,6 +6809,22 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "autoscaling-plans": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "backup": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "batch": service{ Endpoints: endpoints{ @@ -4363,6 +6832,32 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "budgets": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "budgets.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "ce": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "ce.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, "cloudformation": service{ Endpoints: endpoints{ @@ -4398,6 +6893,13 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "codecommit": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "codedeploy": service{ Endpoints: endpoints{ @@ -4418,6 +6920,12 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "cur": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, "dax": service{ Endpoints: endpoints{ @@ -4454,6 +6962,13 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "ebs": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "ec2": service{ Defaults: endpoint{ Protocols: []string{"http", "https"}, @@ -4481,6 +6996,15 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "eks": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "elasticache": service{ Endpoints: endpoints{ @@ -4495,6 +7019,25 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "elasticfilesystem": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + "fips-cn-north-1": endpoint{ + Hostname: "elasticfilesystem-fips.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + "fips-cn-northwest-1": endpoint{ + Hostname: "elasticfilesystem-fips.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + 
}, + }, "elasticloadbalancing": service{ Defaults: endpoint{ Protocols: []string{"https"}, @@ -4534,6 +7077,13 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "fsx": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "gamelift": service{ Endpoints: endpoints{ @@ -4552,6 +7102,7 @@ var awscnPartition = partition{ "glue": service{ Endpoints: endpoints{ + "cn-north-1": endpoint{}, "cn-northwest-1": endpoint{}, }, }, @@ -4595,6 +7146,43 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "iotanalytics": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "iotevents": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "ioteventsdata": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{ + Hostname: "data.iotevents.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, + "iotsecuredtunneling": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "kafka": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "kinesis": service{ Endpoints: endpoints{ @@ -4602,6 +7190,13 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "kinesisanalytics": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "kms": service{ Endpoints: endpoints{ @@ -4609,6 +7204,12 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "lakeformation": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, "lambda": service{ Endpoints: endpoints{ @@ -4650,6 +7251,36 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "neptune": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{ + Hostname: "rds.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "organizations": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "organizations.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + "fips-aws-cn-global": endpoint{ + Hostname: "organizations.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, "polly": service{ Endpoints: endpoints{ @@ -4670,10 +7301,40 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "resource-groups": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "route53.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "runtime.sagemaker": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "s3": service{ Defaults: endpoint{ Protocols: []string{"http", "https"}, SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", }, Endpoints: endpoints{ "cn-north-1": endpoint{}, @@ -4684,6 +7345,9 @@ var awscnPartition = partition{ Defaults: endpoint{ Protocols: []string{"https"}, SignatureVersions: 
[]string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", }, Endpoints: endpoints{ "cn-north-1": endpoint{ @@ -4709,6 +7373,33 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "securityhub": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "serverlessrepo": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{ + Protocols: []string{"https"}, + }, + "cn-northwest-1": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "servicediscovery": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "sms": service{ Endpoints: endpoints{ @@ -4719,7 +7410,20 @@ var awscnPartition = partition{ "snowball": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + "fips-cn-north-1": endpoint{ + Hostname: "snowball-fips.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + "fips-cn-northwest-1": endpoint{ + Hostname: "snowball-fips.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, }, }, "sns": service{ @@ -4758,7 +7462,8 @@ var awscnPartition = partition{ "storagegateway": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "streams.dynamodb": service{ @@ -4866,15 +7571,42 @@ var awsusgovPartition = partition{ Description: "AWS GovCloud (US-East)", }, "us-gov-west-1": region{ - Description: "AWS GovCloud (US)", + Description: "AWS GovCloud (US-West)", }, }, Services: services{ + "access-analyzer": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "access-analyzer.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "access-analyzer.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, "acm": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-gov-east-1": endpoint{ + Hostname: "acm.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "acm.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "acm-pca": service{ @@ -4882,6 +7614,18 @@ var awsusgovPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "acm-pca.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "acm-pca.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -4889,6 +7633,30 @@ var awsusgovPartition = partition{ "api.ecr": service{ Endpoints: endpoints{ + "fips-dkr-us-gov-east-1": endpoint{ + Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-dkr-us-gov-west-1": endpoint{ + Hostname: "ecr-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "fips-us-gov-east-1": endpoint{ + 
Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "ecr-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{ Hostname: "api.ecr.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ @@ -4907,6 +7675,18 @@ var awsusgovPartition = partition{ Endpoints: endpoints{ "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1-fips-secondary": endpoint{ + Hostname: "api.sagemaker.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "apigateway": service{ @@ -4925,8 +7705,12 @@ var awsusgovPartition = partition{ }, }, Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-gov-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, }, }, "appstream2": service{ @@ -4949,6 +7733,18 @@ var awsusgovPartition = partition{ "athena": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "athena-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "athena-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -4956,25 +7752,76 @@ var awsusgovPartition = partition{ "autoscaling": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, + "us-gov-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, "us-gov-west-1": endpoint{ Protocols: []string{"http", "https"}, }, }, }, - "clouddirectory": service{ + "autoscaling-plans": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "backup": service{ Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, - "cloudformation": service{ + "batch": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "batch.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "batch.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, + "clouddirectory": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "cloudformation.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "cloudformation.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, "cloudhsm": service{ Endpoints: endpoints{ @@ -4995,20 +7842,48 @@ var awsusgovPartition = partition{ "cloudtrail": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-gov-east-1": endpoint{ + 
Hostname: "cloudtrail.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "cloudtrail.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "codebuild": service{ Endpoints: endpoints{ "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "codebuild-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "codebuild-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "codecommit": service{ Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "codecommit-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -5032,11 +7907,65 @@ var awsusgovPartition = partition{ }, }, }, + "codepipeline": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "codepipeline-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "cognito-identity": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "cognito-identity-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "cognito-idp": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "cognito-idp-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, "comprehend": service{ Defaults: endpoint{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "comprehend-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "comprehendmedical": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "comprehendmedical-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-west-1": endpoint{}, }, }, @@ -5050,6 +7979,12 @@ var awsusgovPartition = partition{ "datasync": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "datasync-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, "fips-us-gov-west-1": endpoint{ Hostname: "datasync-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ @@ -5063,20 +7998,59 @@ var awsusgovPartition = partition{ "directconnect": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-gov-east-1": endpoint{ + Hostname: "directconnect.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "directconnect.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "dms": service{ Endpoints: endpoints{ + "dms-fips": endpoint{ + Hostname: "dms.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, + "docdb": 
service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, "ds": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "ds-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "ds-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -5100,13 +8074,30 @@ var awsusgovPartition = partition{ }, }, }, - "ec2": service{ + "ebs": service{ Endpoints: endpoints{ "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, + "ec2": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "ec2.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "ec2.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, "ec2metadata": service{ PartitionEndpoint: "aws-global", IsRegionalized: boxedFalse, @@ -5121,6 +8112,39 @@ var awsusgovPartition = partition{ "ecs": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "ecs-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "ecs-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "eks": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "eks.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "eks.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -5129,7 +8153,7 @@ var awsusgovPartition = partition{ Endpoints: endpoints{ "fips": endpoint{ - Hostname: "elasticache-fips.us-gov-west-1.amazonaws.com", + Hostname: "elasticache.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, @@ -5141,19 +8165,54 @@ var awsusgovPartition = partition{ "elasticbeanstalk": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-gov-east-1": endpoint{ + Hostname: "elasticbeanstalk.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "elasticbeanstalk.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "elasticfilesystem": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "elasticfilesystem-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "elasticfilesystem-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, "elasticloadbalancing": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: 
"elasticloadbalancing.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "elasticloadbalancing.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{ Protocols: []string{"http", "https"}, @@ -5163,12 +8222,36 @@ var awsusgovPartition = partition{ "elasticmapreduce": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "elasticmapreduce.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "elasticmapreduce.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{ Protocols: []string{"https"}, }, }, }, + "email": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "email-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, "es": service{ Endpoints: endpoints{ @@ -5185,13 +8268,35 @@ var awsusgovPartition = partition{ "events": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-gov-east-1": endpoint{ + Hostname: "events.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "events.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "firehose": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "firehose-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "firehose-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -5199,15 +8304,36 @@ var awsusgovPartition = partition{ "glacier": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, + "us-gov-east-1": endpoint{ + Hostname: "glacier.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, "us-gov-west-1": endpoint{ + Hostname: "glacier.us-gov-west-1.amazonaws.com", Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, }, }, }, "glue": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "glue-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "glue-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -5218,7 +8344,18 @@ var awsusgovPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "dataplane-us-gov-west-1": endpoint{ + Hostname: "greengrass-ats.iot.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "greengrass.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "guardduty": service{ @@ -5227,13 +8364,25 @@ var 
awsusgovPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "guardduty.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "health": service{ Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "fips-us-gov-west-1": endpoint{ + Hostname: "health-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "iam": service{ @@ -5247,11 +8396,29 @@ var awsusgovPartition = partition{ Region: "us-gov-west-1", }, }, + "iam-govcloud-fips": endpoint{ + Hostname: "iam.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "inspector": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "inspector-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "inspector-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -5263,11 +8430,43 @@ var awsusgovPartition = partition{ }, }, Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "iotsecuredtunneling": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "kafka": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, "kinesis": service{ + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "kinesis.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "kinesis.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "kinesisanalytics": service{ + Endpoints: endpoints{ "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, @@ -5289,6 +8488,18 @@ var awsusgovPartition = partition{ "lambda": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "lambda-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "lambda-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -5296,6 +8507,18 @@ var awsusgovPartition = partition{ "license-manager": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "license-manager-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "license-manager-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -5303,14 +8526,29 @@ var awsusgovPartition = partition{ "logs": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-gov-east-1": endpoint{ + Hostname: "logs.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "logs.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"us-gov-west-1", + }, + }, }, }, "mediaconvert": service{ Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "us-gov-west-1": endpoint{ + Hostname: "mediaconvert.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "metering.marketplace": service{ @@ -5327,6 +8565,18 @@ var awsusgovPartition = partition{ "monitoring": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "monitoring.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "monitoring.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -5359,11 +8609,61 @@ var awsusgovPartition = partition{ Region: "us-gov-west-1", }, }, + "fips-aws-us-gov-global": endpoint{ + Hostname: "organizations.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "outposts": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "outposts.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "outposts.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "pinpoint": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "mobiletargeting", + }, + }, + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "pinpoint-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "pinpoint.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "polly": service{ Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "polly-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-west-1": endpoint{}, }, }, @@ -5377,6 +8677,18 @@ var awsusgovPartition = partition{ "rds": service{ Endpoints: endpoints{ + "rds.us-gov-east-1": endpoint{ + Hostname: "rds.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "rds.us-gov-west-1": endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -5384,13 +8696,29 @@ var awsusgovPartition = partition{ "redshift": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-gov-east-1": endpoint{ + Hostname: "redshift.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "redshift.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "rekognition": service{ Endpoints: endpoints{ + "rekognition-fips.us-gov-west-1": endpoint{ + Hostname: "rekognition-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-west-1": endpoint{}, }, }, @@ -5424,6 +8752,12 @@ var awsusgovPartition = partition{ Region: "us-gov-west-1", }, }, + "fips-aws-us-gov-global": endpoint{ + Hostname: "route53.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"us-gov-west-1", + }, + }, }, }, "route53resolver": service{ @@ -5442,10 +8776,13 @@ var awsusgovPartition = partition{ "s3": service{ Defaults: endpoint{ SignatureVersions: []string{"s3", "s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", }, Endpoints: endpoints{ "fips-us-gov-west-1": endpoint{ - Hostname: "s3-fips-us-gov-west-1.amazonaws.com", + Hostname: "s3-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, @@ -5464,6 +8801,9 @@ var awsusgovPartition = partition{ Defaults: endpoint{ Protocols: []string{"https"}, SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", }, Endpoints: endpoints{ "us-gov-east-1": endpoint{ @@ -5515,22 +8855,56 @@ var awsusgovPartition = partition{ }, }, }, + "securityhub": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "securityhub-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "securityhub-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, "serverlessrepo": service{ Defaults: endpoint{ Protocols: []string{"https"}, }, Endpoints: endpoints{ "us-gov-east-1": endpoint{ + Hostname: "serverlessrepo.us-gov-east-1.amazonaws.com", Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, }, "us-gov-west-1": endpoint{ + Hostname: "serverlessrepo.us-gov-west-1.amazonaws.com", Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, }, }, }, "servicecatalog": service{ Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "servicecatalog-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, "us-gov-west-1": endpoint{}, "us-gov-west-1-fips": endpoint{ Hostname: "servicecatalog-fips.us-gov-west-1.amazonaws.com", @@ -5543,6 +8917,18 @@ var awsusgovPartition = partition{ "sms": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "sms-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "sms-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -5550,6 +8936,18 @@ var awsusgovPartition = partition{ "snowball": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "snowball-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "snowball-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -5557,25 +8955,55 @@ var awsusgovPartition = partition{ "sns": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, + "us-gov-east-1": endpoint{ + Hostname: "sns.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, "us-gov-west-1": endpoint{ + Hostname: "sns.us-gov-west-1.amazonaws.com", Protocols: []string{"http", "https"}, + CredentialScope: 
credentialScope{ + Region: "us-gov-west-1", + }, }, }, }, "sqs": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, + "us-gov-east-1": endpoint{ + Hostname: "sqs.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, "us-gov-west-1": endpoint{ + Hostname: "sqs.us-gov-west-1.amazonaws.com", SSLCommonName: "{region}.queue.{dnsSuffix}", Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, }, }, }, "ssm": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "ssm.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "ssm.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -5583,6 +9011,18 @@ var awsusgovPartition = partition{ "states": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "states-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "states.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -5590,6 +9030,13 @@ var awsusgovPartition = partition{ "storagegateway": service{ Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "storagegateway-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, @@ -5620,14 +9067,54 @@ var awsusgovPartition = partition{ Endpoints: endpoints{ "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "sts.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "sts.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "support": service{ + PartitionEndpoint: "aws-us-gov-global", + + Endpoints: endpoints{ + "aws-us-gov-global": endpoint{ + Hostname: "support.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "support.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "swf": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-gov-east-1": endpoint{ + Hostname: "swf.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "swf.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "tagging": service{ @@ -5642,6 +9129,38 @@ var awsusgovPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "fips.transcribe.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "fips.transcribe.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "transfer": 
service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "transfer-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "transfer-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, @@ -5662,12 +9181,36 @@ var awsusgovPartition = partition{ "waf-regional": service{ Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "fips-us-gov-west-1": endpoint{ + Hostname: "waf-regional-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "waf-regional.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "workspaces": service{ Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "workspaces-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "xray": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, @@ -5757,6 +9300,14 @@ var awsisoPartition = partition{ "us-iso-east-1": endpoint{}, }, }, + "comprehend": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, "config": service{ Endpoints: endpoints{ @@ -5778,6 +9329,12 @@ var awsisoPartition = partition{ "dms": service{ Endpoints: endpoints{ + "dms-fips": endpoint{ + Hostname: "dms.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, "us-iso-east-1": endpoint{}, }, }, @@ -5840,6 +9397,12 @@ var awsisoPartition = partition{ }, }, }, + "es": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, "events": service{ Endpoints: endpoints{ @@ -6016,6 +9579,20 @@ var awsisoPartition = partition{ "us-iso-east-1": endpoint{}, }, }, + "transcribe": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "transcribestreaming": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, "workspaces": service{ Endpoints: endpoints{ @@ -6051,6 +9628,17 @@ var awsisobPartition = partition{ }, }, Services: services{ + "api.ecr": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{ + Hostname: "api.ecr.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + }, + }, "application-autoscaling": service{ Defaults: endpoint{ Protocols: []string{"http", "https"}, @@ -6094,6 +9682,12 @@ var awsisobPartition = partition{ "dms": service{ Endpoints: endpoints{ + "dms-fips": endpoint{ + Hostname: "dms.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, "us-isob-east-1": endpoint{}, }, }, @@ -6124,6 +9718,12 @@ var awsisobPartition = partition{ }, }, }, + "ecs": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, "elasticache": service{ Endpoints: endpoints{ @@ -6193,6 +9793,18 @@ var awsisobPartition = partition{ "us-isob-east-1": endpoint{}, }, }, + "lambda": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "license-manager": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, "logs": service{ 
Endpoints: endpoints{ @@ -6249,6 +9861,12 @@ var awsisobPartition = partition{ "us-isob-east-1": endpoint{}, }, }, + "ssm": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, "states": service{ Endpoints: endpoints{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go index eb2ac83c9927..773613722f49 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go @@ -7,6 +7,8 @@ import ( "strings" ) +var regionValidationRegex = regexp.MustCompile(`^[[:alnum:]]([[:alnum:]\-]*[[:alnum:]])?$`) + type partitions []partition func (ps partitions) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { @@ -124,7 +126,7 @@ func (p partition) EndpointFor(service, region string, opts ...func(*Options)) ( defs := []endpoint{p.Defaults, s.Defaults} - return e.resolve(service, p.ID, region, p.DNSSuffix, defs, opt), nil + return e.resolve(service, p.ID, region, p.DNSSuffix, defs, opt) } func serviceList(ss services) []string { @@ -233,7 +235,7 @@ func getByPriority(s []string, p []string, def string) string { return s[0] } -func (e endpoint) resolve(service, partitionID, region, dnsSuffix string, defs []endpoint, opts Options) ResolvedEndpoint { +func (e endpoint) resolve(service, partitionID, region, dnsSuffix string, defs []endpoint, opts Options) (ResolvedEndpoint, error) { var merged endpoint for _, def := range defs { merged.mergeIn(def) @@ -260,6 +262,10 @@ func (e endpoint) resolve(service, partitionID, region, dnsSuffix string, defs [ region = signingRegion } + if !validateInputRegion(region) { + return ResolvedEndpoint{}, fmt.Errorf("invalid region identifier format provided") + } + u := strings.Replace(hostname, "{service}", service, 1) u = strings.Replace(u, "{region}", region, 1) u = strings.Replace(u, "{dnsSuffix}", dnsSuffix, 1) @@ -274,7 +280,7 @@ func (e endpoint) resolve(service, partitionID, region, dnsSuffix string, defs [ SigningName: signingName, SigningNameDerived: signingNameDerived, SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), - } + }, nil } func getEndpointScheme(protocols []string, disableSSL bool) string { @@ -339,3 +345,7 @@ const ( boxedFalse boxedTrue ) + +func validateInputRegion(region string) bool { + return regionValidationRegex.MatchString(region) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go index d9b37f4d32ad..2ba3c56c11fb 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go @@ -9,7 +9,8 @@ func isErrConnectionReset(err error) bool { return false } - if strings.Contains(err.Error(), "connection reset") || + if strings.Contains(err.Error(), "use of closed network connection") || + strings.Contains(err.Error(), "connection reset") || strings.Contains(err.Error(), "broken pipe") { return true } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go index 59da73ed408c..d597c6ead555 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go @@ -135,8 +135,6 @@ func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, err = awserr.New("InvalidEndpointURL", 
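The v3model.go hunk above is a behavioral change: endpoint resolution now validates region identifiers against `^[[:alnum:]]([[:alnum:]\-]*[[:alnum:]])?$` and returns an error instead of interpolating them into the `{service}.{region}.{dnsSuffix}` hostname template. A minimal sketch of what callers observe (the malformed region string is made up for illustration):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	resolver := endpoints.DefaultResolver()

	// A well-formed region resolves as before.
	ep, err := resolver.EndpointFor("sqs", "us-gov-west-1")
	fmt.Println(ep.URL, err) // https://sqs.us-gov-west-1.amazonaws.com <nil>

	// A malformed identifier now fails fast instead of being substituted
	// into the hostname template.
	_, err = resolver.EndpointFor("sqs", "us-east-1/../injected")
	fmt.Println(err) // invalid region identifier format provided
}
```

Propagating an error out of resolve is also why the EndpointFor call site in the hunk drops its trailing `, nil`.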
"invalid endpoint uri", err) } - SanitizeHostForHeader(httpReq) - r := &Request{ Config: cfg, ClientInfo: clientInfo, @@ -426,6 +424,8 @@ func (r *Request) Sign() error { return r.Error } + SanitizeHostForHeader(r.HTTPRequest) + r.Handlers.Sign.Run(r) return r.Error } @@ -639,6 +639,10 @@ func getHost(r *http.Request) string { return r.Host } + if r.URL == nil { + return "" + } + return r.URL.Host } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go index 1b61dec9c21b..752ae47f8459 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go @@ -92,6 +92,7 @@ var throttleCodes = map[string]struct{}{ "TooManyRequestsException": {}, // Lambda functions "PriorRequestNotComplete": {}, // Route53 "TransactionInProgressException": {}, + "EC2ThrottledException": {}, // EC2 } // credsExpiredCodes is a collection of error codes which signify the credentials diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go index cc64e24f1d56..fe6dac1f4764 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go @@ -3,6 +3,7 @@ package session import ( "fmt" "os" + "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" @@ -206,7 +207,14 @@ func credsFromAssumeRole(cfg aws.Config, sharedCfg.RoleARN, func(opt *stscreds.AssumeRoleProvider) { opt.RoleSessionName = sharedCfg.RoleSessionName - opt.Duration = sessOpts.AssumeRoleDuration + + if sessOpts.AssumeRoleDuration == 0 && + sharedCfg.AssumeRoleDuration != nil && + *sharedCfg.AssumeRoleDuration/time.Minute > 15 { + opt.Duration = *sharedCfg.AssumeRoleDuration + } else if sessOpts.AssumeRoleDuration != 0 { + opt.Duration = sessOpts.AssumeRoleDuration + } // Assume role with external ID if len(sharedCfg.ExternalID) > 0 { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go index 7ec66e7e589a..cc461bd3230d 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go @@ -241,5 +241,22 @@ over the AWS_CA_BUNDLE environment variable, and will be used if both are set. Setting a custom HTTPClient in the aws.Config options will override this setting. To use this option and custom HTTP client, the HTTP client needs to be provided when creating the session. Not the service client. + +The endpoint of the EC2 IMDS client can be configured via the environment +variable, AWS_EC2_METADATA_SERVICE_ENDPOINT when creating the client with a +Session. See Options.EC2IMDSEndpoint for more details. + + AWS_EC2_METADATA_SERVICE_ENDPOINT=http://169.254.169.254 + +If using an URL with an IPv6 address literal, the IPv6 address +component must be enclosed in square brackets. + + AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1] + +The custom EC2 IMDS endpoint can also be specified via the Session options. 
+ + sess, err := session.NewSessionWithOptions(session.Options{ + EC2IMDSEndpoint: "http://[::1]", + }) */ package session diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go index c1e0e9c9543d..d67c261d74f7 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go @@ -148,6 +148,11 @@ type envConfig struct { // // AWS_S3_USE_ARN_REGION=true S3UseARNRegion bool + + // Specifies the alternative endpoint to use for EC2 IMDS. + // + // AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1] + EC2IMDSEndpoint string } var ( @@ -211,6 +216,9 @@ var ( s3UseARNRegionEnvKey = []string{ "AWS_S3_USE_ARN_REGION", } + ec2IMDSEndpointEnvKey = []string{ + "AWS_EC2_METADATA_SERVICE_ENDPOINT", + } ) // loadEnvConfig retrieves the SDK's environment configuration. @@ -332,6 +340,8 @@ func envConfigLoad(enableSharedConfig bool) (envConfig, error) { } } + setFromEnvVal(&cfg.EC2IMDSEndpoint, ec2IMDSEndpointEnvKey) + return cfg, nil } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go index 0ff499605101..6430a7f15261 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -48,6 +48,8 @@ var ErrSharedConfigInvalidCredSource = awserr.New(ErrCodeSharedConfig, "credenti type Session struct { Config *aws.Config Handlers request.Handlers + + options Options } // New creates a new instance of the handlers merging in the provided configs @@ -99,7 +101,7 @@ func New(cfgs ...*aws.Config) *Session { return s } - s := deprecatedNewSession(cfgs...) + s := deprecatedNewSession(envCfg, cfgs...) if envErr != nil { msg := "failed to load env config" s.logDeprecatedNewSessionError(msg, envErr, cfgs) @@ -243,6 +245,23 @@ type Options struct { // function to initialize this value before changing the handlers to be // used by the SDK. Handlers request.Handlers + + // Allows specifying a custom endpoint to be used by the EC2 IMDS client + // when making requests to the EC2 IMDS API. The endpoint value must + // include the protocol prefix. + // + // If unset, the EC2 IMDS client will use its default endpoint. + // + // Can also be specified via the environment variable, + // AWS_EC2_METADATA_SERVICE_ENDPOINT. + // + // AWS_EC2_METADATA_SERVICE_ENDPOINT=http://169.254.169.254 + // + // If using an URL with an IPv6 address literal, the IPv6 address + // component must be enclosed in square brackets. + // + // AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1] + EC2IMDSEndpoint string } // NewSessionWithOptions returns a new Session created from SDK defaults, config files, @@ -329,7 +348,25 @@ func Must(sess *Session, err error) *Session { return sess } -func deprecatedNewSession(cfgs ...*aws.Config) *Session { +// Wraps the endpoint resolver with a resolver that will return a custom +// endpoint for EC2 IMDS.
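Besides the session.Options field, the env_config.go hunk above wires the same override through the AWS_EC2_METADATA_SERVICE_ENDPOINT variable. A sketch of the environment-variable path; the loopback endpoint is illustrative:

```go
package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Point the SDK at a local IMDS mock (address is illustrative).
	os.Setenv("AWS_EC2_METADATA_SERVICE_ENDPOINT", "http://[::1]")

	sess := session.Must(session.NewSession())

	// The ec2metadata client picks the override up through the wrapped
	// endpoint resolver instead of the default 169.254.169.254.
	svc := ec2metadata.New(sess)
	fmt.Println(svc.ClientInfo.Endpoint) // http://[::1]
}
```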
+func wrapEC2IMDSEndpoint(resolver endpoints.Resolver, endpoint string) endpoints.Resolver { + return endpoints.ResolverFunc( + func(service, region string, opts ...func(*endpoints.Options)) ( + endpoints.ResolvedEndpoint, error, + ) { + if service == ec2MetadataServiceID { + return endpoints.ResolvedEndpoint{ + URL: endpoint, + SigningName: ec2MetadataServiceID, + SigningRegion: region, + }, nil + } + return resolver.EndpointFor(service, region) + }) +} + +func deprecatedNewSession(envCfg envConfig, cfgs ...*aws.Config) *Session { cfg := defaults.Config() handlers := defaults.Handlers() @@ -341,6 +378,11 @@ func deprecatedNewSession(cfgs ...*aws.Config) *Session { // endpoints for service client configurations. cfg.EndpointResolver = endpoints.DefaultResolver() } + + if len(envCfg.EC2IMDSEndpoint) != 0 { + cfg.EndpointResolver = wrapEC2IMDSEndpoint(cfg.EndpointResolver, envCfg.EC2IMDSEndpoint) + } + cfg.Credentials = defaults.CredChain(cfg, handlers) // Reapply any passed in configs to override credentials if set @@ -349,6 +391,9 @@ func deprecatedNewSession(cfgs ...*aws.Config) *Session { s := &Session{ Config: cfg, Handlers: handlers, + options: Options{ + EC2IMDSEndpoint: envCfg.EC2IMDSEndpoint, + }, } initHandlers(s) @@ -418,6 +463,7 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, s := &Session{ Config: cfg, Handlers: handlers, + options: opts, } initHandlers(s) @@ -570,6 +616,14 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, endpoints.LegacyS3UsEast1Endpoint, }) + ec2IMDSEndpoint := sessOpts.EC2IMDSEndpoint + if len(ec2IMDSEndpoint) == 0 { + ec2IMDSEndpoint = envCfg.EC2IMDSEndpoint + } + if len(ec2IMDSEndpoint) != 0 { + cfg.EndpointResolver = wrapEC2IMDSEndpoint(cfg.EndpointResolver, ec2IMDSEndpoint) + } + // Configure credentials if not already set by the user when creating the // Session. 
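wrapEC2IMDSEndpoint is a plain decorator over endpoints.Resolver, and the same shape works for redirecting any single service ID. A sketch with a hypothetical helper; unlike the wrapper above, this version also forwards opts when delegating:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

// overrideService redirects one service ID to a fixed URL and delegates
// every other lookup to the base resolver.
func overrideService(base endpoints.Resolver, serviceID, url string) endpoints.Resolver {
	return endpoints.ResolverFunc(func(service, region string, opts ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
		if service == serviceID {
			return endpoints.ResolvedEndpoint{
				URL:           url,
				SigningName:   serviceID,
				SigningRegion: region,
			}, nil
		}
		return base.EndpointFor(service, region, opts...)
	})
}

func main() {
	r := overrideService(endpoints.DefaultResolver(), "ec2metadata", "http://[::1]")
	ep, _ := r.EndpointFor("ec2metadata", "us-east-1")
	fmt.Println(ep.URL) // http://[::1]
}
```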
if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil { @@ -627,6 +681,7 @@ func (s *Session) Copy(cfgs ...*aws.Config) *Session { newSession := &Session{ Config: s.Config.Copy(cfgs...), Handlers: s.Handlers.Copy(), + options: s.options, } initHandlers(newSession) @@ -665,6 +720,8 @@ func (s *Session) ClientConfig(service string, cfgs ...*aws.Config) client.Confi } } +const ec2MetadataServiceID = "ec2metadata" + func (s *Session) resolveEndpoint(service, region string, cfg *aws.Config) (endpoints.ResolvedEndpoint, error) { if ep := aws.StringValue(cfg.Endpoint); len(ep) != 0 { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go index a8ed88076007..680805a38add 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -2,6 +2,7 @@ package session import ( "fmt" + "time" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/credentials" @@ -16,12 +17,13 @@ const ( sessionTokenKey = `aws_session_token` // optional // Assume Role Credentials group - roleArnKey = `role_arn` // group required - sourceProfileKey = `source_profile` // group required (or credential_source) - credentialSourceKey = `credential_source` // group required (or source_profile) - externalIDKey = `external_id` // optional - mfaSerialKey = `mfa_serial` // optional - roleSessionNameKey = `role_session_name` // optional + roleArnKey = `role_arn` // group required + sourceProfileKey = `source_profile` // group required (or credential_source) + credentialSourceKey = `credential_source` // group required (or source_profile) + externalIDKey = `external_id` // optional + mfaSerialKey = `mfa_serial` // optional + roleSessionNameKey = `role_session_name` // optional + roleDurationSecondsKey = "duration_seconds" // optional // CSM options csmEnabledKey = `csm_enabled` @@ -73,10 +75,11 @@ type sharedConfig struct { CredentialProcess string WebIdentityTokenFile string - RoleARN string - RoleSessionName string - ExternalID string - MFASerial string + RoleARN string + RoleSessionName string + ExternalID string + MFASerial string + AssumeRoleDuration *time.Duration SourceProfileName string SourceProfile *sharedConfig @@ -274,6 +277,11 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e updateString(&cfg.CredentialSource, section, credentialSourceKey) updateString(&cfg.Region, section, regionKey) + if section.Has(roleDurationSecondsKey) { + d := time.Duration(section.Int(roleDurationSecondsKey)) * time.Second + cfg.AssumeRoleDuration = &d + } + if v := section.String(stsRegionalEndpointSharedKey); len(v) != 0 { sre, err := endpoints.GetSTSRegionalEndpoint(v) if err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go new file mode 100644 index 000000000000..f35fc860b3b7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go @@ -0,0 +1,13 @@ +// +build !go1.7 + +package v4 + +import ( + "net/http" + + "github.com/aws/aws-sdk-go/aws" +) + +func requestContext(r *http.Request) aws.Context { + return aws.BackgroundContext() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go new file mode 100644 index 000000000000..fed5c859ca66 --- 
/dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go @@ -0,0 +1,13 @@ +// +build go1.7 + +package v4 + +import ( + "net/http" + + "github.com/aws/aws-sdk-go/aws" +) + +func requestContext(r *http.Request) aws.Context { + return r.Context() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go index b97334c7f28e..d71f7b3f4fa0 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go @@ -340,7 +340,7 @@ func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, regi } var err error - ctx.credValues, err = v4.Credentials.Get() + ctx.credValues, err = v4.Credentials.GetWithContext(requestContext(r)) if err != nil { return http.Header{}, err } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go index d542ef01bc89..98751ee84f24 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/types.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/types.go @@ -239,3 +239,26 @@ func (es errors) Error() string { return strings.Join(parts, "\n") } + +// CopySeekableBody copies the seekable body to an io.Writer +func CopySeekableBody(dst io.Writer, src io.ReadSeeker) (int64, error) { + curPos, err := src.Seek(0, sdkio.SeekCurrent) + if err != nil { + return 0, err + } + + // copy errors may be assumed to be from the body. + n, err := io.Copy(dst, src) + if err != nil { + return n, err + } + + // seek back to the first position after reading to reset + // the body for transmission. + _, err = src.Seek(curPos, sdkio.SeekStart) + if err != nil { + return n, err + } + + return n, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 94f6d68caaa4..cc41ca0bcda8 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.28.2" +const SDKVersion = "1.35.24" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go b/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go new file mode 100644 index 000000000000..876dcb3fde28 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go @@ -0,0 +1,40 @@ +// +build !go1.7 + +package context + +import "time" + +// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This is copied to +// provide a 1.6 and 1.5 safe version of context that is compatible with Go +// 1.7's Context. +// +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses. +type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case BackgroundCtx: + return "aws.BackgroundContext" + } + return "unknown empty Context" +} + +// BackgroundCtx is the common base context. 
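The new aws.CopySeekableBody helper drains a seekable body into a writer and then seeks it back to its starting offset, so the payload can still be transmitted afterwards. A small usage sketch:

```go
package main

import (
	"bytes"
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	body := strings.NewReader(`{"k":"v"}`)

	// Capture the payload, e.g. for logging or hashing ...
	var buf bytes.Buffer
	n, err := aws.CopySeekableBody(&buf, body)
	fmt.Println(n, err, buf.String()) // 9 <nil> {"k":"v"}

	// ... and the reader has been seeked back, so it can still be read
	// in full for the actual transmission.
	rest := make([]byte, body.Len())
	body.Read(rest)
	fmt.Println(string(rest)) // {"k":"v"}
}
```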
+var BackgroundCtx = new(emptyCtx) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go index cf9fad81e704..55fa73ebcf2a 100644 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go @@ -63,9 +63,10 @@ var parseTable = map[ASTKind]map[TokenType]int{ TokenNone: MarkCompleteState, }, ASTKindEqualExpr: map[TokenType]int{ - TokenLit: ValueState, - TokenWS: SkipTokenState, - TokenNL: SkipState, + TokenLit: ValueState, + TokenWS: SkipTokenState, + TokenNL: SkipState, + TokenNone: SkipState, }, ASTKindStatement: map[TokenType]int{ TokenLit: SectionState, diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE b/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE new file mode 100644 index 000000000000..6a66aea5eafe --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go b/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go new file mode 100644 index 000000000000..14ad0c589115 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go @@ -0,0 +1,120 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package singleflight provides a duplicate function call suppression +// mechanism. +package singleflight + +import "sync" + +// call is an in-flight or completed singleflight.Do call +type call struct { + wg sync.WaitGroup + + // These fields are written once before the WaitGroup is done + // and are only read after the WaitGroup is done. + val interface{} + err error + + // forgotten indicates whether Forget was called with this call's key + // while the call was still in flight. 
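As I read the ini_parser.go change above, an equals-expression may now be terminated by end-of-input (TokenNone), so a shared credentials file whose final line is an empty assignment with no trailing newline parses instead of erroring. A sketch; the file contents and key values are illustrative:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	// The last line is "aws_session_token =" with no value and no newline.
	contents := "[default]\n" +
		"aws_access_key_id = AKIDEXAMPLE\n" +
		"aws_secret_access_key = SECRETEXAMPLE\n" +
		"aws_session_token ="

	f, _ := ioutil.TempFile("", "creds")
	defer os.Remove(f.Name())
	f.WriteString(contents)
	f.Close()

	creds := credentials.NewSharedCredentials(f.Name(), "default")
	v, err := creds.Get()
	fmt.Println(v.AccessKeyID, err) // AKIDEXAMPLE <nil>
}
```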
+ forgotten bool + + // These fields are read and written with the singleflight + // mutex held before the WaitGroup is done, and are read but + // not written after the WaitGroup is done. + dups int + chans []chan<- Result +} + +// Group represents a class of work and forms a namespace in +// which units of work can be executed with duplicate suppression. +type Group struct { + mu sync.Mutex // protects m + m map[string]*call // lazily initialized +} + +// Result holds the results of Do, so they can be passed +// on a channel. +type Result struct { + Val interface{} + Err error + Shared bool +} + +// Do executes and returns the results of the given function, making +// sure that only one execution is in-flight for a given key at a +// time. If a duplicate comes in, the duplicate caller waits for the +// original to complete and receives the same results. +// The return value shared indicates whether v was given to multiple callers. +func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) { + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + g.mu.Unlock() + c.wg.Wait() + return c.val, c.err, true + } + c := new(call) + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + g.doCall(c, key, fn) + return c.val, c.err, c.dups > 0 +} + +// DoChan is like Do but returns a channel that will receive the +// results when they are ready. +func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result { + ch := make(chan Result, 1) + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + c.chans = append(c.chans, ch) + g.mu.Unlock() + return ch + } + c := &call{chans: []chan<- Result{ch}} + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + go g.doCall(c, key, fn) + + return ch +} + +// doCall handles the single call for a key. +func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) { + c.val, c.err = fn() + c.wg.Done() + + g.mu.Lock() + if !c.forgotten { + delete(g.m, key) + } + for _, ch := range c.chans { + ch <- Result{c.val, c.err, c.dups > 0} + } + g.mu.Unlock() +} + +// Forget tells the singleflight to forget about a key. Future calls +// to Do for this key will call the function rather than waiting for +// an earlier call to complete. +func (g *Group) Forget(key string) { + g.mu.Lock() + if c, ok := g.m[key]; ok { + c.forgotten = true + } + delete(g.m, key) + g.mu.Unlock() +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build.go index 50c5ed760052..5d500be27f3b 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build.go @@ -1,7 +1,7 @@ // Package ec2query provides serialization of AWS EC2 requests and responses. 
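The vendored singleflight package collapses concurrent calls that share a key into one execution, with duplicate callers receiving the same result. Because it lives under internal/ and cannot be imported from outside the SDK, this sketch uses the identical golang.org/x/sync/singleflight API:

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"

	"golang.org/x/sync/singleflight"
)

func main() {
	var g singleflight.Group
	var executions int64

	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// All goroutines share the key; overlapping callers wait for
			// the single in-flight fn and receive its result.
			v, _, _ := g.Do("refresh-creds", func() (interface{}, error) {
				atomic.AddInt64(&executions, 1)
				return "fresh-credentials", nil
			})
			_ = v
		}()
	}
	wg.Wait()
	// Far fewer than 10 when the calls overlap.
	fmt.Println("executions:", atomic.LoadInt64(&executions))
}
```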
package ec2query -//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/ec2.json build_test.go +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/ec2.json build_test.go import ( "net/url" diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal.go index 105d732f9d38..c42b04a8d557 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal.go @@ -1,6 +1,6 @@ package ec2query -//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/ec2.json unmarshal_test.go +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/ec2.json unmarshal_test.go import ( "encoding/xml" diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go index ea0da79a5e0b..8b2c9bbeba0c 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go @@ -6,7 +6,9 @@ import ( "encoding/json" "fmt" "io" + "math/big" "reflect" + "strings" "time" "github.com/aws/aws-sdk-go/aws" @@ -14,6 +16,8 @@ import ( "github.com/aws/aws-sdk-go/private/protocol" ) +var millisecondsFloat = new(big.Float).SetInt64(1e3) + // UnmarshalJSONError unmarshal's the reader's JSON document into the passed in // type. The value to unmarshal the json document into must be a pointer to the // type. @@ -38,17 +42,42 @@ func UnmarshalJSONError(v interface{}, stream io.Reader) error { func UnmarshalJSON(v interface{}, stream io.Reader) error { var out interface{} - err := json.NewDecoder(stream).Decode(&out) + decoder := json.NewDecoder(stream) + decoder.UseNumber() + err := decoder.Decode(&out) + if err == io.EOF { + return nil + } else if err != nil { + return err + } + + return unmarshaler{}.unmarshalAny(reflect.ValueOf(v), out, "") +} + +// UnmarshalJSONCaseInsensitive reads a stream and unmarshals the result into the +// object v. Ignores casing for structure members. 
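Switching the decoder to UseNumber() is what enables the rest of this hunk: numeric tokens arrive as json.Number strings rather than float64, so large int64 values keep their precision and epoch timestamps keep their fractional part. A standalone illustration of the difference:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	const doc = `{"id": 9007199254740993}` // 2^53 + 1

	// Default decoding goes through float64 and silently rounds.
	var asAny map[string]interface{}
	json.NewDecoder(strings.NewReader(doc)).Decode(&asAny)
	fmt.Printf("%.0f\n", asAny["id"].(float64)) // 9007199254740992

	// With UseNumber the digits survive as a json.Number.
	dec := json.NewDecoder(strings.NewReader(doc))
	dec.UseNumber()
	dec.Decode(&asAny)
	n := asAny["id"].(json.Number)
	i, _ := n.Int64()
	fmt.Println(i) // 9007199254740993
}
```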
+func UnmarshalJSONCaseInsensitive(v interface{}, stream io.Reader) error { + var out interface{} + + decoder := json.NewDecoder(stream) + decoder.UseNumber() + err := decoder.Decode(&out) if err == io.EOF { return nil } else if err != nil { return err } - return unmarshalAny(reflect.ValueOf(v), out, "") + return unmarshaler{ + caseInsensitive: true, + }.unmarshalAny(reflect.ValueOf(v), out, "") } -func unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) error { +type unmarshaler struct { + caseInsensitive bool +} + +func (u unmarshaler) unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) error { vtype := value.Type() if vtype.Kind() == reflect.Ptr { vtype = vtype.Elem() // check kind of actual element type @@ -80,17 +109,17 @@ func unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) if field, ok := vtype.FieldByName("_"); ok { tag = field.Tag } - return unmarshalStruct(value, data, tag) + return u.unmarshalStruct(value, data, tag) case "list": - return unmarshalList(value, data, tag) + return u.unmarshalList(value, data, tag) case "map": - return unmarshalMap(value, data, tag) + return u.unmarshalMap(value, data, tag) default: - return unmarshalScalar(value, data, tag) + return u.unmarshalScalar(value, data, tag) } } -func unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTag) error { +func (u unmarshaler) unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTag) error { if data == nil { return nil } @@ -114,7 +143,7 @@ func unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTa // unwrap any payloads if payload := tag.Get("payload"); payload != "" { field, _ := t.FieldByName(payload) - return unmarshalAny(value.FieldByName(payload), data, field.Tag) + return u.unmarshalAny(value.FieldByName(payload), data, field.Tag) } for i := 0; i < t.NumField(); i++ { @@ -128,9 +157,19 @@ func unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTa if locName := field.Tag.Get("locationName"); locName != "" { name = locName } + if u.caseInsensitive { + if _, ok := mapData[name]; !ok { + // Fallback to uncased name search if the exact name didn't match. 
+ for kn, v := range mapData { + if strings.EqualFold(kn, name) { + mapData[name] = v + } + } + } + } member := value.FieldByIndex(field.Index) - err := unmarshalAny(member, mapData[name], field.Tag) + err := u.unmarshalAny(member, mapData[name], field.Tag) if err != nil { return err } @@ -138,7 +177,7 @@ func unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTa return nil } -func unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) error { +func (u unmarshaler) unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) error { if data == nil { return nil } @@ -153,7 +192,7 @@ func unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) } for i, c := range listData { - err := unmarshalAny(value.Index(i), c, "") + err := u.unmarshalAny(value.Index(i), c, "") if err != nil { return err } @@ -162,7 +201,7 @@ func unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) return nil } -func unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) error { +func (u unmarshaler) unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) error { if data == nil { return nil } @@ -179,14 +218,14 @@ func unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) kvalue := reflect.ValueOf(k) vvalue := reflect.New(value.Type().Elem()).Elem() - unmarshalAny(vvalue, v, "") + u.unmarshalAny(vvalue, v, "") value.SetMapIndex(kvalue, vvalue) } return nil } -func unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTag) error { +func (u unmarshaler) unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTag) error { switch d := data.(type) { case nil: @@ -222,16 +261,31 @@ func unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTa default: return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) } - case float64: + case json.Number: switch value.Interface().(type) { case *int64: - di := int64(d) + // Retain the old behavior where we would just truncate the float64 + // calling d.Int64() here could cause an invalid syntax error due to the usage of strconv.ParseInt + f, err := d.Float64() + if err != nil { + return err + } + di := int64(f) value.Set(reflect.ValueOf(&di)) case *float64: - value.Set(reflect.ValueOf(&d)) + f, err := d.Float64() + if err != nil { + return err + } + value.Set(reflect.ValueOf(&f)) case *time.Time: - // Time unmarshaled from a float64 can only be epoch seconds - t := time.Unix(int64(d), 0).UTC() + float, ok := new(big.Float).SetString(d.String()) + if !ok { + return fmt.Errorf("unsupported float time representation: %v", d.String()) + } + float = float.Mul(float, millisecondsFloat) + ms, _ := float.Int64() + t := time.Unix(0, ms*1e6).UTC() value.Set(reflect.ValueOf(&t)) default: return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go index 89cfda75f668..a029217e4c61 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go @@ -2,8 +2,8 @@ // requests and responses. 
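The timestamp branch above routes epoch values through math/big so fractional seconds survive: the json.Number is scaled to milliseconds exactly, then converted to a time.Time. The same arithmetic in isolation, mirroring the unmarshalScalar code:

```go
package main

import (
	"fmt"
	"math/big"
	"time"
)

func main() {
	// An epoch timestamp with millisecond precision, as it would appear
	// in a JSON payload.
	float, _ := new(big.Float).SetString("1422172800.123")

	// Scale seconds to milliseconds, then truncate to int64.
	ms, _ := float.Mul(float, big.NewFloat(1e3)).Int64()

	t := time.Unix(0, ms*int64(time.Millisecond)).UTC()
	fmt.Println(t) // 2015-01-25 08:00:00.123 +0000 UTC
}
```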
package jsonrpc -//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/json.json build_test.go -//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/json.json unmarshal_test.go +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/json.json build_test.go +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/json.json unmarshal_test.go import ( "github.com/aws/aws-sdk-go/aws/awserr" diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go index a2416c044851..c0c52e2db0f3 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go @@ -54,7 +54,7 @@ func (u *UnmarshalTypedError) UnmarshalError( // If exception code is know, use associated constructor to get a value // for the exception that the JSON body can be unmarshaled into. v := fn(respMeta) - err := jsonutil.UnmarshalJSON(v, body) + err := jsonutil.UnmarshalJSONCaseInsensitive(v, body) if err != nil { return nil, err } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go index 0cb99eb57968..d40346a7790d 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go @@ -1,7 +1,7 @@ // Package query provides serialization of AWS query requests, and responses. package query -//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/query.json build_test.go +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/query.json build_test.go import ( "net/url" diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go index f69c1efc93ad..9231e95d1604 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go @@ -1,6 +1,6 @@ package query -//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/query.json unmarshal_test.go +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/query.json unmarshal_test.go import ( "encoding/xml" diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go index 05d4ff519258..98f4caed91cc 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go @@ -27,8 +27,8 @@ const ( // RFC3339 a subset of the ISO8601 timestamp format. 
e.g 2014-04-29T18:30:38Z ISO8601TimeFormat = "2006-01-02T15:04:05.999999999Z" - // This format is used for output time without seconds precision - ISO8601OutputTimeFormat = "2006-01-02T15:04:05Z" + // This format is used for output time with fractional second precision up to milliseconds + ISO8601OutputTimeFormat = "2006-01-02T15:04:05.999999999Z" ) // IsKnownTimestampFormat returns if the timestamp format name @@ -48,7 +48,7 @@ func IsKnownTimestampFormat(name string) bool { // FormatTime returns a string value of the time. func FormatTime(name string, t time.Time) string { - t = t.UTC() + t = t.UTC().Truncate(time.Millisecond) switch name { case RFC822TimeFormatName: @@ -56,7 +56,8 @@ func FormatTime(name string, t time.Time) string { case ISO8601TimeFormatName: return t.Format(ISO8601OutputTimeFormat) case UnixTimeFormatName: - return strconv.FormatInt(t.Unix(), 10) + ms := t.UnixNano() / int64(time.Millisecond) + return strconv.FormatFloat(float64(ms)/1e3, 'f', -1, 64) default: panic("unknown timestamp format name, " + name) } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go index cf981fe95132..09ad951595e4 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go @@ -8,6 +8,7 @@ import ( "reflect" "sort" "strconv" + "strings" "time" "github.com/aws/aws-sdk-go/private/protocol" @@ -60,6 +61,14 @@ func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag refle return nil } + xml := tag.Get("xml") + if len(xml) != 0 { + name := strings.SplitAfterN(xml, ",", 2)[0] + if name == "-" { + return nil + } + } + t := tag.Get("type") if t == "" { switch value.Kind() { diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go index 7108d3800937..107c053f8acf 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go @@ -64,6 +64,14 @@ func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error { // parse deserializes any value from the XMLNode. The type tag is used to infer the type, or reflect // will be used to determine the type from r. func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + xml := tag.Get("xml") + if len(xml) != 0 { + name := strings.SplitAfterN(xml, ",", 2)[0] + if name == "-" { + return nil + } + } + rtype := r.Type() if rtype.Kind() == reflect.Ptr { rtype = rtype.Elem() // check kind of actual element type diff --git a/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go index cfa9204ebddc..ae8e316f25c8 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go @@ -158,8 +158,9 @@ func (c *AutoScaling) AttachLoadBalancerTargetGroupsRequest(input *AttachLoadBal // // Attaches one or more target groups to the specified Auto Scaling group. // -// To describe the target groups for an Auto Scaling group, use DescribeLoadBalancerTargetGroups. -// To detach the target group from the Auto Scaling group, use DetachLoadBalancerTargetGroups. +// To describe the target groups for an Auto Scaling group, call the DescribeLoadBalancerTargetGroups +// API. 
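With the truncation in FormatTime, serialized timestamps now carry millisecond precision in both the ISO8601 and unixTimestamp formats. A sketch of the new output; note that private/protocol is importable but not a supported public API:

```go
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/private/protocol"
)

func main() {
	t := time.Date(2014, 4, 29, 18, 30, 38, 123456789, time.UTC)

	// Sub-millisecond precision is truncated; milliseconds are kept.
	fmt.Println(protocol.FormatTime(protocol.ISO8601TimeFormatName, t))
	// 2014-04-29T18:30:38.123Z

	fmt.Println(protocol.FormatTime(protocol.UnixTimeFormatName, t))
	// 1398796238.123
}
```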
To detach the target group from the Auto Scaling group, call the DetachLoadBalancerTargetGroups +// API. // // With Application Load Balancers and Network Load Balancers, instances are // registered as targets with a target group. With Classic Load Balancers, instances @@ -249,14 +250,17 @@ func (c *AutoScaling) AttachLoadBalancersRequest(input *AttachLoadBalancersInput // AttachLoadBalancers API operation for Auto Scaling. // -// Attaches one or more Classic Load Balancers to the specified Auto Scaling -// group. // -// To attach an Application Load Balancer or a Network Load Balancer instead, -// see AttachLoadBalancerTargetGroups. +// To attach an Application Load Balancer or a Network Load Balancer, use the +// AttachLoadBalancerTargetGroups API operation instead. +// +// Attaches one or more Classic Load Balancers to the specified Auto Scaling +// group. Amazon EC2 Auto Scaling registers the running instances with these +// Classic Load Balancers. // -// To describe the load balancers for an Auto Scaling group, use DescribeLoadBalancers. -// To detach the load balancer from the Auto Scaling group, use DetachLoadBalancers. +// To describe the load balancers for an Auto Scaling group, call the DescribeLoadBalancers +// API. To detach the load balancer from the Auto Scaling group, call the DetachLoadBalancers +// API. // // For more information, see Attaching a Load Balancer to Your Auto Scaling // Group (https://docs.aws.amazon.com/autoscaling/ec2/userguide/attach-load-balancer-asg.html) @@ -442,7 +446,8 @@ func (c *AutoScaling) BatchPutScheduledUpdateGroupActionRequest(input *BatchPutS // * ErrCodeLimitExceededFault "LimitExceeded" // You have already reached a limit for your Amazon EC2 Auto Scaling resources // (for example, Auto Scaling groups, launch configurations, or lifecycle hooks). -// For more information, see DescribeAccountLimits. +// For more information, see DescribeAccountLimits (https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_DescribeAccountLimits.html) +// in the Amazon EC2 Auto Scaling API Reference. // // * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Amazon EC2 Auto Scaling resource @@ -470,6 +475,101 @@ func (c *AutoScaling) BatchPutScheduledUpdateGroupActionWithContext(ctx aws.Cont return out, req.Send() } +const opCancelInstanceRefresh = "CancelInstanceRefresh" + +// CancelInstanceRefreshRequest generates a "aws/request.Request" representing the +// client's request for the CancelInstanceRefresh operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CancelInstanceRefresh for more information on using the CancelInstanceRefresh +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CancelInstanceRefreshRequest method. 
+// req, resp := client.CancelInstanceRefreshRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/CancelInstanceRefresh +func (c *AutoScaling) CancelInstanceRefreshRequest(input *CancelInstanceRefreshInput) (req *request.Request, output *CancelInstanceRefreshOutput) { + op := &request.Operation{ + Name: opCancelInstanceRefresh, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelInstanceRefreshInput{} + } + + output = &CancelInstanceRefreshOutput{} + req = c.newRequest(op, input, output) + return +} + +// CancelInstanceRefresh API operation for Auto Scaling. +// +// Cancels an instance refresh operation in progress. Cancellation does not +// roll back any replacements that have already been completed, but it prevents +// new replacements from being started. +// +// For more information, see Replacing Auto Scaling Instances Based on an Instance +// Refresh (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-instance-refresh.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Auto Scaling's +// API operation CancelInstanceRefresh for usage and error information. +// +// Returned Error Codes: +// * ErrCodeLimitExceededFault "LimitExceeded" +// You have already reached a limit for your Amazon EC2 Auto Scaling resources +// (for example, Auto Scaling groups, launch configurations, or lifecycle hooks). +// For more information, see DescribeAccountLimits (https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_DescribeAccountLimits.html) +// in the Amazon EC2 Auto Scaling API Reference. +// +// * ErrCodeResourceContentionFault "ResourceContention" +// You already have a pending update to an Amazon EC2 Auto Scaling resource +// (for example, an Auto Scaling group, instance, or load balancer). +// +// * ErrCodeActiveInstanceRefreshNotFoundFault "ActiveInstanceRefreshNotFound" +// The request failed because an active instance refresh for the specified Auto +// Scaling group was not found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/CancelInstanceRefresh +func (c *AutoScaling) CancelInstanceRefresh(input *CancelInstanceRefreshInput) (*CancelInstanceRefreshOutput, error) { + req, out := c.CancelInstanceRefreshRequest(input) + return out, req.Send() +} + +// CancelInstanceRefreshWithContext is the same as CancelInstanceRefresh with the addition of +// the ability to pass a context and additional request options. +// +// See CancelInstanceRefresh for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) CancelInstanceRefreshWithContext(ctx aws.Context, input *CancelInstanceRefreshInput, opts ...request.Option) (*CancelInstanceRefreshOutput, error) { + req, out := c.CancelInstanceRefreshRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opCompleteLifecycleAction = "CompleteLifecycleAction" // CompleteLifecycleActionRequest generates a "aws/request.Request" representing the @@ -622,11 +722,23 @@ func (c *AutoScaling) CreateAutoScalingGroupRequest(input *CreateAutoScalingGrou // Creates an Auto Scaling group with the specified name and attributes. // // If you exceed your maximum limit of Auto Scaling groups, the call fails. -// For information about viewing this limit, see DescribeAccountLimits. For -// information about updating this limit, see Amazon EC2 Auto Scaling Limits -// (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-account-limits.html) +// To query this limit, call the DescribeAccountLimits API. For information +// about updating this limit, see Amazon EC2 Auto Scaling Service Quotas (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-account-limits.html) +// in the Amazon EC2 Auto Scaling User Guide. +// +// For introductory exercises for creating an Auto Scaling group, see Getting +// Started with Amazon EC2 Auto Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/GettingStartedTutorial.html) +// and Tutorial: Set Up a Scaled and Load-Balanced Application (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-register-lbs-with-asg.html) +// in the Amazon EC2 Auto Scaling User Guide. For more information, see Auto +// Scaling Groups (https://docs.aws.amazon.com/autoscaling/ec2/userguide/AutoScalingGroup.html) // in the Amazon EC2 Auto Scaling User Guide. // +// Every Auto Scaling group has three size parameters (DesiredCapacity, MaxSize, +// and MinSize). Usually, you set these sizes based on a specific number of +// instances. However, if you configure a mixed instances policy that defines +// weights for the instance types, you must specify these sizes with the same +// units that you use for weighting instances. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -642,7 +754,8 @@ func (c *AutoScaling) CreateAutoScalingGroupRequest(input *CreateAutoScalingGrou // * ErrCodeLimitExceededFault "LimitExceeded" // You have already reached a limit for your Amazon EC2 Auto Scaling resources // (for example, Auto Scaling groups, launch configurations, or lifecycle hooks). -// For more information, see DescribeAccountLimits. +// For more information, see DescribeAccountLimits (https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_DescribeAccountLimits.html) +// in the Amazon EC2 Auto Scaling API Reference. // // * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Amazon EC2 Auto Scaling resource @@ -721,9 +834,8 @@ func (c *AutoScaling) CreateLaunchConfigurationRequest(input *CreateLaunchConfig // Creates a launch configuration. // // If you exceed your maximum limit of launch configurations, the call fails. -// For information about viewing this limit, see DescribeAccountLimits. For -// information about updating this limit, see Amazon EC2 Auto Scaling Limits -// (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-account-limits.html) +// To query this limit, call the DescribeAccountLimits API. For information +// about updating this limit, see Amazon EC2 Auto Scaling Service Quotas (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-account-limits.html) // in the Amazon EC2 Auto Scaling User Guide. 
// // For more information, see Launch Configurations (https://docs.aws.amazon.com/autoscaling/ec2/userguide/LaunchConfiguration.html) @@ -744,7 +856,8 @@ func (c *AutoScaling) CreateLaunchConfigurationRequest(input *CreateLaunchConfig // * ErrCodeLimitExceededFault "LimitExceeded" // You have already reached a limit for your Amazon EC2 Auto Scaling resources // (for example, Auto Scaling groups, launch configurations, or lifecycle hooks). -// For more information, see DescribeAccountLimits. +// For more information, see DescribeAccountLimits (https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_DescribeAccountLimits.html) +// in the Amazon EC2 Auto Scaling API Reference. // // * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Amazon EC2 Auto Scaling resource @@ -836,7 +949,8 @@ func (c *AutoScaling) CreateOrUpdateTagsRequest(input *CreateOrUpdateTagsInput) // * ErrCodeLimitExceededFault "LimitExceeded" // You have already reached a limit for your Amazon EC2 Auto Scaling resources // (for example, Auto Scaling groups, launch configurations, or lifecycle hooks). -// For more information, see DescribeAccountLimits. +// For more information, see DescribeAccountLimits (https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_DescribeAccountLimits.html) +// in the Amazon EC2 Auto Scaling API Reference. // // * ErrCodeAlreadyExistsFault "AlreadyExists" // You already have an Auto Scaling group or launch configuration with this @@ -925,13 +1039,13 @@ func (c *AutoScaling) DeleteAutoScalingGroupRequest(input *DeleteAutoScalingGrou // alarm actions, and any alarm that no longer has an associated action. // // To remove instances from the Auto Scaling group before deleting it, call -// DetachInstances with the list of instances and the option to decrement the -// desired capacity. This ensures that Amazon EC2 Auto Scaling does not launch -// replacement instances. +// the DetachInstances API with the list of instances and the option to decrement +// the desired capacity. This ensures that Amazon EC2 Auto Scaling does not +// launch replacement instances. // -// To terminate all instances before deleting the Auto Scaling group, call UpdateAutoScalingGroup -// and set the minimum size and desired capacity of the Auto Scaling group to -// zero. +// To terminate all instances before deleting the Auto Scaling group, call the +// UpdateAutoScalingGroup API and set the minimum size and desired capacity +// of the Auto Scaling group to zero. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1527,11 +1641,11 @@ func (c *AutoScaling) DescribeAccountLimitsRequest(input *DescribeAccountLimitsI // DescribeAccountLimits API operation for Auto Scaling. // -// Describes the current Amazon EC2 Auto Scaling resource limits for your AWS +// Describes the current Amazon EC2 Auto Scaling resource quotas for your AWS // account. // -// For information about requesting an increase in these limits, see Amazon -// EC2 Auto Scaling Limits (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-account-limits.html) +// For information about requesting an increase, see Amazon EC2 Auto Scaling +// Service Quotas (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-account-limits.html) // in the Amazon EC2 Auto Scaling User Guide. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions @@ -1612,7 +1726,17 @@ func (c *AutoScaling) DescribeAdjustmentTypesRequest(input *DescribeAdjustmentTy // DescribeAdjustmentTypes API operation for Auto Scaling. // -// Describes the policy adjustment types for use with PutScalingPolicy. +// Describes the available adjustment types for Amazon EC2 Auto Scaling scaling +// policies. These settings apply to step scaling policies and simple scaling +// policies; they do not apply to target tracking scaling policies. +// +// The following adjustment types are supported: +// +// * ChangeInCapacity +// +// * ExactCapacity +// +// * PercentChangeInCapacity // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2010,6 +2134,110 @@ func (c *AutoScaling) DescribeAutoScalingNotificationTypesWithContext(ctx aws.Co return out, req.Send() } +const opDescribeInstanceRefreshes = "DescribeInstanceRefreshes" + +// DescribeInstanceRefreshesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeInstanceRefreshes operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeInstanceRefreshes for more information on using the DescribeInstanceRefreshes +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeInstanceRefreshesRequest method. +// req, resp := client.DescribeInstanceRefreshesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DescribeInstanceRefreshes +func (c *AutoScaling) DescribeInstanceRefreshesRequest(input *DescribeInstanceRefreshesInput) (req *request.Request, output *DescribeInstanceRefreshesOutput) { + op := &request.Operation{ + Name: opDescribeInstanceRefreshes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeInstanceRefreshesInput{} + } + + output = &DescribeInstanceRefreshesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeInstanceRefreshes API operation for Auto Scaling. +// +// Describes one or more instance refreshes. +// +// You can determine the status of a request by looking at the Status parameter. +// The following are the possible statuses: +// +// * Pending - The request was created, but the operation has not started. +// +// * InProgress - The operation is in progress. +// +// * Successful - The operation completed successfully. +// +// * Failed - The operation failed to complete. You can troubleshoot using +// the status reason and the scaling activities. +// +// * Cancelling - An ongoing operation is being cancelled. Cancellation does +// not roll back any replacements that have already been completed, but it +// prevents new replacements from being started. +// +// * Cancelled - The operation is cancelled. +// +// For more information, see Replacing Auto Scaling Instances Based on an Instance +// Refresh (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-instance-refresh.html). 
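The new instance-refresh operations compose in the obvious way: start a refresh, poll DescribeInstanceRefreshes, and cancel with CancelInstanceRefresh, which takes the same AutoScalingGroupName input. A hedged polling sketch; the group name is hypothetical and error handling is minimal:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func main() {
	svc := autoscaling.New(session.Must(session.NewSession()))

	out, err := svc.DescribeInstanceRefreshes(&autoscaling.DescribeInstanceRefreshesInput{
		AutoScalingGroupName: aws.String("my-asg"), // hypothetical group
	})
	if err != nil {
		fmt.Println(err)
		return
	}

	// Status is one of Pending, InProgress, Successful, Failed,
	// Cancelling, or Cancelled, per the doc comment above.
	for _, r := range out.InstanceRefreshes {
		fmt.Println(aws.StringValue(r.Status), aws.StringValue(r.StatusReason))
	}
}
```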
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Auto Scaling's +// API operation DescribeInstanceRefreshes for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidNextToken "InvalidNextToken" +// The NextToken value is not valid. +// +// * ErrCodeResourceContentionFault "ResourceContention" +// You already have a pending update to an Amazon EC2 Auto Scaling resource +// (for example, an Auto Scaling group, instance, or load balancer). +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DescribeInstanceRefreshes +func (c *AutoScaling) DescribeInstanceRefreshes(input *DescribeInstanceRefreshesInput) (*DescribeInstanceRefreshesOutput, error) { + req, out := c.DescribeInstanceRefreshesRequest(input) + return out, req.Send() +} + +// DescribeInstanceRefreshesWithContext is the same as DescribeInstanceRefreshes with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeInstanceRefreshes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) DescribeInstanceRefreshesWithContext(ctx aws.Context, input *DescribeInstanceRefreshesInput, opts ...request.Option) (*DescribeInstanceRefreshesOutput, error) { + req, out := c.DescribeInstanceRefreshesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeLaunchConfigurations = "DescribeLaunchConfigurations" // DescribeLaunchConfigurationsRequest generates a "aws/request.Request" representing the @@ -2444,8 +2672,8 @@ func (c *AutoScaling) DescribeLoadBalancersRequest(input *DescribeLoadBalancersI // Describes the load balancers for the specified Auto Scaling group. // // This operation describes only Classic Load Balancers. If you have Application -// Load Balancers or Network Load Balancers, use DescribeLoadBalancerTargetGroups -// instead. +// Load Balancers or Network Load Balancers, use the DescribeLoadBalancerTargetGroups +// API instead. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2528,7 +2756,7 @@ func (c *AutoScaling) DescribeMetricCollectionTypesRequest(input *DescribeMetric // Describes the available CloudWatch metrics for Amazon EC2 Auto Scaling. // // The GroupStandbyInstances metric is not returned by default. You must explicitly -// request this metric when calling EnableMetricsCollection. +// request this metric when calling the EnableMetricsCollection API. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3035,7 +3263,8 @@ func (c *AutoScaling) DescribeScalingProcessTypesRequest(input *DescribeScalingP // DescribeScalingProcessTypes API operation for Auto Scaling. // -// Describes the scaling process types for use with ResumeProcesses and SuspendProcesses. +// Describes the scaling process types for use with the ResumeProcesses and +// SuspendProcesses APIs. 
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3123,7 +3352,7 @@ func (c *AutoScaling) DescribeScheduledActionsRequest(input *DescribeScheduledAc // // Describes the actions scheduled for your Auto Scaling group that haven't // run or that have not reached their end time. To describe the actions that -// have already run, use DescribeScalingActivities. +// have already run, call the DescribeScalingActivities API. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3275,6 +3504,9 @@ func (c *AutoScaling) DescribeTagsRequest(input *DescribeTagsInput) (req *reques // a particular tag only if it matches all the filters. If there's no match, // no special message is returned. // +// For more information, see Tagging Auto Scaling Groups and Instances (https://docs.aws.amazon.com/autoscaling/ec2/userguide/autoscaling-tagging.html) +// in the Amazon EC2 Auto Scaling User Guide. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -3673,13 +3905,13 @@ func (c *AutoScaling) DetachLoadBalancersRequest(input *DetachLoadBalancersInput // group. // // This operation detaches only Classic Load Balancers. If you have Application -// Load Balancers or Network Load Balancers, use DetachLoadBalancerTargetGroups -// instead. +// Load Balancers or Network Load Balancers, use the DetachLoadBalancerTargetGroups +// API instead. // // When you detach a load balancer, it enters the Removing state while deregistering // the instances in the group. When all instances are deregistered, then you -// can no longer describe the load balancer using DescribeLoadBalancers. The -// instances remain running. +// can no longer describe the load balancer using the DescribeLoadBalancers +// API call. The instances remain running. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4017,7 +4249,8 @@ func (c *AutoScaling) ExecutePolicyRequest(input *ExecutePolicyInput) (req *requ // ExecutePolicy API operation for Auto Scaling. // -// Executes the specified policy. +// Executes the specified policy. This can be useful for testing the design +// of your scaling policy. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4209,10 +4442,11 @@ func (c *AutoScaling) PutLifecycleHookRequest(input *PutLifecycleHookInput) (req // launch or terminate. // // If you need more time, record the lifecycle action heartbeat to keep the -// instance in a pending state using RecordLifecycleActionHeartbeat. +// instance in a pending state using the RecordLifecycleActionHeartbeat API +// call. // // If you finish before the timeout period ends, complete the lifecycle action -// using CompleteLifecycleAction. +// using the CompleteLifecycleAction API call. // // For more information, see Amazon EC2 Auto Scaling Lifecycle Hooks (https://docs.aws.amazon.com/autoscaling/ec2/userguide/lifecycle-hooks.html) // in the Amazon EC2 Auto Scaling User Guide. 
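The lifecycle-hook flow described above pairs the RecordLifecycleActionHeartbeat and CompleteLifecycleAction APIs. The following is a minimal sketch of a hook consumer; the group and hook names are illustrative, and the token is the LifecycleActionToken delivered to the hook's notification target:

	package main

	import (
		"fmt"
		"os"

		"github.com/aws/aws-sdk-go/aws"
		"github.com/aws/aws-sdk-go/aws/session"
		"github.com/aws/aws-sdk-go/service/autoscaling"
	)

	func main() {
		// The LifecycleActionToken is delivered to the hook's notification target.
		token := os.Args[1]
		client := autoscaling.New(session.Must(session.NewSession()))

		// Keep the instance in the wait state while work is still in progress.
		_, err := client.RecordLifecycleActionHeartbeat(&autoscaling.RecordLifecycleActionHeartbeatInput{
			AutoScalingGroupName: aws.String("my-asg"),      // illustrative
			LifecycleHookName:    aws.String("launch-hook"), // illustrative
			LifecycleActionToken: aws.String(token),
		})
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}

		// When finished, complete the lifecycle action before the timeout ends.
		_, err = client.CompleteLifecycleAction(&autoscaling.CompleteLifecycleActionInput{
			AutoScalingGroupName:  aws.String("my-asg"),
			LifecycleHookName:     aws.String("launch-hook"),
			LifecycleActionToken:  aws.String(token),
			LifecycleActionResult: aws.String("CONTINUE"),
		})
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
	}

Passing ABANDON instead of CONTINUE tells the group to abandon the action; for a launch hook, the instance is terminated.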
@@ -4220,8 +4454,9 @@ func (c *AutoScaling) PutLifecycleHookRequest(input *PutLifecycleHookInput) (req // If you exceed your maximum limit of lifecycle hooks, which by default is // 50 per Auto Scaling group, the call fails. // -// You can view the lifecycle hooks for an Auto Scaling group using DescribeLifecycleHooks. -// If you are no longer using a lifecycle hook, you can delete it using DeleteLifecycleHook. +// You can view the lifecycle hooks for an Auto Scaling group using the DescribeLifecycleHooks +// API call. If you are no longer using a lifecycle hook, you can delete it +// by calling the DeleteLifecycleHook API. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4234,7 +4469,8 @@ func (c *AutoScaling) PutLifecycleHookRequest(input *PutLifecycleHookInput) (req // * ErrCodeLimitExceededFault "LimitExceeded" // You have already reached a limit for your Amazon EC2 Auto Scaling resources // (for example, Auto Scaling groups, launch configurations, or lifecycle hooks). -// For more information, see DescribeAccountLimits. +// For more information, see DescribeAccountLimits (https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_DescribeAccountLimits.html) +// in the Amazon EC2 Auto Scaling API Reference. // // * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Amazon EC2 Auto Scaling resource @@ -4317,6 +4553,9 @@ func (c *AutoScaling) PutNotificationConfigurationRequest(input *PutNotification // Scaling Group Scales (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ASGettingNotifications.html) // in the Amazon EC2 Auto Scaling User Guide. // +// If you exceed your maximum limit of SNS topics, which is 10 per Auto Scaling +// group, the call fails. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -4328,7 +4567,8 @@ func (c *AutoScaling) PutNotificationConfigurationRequest(input *PutNotification // * ErrCodeLimitExceededFault "LimitExceeded" // You have already reached a limit for your Amazon EC2 Auto Scaling resources // (for example, Auto Scaling groups, launch configurations, or lifecycle hooks). -// For more information, see DescribeAccountLimits. +// For more information, see DescribeAccountLimits (https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_DescribeAccountLimits.html) +// in the Amazon EC2 Auto Scaling API Reference. // // * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Amazon EC2 Auto Scaling resource @@ -4403,13 +4643,11 @@ func (c *AutoScaling) PutScalingPolicyRequest(input *PutScalingPolicyInput) (req // PutScalingPolicy API operation for Auto Scaling. // -// Creates or updates a scaling policy for an Auto Scaling group. To update -// an existing scaling policy, use the existing policy name and set the parameters -// to change. Any existing parameter not changed in an update to an existing -// policy is not changed in this update request. +// Creates or updates a scaling policy for an Auto Scaling group. 
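+//
+// For example, a target tracking policy can be created with a sketch like the
+// following (the names and the 50 percent CPU target are illustrative):
+//
+//    _, err := client.PutScalingPolicy(&autoscaling.PutScalingPolicyInput{
+//        AutoScalingGroupName: aws.String("my-asg"),
+//        PolicyName:           aws.String("cpu-target-50"),
+//        PolicyType:           aws.String("TargetTrackingScaling"),
+//        TargetTrackingConfiguration: &autoscaling.TargetTrackingConfiguration{
+//            PredefinedMetricSpecification: &autoscaling.PredefinedMetricSpecification{
+//                PredefinedMetricType: aws.String("ASGAverageCPUUtilization"),
+//            },
+//            TargetValue: aws.Float64(50.0),
+//        },
+//    })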
// // For more information about using scaling policies to scale your Auto Scaling -// group automatically, see Dynamic Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-scale-based-on-demand.html) +// group, see Target Tracking Scaling Policies (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-scaling-target-tracking.html) +// and Step and Simple Scaling Policies (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-scaling-simple-step.html) // in the Amazon EC2 Auto Scaling User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -4423,7 +4661,8 @@ func (c *AutoScaling) PutScalingPolicyRequest(input *PutScalingPolicyInput) (req // * ErrCodeLimitExceededFault "LimitExceeded" // You have already reached a limit for your Amazon EC2 Auto Scaling resources // (for example, Auto Scaling groups, launch configurations, or lifecycle hooks). -// For more information, see DescribeAccountLimits. +// For more information, see DescribeAccountLimits (https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_DescribeAccountLimits.html) +// in the Amazon EC2 Auto Scaling API Reference. // // * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Amazon EC2 Auto Scaling resource @@ -4521,7 +4760,8 @@ func (c *AutoScaling) PutScheduledUpdateGroupActionRequest(input *PutScheduledUp // * ErrCodeLimitExceededFault "LimitExceeded" // You have already reached a limit for your Amazon EC2 Auto Scaling resources // (for example, Auto Scaling groups, launch configurations, or lifecycle hooks). -// For more information, see DescribeAccountLimits. +// For more information, see DescribeAccountLimits (https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_DescribeAccountLimits.html) +// in the Amazon EC2 Auto Scaling API Reference. // // * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Amazon EC2 Auto Scaling resource @@ -4596,7 +4836,7 @@ func (c *AutoScaling) RecordLifecycleActionHeartbeatRequest(input *RecordLifecyc // // Records a heartbeat for the lifecycle action associated with the specified // token or instance. This extends the timeout by the length of time defined -// using PutLifecycleHook. +// using the PutLifecycleHook API call. // // This step is a part of the procedure for adding a lifecycle hook to an Auto // Scaling group: @@ -4789,8 +5029,11 @@ func (c *AutoScaling) SetDesiredCapacityRequest(input *SetDesiredCapacityInput) // // Sets the size of the specified Auto Scaling group. // -// For more information about desired capacity, see What Is Amazon EC2 Auto -// Scaling? (https://docs.aws.amazon.com/autoscaling/ec2/userguide/what-is-amazon-ec2-auto-scaling.html) +// If a scale-in activity occurs as a result of a new DesiredCapacity value +// that is lower than the current size of the group, the Auto Scaling group +// uses its termination policy to determine which instances to terminate. +// +// For more information, see Manual Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-manual-scaling.html) // in the Amazon EC2 Auto Scaling User Guide. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions @@ -4966,6 +5209,9 @@ func (c *AutoScaling) SetInstanceProtectionRequest(input *SetInstanceProtectionI
// Scaling group from terminating on scale in, see Instance Protection (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-instance-termination.html#instance-protection)
// in the Amazon EC2 Auto Scaling User Guide.
//
+// If you exceed your maximum limit of instance IDs, which is 50 per Auto Scaling
+// group, the call fails.
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -4977,7 +5223,8 @@ func (c *AutoScaling) SetInstanceProtectionRequest(input *SetInstanceProtectionI
// * ErrCodeLimitExceededFault "LimitExceeded"
// You have already reached a limit for your Amazon EC2 Auto Scaling resources
// (for example, Auto Scaling groups, launch configurations, or lifecycle hooks).
-// For more information, see DescribeAccountLimits.
+// For more information, see DescribeAccountLimits (https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_DescribeAccountLimits.html)
+// in the Amazon EC2 Auto Scaling API Reference.
//
// * ErrCodeResourceContentionFault "ResourceContention"
// You already have a pending update to an Amazon EC2 Auto Scaling resource
@@ -5005,6 +5252,107 @@ func (c *AutoScaling) SetInstanceProtectionWithContext(ctx aws.Context, input *S
return out, req.Send()
}

+const opStartInstanceRefresh = "StartInstanceRefresh"
+
+// StartInstanceRefreshRequest generates a "aws/request.Request" representing the
+// client's request for the StartInstanceRefresh operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See StartInstanceRefresh for more information on using the StartInstanceRefresh
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the StartInstanceRefreshRequest method.
+// req, resp := client.StartInstanceRefreshRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/StartInstanceRefresh
+func (c *AutoScaling) StartInstanceRefreshRequest(input *StartInstanceRefreshInput) (req *request.Request, output *StartInstanceRefreshOutput) {
+	op := &request.Operation{
+		Name:       opStartInstanceRefresh,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &StartInstanceRefreshInput{}
+	}
+
+	output = &StartInstanceRefreshOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// StartInstanceRefresh API operation for Auto Scaling.
+//
+// Starts a new instance refresh operation, which triggers a rolling replacement
+// of all previously launched instances in the Auto Scaling group with a new
+// group of instances.
+//
+// If successful, this call creates a new instance refresh request with a unique
+// ID that you can use to track its progress. To query its status, or to describe
+// the instance refreshes that have already run, call the DescribeInstanceRefreshes
+// API.
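+//
+// As a sketch, a refresh might be started like this (the group name "my-asg"
+// is illustrative):
+//
+//    out, err := client.StartInstanceRefresh(&autoscaling.StartInstanceRefreshInput{
+//        AutoScalingGroupName: aws.String("my-asg"),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.InstanceRefreshId)) // ID for later DescribeInstanceRefreshes calls
+//    }
+//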
To cancel an instance +// refresh operation in progress, use the CancelInstanceRefresh API. +// +// For more information, see Replacing Auto Scaling Instances Based on an Instance +// Refresh (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-instance-refresh.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Auto Scaling's +// API operation StartInstanceRefresh for usage and error information. +// +// Returned Error Codes: +// * ErrCodeLimitExceededFault "LimitExceeded" +// You have already reached a limit for your Amazon EC2 Auto Scaling resources +// (for example, Auto Scaling groups, launch configurations, or lifecycle hooks). +// For more information, see DescribeAccountLimits (https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_DescribeAccountLimits.html) +// in the Amazon EC2 Auto Scaling API Reference. +// +// * ErrCodeResourceContentionFault "ResourceContention" +// You already have a pending update to an Amazon EC2 Auto Scaling resource +// (for example, an Auto Scaling group, instance, or load balancer). +// +// * ErrCodeInstanceRefreshInProgressFault "InstanceRefreshInProgress" +// The request failed because an active instance refresh operation already exists +// for the specified Auto Scaling group. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/StartInstanceRefresh +func (c *AutoScaling) StartInstanceRefresh(input *StartInstanceRefreshInput) (*StartInstanceRefreshOutput, error) { + req, out := c.StartInstanceRefreshRequest(input) + return out, req.Send() +} + +// StartInstanceRefreshWithContext is the same as StartInstanceRefresh with the addition of +// the ability to pass a context and additional request options. +// +// See StartInstanceRefresh for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) StartInstanceRefreshWithContext(ctx aws.Context, input *StartInstanceRefreshInput, opts ...request.Option) (*StartInstanceRefreshOutput, error) { + req, out := c.StartInstanceRefreshRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opSuspendProcesses = "SuspendProcesses" // SuspendProcessesRequest generates a "aws/request.Request" representing the @@ -5054,13 +5402,12 @@ func (c *AutoScaling) SuspendProcessesRequest(input *ScalingProcessQuery) (req * // the specified Auto Scaling group. // // If you suspend either the Launch or Terminate process types, it can prevent -// other process types from functioning properly. -// -// To resume processes that have been suspended, use ResumeProcesses. -// -// For more information, see Suspending and Resuming Scaling Processes (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-suspend-resume-processes.html) +// other process types from functioning properly. For more information, see +// Suspending and Resuming Scaling Processes (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-suspend-resume-processes.html) // in the Amazon EC2 Auto Scaling User Guide. // +// To resume processes that have been suspended, call the ResumeProcesses API. 
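+//
+// For example, scheduled actions might be paused during maintenance and later
+// resumed with a sketch like this (the group name is illustrative):
+//
+//    query := &autoscaling.ScalingProcessQuery{
+//        AutoScalingGroupName: aws.String("my-asg"),
+//        ScalingProcesses:     []*string{aws.String("ScheduledActions")},
+//    }
+//    if _, err := client.SuspendProcesses(query); err != nil {
+//        // handle error
+//    }
+//    // ... perform maintenance, then:
+//    if _, err := client.ResumeProcesses(query); err != nil {
+//        // handle error
+//    }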
+// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -5146,7 +5493,19 @@ func (c *AutoScaling) TerminateInstanceInAutoScalingGroupRequest(input *Terminat // size. // // This call simply makes a termination request. The instance is not terminated -// immediately. +// immediately. When an instance is terminated, the instance status changes +// to terminated. You can't connect to or start an instance after you've terminated +// it. +// +// If you do not specify the option to decrement the desired capacity, Amazon +// EC2 Auto Scaling launches instances to replace the ones that are terminated. +// +// By default, Amazon EC2 Auto Scaling balances instances across all Availability +// Zones. If you decrement the desired capacity, your Auto Scaling group can +// become unbalanced between Availability Zones. Amazon EC2 Auto Scaling tries +// to rebalance the group, and rebalancing might terminate instances in other +// zones. For more information, see Rebalancing Activities (https://docs.aws.amazon.com/autoscaling/ec2/userguide/auto-scaling-benefits.html#AutoScalingBehavior.InstanceUsage) +// in the Amazon EC2 Auto Scaling User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5253,7 +5612,7 @@ func (c *AutoScaling) UpdateAutoScalingGroupRequest(input *UpdateAutoScalingGrou // // Note the following about changing DesiredCapacity, MaxSize, or MinSize: // -// * If a scale-in event occurs as a result of a new DesiredCapacity value +// * If a scale-in activity occurs as a result of a new DesiredCapacity value // that is lower than the current size of the group, the Auto Scaling group // uses its termination policy to determine which instances to terminate. // @@ -5266,9 +5625,10 @@ func (c *AutoScaling) UpdateAutoScalingGroupRequest(input *UpdateAutoScalingGrou // of the group, this sets the group's DesiredCapacity to the new MaxSize // value. // -// To see which parameters have been set, use DescribeAutoScalingGroups. You -// can also view the scaling policies for an Auto Scaling group using DescribePolicies. -// If the group has scaling policies, you can update them using PutScalingPolicy. +// To see which parameters have been set, call the DescribeAutoScalingGroups +// API. To view the scaling policies for an Auto Scaling group, call the DescribePolicies +// API. If the group has scaling policies, you can update them by calling the +// PutScalingPolicy API. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5869,17 +6229,23 @@ type BlockDeviceMapping struct { // DeviceName is a required field DeviceName *string `min:"1" type:"string" required:"true"` - // The information about the Amazon EBS volume. + // Parameters used to automatically set up EBS volumes when an instance is launched. + // + // You can specify either VirtualName or Ebs, but not both. Ebs *Ebs `type:"structure"` - // Suppresses a device mapping. + // Setting this value to true suppresses the specified device included in the + // block device mapping of the AMI. + // + // If NoDevice is true for the root device, instances might fail the EC2 health + // check. In that case, Amazon EC2 Auto Scaling launches replacement instances. 
// - // If this parameter is true for the root device, the instance might fail the - // EC2 health check. In that case, Amazon EC2 Auto Scaling launches a replacement - // instance. + // If you specify NoDevice, you cannot specify Ebs. NoDevice *bool `type:"boolean"` // The name of the virtual device (for example, ephemeral0). + // + // You can specify either VirtualName or Ebs, but not both. VirtualName *string `min:"1" type:"string"` } @@ -5941,56 +6307,120 @@ func (s *BlockDeviceMapping) SetVirtualName(v string) *BlockDeviceMapping { return s } -type CompleteLifecycleActionInput struct { +type CancelInstanceRefreshInput struct { _ struct{} `type:"structure"` // The name of the Auto Scaling group. // // AutoScalingGroupName is a required field AutoScalingGroupName *string `min:"1" type:"string" required:"true"` - - // The ID of the instance. - InstanceId *string `min:"1" type:"string"` - - // The action for the group to take. This parameter can be either CONTINUE or - // ABANDON. - // - // LifecycleActionResult is a required field - LifecycleActionResult *string `type:"string" required:"true"` - - // A universally unique identifier (UUID) that identifies a specific lifecycle - // action associated with an instance. Amazon EC2 Auto Scaling sends this token - // to the notification target you specified when you created the lifecycle hook. - LifecycleActionToken *string `min:"36" type:"string"` - - // The name of the lifecycle hook. - // - // LifecycleHookName is a required field - LifecycleHookName *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s CompleteLifecycleActionInput) String() string { +func (s CancelInstanceRefreshInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CompleteLifecycleActionInput) GoString() string { +func (s CancelInstanceRefreshInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CompleteLifecycleActionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CompleteLifecycleActionInput"} +func (s *CancelInstanceRefreshInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CancelInstanceRefreshInput"} if s.AutoScalingGroupName == nil { invalidParams.Add(request.NewErrParamRequired("AutoScalingGroupName")) } if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 { invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1)) } - if s.InstanceId != nil && len(*s.InstanceId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("InstanceId", 1)) - } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAutoScalingGroupName sets the AutoScalingGroupName field's value. +func (s *CancelInstanceRefreshInput) SetAutoScalingGroupName(v string) *CancelInstanceRefreshInput { + s.AutoScalingGroupName = &v + return s +} + +type CancelInstanceRefreshOutput struct { + _ struct{} `type:"structure"` + + // The instance refresh ID. + InstanceRefreshId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CancelInstanceRefreshOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelInstanceRefreshOutput) GoString() string { + return s.String() +} + +// SetInstanceRefreshId sets the InstanceRefreshId field's value. 
+func (s *CancelInstanceRefreshOutput) SetInstanceRefreshId(v string) *CancelInstanceRefreshOutput { + s.InstanceRefreshId = &v + return s +} + +type CompleteLifecycleActionInput struct { + _ struct{} `type:"structure"` + + // The name of the Auto Scaling group. + // + // AutoScalingGroupName is a required field + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // The ID of the instance. + InstanceId *string `min:"1" type:"string"` + + // The action for the group to take. This parameter can be either CONTINUE or + // ABANDON. + // + // LifecycleActionResult is a required field + LifecycleActionResult *string `type:"string" required:"true"` + + // A universally unique identifier (UUID) that identifies a specific lifecycle + // action associated with an instance. Amazon EC2 Auto Scaling sends this token + // to the notification target you specified when you created the lifecycle hook. + LifecycleActionToken *string `min:"36" type:"string"` + + // The name of the lifecycle hook. + // + // LifecycleHookName is a required field + LifecycleHookName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CompleteLifecycleActionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteLifecycleActionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CompleteLifecycleActionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CompleteLifecycleActionInput"} + if s.AutoScalingGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("AutoScalingGroupName")) + } + if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1)) + } + if s.InstanceId != nil && len(*s.InstanceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InstanceId", 1)) + } if s.LifecycleActionResult == nil { invalidParams.Add(request.NewErrParamRequired("LifecycleActionResult")) } @@ -6070,18 +6500,34 @@ type CreateAutoScalingGroupInput struct { // is required to launch instances into EC2-Classic. AvailabilityZones []*string `min:"1" type:"list"` + // Indicates whether capacity rebalance is enabled. Otherwise, capacity rebalance + // is disabled. + // + // You can enable capacity rebalancing for your Auto Scaling groups when using + // Spot Instances. When you turn on capacity rebalancing, Amazon EC2 Auto Scaling + // attempts to launch a Spot Instance whenever Amazon EC2 predicts that a Spot + // Instance is at an elevated risk of interruption. After launching a new instance, + // it then terminates an old instance. For more information, see Amazon EC2 + // Auto Scaling capacity rebalancing (https://docs.aws.amazon.com/autoscaling/ec2/userguide/capacity-rebalance.html) + // in the Amazon EC2 Auto Scaling User Guide. + CapacityRebalance *bool `type:"boolean"` + // The amount of time, in seconds, after a scaling activity completes before // another scaling activity can start. The default value is 300. // - // For more information, see Scaling Cooldowns (https://docs.aws.amazon.com/autoscaling/ec2/userguide/Cooldown.html) + // This setting applies when using simple scaling policies, but not when using + // other scaling policies or scheduled scaling. 
For more information, see Scaling + // Cooldowns for Amazon EC2 Auto Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/Cooldown.html) // in the Amazon EC2 Auto Scaling User Guide. DefaultCooldown *int64 `type:"integer"` - // The number of Amazon EC2 instances that the Auto Scaling group attempts to - // maintain. This number must be greater than or equal to the minimum size of - // the group and less than or equal to the maximum size of the group. If you - // do not specify a desired capacity, the default is the minimum size of the - // group. + // The desired capacity is the initial capacity of the Auto Scaling group at + // the time of its creation and the capacity it attempts to maintain. It can + // scale beyond this capacity if you configure automatic scaling. + // + // This number must be greater than or equal to the minimum size of the group + // and less than or equal to the maximum size of the group. If you do not specify + // a desired capacity, the default is the minimum size of the group. DesiredCapacity *int64 `type:"integer"` // The amount of time, in seconds, that Amazon EC2 Auto Scaling waits before @@ -6092,7 +6538,7 @@ type CreateAutoScalingGroupInput struct { // For more information, see Health Check Grace Period (https://docs.aws.amazon.com/autoscaling/ec2/userguide/healthcheck.html#health-check-grace-period) // in the Amazon EC2 Auto Scaling User Guide. // - // Conditional: This parameter is required if you are adding an ELB health check. + // Required if you are adding an ELB health check. HealthCheckGracePeriod *int64 `type:"integer"` // The service to use for the health checks. The valid values are EC2 and ELB. @@ -6105,33 +6551,38 @@ type CreateAutoScalingGroupInput struct { HealthCheckType *string `min:"1" type:"string"` // The ID of the instance used to create a launch configuration for the group. + // To get the instance ID, use the Amazon EC2 DescribeInstances (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html) + // API operation. // // When you specify an ID of an instance, Amazon EC2 Auto Scaling creates a // new launch configuration and associates it with the group. This launch configuration // derives its attributes from the specified instance, except for the block // device mapping. // - // For more information, see Create an Auto Scaling Group Using an EC2 Instance - // (https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-asg-from-instance.html) - // in the Amazon EC2 Auto Scaling User Guide. - // // You must specify one of the following parameters in your request: LaunchConfigurationName, // LaunchTemplate, InstanceId, or MixedInstancesPolicy. InstanceId *string `min:"1" type:"string"` - // The name of the launch configuration. + // The name of the launch configuration to use when an instance is launched. + // To get the launch configuration name, use the DescribeLaunchConfigurations + // API operation. New launch configurations can be created with the CreateLaunchConfiguration + // API. // - // If you do not specify LaunchConfigurationName, you must specify one of the - // following parameters: InstanceId, LaunchTemplate, or MixedInstancesPolicy. + // You must specify one of the following parameters in your request: LaunchConfigurationName, + // LaunchTemplate, InstanceId, or MixedInstancesPolicy. LaunchConfigurationName *string `min:"1" type:"string"` - // The launch template to use to launch instances. 
+ // Parameters used to specify the launch template and version to use when an + // instance is launched. // // For more information, see LaunchTemplateSpecification (https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_LaunchTemplateSpecification.html) // in the Amazon EC2 Auto Scaling API Reference. // - // If you do not specify LaunchTemplate, you must specify one of the following - // parameters: InstanceId, LaunchConfigurationName, or MixedInstancesPolicy. + // You can alternatively associate a launch template to the Auto Scaling group + // by using the MixedInstancesPolicy parameter. + // + // You must specify one of the following parameters in your request: LaunchConfigurationName, + // LaunchTemplate, InstanceId, or MixedInstancesPolicy. LaunchTemplate *LaunchTemplateSpecification `type:"structure"` // One or more lifecycle hooks. @@ -6147,12 +6598,27 @@ type CreateAutoScalingGroupInput struct { LoadBalancerNames []*string `type:"list"` // The maximum amount of time, in seconds, that an instance can be in service. + // The default is null. + // + // This parameter is optional, but if you specify a value for it, you must specify + // a value of at least 604,800 seconds (7 days). To clear a previously set value, + // specify a new value of 0. + // + // For more information, see Replacing Auto Scaling Instances Based on Maximum + // Instance Lifetime (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-max-instance-lifetime.html) + // in the Amazon EC2 Auto Scaling User Guide. // - // Valid Range: Minimum value of 604800. + // Valid Range: Minimum value of 0. MaxInstanceLifetime *int64 `type:"integer"` // The maximum size of the group. // + // With a mixed instances policy that uses instance weighting, Amazon EC2 Auto + // Scaling may need to go above MaxSize to meet your capacity requirements. + // In this event, Amazon EC2 Auto Scaling will never go above MaxSize by more + // than your largest instance weight (weights that define how many units each + // instance contributes to the desired capacity of the group). + // // MaxSize is a required field MaxSize *int64 `type:"integer" required:"true"` @@ -6203,7 +6669,14 @@ type CreateAutoScalingGroupInput struct { // in the Amazon EC2 Auto Scaling User Guide. ServiceLinkedRoleARN *string `min:"1" type:"string"` - // One or more tags. + // One or more tags. You can tag your Auto Scaling group and propagate the tags + // to the Amazon EC2 instances it launches. + // + // Tags are not propagated to Amazon EBS volumes. To add tags to Amazon EBS + // volumes, specify the tags in a launch template but use caution. If the launch + // template specifies an instance tag with a key that is also specified for + // the Auto Scaling group, Amazon EC2 Auto Scaling overrides the value of that + // instance tag with the value specified by the Auto Scaling group. // // For more information, see Tagging Auto Scaling Groups and Instances (https://docs.aws.amazon.com/autoscaling/ec2/userguide/autoscaling-tagging.html) // in the Amazon EC2 Auto Scaling User Guide. @@ -6331,6 +6804,12 @@ func (s *CreateAutoScalingGroupInput) SetAvailabilityZones(v []*string) *CreateA return s } +// SetCapacityRebalance sets the CapacityRebalance field's value. +func (s *CreateAutoScalingGroupInput) SetCapacityRebalance(v bool) *CreateAutoScalingGroupInput { + s.CapacityRebalance = &v + return s +} + // SetDefaultCooldown sets the DefaultCooldown field's value. 
func (s *CreateAutoScalingGroupInput) SetDefaultCooldown(v int64) *CreateAutoScalingGroupInput { s.DefaultCooldown = &v @@ -6559,7 +7038,7 @@ type CreateLaunchConfigurationInput struct { // When detailed monitoring is enabled, Amazon CloudWatch generates metrics // every minute and your account is charged a fee. When you disable detailed // monitoring, CloudWatch generates metrics every 5 minutes. For more information, - // see Configure Monitoring for Auto Scaling Instances (https://docs.aws.amazon.com/autoscaling/latest/userguide/as-instance-monitoring.html#enable-as-instance-metrics) + // see Configure Monitoring for Auto Scaling Instances (https://docs.aws.amazon.com/autoscaling/latest/userguide/enable-as-instance-metrics.html) // in the Amazon EC2 Auto Scaling User Guide. InstanceMonitoring *InstanceMonitoring `type:"structure"` @@ -6586,6 +7065,11 @@ type CreateLaunchConfigurationInput struct { // LaunchConfigurationName is a required field LaunchConfigurationName *string `min:"1" type:"string" required:"true"` + // The metadata options for the instances. For more information, see Configuring + // the Instance Metadata Options (https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-launch-config.html#launch-configurations-imds) + // in the Amazon EC2 Auto Scaling User Guide. + MetadataOptions *InstanceMetadataOptions `type:"structure"` + // The tenancy of the instance. An instance with dedicated tenancy runs on isolated, // single-tenant hardware and can only be launched into a VPC. // @@ -6693,6 +7177,11 @@ func (s *CreateLaunchConfigurationInput) Validate() error { } } } + if s.MetadataOptions != nil { + if err := s.MetadataOptions.Validate(); err != nil { + invalidParams.AddNested("MetadataOptions", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -6778,6 +7267,12 @@ func (s *CreateLaunchConfigurationInput) SetLaunchConfigurationName(v string) *C return s } +// SetMetadataOptions sets the MetadataOptions field's value. +func (s *CreateLaunchConfigurationInput) SetMetadataOptions(v *InstanceMetadataOptions) *CreateLaunchConfigurationInput { + s.MetadataOptions = v + return s +} + // SetPlacementTenancy sets the PlacementTenancy field's value. func (s *CreateLaunchConfigurationInput) SetPlacementTenancy(v string) *CreateLaunchConfigurationInput { s.PlacementTenancy = &v @@ -7482,12 +7977,12 @@ func (s DescribeAccountLimitsInput) GoString() string { type DescribeAccountLimitsOutput struct { _ struct{} `type:"structure"` - // The maximum number of groups allowed for your AWS account. The default limit - // is 200 per AWS Region. + // The maximum number of groups allowed for your AWS account. The default is + // 200 groups per AWS Region. MaxNumberOfAutoScalingGroups *int64 `type:"integer"` // The maximum number of launch configurations allowed for your AWS account. - // The default limit is 200 per AWS Region. + // The default is 200 launch configurations per AWS Region. MaxNumberOfLaunchConfigurations *int64 `type:"integer"` // The current number of groups for your AWS account. @@ -7769,6 +8264,111 @@ func (s *DescribeAutoScalingNotificationTypesOutput) SetAutoScalingNotificationT return s } +type DescribeInstanceRefreshesInput struct { + _ struct{} `type:"structure"` + + // The name of the Auto Scaling group. + // + // AutoScalingGroupName is a required field + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // One or more instance refresh IDs. 
+ InstanceRefreshIds []*string `type:"list"` + + // The maximum number of items to return with this call. The default value is + // 50 and the maximum value is 100. + MaxRecords *int64 `type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeInstanceRefreshesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstanceRefreshesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeInstanceRefreshesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeInstanceRefreshesInput"} + if s.AutoScalingGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("AutoScalingGroupName")) + } + if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAutoScalingGroupName sets the AutoScalingGroupName field's value. +func (s *DescribeInstanceRefreshesInput) SetAutoScalingGroupName(v string) *DescribeInstanceRefreshesInput { + s.AutoScalingGroupName = &v + return s +} + +// SetInstanceRefreshIds sets the InstanceRefreshIds field's value. +func (s *DescribeInstanceRefreshesInput) SetInstanceRefreshIds(v []*string) *DescribeInstanceRefreshesInput { + s.InstanceRefreshIds = v + return s +} + +// SetMaxRecords sets the MaxRecords field's value. +func (s *DescribeInstanceRefreshesInput) SetMaxRecords(v int64) *DescribeInstanceRefreshesInput { + s.MaxRecords = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeInstanceRefreshesInput) SetNextToken(v string) *DescribeInstanceRefreshesInput { + s.NextToken = &v + return s +} + +type DescribeInstanceRefreshesOutput struct { + _ struct{} `type:"structure"` + + // The instance refreshes for the specified group. + InstanceRefreshes []*InstanceRefresh `type:"list"` + + // A string that indicates that the response contains more items than can be + // returned in a single response. To receive additional items, specify this + // string for the NextToken value when requesting the next set of items. This + // value is null when there are no more items to return. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeInstanceRefreshesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstanceRefreshesOutput) GoString() string { + return s.String() +} + +// SetInstanceRefreshes sets the InstanceRefreshes field's value. +func (s *DescribeInstanceRefreshesOutput) SetInstanceRefreshes(v []*InstanceRefresh) *DescribeInstanceRefreshesOutput { + s.InstanceRefreshes = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeInstanceRefreshesOutput) SetNextToken(v string) *DescribeInstanceRefreshesOutput { + s.NextToken = &v + return s +} + type DescribeLaunchConfigurationsInput struct { _ struct{} `type:"structure"` @@ -9011,8 +9611,7 @@ type DisableMetricsCollectionInput struct { // AutoScalingGroupName is a required field AutoScalingGroupName *string `min:"1" type:"string" required:"true"` - // One or more of the following metrics. 
If you omit this parameter, all metrics - // are disabled. + // Specifies one or more of the following metrics: // // * GroupMinSize // @@ -9029,6 +9628,18 @@ type DisableMetricsCollectionInput struct { // * GroupTerminatingInstances // // * GroupTotalInstances + // + // * GroupInServiceCapacity + // + // * GroupPendingCapacity + // + // * GroupStandbyCapacity + // + // * GroupTerminatingCapacity + // + // * GroupTotalCapacity + // + // If you omit this parameter, all metrics are disabled. Metrics []*string `type:"list"` } @@ -9084,7 +9695,8 @@ func (s DisableMetricsCollectionOutput) GoString() string { return s.String() } -// Describes an Amazon EBS volume. Used in combination with BlockDeviceMapping. +// Describes information used to set up an Amazon EBS volume specified in a +// block device mapping. type Ebs struct { _ struct{} `type:"structure"` @@ -9122,15 +9734,13 @@ type Ebs struct { // see Amazon EBS Volume Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) // in the Amazon EC2 User Guide for Linux Instances. // - // Conditional: This parameter is required when the volume type is io1. (Not - // used with standard, gp2, st1, or sc1 volumes.) + // Required when the volume type is io1. (Not used with standard, gp2, st1, + // or sc1 volumes.) Iops *int64 `min:"100" type:"integer"` // The snapshot ID of the volume to use. // - // Conditional: This parameter is optional if you specify a volume size. If - // you specify both SnapshotId and VolumeSize, VolumeSize must be equal or greater - // than the size of the snapshot. + // You must specify either a VolumeSize or a SnapshotId. SnapshotId *string `min:"1" type:"string"` // The volume size, in Gibibytes (GiB). @@ -9142,7 +9752,9 @@ type Ebs struct { // Default: If you create a volume from a snapshot and you don't specify a volume // size, the default is the snapshot size. // - // At least one of VolumeSize or SnapshotId is required. + // You must specify either a VolumeSize or a SnapshotId. If you specify both + // SnapshotId and VolumeSize, the volume size must be equal or greater than + // the size of the snapshot. VolumeSize *int64 `min:"1" type:"integer"` // The volume type, which can be standard for Magnetic, io1 for Provisioned @@ -9236,8 +9848,8 @@ type EnableMetricsCollectionInput struct { // Granularity is a required field Granularity *string `min:"1" type:"string" required:"true"` - // One or more of the following metrics. If you omit this parameter, all metrics - // are enabled. + // Specifies which group-level metrics to start collecting. You can specify + // one or more of the following metrics: // // * GroupMinSize // @@ -9254,6 +9866,20 @@ type EnableMetricsCollectionInput struct { // * GroupTerminatingInstances // // * GroupTotalInstances + // + // The instance weighting feature supports the following additional metrics: + // + // * GroupInServiceCapacity + // + // * GroupPendingCapacity + // + // * GroupStandbyCapacity + // + // * GroupTerminatingCapacity + // + // * GroupTotalCapacity + // + // If you omit this parameter, all metrics are enabled. Metrics []*string `type:"list"` } @@ -9345,6 +9971,16 @@ type EnabledMetric struct { // * GroupTerminatingInstances // // * GroupTotalInstances + // + // * GroupInServiceCapacity + // + // * GroupPendingCapacity + // + // * GroupStandbyCapacity + // + // * GroupTerminatingCapacity + // + // * GroupTotalCapacity Metric *string `min:"1" type:"string"` } @@ -9466,16 +10102,14 @@ type ExecutePolicyInput struct { // The breach threshold for the alarm. 
// - // Conditional: This parameter is required if the policy type is StepScaling - // and not supported otherwise. + // Required if the policy type is StepScaling and not supported otherwise. BreachThreshold *float64 `type:"double"` // Indicates whether Amazon EC2 Auto Scaling waits for the cooldown period to // complete before executing the policy. // - // This parameter is not supported if the policy type is StepScaling or TargetTrackingScaling. - // - // For more information, see Scaling Cooldowns (https://docs.aws.amazon.com/autoscaling/ec2/userguide/Cooldown.html) + // Valid only if the policy type is SimpleScaling. For more information, see + // Scaling Cooldowns for Amazon EC2 Auto Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/Cooldown.html) // in the Amazon EC2 Auto Scaling User Guide. HonorCooldown *bool `type:"boolean"` @@ -9488,8 +10122,7 @@ type ExecutePolicyInput struct { // If you specify a metric value that doesn't correspond to a step adjustment // for the policy, the call returns an error. // - // Conditional: This parameter is required if the policy type is StepScaling - // and not supported otherwise. + // Required if the policy type is StepScaling and not supported otherwise. MetricValue *float64 `type:"double"` // The name or ARN of the policy. @@ -9688,15 +10321,19 @@ func (s *FailedScheduledUpdateGroupActionRequest) SetScheduledActionName(v strin return s } -// Describes a filter. +// Describes a filter that is used to return a more specific list of results +// when describing tags. +// +// For more information, see Tagging Auto Scaling Groups and Instances (https://docs.aws.amazon.com/autoscaling/ec2/userguide/autoscaling-tagging.html) +// in the Amazon EC2 Auto Scaling User Guide. type Filter struct { _ struct{} `type:"structure"` - // The name of the filter. The valid values are: "auto-scaling-group", "key", - // "value", and "propagate-at-launch". + // The name of the filter. The valid values are: auto-scaling-group, key, value, + // and propagate-at-launch. Name *string `type:"string"` - // The value of the filter. + // One or more filter values. Filter values are case-sensitive. Values []*string `type:"list"` } @@ -9739,13 +10376,15 @@ type Group struct { // AvailabilityZones is a required field AvailabilityZones []*string `min:"1" type:"list" required:"true"` + // Indicates whether capacity rebalance is enabled. + CapacityRebalance *bool `type:"boolean"` + // The date and time the group was created. // // CreatedTime is a required field CreatedTime *time.Time `type:"timestamp" required:"true"` - // The amount of time, in seconds, after a scaling activity completes before - // another scaling activity can start. + // The duration of the default cooldown period, in seconds. // // DefaultCooldown is a required field DefaultCooldown *int64 `type:"integer" required:"true"` @@ -9784,7 +10423,7 @@ type Group struct { // The maximum amount of time, in seconds, that an instance can be in service. // - // Valid Range: Minimum value of 604800. + // Valid Range: Minimum value of 0. MaxInstanceLifetime *int64 `type:"integer"` // The maximum size of the group. @@ -9811,7 +10450,8 @@ type Group struct { // group uses to call other AWS services on your behalf. ServiceLinkedRoleARN *string `min:"1" type:"string"` - // The current state of the group when DeleteAutoScalingGroup is in progress. + // The current state of the group when the DeleteAutoScalingGroup operation + // is in progress. 
Status *string `min:"1" type:"string"` // The suspended processes associated with the group. @@ -9858,6 +10498,12 @@ func (s *Group) SetAvailabilityZones(v []*string) *Group { return s } +// SetCapacityRebalance sets the CapacityRebalance field's value. +func (s *Group) SetCapacityRebalance(v bool) *Group { + s.CapacityRebalance = &v + return s +} + // SetCreatedTime sets the CreatedTime field's value. func (s *Group) SetCreatedTime(v time.Time) *Group { s.CreatedTime = &v @@ -10235,6 +10881,84 @@ func (s *InstanceDetails) SetWeightedCapacity(v string) *InstanceDetails { return s } +// The metadata options for the instances. For more information, see Configuring +// the Instance Metadata Options (https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-launch-config.html#launch-configurations-imds) +// in the Amazon EC2 Auto Scaling User Guide. +type InstanceMetadataOptions struct { + _ struct{} `type:"structure"` + + // This parameter enables or disables the HTTP metadata endpoint on your instances. + // If the parameter is not specified, the default state is enabled. + // + // If you specify a value of disabled, you will not be able to access your instance + // metadata. + HttpEndpoint *string `type:"string" enum:"InstanceMetadataEndpointState"` + + // The desired HTTP PUT response hop limit for instance metadata requests. The + // larger the number, the further instance metadata requests can travel. + // + // Default: 1 + // + // Possible values: Integers from 1 to 64 + HttpPutResponseHopLimit *int64 `min:"1" type:"integer"` + + // The state of token usage for your instance metadata requests. If the parameter + // is not specified in the request, the default state is optional. + // + // If the state is optional, you can choose to retrieve instance metadata with + // or without a signed token header on your request. If you retrieve the IAM + // role credentials without a token, the version 1.0 role credentials are returned. + // If you retrieve the IAM role credentials using a valid signed token, the + // version 2.0 role credentials are returned. + // + // If the state is required, you must send a signed token header with any instance + // metadata retrieval requests. In this state, retrieving the IAM role credentials + // always returns the version 2.0 credentials; the version 1.0 credentials are + // not available. + HttpTokens *string `type:"string" enum:"InstanceMetadataHttpTokensState"` +} + +// String returns the string representation +func (s InstanceMetadataOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceMetadataOptions) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InstanceMetadataOptions) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InstanceMetadataOptions"} + if s.HttpPutResponseHopLimit != nil && *s.HttpPutResponseHopLimit < 1 { + invalidParams.Add(request.NewErrParamMinValue("HttpPutResponseHopLimit", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetHttpEndpoint sets the HttpEndpoint field's value. +func (s *InstanceMetadataOptions) SetHttpEndpoint(v string) *InstanceMetadataOptions { + s.HttpEndpoint = &v + return s +} + +// SetHttpPutResponseHopLimit sets the HttpPutResponseHopLimit field's value. 
+func (s *InstanceMetadataOptions) SetHttpPutResponseHopLimit(v int64) *InstanceMetadataOptions { + s.HttpPutResponseHopLimit = &v + return s +} + +// SetHttpTokens sets the HttpTokens field's value. +func (s *InstanceMetadataOptions) SetHttpTokens(v string) *InstanceMetadataOptions { + s.HttpTokens = &v + return s +} + // Describes whether detailed monitoring is enabled for the Auto Scaling instances. type InstanceMonitoring struct { _ struct{} `type:"structure"` @@ -10259,8 +10983,115 @@ func (s *InstanceMonitoring) SetEnabled(v bool) *InstanceMonitoring { return s } -// Describes an instances distribution for an Auto Scaling group with MixedInstancesPolicy. -// +// Describes an instance refresh for an Auto Scaling group. +type InstanceRefresh struct { + _ struct{} `type:"structure"` + + // The name of the Auto Scaling group. + AutoScalingGroupName *string `min:"1" type:"string"` + + // The date and time at which the instance refresh ended. + EndTime *time.Time `type:"timestamp"` + + // The instance refresh ID. + InstanceRefreshId *string `min:"1" type:"string"` + + // The number of instances remaining to update before the instance refresh is + // complete. + InstancesToUpdate *int64 `type:"integer"` + + // The percentage of the instance refresh that is complete. For each instance + // replacement, Amazon EC2 Auto Scaling tracks the instance's health status + // and warm-up time. When the instance's health status changes to healthy and + // the specified warm-up time passes, the instance is considered updated and + // added to the percentage complete. + PercentageComplete *int64 `type:"integer"` + + // The date and time at which the instance refresh began. + StartTime *time.Time `type:"timestamp"` + + // The current status for the instance refresh operation: + // + // * Pending - The request was created, but the operation has not started. + // + // * InProgress - The operation is in progress. + // + // * Successful - The operation completed successfully. + // + // * Failed - The operation failed to complete. You can troubleshoot using + // the status reason and the scaling activities. + // + // * Cancelling - An ongoing operation is being cancelled. Cancellation does + // not roll back any replacements that have already been completed, but it + // prevents new replacements from being started. + // + // * Cancelled - The operation is cancelled. + Status *string `type:"string" enum:"InstanceRefreshStatus"` + + // Provides more details about the current status of the instance refresh. + StatusReason *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s InstanceRefresh) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceRefresh) GoString() string { + return s.String() +} + +// SetAutoScalingGroupName sets the AutoScalingGroupName field's value. +func (s *InstanceRefresh) SetAutoScalingGroupName(v string) *InstanceRefresh { + s.AutoScalingGroupName = &v + return s +} + +// SetEndTime sets the EndTime field's value. +func (s *InstanceRefresh) SetEndTime(v time.Time) *InstanceRefresh { + s.EndTime = &v + return s +} + +// SetInstanceRefreshId sets the InstanceRefreshId field's value. +func (s *InstanceRefresh) SetInstanceRefreshId(v string) *InstanceRefresh { + s.InstanceRefreshId = &v + return s +} + +// SetInstancesToUpdate sets the InstancesToUpdate field's value. 
+func (s *InstanceRefresh) SetInstancesToUpdate(v int64) *InstanceRefresh { + s.InstancesToUpdate = &v + return s +} + +// SetPercentageComplete sets the PercentageComplete field's value. +func (s *InstanceRefresh) SetPercentageComplete(v int64) *InstanceRefresh { + s.PercentageComplete = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *InstanceRefresh) SetStartTime(v time.Time) *InstanceRefresh { + s.StartTime = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *InstanceRefresh) SetStatus(v string) *InstanceRefresh { + s.Status = &v + return s +} + +// SetStatusReason sets the StatusReason field's value. +func (s *InstanceRefresh) SetStatusReason(v string) *InstanceRefresh { + s.StatusReason = &v + return s +} + +// Describes an instances distribution for an Auto Scaling group with a MixedInstancesPolicy. +// // The instances distribution specifies the distribution of On-Demand Instances // and Spot Instances, the maximum price to pay for Spot Instances, and how // the Auto Scaling group allocates instance types to fulfill On-Demand and @@ -10460,7 +11291,7 @@ type LaunchConfiguration struct { // or basic (false) monitoring. // // For more information, see Configure Monitoring for Auto Scaling Instances - // (https://docs.aws.amazon.com/autoscaling/latest/userguide/as-instance-monitoring.html#enable-as-instance-metrics) + // (https://docs.aws.amazon.com/autoscaling/latest/userguide/enable-as-instance-metrics.html) // in the Amazon EC2 Auto Scaling User Guide. InstanceMonitoring *InstanceMonitoring `type:"structure"` @@ -10490,6 +11321,11 @@ type LaunchConfiguration struct { // LaunchConfigurationName is a required field LaunchConfigurationName *string `min:"1" type:"string" required:"true"` + // The metadata options for the instances. For more information, see Configuring + // the Instance Metadata Options (https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-launch-config.html#launch-configurations-imds) + // in the Amazon EC2 Auto Scaling User Guide. + MetadataOptions *InstanceMetadataOptions `type:"structure"` + // The tenancy of the instance, either default or dedicated. An instance with // dedicated tenancy runs on isolated, single-tenant hardware and can only be // launched into a VPC. @@ -10618,6 +11454,12 @@ func (s *LaunchConfiguration) SetLaunchConfigurationName(v string) *LaunchConfig return s } +// SetMetadataOptions sets the MetadataOptions field's value. +func (s *LaunchConfiguration) SetMetadataOptions(v *InstanceMetadataOptions) *LaunchConfiguration { + s.MetadataOptions = v + return s +} + // SetPlacementTenancy sets the PlacementTenancy field's value. func (s *LaunchConfiguration) SetPlacementTenancy(v string) *LaunchConfiguration { s.PlacementTenancy = &v @@ -10665,9 +11507,12 @@ type LaunchTemplate struct { // or launch template name in the request. LaunchTemplateSpecification *LaunchTemplateSpecification `type:"structure"` - // An optional setting. Any parameters that you specify override the same parameters - // in the launch template. Currently, the only supported override is instance - // type. You can specify between 1 and 20 instance types. + // Any parameters that you specify override the same parameters in the launch + // template. Currently, the only supported override is instance type. You can + // specify between 1 and 20 instance types. + // + // If not provided, Amazon EC2 Auto Scaling will use the instance type specified + // in the launch template to launch instances. 
Overrides []*LaunchTemplateOverrides `type:"list"` } @@ -10718,11 +11563,16 @@ func (s *LaunchTemplate) SetOverrides(v []*LaunchTemplateOverrides) *LaunchTempl return s } -// Describes an override for a launch template. +// Describes an override for a launch template. Currently, the only supported +// override is instance type. +// +// The maximum number of instance type overrides that can be associated with +// an Auto Scaling group is 20. type LaunchTemplateOverrides struct { _ struct{} `type:"structure"` - // The instance type. + // The instance type. You must use an instance type that is supported in your + // requested Region and Availability Zones. // // For information about available instance types, see Available Instance Types // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#AvailableInstanceTypes) @@ -10735,6 +11585,10 @@ type LaunchTemplateOverrides struct { // you chose to set the desired capacity in terms of instances, or a performance // attribute such as vCPUs, memory, or I/O. // + // For more information, see Instance Weighting for Amazon EC2 Auto Scaling + // (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-instance-weighting.html) + // in the Amazon EC2 Auto Scaling User Guide. + // // Valid Range: Minimum value of 1. Maximum value of 999. WeightedCapacity *string `min:"1" type:"string"` } @@ -10777,7 +11631,8 @@ func (s *LaunchTemplateOverrides) SetWeightedCapacity(v string) *LaunchTemplateO return s } -// Describes a launch template and the launch template version. +// Describes the Amazon EC2 launch template and the launch template version +// that can be used by an Auto Scaling group to configure Amazon EC2 instances. // // The launch template that is specified must be configured for use with an // Auto Scaling group. For more information, see Creating a Launch Template @@ -10786,19 +11641,34 @@ func (s *LaunchTemplateOverrides) SetWeightedCapacity(v string) *LaunchTemplateO type LaunchTemplateSpecification struct { _ struct{} `type:"structure"` - // The ID of the launch template. You must specify either a template ID or a - // template name. + // The ID of the launch template. To get the template ID, use the Amazon EC2 + // DescribeLaunchTemplates (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeLaunchTemplates.html) + // API operation. New launch templates can be created using the Amazon EC2 CreateLaunchTemplate + // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateLaunchTemplate.html) + // API. + // + // You must specify either a template ID or a template name. LaunchTemplateId *string `min:"1" type:"string"` - // The name of the launch template. You must specify either a template name - // or a template ID. + // The name of the launch template. To get the template name, use the Amazon + // EC2 DescribeLaunchTemplates (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeLaunchTemplates.html) + // API operation. New launch templates can be created using the Amazon EC2 CreateLaunchTemplate + // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateLaunchTemplate.html) + // API. + // + // You must specify either a template ID or a template name. LaunchTemplateName *string `min:"3" type:"string"` - // The version number, $Latest, or $Default. If the value is $Latest, Amazon - // EC2 Auto Scaling selects the latest version of the launch template when launching - // instances. 
If the value is $Default, Amazon EC2 Auto Scaling selects the - // default version of the launch template when launching instances. The default - // value is $Default. + // The version number, $Latest, or $Default. To get the version number, use + // the Amazon EC2 DescribeLaunchTemplateVersions (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeLaunchTemplateVersions.html) + // API operation. New launch template versions can be created using the Amazon + // EC2 CreateLaunchTemplateVersion (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateLaunchTemplateVersion.html) + // API. + // + // If the value is $Latest, Amazon EC2 Auto Scaling selects the latest version + // of the launch template when launching instances. If the value is $Default, + // Amazon EC2 Auto Scaling selects the default version of the launch template + // when launching instances. The default value is $Default. Version *string `min:"1" type:"string"` } @@ -10851,7 +11721,6 @@ func (s *LaunchTemplateSpecification) SetVersion(v string) *LaunchTemplateSpecif // Describes a lifecycle hook, which tells Amazon EC2 Auto Scaling that you // want to perform an action whenever it launches instances or terminates instances. -// Used in response to DescribeLifecycleHooks. type LifecycleHook struct { _ struct{} `type:"structure"` @@ -10962,7 +11831,8 @@ func (s *LifecycleHook) SetRoleARN(v string) *LifecycleHook { return s } -// Describes a lifecycle hook. Used in combination with CreateAutoScalingGroup. +// Describes information used to specify a lifecycle hook for an Auto Scaling +// group. // // A lifecycle hook tells Amazon EC2 Auto Scaling to perform an action on an // instance when the instance launches (before it is put into service) or as @@ -10983,18 +11853,12 @@ func (s *LifecycleHook) SetRoleARN(v string) *LifecycleHook { // launch or terminate. // // If you need more time, record the lifecycle action heartbeat to keep the -// instance in a pending state using RecordLifecycleActionHeartbeat. +// instance in a pending state. // -// If you finish before the timeout period ends, complete the lifecycle action -// using CompleteLifecycleAction. +// If you finish before the timeout period ends, complete the lifecycle action. // // For more information, see Amazon EC2 Auto Scaling Lifecycle Hooks (https://docs.aws.amazon.com/autoscaling/ec2/userguide/lifecycle-hooks.html) // in the Amazon EC2 Auto Scaling User Guide. -// -// You can view the lifecycle hooks for an Auto Scaling group using DescribeLifecycleHooks. -// You can modify an existing lifecycle hook or create new lifecycle hooks using -// PutLifecycleHook. If you are no longer using a lifecycle hook, you can delete -// it using DeleteLifecycleHook. type LifecycleHookSpecification struct { _ struct{} `type:"structure"` @@ -11253,6 +12117,16 @@ type MetricCollectionType struct { // * GroupTerminatingInstances // // * GroupTotalInstances + // + // * GroupInServiceCapacity + // + // * GroupPendingCapacity + // + // * GroupStandbyCapacity + // + // * GroupTerminatingCapacity + // + // * GroupTotalCapacity Metric *string `min:"1" type:"string"` } @@ -11359,7 +12233,8 @@ func (s *MetricGranularityType) SetGranularity(v string) *MetricGranularityType // You can create a mixed instances policy for a new Auto Scaling group, or // you can create it for an existing group by updating the group to specify // MixedInstancesPolicy as the top-level parameter instead of a launch configuration -// or template. 
For more information, see CreateAutoScalingGroup and UpdateAutoScalingGroup.
+// or launch template. For more information, see CreateAutoScalingGroup and
+// UpdateAutoScalingGroup.
 type MixedInstancesPolicy struct {
 	_ struct{} `type:"structure"`
@@ -11371,7 +12246,7 @@ type MixedInstancesPolicy struct {
 	// The launch template and instance types (overrides).
 	//
-	// This parameter must be specified when creating a mixed instances policy.
+	// Required when creating a mixed instances policy.
 	LaunchTemplate *LaunchTemplate `type:"structure"`
 }
@@ -11491,14 +12366,24 @@ type PredefinedMetricSpecification struct {
 	// a resource label unless the metric type is ALBRequestCountPerTarget and there
 	// is a target group attached to the Auto Scaling group.
 	//
-	// The format is app/load-balancer-name/load-balancer-id/targetgroup/target-group-name/target-group-id
-	// , where
+	// You create the resource label by appending the final portion of the load
+	// balancer ARN and the final portion of the target group ARN into a single
+	// value, separated by a forward slash (/). The format is app/<load-balancer-name>/<load-balancer-id>/targetgroup/<target-group-name>/<target-group-id>,
+	// where:
+	//
+	// * app/<load-balancer-name>/<load-balancer-id> is the final portion of
+	// the load balancer ARN
+	//
+	// * targetgroup/<target-group-name>/<target-group-id> is the final portion
+	// of the target group ARN.
 	//
-	// * app/load-balancer-name/load-balancer-id is the final portion of the
-	// load balancer ARN, and
+	// This is an example: app/EC2Co-EcsEl-1TKLTMITMM0EO/f37c06a68c1748aa/targetgroup/EC2Co-Defau-LDNM7Q3ZH1ZN/6d4ea56ca2d6a18d.
 	//
-	// * targetgroup/target-group-name/target-group-id is the final portion of
-	// the target group ARN.
+	// To find the ARN for an Application Load Balancer, use the DescribeLoadBalancers
+	// (https://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_DescribeLoadBalancers.html)
+	// API operation. To find the ARN for the target group, use the DescribeTargetGroups
+	// (https://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_DescribeTargetGroups.html)
+	// API operation.
 	ResourceLabel *string `min:"1" type:"string"`
 }
@@ -11561,6 +12446,8 @@ type ProcessType struct {
 	//
 	// * HealthCheck
 	//
+	// * InstanceRefresh
+	//
 	// * ReplaceUnhealthy
 	//
 	// * ScheduledActions
@@ -11604,7 +12491,7 @@ type PutLifecycleHookInput struct {
 	//
 	// If the lifecycle hook times out, Amazon EC2 Auto Scaling performs the action
 	// that you specified in the DefaultResult parameter. You can prevent the lifecycle
-	// hook from timing out by calling RecordLifecycleActionHeartbeat.
+	// hook from timing out by calling the RecordLifecycleActionHeartbeat API.
 	HeartbeatTimeout *int64 `type:"integer"`
 	// The name of the lifecycle hook.
@@ -11619,8 +12506,7 @@ type PutLifecycleHookInput struct {
 	//
 	// * autoscaling:EC2_INSTANCE_TERMINATING
 	//
-	// Conditional: This parameter is required for new lifecycle hooks, but optional
-	// when updating existing hooks.
+	// Required for new lifecycle hooks, but optional when updating existing hooks.
 	LifecycleTransition *string `type:"string"`
 	// Additional information that you want to include any time Amazon EC2 Auto
@@ -11646,8 +12532,7 @@ type PutLifecycleHookInput struct {
 	// the specified notification target, for example, an Amazon SNS topic or an
 	// Amazon SQS queue.
 	//
-	// Conditional: This parameter is required for new lifecycle hooks, but optional
-	// when updating existing hooks.
+	// Required for new lifecycle hooks, but optional when updating existing hooks.
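+	//
+	// A hedged usage sketch (editor's addition; the group name, hook name, and
+	// role ARN are hypothetical values):
+	//
+	//    input := &autoscaling.PutLifecycleHookInput{
+	//        AutoScalingGroupName: aws.String("my-asg"),
+	//        LifecycleHookName:    aws.String("launch-hook"),
+	//        LifecycleTransition:  aws.String("autoscaling:EC2_INSTANCE_LAUNCHING"),
+	//        RoleARN:              aws.String("arn:aws:iam::123456789012:role/my-notification-role"),
+	//    }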
RoleARN *string `min:"1" type:"string"` } @@ -11759,8 +12644,9 @@ type PutNotificationConfigurationInput struct { // AutoScalingGroupName is a required field AutoScalingGroupName *string `min:"1" type:"string" required:"true"` - // The type of event that causes the notification to be sent. For more information - // about notification types supported by Amazon EC2 Auto Scaling, see DescribeAutoScalingNotificationTypes. + // The type of event that causes the notification to be sent. To query the notification + // types supported by Amazon EC2 Auto Scaling, call the DescribeAutoScalingNotificationTypes + // API. // // NotificationTypes is a required field NotificationTypes []*string `type:"list" required:"true"` @@ -11842,11 +12728,11 @@ func (s PutNotificationConfigurationOutput) GoString() string { type PutScalingPolicyInput struct { _ struct{} `type:"structure"` - // Specifies whether the ScalingAdjustment parameter is an absolute number or - // a percentage of the current capacity. The valid values are ChangeInCapacity, - // ExactCapacity, and PercentChangeInCapacity. + // Specifies how the scaling adjustment is interpreted (for example, an absolute + // number or a percentage). The valid values are ChangeInCapacity, ExactCapacity, + // and PercentChangeInCapacity. // - // Valid only if the policy type is StepScaling or SimpleScaling. For more information, + // Required if the policy type is StepScaling or SimpleScaling. For more information, // see Scaling Adjustment Types (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-scaling-simple-step.html#as-scaling-adjustment) // in the Amazon EC2 Auto Scaling User Guide. AdjustmentType *string `min:"1" type:"string"` @@ -11856,20 +12742,26 @@ type PutScalingPolicyInput struct { // AutoScalingGroupName is a required field AutoScalingGroupName *string `min:"1" type:"string" required:"true"` - // The amount of time, in seconds, after a scaling activity completes before - // any further dynamic scaling activities can start. If this parameter is not - // specified, the default cooldown period for the group applies. + // The duration of the policy's cooldown period, in seconds. When a cooldown + // period is specified here, it overrides the default cooldown period defined + // for the Auto Scaling group. // // Valid only if the policy type is SimpleScaling. For more information, see - // Scaling Cooldowns (https://docs.aws.amazon.com/autoscaling/ec2/userguide/Cooldown.html) + // Scaling Cooldowns for Amazon EC2 Auto Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/Cooldown.html) // in the Amazon EC2 Auto Scaling User Guide. Cooldown *int64 `type:"integer"` + // Indicates whether the scaling policy is enabled or disabled. The default + // is enabled. For more information, see Disabling a Scaling Policy for an Auto + // Scaling Group (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-enable-disable-scaling-policy.html) + // in the Amazon EC2 Auto Scaling User Guide. + Enabled *bool `type:"boolean"` + // The estimated time, in seconds, until a newly launched instance can contribute - // to the CloudWatch metrics. The default is to use the value specified for - // the default cooldown period for the group. + // to the CloudWatch metrics. If not provided, the default is to use the value + // from the default cooldown period for the Auto Scaling group. // - // Valid only if the policy type is StepScaling or TargetTrackingScaling. + // Valid only if the policy type is TargetTrackingScaling or StepScaling. 
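+	//
+	// For example (editor's sketch; input is an assumed *PutScalingPolicyInput
+	// and 300 seconds is an illustrative value, not a documented default):
+	//
+	//    input.SetEstimatedInstanceWarmup(300)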
EstimatedInstanceWarmup *int64 `type:"integer"` // The aggregation type for the CloudWatch metrics. The valid values are Minimum, @@ -11879,19 +12771,19 @@ type PutScalingPolicyInput struct { // Valid only if the policy type is StepScaling. MetricAggregationType *string `min:"1" type:"string"` - // The minimum number of instances to scale. If the value of AdjustmentType - // is PercentChangeInCapacity, the scaling policy changes the DesiredCapacity - // of the Auto Scaling group by at least this many instances. Otherwise, the - // error is ValidationError. + // The minimum value to scale by when the adjustment type is PercentChangeInCapacity. + // For example, suppose that you create a step scaling policy to scale out an + // Auto Scaling group by 25 percent and you specify a MinAdjustmentMagnitude + // of 2. If the group has 4 instances and the scaling policy is performed, 25 + // percent of 4 is 1. However, because you specified a MinAdjustmentMagnitude + // of 2, Amazon EC2 Auto Scaling scales out the group by 2 instances. // - // This property replaces the MinAdjustmentStep property. For example, suppose - // that you create a step scaling policy to scale out an Auto Scaling group - // by 25 percent and you specify a MinAdjustmentMagnitude of 2. If the group - // has 4 instances and the scaling policy is performed, 25 percent of 4 is 1. - // However, because you specified a MinAdjustmentMagnitude of 2, Amazon EC2 - // Auto Scaling scales out the group by 2 instances. + // Valid only if the policy type is StepScaling or SimpleScaling. For more information, + // see Scaling Adjustment Types (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-scaling-simple-step.html#as-scaling-adjustment) + // in the Amazon EC2 Auto Scaling User Guide. // - // Valid only if the policy type is SimpleScaling or StepScaling. + // Some Auto Scaling groups use instance weights. In this case, set the MinAdjustmentMagnitude + // to a value that is at least as large as your largest instance weight. MinAdjustmentMagnitude *int64 `type:"integer"` // Available for backward compatibility. Use MinAdjustmentMagnitude instead. @@ -11902,36 +12794,50 @@ type PutScalingPolicyInput struct { // PolicyName is a required field PolicyName *string `min:"1" type:"string" required:"true"` - // The policy type. The valid values are SimpleScaling, StepScaling, and TargetTrackingScaling. - // If the policy type is null, the value is treated as SimpleScaling. + // One of the following policy types: + // + // * TargetTrackingScaling + // + // * StepScaling + // + // * SimpleScaling (default) PolicyType *string `min:"1" type:"string"` - // The amount by which a simple scaling policy scales the Auto Scaling group - // in response to an alarm breach. The adjustment is based on the value that - // you specified in the AdjustmentType parameter (either an absolute number - // or a percentage). A positive value adds to the current capacity and a negative - // value subtracts from the current capacity. For exact capacity, you must specify - // a positive value. + // The amount by which to scale, based on the specified adjustment type. A positive + // value adds to the current capacity while a negative number removes from the + // current capacity. For exact capacity, you must specify a positive value. // - // Conditional: If you specify SimpleScaling for the policy type, you must specify - // this parameter. (Not used with any other policy type.) + // Required if the policy type is SimpleScaling. 
(Not used with any other policy + // type.) ScalingAdjustment *int64 `type:"integer"` // A set of adjustments that enable you to scale based on the size of the alarm // breach. // - // Conditional: If you specify StepScaling for the policy type, you must specify - // this parameter. (Not used with any other policy type.) + // Required if the policy type is StepScaling. (Not used with any other policy + // type.) StepAdjustments []*StepAdjustment `type:"list"` // A target tracking scaling policy. Includes support for predefined or customized // metrics. // + // The following predefined metrics are available: + // + // * ASGAverageCPUUtilization + // + // * ASGAverageNetworkIn + // + // * ASGAverageNetworkOut + // + // * ALBRequestCountPerTarget + // + // If you specify ALBRequestCountPerTarget for the metric, you must specify + // the ResourceLabel parameter with the PredefinedMetricSpecification. + // // For more information, see TargetTrackingConfiguration (https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_TargetTrackingConfiguration.html) // in the Amazon EC2 Auto Scaling API Reference. // - // Conditional: If you specify TargetTrackingScaling for the policy type, you - // must specify this parameter. (Not used with any other policy type.) + // Required if the policy type is TargetTrackingScaling. TargetTrackingConfiguration *TargetTrackingConfiguration `type:"structure"` } @@ -12009,6 +12915,12 @@ func (s *PutScalingPolicyInput) SetCooldown(v int64) *PutScalingPolicyInput { return s } +// SetEnabled sets the Enabled field's value. +func (s *PutScalingPolicyInput) SetEnabled(v bool) *PutScalingPolicyInput { + s.Enabled = &v + return s +} + // SetEstimatedInstanceWarmup sets the EstimatedInstanceWarmup field's value. func (s *PutScalingPolicyInput) SetEstimatedInstanceWarmup(v int64) *PutScalingPolicyInput { s.EstimatedInstanceWarmup = &v @@ -12104,17 +13016,19 @@ type PutScheduledUpdateGroupActionInput struct { // AutoScalingGroupName is a required field AutoScalingGroupName *string `min:"1" type:"string" required:"true"` - // The number of EC2 instances that should be running in the Auto Scaling group. + // The desired capacity is the initial capacity of the Auto Scaling group after + // the scheduled action runs and the capacity it attempts to maintain. It can + // scale beyond this capacity if you add more scaling conditions. DesiredCapacity *int64 `type:"integer"` // The date and time for the recurring schedule to end. Amazon EC2 Auto Scaling // does not perform the action after this time. EndTime *time.Time `type:"timestamp"` - // The maximum number of instances in the Auto Scaling group. + // The maximum size of the Auto Scaling group. MaxSize *int64 `type:"integer"` - // The minimum number of instances in the Auto Scaling group. + // The minimum size of the Auto Scaling group. MinSize *int64 `type:"integer"` // The recurring schedule for this action, in Unix cron syntax format. This @@ -12348,6 +13262,45 @@ func (s RecordLifecycleActionHeartbeatOutput) GoString() string { return s.String() } +// Describes information used to start an instance refresh. +type RefreshPreferences struct { + _ struct{} `type:"structure"` + + // The number of seconds until a newly launched instance is configured and ready + // to use. During this time, Amazon EC2 Auto Scaling does not immediately move + // on to the next replacement. The default is to use the value for the health + // check grace period defined for the group. 
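+	//
+	// A minimal sketch (editor's example; 300 is an illustrative warmup, and
+	// 90 matches the documented MinHealthyPercentage default):
+	//
+	//    prefs := &autoscaling.RefreshPreferences{}
+	//    prefs.SetInstanceWarmup(300).SetMinHealthyPercentage(90)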
+ InstanceWarmup *int64 `type:"integer"` + + // The amount of capacity in the Auto Scaling group that must remain healthy + // during an instance refresh to allow the operation to continue, as a percentage + // of the desired capacity of the Auto Scaling group (rounded up to the nearest + // integer). The default is 90. + MinHealthyPercentage *int64 `type:"integer"` +} + +// String returns the string representation +func (s RefreshPreferences) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RefreshPreferences) GoString() string { + return s.String() +} + +// SetInstanceWarmup sets the InstanceWarmup field's value. +func (s *RefreshPreferences) SetInstanceWarmup(v int64) *RefreshPreferences { + s.InstanceWarmup = &v + return s +} + +// SetMinHealthyPercentage sets the MinHealthyPercentage field's value. +func (s *RefreshPreferences) SetMinHealthyPercentage(v int64) *RefreshPreferences { + s.MinHealthyPercentage = &v + return s +} + type ResumeProcessesOutput struct { _ struct{} `type:"structure"` } @@ -12366,8 +13319,9 @@ func (s ResumeProcessesOutput) GoString() string { type ScalingPolicy struct { _ struct{} `type:"structure"` - // The adjustment type, which specifies how ScalingAdjustment is interpreted. - // The valid values are ChangeInCapacity, ExactCapacity, and PercentChangeInCapacity. + // Specifies how the scaling adjustment is interpreted (for example, an absolute + // number or a percentage). The valid values are ChangeInCapacity, ExactCapacity, + // and PercentChangeInCapacity. AdjustmentType *string `min:"1" type:"string"` // The CloudWatch alarms related to the policy. @@ -12376,10 +13330,12 @@ type ScalingPolicy struct { // The name of the Auto Scaling group. AutoScalingGroupName *string `min:"1" type:"string"` - // The amount of time, in seconds, after a scaling activity completes before - // any further dynamic scaling activities can start. + // The duration of the policy's cooldown period, in seconds. Cooldown *int64 `type:"integer"` + // Indicates whether the policy is enabled (true) or disabled (false). + Enabled *bool `type:"boolean"` + // The estimated time, in seconds, until a newly launched instance can contribute // to the CloudWatch metrics. EstimatedInstanceWarmup *int64 `type:"integer"` @@ -12388,10 +13344,7 @@ type ScalingPolicy struct { // Maximum, and Average. MetricAggregationType *string `min:"1" type:"string"` - // The minimum number of instances to scale. If the value of AdjustmentType - // is PercentChangeInCapacity, the scaling policy changes the DesiredCapacity - // of the Auto Scaling group by at least this many instances. Otherwise, the - // error is ValidationError. + // The minimum value to scale by when the adjustment type is PercentChangeInCapacity. MinAdjustmentMagnitude *int64 `type:"integer"` // Available for backward compatibility. Use MinAdjustmentMagnitude instead. @@ -12403,7 +13356,17 @@ type ScalingPolicy struct { // The name of the scaling policy. PolicyName *string `min:"1" type:"string"` - // The policy type. The valid values are SimpleScaling, StepScaling, and TargetTrackingScaling. 
+ // One of the following policy types: + // + // * TargetTrackingScaling + // + // * StepScaling + // + // * SimpleScaling (default) + // + // For more information, see Target Tracking Scaling Policies (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-scaling-target-tracking.html) + // and Step and Simple Scaling Policies (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-scaling-simple-step.html) + // in the Amazon EC2 Auto Scaling User Guide. PolicyType *string `min:"1" type:"string"` // The amount by which to scale, based on the specified adjustment type. A positive @@ -12453,6 +13416,12 @@ func (s *ScalingPolicy) SetCooldown(v int64) *ScalingPolicy { return s } +// SetEnabled sets the Enabled field's value. +func (s *ScalingPolicy) SetEnabled(v bool) *ScalingPolicy { + s.Enabled = &v + return s +} + // SetEstimatedInstanceWarmup sets the EstimatedInstanceWarmup field's value. func (s *ScalingPolicy) SetEstimatedInstanceWarmup(v int64) *ScalingPolicy { s.EstimatedInstanceWarmup = &v @@ -12521,24 +13490,27 @@ type ScalingProcessQuery struct { // AutoScalingGroupName is a required field AutoScalingGroupName *string `min:"1" type:"string" required:"true"` - // One or more of the following processes. If you omit this parameter, all processes - // are specified. + // One or more of the following processes: // // * Launch // // * Terminate // - // * HealthCheck + // * AddToLoadBalancer // - // * ReplaceUnhealthy + // * AlarmNotification // // * AZRebalance // - // * AlarmNotification + // * HealthCheck + // + // * InstanceRefresh + // + // * ReplaceUnhealthy // // * ScheduledActions // - // * AddToLoadBalancer + // If you omit this parameter, all processes are specified. ScalingProcesses []*string `type:"list"` } @@ -12580,24 +13552,25 @@ func (s *ScalingProcessQuery) SetScalingProcesses(v []*string) *ScalingProcessQu return s } -// Describes a scheduled scaling action. Used in response to DescribeScheduledActions. +// Describes a scheduled scaling action. type ScheduledUpdateGroupAction struct { _ struct{} `type:"structure"` // The name of the Auto Scaling group. AutoScalingGroupName *string `min:"1" type:"string"` - // The number of instances you prefer to maintain in the group. + // The desired capacity is the initial capacity of the Auto Scaling group after + // the scheduled action runs and the capacity it attempts to maintain. DesiredCapacity *int64 `type:"integer"` // The date and time in UTC for the recurring schedule to end. For example, // "2019-06-01T00:00:00Z". EndTime *time.Time `type:"timestamp"` - // The maximum number of instances in the Auto Scaling group. + // The maximum size of the Auto Scaling group. MaxSize *int64 `type:"integer"` - // The minimum number of instances in the Auto Scaling group. + // The minimum size of the Auto Scaling group. MinSize *int64 `type:"integer"` // The recurring schedule for the action, in Unix cron syntax format. @@ -12689,25 +13662,26 @@ func (s *ScheduledUpdateGroupAction) SetTime(v time.Time) *ScheduledUpdateGroupA return s } -// Describes one or more scheduled scaling action updates for a specified Auto -// Scaling group. Used in combination with BatchPutScheduledUpdateGroupAction. +// Describes information used for one or more scheduled scaling action updates +// in a BatchPutScheduledUpdateGroupAction operation. // // When updating a scheduled scaling action, all optional parameters are left // unchanged if not specified. 
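//
// A hedged sketch of one action in a batch request (editor's addition; the
// action name and cron expression are hypothetical, and ScheduledActionName
// is assumed from the full type definition):
//
//    action := &autoscaling.ScheduledUpdateGroupActionRequest{
//        ScheduledActionName: aws.String("scale-up-weekday-mornings"),
//        Recurrence:          aws.String("0 9 * * 1-5"),
//        MinSize:             aws.Int64(2),
//        MaxSize:             aws.Int64(10),
//        DesiredCapacity:     aws.Int64(4),
//    }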
type ScheduledUpdateGroupActionRequest struct { _ struct{} `type:"structure"` - // The number of EC2 instances that should be running in the group. + // The desired capacity is the initial capacity of the Auto Scaling group after + // the scheduled action runs and the capacity it attempts to maintain. DesiredCapacity *int64 `type:"integer"` // The date and time for the recurring schedule to end. Amazon EC2 Auto Scaling // does not perform the action after this time. EndTime *time.Time `type:"timestamp"` - // The maximum number of instances in the Auto Scaling group. + // The maximum size of the Auto Scaling group. MaxSize *int64 `type:"integer"` - // The minimum number of instances in the Auto Scaling group. + // The minimum size of the Auto Scaling group. MinSize *int64 `type:"integer"` // The recurring schedule for the action, in Unix cron syntax format. This format @@ -12815,7 +13789,8 @@ type SetDesiredCapacityInput struct { // AutoScalingGroupName is a required field AutoScalingGroupName *string `min:"1" type:"string" required:"true"` - // The number of EC2 instances that should be running in the Auto Scaling group. + // The desired capacity is the initial capacity of the Auto Scaling group after + // this operation completes and the capacity it attempts to maintain. // // DesiredCapacity is a required field DesiredCapacity *int64 `type:"integer" required:"true"` @@ -12908,7 +13883,9 @@ type SetInstanceHealthInput struct { // Set this to False, to have the call not respect the grace period associated // with the group. // - // For more information about the health check grace period, see CreateAutoScalingGroup. + // For more information about the health check grace period, see CreateAutoScalingGroup + // (https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_CreateAutoScalingGroup.html) + // in the Amazon EC2 Auto Scaling API Reference. ShouldRespectGracePeriod *bool `type:"boolean"` } @@ -13060,9 +14037,103 @@ func (s SetInstanceProtectionOutput) GoString() string { return s.String() } -// Describes an adjustment based on the difference between the value of the -// aggregated CloudWatch metric and the breach threshold that you've defined -// for the alarm. Used in combination with PutScalingPolicy. +type StartInstanceRefreshInput struct { + _ struct{} `type:"structure"` + + // The name of the Auto Scaling group. + // + // AutoScalingGroupName is a required field + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // Set of preferences associated with the instance refresh request. + // + // If not provided, the default values are used. For MinHealthyPercentage, the + // default value is 90. For InstanceWarmup, the default is to use the value + // specified for the health check grace period for the Auto Scaling group. + // + // For more information, see RefreshPreferences (https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_RefreshPreferences.html) + // in the Amazon EC2 Auto Scaling API Reference. + Preferences *RefreshPreferences `type:"structure"` + + // The strategy to use for the instance refresh. The only valid value is Rolling. + // + // A rolling update is an update that is applied to all instances in an Auto + // Scaling group until all instances have been updated. A rolling update can + // fail due to failed health checks or if instances are on standby or are protected + // from scale in. If the rolling update process fails, any instances that were + // already replaced are not rolled back to their previous configuration. 
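+	//
+	// A minimal end-to-end sketch (editor's example; svc is an assumed
+	// *autoscaling.AutoScaling client and the group name is hypothetical):
+	//
+	//    out, err := svc.StartInstanceRefresh(&autoscaling.StartInstanceRefreshInput{
+	//        AutoScalingGroupName: aws.String("my-asg"),
+	//        Strategy:             aws.String(autoscaling.RefreshStrategyRolling),
+	//    })
+	//    if err == nil {
+	//        fmt.Println(*out.InstanceRefreshId)
+	//    }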
+ Strategy *string `type:"string" enum:"RefreshStrategy"` +} + +// String returns the string representation +func (s StartInstanceRefreshInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartInstanceRefreshInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartInstanceRefreshInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartInstanceRefreshInput"} + if s.AutoScalingGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("AutoScalingGroupName")) + } + if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAutoScalingGroupName sets the AutoScalingGroupName field's value. +func (s *StartInstanceRefreshInput) SetAutoScalingGroupName(v string) *StartInstanceRefreshInput { + s.AutoScalingGroupName = &v + return s +} + +// SetPreferences sets the Preferences field's value. +func (s *StartInstanceRefreshInput) SetPreferences(v *RefreshPreferences) *StartInstanceRefreshInput { + s.Preferences = v + return s +} + +// SetStrategy sets the Strategy field's value. +func (s *StartInstanceRefreshInput) SetStrategy(v string) *StartInstanceRefreshInput { + s.Strategy = &v + return s +} + +type StartInstanceRefreshOutput struct { + _ struct{} `type:"structure"` + + // A unique ID for tracking the progress of the request. + InstanceRefreshId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s StartInstanceRefreshOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartInstanceRefreshOutput) GoString() string { + return s.String() +} + +// SetInstanceRefreshId sets the InstanceRefreshId field's value. +func (s *StartInstanceRefreshOutput) SetInstanceRefreshId(v string) *StartInstanceRefreshOutput { + s.InstanceRefreshId = &v + return s +} + +// Describes information used to create a step adjustment for a step scaling +// policy. // // For the following examples, suppose that you have an alarm with a breach // threshold of 50: @@ -13088,6 +14159,9 @@ func (s SetInstanceProtectionOutput) GoString() string { // with a null upper bound. // // * The upper and lower bound can't be null in the same step adjustment. +// +// For more information, see Step Adjustments (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-scaling-simple-step.html#as-scaling-steps) +// in the Amazon EC2 Auto Scaling User Guide. type StepAdjustment struct { _ struct{} `type:"structure"` @@ -13171,8 +14245,10 @@ func (s SuspendProcessesOutput) GoString() string { return s.String() } -// Describes an automatic scaling process that has been suspended. For more -// information, see ProcessType. +// Describes an automatic scaling process that has been suspended. +// +// For more information, see Scaling Processes (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-suspend-resume-processes.html#process-types) +// in the Amazon EC2 Auto Scaling User Guide. type SuspendedProcess struct { _ struct{} `type:"structure"` @@ -13517,17 +14593,29 @@ type UpdateAutoScalingGroupInput struct { // One or more Availability Zones for the group. AvailabilityZones []*string `min:"1" type:"list"` + // Enables or disables capacity rebalance. 
+ // + // You can enable capacity rebalancing for your Auto Scaling groups when using + // Spot Instances. When you turn on capacity rebalancing, Amazon EC2 Auto Scaling + // attempts to launch a Spot Instance whenever Amazon EC2 predicts that a Spot + // Instance is at an elevated risk of interruption. After launching a new instance, + // it then terminates an old instance. For more information, see Amazon EC2 + // Auto Scaling capacity rebalancing (https://docs.aws.amazon.com/autoscaling/ec2/userguide/capacity-rebalance.html) + // in the Amazon EC2 Auto Scaling User Guide. + CapacityRebalance *bool `type:"boolean"` + // The amount of time, in seconds, after a scaling activity completes before - // another scaling activity can start. The default value is 300. This cooldown - // period is not used when a scaling-specific cooldown is specified. + // another scaling activity can start. The default value is 300. // - // Cooldown periods are not supported for target tracking scaling policies, - // step scaling policies, or scheduled scaling. For more information, see Scaling - // Cooldowns (https://docs.aws.amazon.com/autoscaling/ec2/userguide/Cooldown.html) + // This setting applies when using simple scaling policies, but not when using + // other scaling policies or scheduled scaling. For more information, see Scaling + // Cooldowns for Amazon EC2 Auto Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/Cooldown.html) // in the Amazon EC2 Auto Scaling User Guide. DefaultCooldown *int64 `type:"integer"` - // The number of EC2 instances that should be running in the Auto Scaling group. + // The desired capacity is the initial capacity of the Auto Scaling group after + // this operation completes and the capacity it attempts to maintain. + // // This number must be greater than or equal to the minimum size of the group // and less than or equal to the maximum size of the group. DesiredCapacity *int64 `type:"integer"` @@ -13539,7 +14627,7 @@ type UpdateAutoScalingGroupInput struct { // For more information, see Health Check Grace Period (https://docs.aws.amazon.com/autoscaling/ec2/userguide/healthcheck.html#health-check-grace-period) // in the Amazon EC2 Auto Scaling User Guide. // - // Conditional: This parameter is required if you are adding an ELB health check. + // Required if you are adding an ELB health check. HealthCheckGracePeriod *int64 `type:"integer"` // The service to use for the health checks. The valid values are EC2 and ELB. @@ -13561,11 +14649,26 @@ type UpdateAutoScalingGroupInput struct { LaunchTemplate *LaunchTemplateSpecification `type:"structure"` // The maximum amount of time, in seconds, that an instance can be in service. + // The default is null. // - // Valid Range: Minimum value of 604800. + // This parameter is optional, but if you specify a value for it, you must specify + // a value of at least 604,800 seconds (7 days). To clear a previously set value, + // specify a new value of 0. + // + // For more information, see Replacing Auto Scaling Instances Based on Maximum + // Instance Lifetime (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-max-instance-lifetime.html) + // in the Amazon EC2 Auto Scaling User Guide. + // + // Valid Range: Minimum value of 0. MaxInstanceLifetime *int64 `type:"integer"` // The maximum size of the Auto Scaling group. + // + // With a mixed instances policy that uses instance weighting, Amazon EC2 Auto + // Scaling may need to go above MaxSize to meet your capacity requirements. 
+ // In this event, Amazon EC2 Auto Scaling will never go above MaxSize by more + // than your largest instance weight (weights that define how many units each + // instance contributes to the desired capacity of the group). MaxSize *int64 `type:"integer"` // The minimum size of the Auto Scaling group. @@ -13685,6 +14788,12 @@ func (s *UpdateAutoScalingGroupInput) SetAvailabilityZones(v []*string) *UpdateA return s } +// SetCapacityRebalance sets the CapacityRebalance field's value. +func (s *UpdateAutoScalingGroupInput) SetCapacityRebalance(v bool) *UpdateAutoScalingGroupInput { + s.CapacityRebalance = &v + return s +} + // SetDefaultCooldown sets the DefaultCooldown field's value. func (s *UpdateAutoScalingGroupInput) SetDefaultCooldown(v int64) *UpdateAutoScalingGroupInput { s.DefaultCooldown = &v @@ -13789,6 +14898,70 @@ func (s UpdateAutoScalingGroupOutput) GoString() string { return s.String() } +const ( + // InstanceMetadataEndpointStateDisabled is a InstanceMetadataEndpointState enum value + InstanceMetadataEndpointStateDisabled = "disabled" + + // InstanceMetadataEndpointStateEnabled is a InstanceMetadataEndpointState enum value + InstanceMetadataEndpointStateEnabled = "enabled" +) + +// InstanceMetadataEndpointState_Values returns all elements of the InstanceMetadataEndpointState enum +func InstanceMetadataEndpointState_Values() []string { + return []string{ + InstanceMetadataEndpointStateDisabled, + InstanceMetadataEndpointStateEnabled, + } +} + +const ( + // InstanceMetadataHttpTokensStateOptional is a InstanceMetadataHttpTokensState enum value + InstanceMetadataHttpTokensStateOptional = "optional" + + // InstanceMetadataHttpTokensStateRequired is a InstanceMetadataHttpTokensState enum value + InstanceMetadataHttpTokensStateRequired = "required" +) + +// InstanceMetadataHttpTokensState_Values returns all elements of the InstanceMetadataHttpTokensState enum +func InstanceMetadataHttpTokensState_Values() []string { + return []string{ + InstanceMetadataHttpTokensStateOptional, + InstanceMetadataHttpTokensStateRequired, + } +} + +const ( + // InstanceRefreshStatusPending is a InstanceRefreshStatus enum value + InstanceRefreshStatusPending = "Pending" + + // InstanceRefreshStatusInProgress is a InstanceRefreshStatus enum value + InstanceRefreshStatusInProgress = "InProgress" + + // InstanceRefreshStatusSuccessful is a InstanceRefreshStatus enum value + InstanceRefreshStatusSuccessful = "Successful" + + // InstanceRefreshStatusFailed is a InstanceRefreshStatus enum value + InstanceRefreshStatusFailed = "Failed" + + // InstanceRefreshStatusCancelling is a InstanceRefreshStatus enum value + InstanceRefreshStatusCancelling = "Cancelling" + + // InstanceRefreshStatusCancelled is a InstanceRefreshStatus enum value + InstanceRefreshStatusCancelled = "Cancelled" +) + +// InstanceRefreshStatus_Values returns all elements of the InstanceRefreshStatus enum +func InstanceRefreshStatus_Values() []string { + return []string{ + InstanceRefreshStatusPending, + InstanceRefreshStatusInProgress, + InstanceRefreshStatusSuccessful, + InstanceRefreshStatusFailed, + InstanceRefreshStatusCancelling, + InstanceRefreshStatusCancelled, + } +} + const ( // LifecycleStatePending is a LifecycleState enum value LifecycleStatePending = "Pending" @@ -13830,6 +15003,25 @@ const ( LifecycleStateStandby = "Standby" ) +// LifecycleState_Values returns all elements of the LifecycleState enum +func LifecycleState_Values() []string { + return []string{ + LifecycleStatePending, + LifecycleStatePendingWait, + 
LifecycleStatePendingProceed, + LifecycleStateQuarantined, + LifecycleStateInService, + LifecycleStateTerminating, + LifecycleStateTerminatingWait, + LifecycleStateTerminatingProceed, + LifecycleStateTerminated, + LifecycleStateDetaching, + LifecycleStateDetached, + LifecycleStateEnteringStandby, + LifecycleStateStandby, + } +} + const ( // MetricStatisticAverage is a MetricStatistic enum value MetricStatisticAverage = "Average" @@ -13847,6 +15039,17 @@ const ( MetricStatisticSum = "Sum" ) +// MetricStatistic_Values returns all elements of the MetricStatistic enum +func MetricStatistic_Values() []string { + return []string{ + MetricStatisticAverage, + MetricStatisticMinimum, + MetricStatisticMaximum, + MetricStatisticSampleCount, + MetricStatisticSum, + } +} + const ( // MetricTypeAsgaverageCpuutilization is a MetricType enum value MetricTypeAsgaverageCpuutilization = "ASGAverageCPUUtilization" @@ -13861,6 +15064,28 @@ const ( MetricTypeAlbrequestCountPerTarget = "ALBRequestCountPerTarget" ) +// MetricType_Values returns all elements of the MetricType enum +func MetricType_Values() []string { + return []string{ + MetricTypeAsgaverageCpuutilization, + MetricTypeAsgaverageNetworkIn, + MetricTypeAsgaverageNetworkOut, + MetricTypeAlbrequestCountPerTarget, + } +} + +const ( + // RefreshStrategyRolling is a RefreshStrategy enum value + RefreshStrategyRolling = "Rolling" +) + +// RefreshStrategy_Values returns all elements of the RefreshStrategy enum +func RefreshStrategy_Values() []string { + return []string{ + RefreshStrategyRolling, + } +} + const ( // ScalingActivityStatusCodePendingSpotBidPlacement is a ScalingActivityStatusCode enum value ScalingActivityStatusCodePendingSpotBidPlacement = "PendingSpotBidPlacement" @@ -13898,3 +15123,21 @@ const ( // ScalingActivityStatusCodeCancelled is a ScalingActivityStatusCode enum value ScalingActivityStatusCodeCancelled = "Cancelled" ) + +// ScalingActivityStatusCode_Values returns all elements of the ScalingActivityStatusCode enum +func ScalingActivityStatusCode_Values() []string { + return []string{ + ScalingActivityStatusCodePendingSpotBidPlacement, + ScalingActivityStatusCodeWaitingForSpotInstanceRequestId, + ScalingActivityStatusCodeWaitingForSpotInstanceId, + ScalingActivityStatusCodeWaitingForInstanceId, + ScalingActivityStatusCodePreInService, + ScalingActivityStatusCodeInProgress, + ScalingActivityStatusCodeWaitingForElbconnectionDraining, + ScalingActivityStatusCodeMidLifecycleAction, + ScalingActivityStatusCodeWaitingForInstanceWarmup, + ScalingActivityStatusCodeSuccessful, + ScalingActivityStatusCodeFailed, + ScalingActivityStatusCodeCancelled, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/autoscaling/errors.go b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/errors.go index 2e65ee3d93fb..85e907df74fe 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/autoscaling/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/errors.go @@ -4,6 +4,13 @@ package autoscaling const ( + // ErrCodeActiveInstanceRefreshNotFoundFault for service response error code + // "ActiveInstanceRefreshNotFound". + // + // The request failed because an active instance refresh for the specified Auto + // Scaling group was not found. + ErrCodeActiveInstanceRefreshNotFoundFault = "ActiveInstanceRefreshNotFound" + // ErrCodeAlreadyExistsFault for service response error code // "AlreadyExists". // @@ -11,6 +18,13 @@ const ( // name. 
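	//
	// A typical check for this code (editor's sketch, using the awserr type
	// assertion pattern this SDK documents for error handling):
	//
	//    if aerr, ok := err.(awserr.Error); ok && aerr.Code() == autoscaling.ErrCodeAlreadyExistsFault {
	//        // the group or launch configuration name is already in use
	//    }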
ErrCodeAlreadyExistsFault = "AlreadyExists" + // ErrCodeInstanceRefreshInProgressFault for service response error code + // "InstanceRefreshInProgress". + // + // The request failed because an active instance refresh operation already exists + // for the specified Auto Scaling group. + ErrCodeInstanceRefreshInProgressFault = "InstanceRefreshInProgress" + // ErrCodeInvalidNextToken for service response error code // "InvalidNextToken". // @@ -22,7 +36,8 @@ const ( // // You have already reached a limit for your Amazon EC2 Auto Scaling resources // (for example, Auto Scaling groups, launch configurations, or lifecycle hooks). - // For more information, see DescribeAccountLimits. + // For more information, see DescribeAccountLimits (https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_DescribeAccountLimits.html) + // in the Amazon EC2 Auto Scaling API Reference. ErrCodeLimitExceededFault = "LimitExceeded" // ErrCodeResourceContentionFault for service response error code diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go index a33cce5d7e3b..ba7661add13d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go @@ -440,8 +440,8 @@ func (c *EC2) AdvertiseByoipCidrRequest(input *AdvertiseByoipCidrInput) (req *re // AdvertiseByoipCidr API operation for Amazon Elastic Compute Cloud. // -// Advertises an IPv4 address range that is provisioned for use with your AWS -// resources through bring your own IP addresses (BYOIP). +// Advertises an IPv4 or IPv6 address range that is provisioned for use with +// your AWS resources through bring your own IP addresses (BYOIP). // // You can perform this operation at most once every 10 seconds, even if you // specify different address ranges each time. @@ -554,6 +554,10 @@ func (c *EC2) AllocateAddressRequest(input *AllocateAddressInput) (req *request. // For more information, see Elastic IP Addresses (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) // in the Amazon Elastic Compute Cloud User Guide. // +// You can allocate a carrier IP address which is a public IP address from a +// telecommunication carrier, to a network interface which resides in a subnet +// in a Wavelength Zone (for example an EC2 instance). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -787,6 +791,9 @@ func (c *EC2) AssignIpv6AddressesRequest(input *AssignIpv6AddressesInput) (req * // Type (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#AvailableIpPerENI) // in the Amazon Elastic Compute Cloud User Guide. // +// You must specify either the IPv6 addresses or the IPv6 address count in the +// request. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -878,6 +885,8 @@ func (c *EC2) AssignPrivateIpAddressesRequest(input *AssignPrivateIpAddressesInp // address from one network interface to another, check network/interfaces/macs/mac/local-ipv4s // in the instance metadata to confirm that the remapping is complete. // +// You must specify either the IP addresses or the IP address count in the request. +// // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -950,7 +959,8 @@ func (c *EC2) AssociateAddressRequest(input *AssociateAddressInput) (req *reques // AssociateAddress API operation for Amazon Elastic Compute Cloud. // -// Associates an Elastic IP address with an instance or a network interface. +// Associates an Elastic IP address, or carrier IP address (for instances that +// are in subnets in Wavelength Zones) with an instance or a network interface. // Before you can use an Elastic IP address, you must allocate it to your account. // // An Elastic IP address is for use in either the EC2-Classic platform or in @@ -971,6 +981,9 @@ func (c *EC2) AssociateAddressRequest(input *AssociateAddressInput) (req *reques // an Elastic IP address with an instance or network interface that has an existing // Elastic IP address. // +// [Subnets in Wavelength Zones] You can associate an IP address from the telecommunication +// carrier to the instance or network interface. +// // You cannot associate an Elastic IP address with an interface in a different // network border group. // @@ -1057,6 +1070,12 @@ func (c *EC2) AssociateClientVpnTargetNetworkRequest(input *AssociateClientVpnTa // Zone. We recommend that you associate at least two subnets to provide Availability // Zone redundancy. // +// If you specified a VPC when you created the Client VPN endpoint or if you +// have previous subnet associations, the specified subnet must be in the same +// VPC. To specify a subnet that's in a different VPC, you must first modify +// the Client VPN endpoint (ModifyClientVpnEndpoint) and change the VPC that's +// associated with it. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -1171,6 +1190,98 @@ func (c *EC2) AssociateDhcpOptionsWithContext(ctx aws.Context, input *AssociateD return out, req.Send() } +const opAssociateEnclaveCertificateIamRole = "AssociateEnclaveCertificateIamRole" + +// AssociateEnclaveCertificateIamRoleRequest generates a "aws/request.Request" representing the +// client's request for the AssociateEnclaveCertificateIamRole operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AssociateEnclaveCertificateIamRole for more information on using the AssociateEnclaveCertificateIamRole +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AssociateEnclaveCertificateIamRoleRequest method. 
+// req, resp := client.AssociateEnclaveCertificateIamRoleRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateEnclaveCertificateIamRole
+func (c *EC2) AssociateEnclaveCertificateIamRoleRequest(input *AssociateEnclaveCertificateIamRoleInput) (req *request.Request, output *AssociateEnclaveCertificateIamRoleOutput) {
+	op := &request.Operation{
+		Name:       opAssociateEnclaveCertificateIamRole,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &AssociateEnclaveCertificateIamRoleInput{}
+	}
+
+	output = &AssociateEnclaveCertificateIamRoleOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// AssociateEnclaveCertificateIamRole API operation for Amazon Elastic Compute Cloud.
+//
+// Associates an AWS Identity and Access Management (IAM) role with an AWS Certificate
+// Manager (ACM) certificate. This enables the certificate to be used by the
+// ACM for Nitro Enclaves application inside an enclave. For more information,
+// see AWS Certificate Manager for Nitro Enclaves (https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave-refapp.html)
+// in the AWS Nitro Enclaves User Guide.
+//
+// When the IAM role is associated with the ACM certificate, the certificate,
+// certificate chain, and encrypted private key are placed in an Amazon S3 bucket
+// that only the associated IAM role can access. The private key of the certificate
+// is encrypted with an AWS-managed KMS customer master key (CMK) that has an
+// attached attestation-based CMK policy.
+//
+// To enable the IAM role to access the Amazon S3 object, you must grant it
+// permission to call s3:GetObject on the Amazon S3 bucket returned by the command.
+// To enable the IAM role to access the AWS KMS CMK, you must grant it permission
+// to call kms:Decrypt on the AWS KMS CMK returned by the command. For more information,
+// see Grant the role permission to access the certificate and encryption key
+// (https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave-refapp.html#add-policy)
+// in the AWS Nitro Enclaves User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation AssociateEnclaveCertificateIamRole for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/AssociateEnclaveCertificateIamRole
+func (c *EC2) AssociateEnclaveCertificateIamRole(input *AssociateEnclaveCertificateIamRoleInput) (*AssociateEnclaveCertificateIamRoleOutput, error) {
+	req, out := c.AssociateEnclaveCertificateIamRoleRequest(input)
+	return out, req.Send()
+}
+
+// AssociateEnclaveCertificateIamRoleWithContext is the same as AssociateEnclaveCertificateIamRole with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AssociateEnclaveCertificateIamRole for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
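+//
+// A short usage sketch (editor's addition; the timeout, svc, and input
+// variables are assumed):
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//    defer cancel()
+//    out, err := svc.AssociateEnclaveCertificateIamRoleWithContext(ctx, input)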
+func (c *EC2) AssociateEnclaveCertificateIamRoleWithContext(ctx aws.Context, input *AssociateEnclaveCertificateIamRoleInput, opts ...request.Option) (*AssociateEnclaveCertificateIamRoleOutput, error) { + req, out := c.AssociateEnclaveCertificateIamRoleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opAssociateIamInstanceProfile = "AssociateIamInstanceProfile" // AssociateIamInstanceProfileRequest generates a "aws/request.Request" representing the @@ -1603,8 +1714,13 @@ func (c *EC2) AssociateVpcCidrBlockRequest(input *AssociateVpcCidrBlockInput) (r // AssociateVpcCidrBlock API operation for Amazon Elastic Compute Cloud. // // Associates a CIDR block with your VPC. You can associate a secondary IPv4 -// CIDR block, or you can associate an Amazon-provided IPv6 CIDR block. The -// IPv6 CIDR block size is fixed at /56. +// CIDR block, an Amazon-provided IPv6 CIDR block, or an IPv6 CIDR block from +// an IPv6 address pool that you provisioned through bring your own IP addresses +// (BYOIP (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-byoip.html)). +// The IPv6 CIDR block size is fixed at /56. +// +// You must specify one of the following in the request: an IPv4 CIDR block, +// an IPv6 pool, or an Amazon-provided IPv6 CIDR block. // // For more information about associating CIDR blocks with your VPC and applicable // restrictions, see VPC and Subnet Sizing (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html#VPC_Sizing) @@ -1927,7 +2043,7 @@ func (c *EC2) AttachVolumeRequest(input *AttachVolumeInput) (req *request.Reques // in the Amazon Elastic Compute Cloud User Guide. // // After you attach an EBS volume, you must make it available. For more information, -// see Making an EBS Volume Available For Use (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-using-volumes.html). +// see Making an EBS volume available for use (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-using-volumes.html). // // If a volume has an AWS Marketplace product code: // @@ -1941,7 +2057,7 @@ func (c *EC2) AttachVolumeRequest(input *AttachVolumeInput) (req *request.Reques // the product. For example, you can't detach a volume from a Windows instance // and attach it to a Linux instance. // -// For more information, see Attaching Amazon EBS Volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-attaching-volume.html) +// For more information, see Attaching Amazon EBS volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-attaching-volume.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -3309,7 +3425,7 @@ func (c *EC2) CopySnapshotRequest(input *CopySnapshotInput) (req *request.Reques // Snapshots created by copying another snapshot have an arbitrary volume ID // that should not be used for any purpose. // -// For more information, see Copying an Amazon EBS Snapshot (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-copy-snapshot.html) +// For more information, see Copying an Amazon EBS snapshot (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-copy-snapshot.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions @@ -3437,6 +3553,82 @@ func (c *EC2) CreateCapacityReservationWithContext(ctx aws.Context, input *Creat return out, req.Send() } +const opCreateCarrierGateway = "CreateCarrierGateway" + +// CreateCarrierGatewayRequest generates a "aws/request.Request" representing the +// client's request for the CreateCarrierGateway operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateCarrierGateway for more information on using the CreateCarrierGateway +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateCarrierGatewayRequest method. +// req, resp := client.CreateCarrierGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateCarrierGateway +func (c *EC2) CreateCarrierGatewayRequest(input *CreateCarrierGatewayInput) (req *request.Request, output *CreateCarrierGatewayOutput) { + op := &request.Operation{ + Name: opCreateCarrierGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateCarrierGatewayInput{} + } + + output = &CreateCarrierGatewayOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateCarrierGateway API operation for Amazon Elastic Compute Cloud. +// +// Creates a carrier gateway. For more information about carrier gateways, see +// Carrier gateways (https://docs.aws.amazon.com/wavelength/latest/developerguide/how-wavelengths-work.html#wavelength-carrier-gateway) +// in the AWS Wavelength Developer Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateCarrierGateway for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateCarrierGateway +func (c *EC2) CreateCarrierGateway(input *CreateCarrierGatewayInput) (*CreateCarrierGatewayOutput, error) { + req, out := c.CreateCarrierGatewayRequest(input) + return out, req.Send() +} + +// CreateCarrierGatewayWithContext is the same as CreateCarrierGateway with the addition of +// the ability to pass a context and additional request options. +// +// See CreateCarrierGateway for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateCarrierGatewayWithContext(ctx aws.Context, input *CreateCarrierGatewayInput, opts ...request.Option) (*CreateCarrierGatewayOutput, error) { + req, out := c.CreateCarrierGatewayRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
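+ // Note: out is populated only if Send returns a nil error, per the
+ // Request/Send contract described in the method comments above.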
+ return out, req.Send() +} + const opCreateClientVpnEndpoint = "CreateClientVpnEndpoint" // CreateClientVpnEndpointRequest generates a "aws/request.Request" representing the @@ -3637,7 +3829,7 @@ func (c *EC2) CreateCustomerGatewayRequest(input *CreateCustomerGatewayInput) (r // Provides information to AWS about your VPN customer gateway device. The customer // gateway is the appliance at your end of the VPN connection. (The device on // the AWS side of the VPN connection is the virtual private gateway.) You must -// provide the Internet-routable IP address of the customer gateway's external +// provide the internet-routable IP address of the customer gateway's external // interface. The IP address must be static and can be behind a device performing // network address translation (NAT). // @@ -3646,9 +3838,16 @@ func (c *EC2) CreateCustomerGatewayRequest(input *CreateCustomerGatewayInput) (r // ASN assigned to your network. If you don't have an ASN already, you can use // a private ASN (in the 64512 - 65534 range). // -// Amazon EC2 supports all 2-byte ASN numbers in the range of 1 - 65534, with -// the exception of 7224, which is reserved in the us-east-1 Region, and 9059, -// which is reserved in the eu-west-1 Region. +// Amazon EC2 supports all 4-byte ASN numbers in the range of 1 - 2147483647, +// with the exception of the following: +// +// * 7224 - reserved in the us-east-1 Region +// +// * 9059 - reserved in the eu-west-1 Region +// +// * 17943 - reserved in the ap-southeast-1 Region +// +// * 10124 - reserved in the ap-northeast-1 Region // // For more information, see AWS Site-to-Site VPN (https://docs.aws.amazon.com/vpn/latest/s2svpn/VPC_VPN.html) // in the AWS Site-to-Site VPN User Guide. @@ -3910,13 +4109,13 @@ func (c *EC2) CreateDhcpOptionsRequest(input *CreateDhcpOptionsInput) (req *requ // * domain-name - If you're using AmazonProvidedDNS in us-east-1, specify // ec2.internal. If you're using AmazonProvidedDNS in another Region, specify // region.compute.internal (for example, ap-northeast-1.compute.internal). -// Otherwise, specify a domain name (for example, MyCompany.com). This value -// is used to complete unqualified DNS hostnames. Important: Some Linux operating -// systems accept multiple domain names separated by spaces. However, Windows -// and other Linux operating systems treat the value as a single domain, -// which results in unexpected behavior. If your DHCP options set is associated -// with a VPC that has instances with multiple operating systems, specify -// only one domain name. +// Otherwise, specify a domain name (for example, ExampleCompany.com). This +// value is used to complete unqualified DNS hostnames. Important: Some Linux +// operating systems accept multiple domain names separated by spaces. However, +// Windows and other Linux operating systems treat the value as a single +// domain, which results in unexpected behavior. If your DHCP options set +// is associated with a VPC that has instances with multiple operating systems, +// specify only one domain name. // // * ntp-servers - The IP addresses of up to four Network Time Protocol (NTP) // servers. @@ -4416,7 +4615,7 @@ func (c *EC2) CreateInstanceExportTaskRequest(input *CreateInstanceExportTaskInp // CreateInstanceExportTask API operation for Amazon Elastic Compute Cloud. // -// Exports a running or stopped instance to an S3 bucket. +// Exports a running or stopped instance to an Amazon S3 bucket. 
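//
// As a rough sketch of the export described above (the instance ID and bucket
// name are placeholders; the bucket must already exist and allow the VM export
// service to write to it, and the field set shown is an assumption to verify
// against this SDK version):
//
//    out, err := client.CreateInstanceExportTask(&ec2.CreateInstanceExportTaskInput{
//        InstanceId:        aws.String("i-0123456789abcdef0"),
//        TargetEnvironment: aws.String("vmware"),
//        ExportToS3Task: &ec2.ExportToS3TaskSpecification{
//            S3Bucket:        aws.String("example-export-bucket"),
//            DiskImageFormat: aws.String("VMDK"),
//        },
//    })
//    if err == nil {
//        fmt.Println(aws.StringValue(out.ExportTask.ExportTaskId))
//    }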
//
// For information about the supported operating systems, image formats, and
// known limitations for the types of instances you can export, see Exporting
@@ -4662,6 +4861,8 @@ func (c *EC2) CreateLaunchTemplateRequest(input *CreateLaunchTemplateInput) (req
// Creates a launch template. A launch template contains the parameters to launch
// an instance. When you launch an instance using RunInstances, you can specify
// a launch template instead of providing the launch parameters in the request.
+// For more information, see Launching an instance from a launch template (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html) in
+// the Amazon Elastic Compute Cloud User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -4741,6 +4942,9 @@ func (c *EC2) CreateLaunchTemplateVersionRequest(input *CreateLaunchTemplateVers
// Launch template versions are numbered in the order in which they are created.
// You cannot specify, change, or replace the numbering of launch template versions.
//
+// For more information, see Managing launch template versions (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html#manage-launch-template-versions) in
+// the Amazon Elastic Compute Cloud User Guide.
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -4917,6 +5121,84 @@ func (c *EC2) CreateLocalGatewayRouteTableVpcAssociationWithContext(ctx aws.Cont
return out, req.Send()
}
+const opCreateManagedPrefixList = "CreateManagedPrefixList"
+
+// CreateManagedPrefixListRequest generates a "aws/request.Request" representing the
+// client's request for the CreateManagedPrefixList operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See CreateManagedPrefixList for more information on using the CreateManagedPrefixList
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the CreateManagedPrefixListRequest method.
+// req, resp := client.CreateManagedPrefixListRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateManagedPrefixList
+func (c *EC2) CreateManagedPrefixListRequest(input *CreateManagedPrefixListInput) (req *request.Request, output *CreateManagedPrefixListOutput) {
+ op := &request.Operation{
+ Name: opCreateManagedPrefixList,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &CreateManagedPrefixListInput{}
+ }
+
+ output = &CreateManagedPrefixListOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// CreateManagedPrefixList API operation for Amazon Elastic Compute Cloud.
+//
+// Creates a managed prefix list. You can specify one or more entries for the
+// prefix list. Each entry consists of a CIDR block and an optional description.
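+//
+// To make the entry model concrete, a minimal sketch (the name, CIDRs, and
+// descriptions are illustrative placeholders; it assumes an ec2.EC2 client
+// named client and the aws helper package):
+//
+//    out, err := client.CreateManagedPrefixList(&ec2.CreateManagedPrefixListInput{
+//        PrefixListName: aws.String("example-internal-cidrs"),
+//        AddressFamily:  aws.String("IPv4"),
+//        MaxEntries:     aws.Int64(10), // fixed at creation; see the note below
+//        Entries: []*ec2.AddPrefixListEntry{
+//            {Cidr: aws.String("10.0.0.0/16"), Description: aws.String("vpc-a")},
+//            {Cidr: aws.String("10.1.0.0/16"), Description: aws.String("vpc-b")},
+//        },
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.PrefixList.PrefixListId))
+//    }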
+// +// You must specify the maximum number of entries for the prefix list. The maximum +// number of entries cannot be changed later. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateManagedPrefixList for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateManagedPrefixList +func (c *EC2) CreateManagedPrefixList(input *CreateManagedPrefixListInput) (*CreateManagedPrefixListOutput, error) { + req, out := c.CreateManagedPrefixListRequest(input) + return out, req.Send() +} + +// CreateManagedPrefixListWithContext is the same as CreateManagedPrefixList with the addition of +// the ability to pass a context and additional request options. +// +// See CreateManagedPrefixList for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateManagedPrefixListWithContext(ctx aws.Context, input *CreateManagedPrefixListInput, opts ...request.Option) (*CreateManagedPrefixListOutput, error) { + req, out := c.CreateManagedPrefixListRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateNatGateway = "CreateNatGateway" // CreateNatGatewayRequest generates a "aws/request.Request" representing the @@ -5361,7 +5643,6 @@ func (c *EC2) CreatePlacementGroupRequest(input *CreatePlacementGroupInput) (req output = &CreatePlacementGroupOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -5377,7 +5658,7 @@ func (c *EC2) CreatePlacementGroupRequest(input *CreatePlacementGroupInput) (req // instances in one partition do not share the same hardware with instances // in another partition. // -// For more information, see Placement Groups (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html) +// For more information, see Placement groups (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -5844,7 +6125,7 @@ func (c *EC2) CreateSnapshotRequest(input *CreateSnapshotInput) (req *request.Re // protected. // // You can tag your snapshots during creation. For more information, see Tagging -// Your Amazon EC2 Resources (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html) +// your Amazon EC2 resources (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html) // in the Amazon Elastic Compute Cloud User Guide. // // For more information, see Amazon Elastic Block Store (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AmazonEBS.html) @@ -6002,7 +6283,7 @@ func (c *EC2) CreateSpotDatafeedSubscriptionRequest(input *CreateSpotDatafeedSub // // Creates a data feed for Spot Instances, enabling you to view Spot Instance // usage logs. You can create one data feed per AWS account. 
For more information, -// see Spot Instance Data Feed (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-data-feeds.html) +// see Spot Instance data feed (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-data-feeds.html) // in the Amazon EC2 User Guide for Linux Instances. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -6077,15 +6358,12 @@ func (c *EC2) CreateSubnetRequest(input *CreateSubnetInput) (req *request.Reques // CreateSubnet API operation for Amazon Elastic Compute Cloud. // -// Creates a subnet in an existing VPC. +// Creates a subnet in a specified VPC. // -// When you create each subnet, you provide the VPC ID and IPv4 CIDR block for -// the subnet. After you create a subnet, you can't change its CIDR block. The -// size of the subnet's IPv4 CIDR block can be the same as a VPC's IPv4 CIDR -// block, or a subset of a VPC's IPv4 CIDR block. If you create more than one -// subnet in a VPC, the subnets' CIDR blocks must not overlap. The smallest -// IPv4 subnet (and VPC) you can create uses a /28 netmask (16 IPv4 addresses), -// and the largest uses a /16 netmask (65,536 IPv4 addresses). +// You must specify an IPv4 CIDR block for the subnet. After you create a subnet, +// you can't change its CIDR block. The allowed block size is between a /16 +// netmask (65,536 IP addresses) and /28 netmask (16 IP addresses). The CIDR +// block must not overlap with the CIDR block of an existing subnet in the VPC. // // If you've associated an IPv6 CIDR block with your VPC, you can create a subnet // with an IPv6 CIDR block that uses a /64 prefix length. @@ -6096,9 +6374,7 @@ func (c *EC2) CreateSubnetRequest(input *CreateSubnetInput) (req *request.Reques // If you add more than one subnet to a VPC, they're set up in a star topology // with a logical router in the middle. // -// If you launch an instance in a VPC using an Amazon EBS-backed AMI, the IP -// address doesn't change if you stop and restart the instance (unlike a similar -// instance launched outside a VPC, which gets a new IP address when restarted). +// When you stop an instance in a subnet, it retains its private IPv4 address. // It's therefore possible to have a subnet with no running instances (they're // all stopped), but no remaining IP addresses available. // @@ -6178,9 +6454,10 @@ func (c *EC2) CreateTagsRequest(input *CreateTagsInput) (req *request.Request, o // CreateTags API operation for Amazon Elastic Compute Cloud. // -// Adds or overwrites the specified tags for the specified Amazon EC2 resource -// or resources. Each resource can have a maximum of 50 tags. Each tag consists -// of a key and optional value. Tag keys must be unique per resource. +// Adds or overwrites only the specified tags for the specified Amazon EC2 resource +// or resources. When you specify an existing tag key, the value is overwritten +// with the new value. Each resource can have a maximum of 50 tags. Each tag +// consists of a key and optional value. Tag keys must be unique per resource. // // For more information about tags, see Tagging Your Resources (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html) // in the Amazon Elastic Compute Cloud User Guide. 
For more information about @@ -6797,6 +7074,81 @@ func (c *EC2) CreateTransitGatewayPeeringAttachmentWithContext(ctx aws.Context, return out, req.Send() } +const opCreateTransitGatewayPrefixListReference = "CreateTransitGatewayPrefixListReference" + +// CreateTransitGatewayPrefixListReferenceRequest generates a "aws/request.Request" representing the +// client's request for the CreateTransitGatewayPrefixListReference operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateTransitGatewayPrefixListReference for more information on using the CreateTransitGatewayPrefixListReference +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateTransitGatewayPrefixListReferenceRequest method. +// req, resp := client.CreateTransitGatewayPrefixListReferenceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateTransitGatewayPrefixListReference +func (c *EC2) CreateTransitGatewayPrefixListReferenceRequest(input *CreateTransitGatewayPrefixListReferenceInput) (req *request.Request, output *CreateTransitGatewayPrefixListReferenceOutput) { + op := &request.Operation{ + Name: opCreateTransitGatewayPrefixListReference, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateTransitGatewayPrefixListReferenceInput{} + } + + output = &CreateTransitGatewayPrefixListReferenceOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateTransitGatewayPrefixListReference API operation for Amazon Elastic Compute Cloud. +// +// Creates a reference (route) to a prefix list in a specified transit gateway +// route table. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateTransitGatewayPrefixListReference for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateTransitGatewayPrefixListReference +func (c *EC2) CreateTransitGatewayPrefixListReference(input *CreateTransitGatewayPrefixListReferenceInput) (*CreateTransitGatewayPrefixListReferenceOutput, error) { + req, out := c.CreateTransitGatewayPrefixListReferenceRequest(input) + return out, req.Send() +} + +// CreateTransitGatewayPrefixListReferenceWithContext is the same as CreateTransitGatewayPrefixListReference with the addition of +// the ability to pass a context and additional request options. +// +// See CreateTransitGatewayPrefixListReference for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
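+//
+// For illustration (all IDs are placeholders, ctx is assumed to be set up as
+// described above, and the exact field set is an assumption to check against
+// this SDK version):
+//
+//    _, err := client.CreateTransitGatewayPrefixListReferenceWithContext(ctx,
+//        &ec2.CreateTransitGatewayPrefixListReferenceInput{
+//            TransitGatewayRouteTableId: aws.String("tgw-rtb-0123456789abcdef0"),
+//            PrefixListId:               aws.String("pl-0123456789abcdef0"),
+//            TransitGatewayAttachmentId: aws.String("tgw-attach-0123456789abcdef0"),
+//        })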
+func (c *EC2) CreateTransitGatewayPrefixListReferenceWithContext(ctx aws.Context, input *CreateTransitGatewayPrefixListReferenceInput, opts ...request.Option) (*CreateTransitGatewayPrefixListReferenceOutput, error) { + req, out := c.CreateTransitGatewayPrefixListReferenceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateTransitGatewayRoute = "CreateTransitGatewayRoute" // CreateTransitGatewayRouteRequest generates a "aws/request.Request" representing the @@ -7085,10 +7437,10 @@ func (c *EC2) CreateVolumeRequest(input *CreateVolumeInput) (req *request.Reques // in the Amazon Elastic Compute Cloud User Guide. // // You can tag your volumes during creation. For more information, see Tagging -// Your Amazon EC2 Resources (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html) +// your Amazon EC2 resources (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html) // in the Amazon Elastic Compute Cloud User Guide. // -// For more information, see Creating an Amazon EBS Volume (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-creating-volume.html) +// For more information, see Creating an Amazon EBS volume (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-creating-volume.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -7169,9 +7521,10 @@ func (c *EC2) CreateVpcRequest(input *CreateVpcInput) (req *request.Request, out // make your VPC, see Your VPC and Subnets (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) // in the Amazon Virtual Private Cloud User Guide. // -// You can optionally request an Amazon-provided IPv6 CIDR block for the VPC. -// The IPv6 CIDR block uses a /56 prefix length, and is allocated from Amazon's -// pool of IPv6 addresses. You cannot choose the IPv6 range for your VPC. +// You can optionally request an IPv6 CIDR block for the VPC. You can request +// an Amazon-provided IPv6 CIDR block from Amazon's pool of IPv6 addresses, +// or an IPv6 CIDR block from an IPv6 address pool that you provisioned through +// bring your own IP addresses (BYOIP (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-byoip.html)). // // By default, each instance you launch in the VPC has the default DHCP options, // which include only a default DNS server that we provide (AmazonProvidedDNS). @@ -7600,15 +7953,15 @@ func (c *EC2) CreateVpnConnectionRequest(input *CreateVpnConnectionInput) (req * // CreateVpnConnection API operation for Amazon Elastic Compute Cloud. // -// Creates a VPN connection between an existing virtual private gateway and -// a VPN customer gateway. The supported connection type is ipsec.1. +// Creates a VPN connection between an existing virtual private gateway or transit +// gateway and a customer gateway. The supported connection type is ipsec.1. // // The response includes information that you need to give to your network administrator // to configure your customer gateway. // // We strongly recommend that you use HTTPS when calling this operation because // the response contains sensitive cryptographic information for configuring -// your customer gateway. +// your customer gateway device. 
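//
// A minimal sketch of the virtual-private-gateway flavor (the IDs are
// placeholders; for a transit gateway, a TransitGatewayId would take the
// place of VpnGatewayId):
//
//    out, err := client.CreateVpnConnection(&ec2.CreateVpnConnectionInput{
//        CustomerGatewayId: aws.String("cgw-0123456789abcdef0"),
//        VpnGatewayId:      aws.String("vgw-0123456789abcdef0"),
//        Type:              aws.String("ipsec.1"),
//    })
//    // out.VpnConnection.CustomerGatewayConfiguration carries the sensitive
//    // device configuration mentioned above, hence the HTTPS recommendation.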
// // If you decide to shut down your VPN connection for any reason and later create // a new VPN connection, you must reconfigure your customer gateway with the @@ -7808,6 +8161,84 @@ func (c *EC2) CreateVpnGatewayWithContext(ctx aws.Context, input *CreateVpnGatew return out, req.Send() } +const opDeleteCarrierGateway = "DeleteCarrierGateway" + +// DeleteCarrierGatewayRequest generates a "aws/request.Request" representing the +// client's request for the DeleteCarrierGateway operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteCarrierGateway for more information on using the DeleteCarrierGateway +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteCarrierGatewayRequest method. +// req, resp := client.DeleteCarrierGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteCarrierGateway +func (c *EC2) DeleteCarrierGatewayRequest(input *DeleteCarrierGatewayInput) (req *request.Request, output *DeleteCarrierGatewayOutput) { + op := &request.Operation{ + Name: opDeleteCarrierGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteCarrierGatewayInput{} + } + + output = &DeleteCarrierGatewayOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteCarrierGateway API operation for Amazon Elastic Compute Cloud. +// +// Deletes a carrier gateway. +// +// If you do not delete the route that contains the carrier gateway as the Target, +// the route is a blackhole route. For information about how to delete a route, +// see DeleteRoute (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DeleteRoute.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteCarrierGateway for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteCarrierGateway +func (c *EC2) DeleteCarrierGateway(input *DeleteCarrierGatewayInput) (*DeleteCarrierGatewayOutput, error) { + req, out := c.DeleteCarrierGatewayRequest(input) + return out, req.Send() +} + +// DeleteCarrierGatewayWithContext is the same as DeleteCarrierGateway with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteCarrierGateway for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
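+//
+// For illustration (the gateway ID is a placeholder; assumes the context and
+// time packages are imported alongside the SDK packages):
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//    defer cancel()
+//    _, err := client.DeleteCarrierGatewayWithContext(ctx, &ec2.DeleteCarrierGatewayInput{
+//        CarrierGatewayId: aws.String("cagw-0123456789abcdef0"),
+//    })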
+func (c *EC2) DeleteCarrierGatewayWithContext(ctx aws.Context, input *DeleteCarrierGatewayInput, opts ...request.Option) (*DeleteCarrierGatewayOutput, error) { + req, out := c.DeleteCarrierGatewayRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteClientVpnEndpoint = "DeleteClientVpnEndpoint" // DeleteClientVpnEndpointRequest generates a "aws/request.Request" representing the @@ -8868,6 +9299,81 @@ func (c *EC2) DeleteLocalGatewayRouteTableVpcAssociationWithContext(ctx aws.Cont return out, req.Send() } +const opDeleteManagedPrefixList = "DeleteManagedPrefixList" + +// DeleteManagedPrefixListRequest generates a "aws/request.Request" representing the +// client's request for the DeleteManagedPrefixList operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteManagedPrefixList for more information on using the DeleteManagedPrefixList +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteManagedPrefixListRequest method. +// req, resp := client.DeleteManagedPrefixListRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteManagedPrefixList +func (c *EC2) DeleteManagedPrefixListRequest(input *DeleteManagedPrefixListInput) (req *request.Request, output *DeleteManagedPrefixListOutput) { + op := &request.Operation{ + Name: opDeleteManagedPrefixList, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteManagedPrefixListInput{} + } + + output = &DeleteManagedPrefixListOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteManagedPrefixList API operation for Amazon Elastic Compute Cloud. +// +// Deletes the specified managed prefix list. You must first remove all references +// to the prefix list in your resources. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteManagedPrefixList for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteManagedPrefixList +func (c *EC2) DeleteManagedPrefixList(input *DeleteManagedPrefixListInput) (*DeleteManagedPrefixListOutput, error) { + req, out := c.DeleteManagedPrefixListRequest(input) + return out, req.Send() +} + +// DeleteManagedPrefixListWithContext is the same as DeleteManagedPrefixList with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteManagedPrefixList for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
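+//
+// Sketch of handling the failure mode noted above, where the prefix list is
+// still referenced (the ID is a placeholder, ctx is assumed as in the comment
+// above, and no specific error code is asserted since none is listed here):
+//
+//    _, err := client.DeleteManagedPrefixListWithContext(ctx, &ec2.DeleteManagedPrefixListInput{
+//        PrefixListId: aws.String("pl-0123456789abcdef0"),
+//    })
+//    if aerr, ok := err.(awserr.Error); ok {
+//        fmt.Println(aerr.Code(), aerr.Message())
+//    }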
+func (c *EC2) DeleteManagedPrefixListWithContext(ctx aws.Context, input *DeleteManagedPrefixListInput, opts ...request.Option) (*DeleteManagedPrefixListOutput, error) { + req, out := c.DeleteManagedPrefixListRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteNatGateway = "DeleteNatGateway" // DeleteNatGatewayRequest generates a "aws/request.Request" representing the @@ -9296,7 +9802,7 @@ func (c *EC2) DeletePlacementGroupRequest(input *DeletePlacementGroupInput) (req // // Deletes the specified placement group. You must terminate all instances in // the placement group before you can delete the placement group. For more information, -// see Placement Groups (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html) +// see Placement groups (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -9690,7 +10196,7 @@ func (c *EC2) DeleteSnapshotRequest(input *DeleteSnapshotInput) (req *request.Re // a registered AMI. You must first de-register the AMI before you can delete // the snapshot. // -// For more information, see Deleting an Amazon EBS Snapshot (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-deleting-snapshot.html) +// For more information, see Deleting an Amazon EBS snapshot (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-deleting-snapshot.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -10475,6 +10981,81 @@ func (c *EC2) DeleteTransitGatewayPeeringAttachmentWithContext(ctx aws.Context, return out, req.Send() } +const opDeleteTransitGatewayPrefixListReference = "DeleteTransitGatewayPrefixListReference" + +// DeleteTransitGatewayPrefixListReferenceRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTransitGatewayPrefixListReference operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteTransitGatewayPrefixListReference for more information on using the DeleteTransitGatewayPrefixListReference +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteTransitGatewayPrefixListReferenceRequest method. 
+// req, resp := client.DeleteTransitGatewayPrefixListReferenceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteTransitGatewayPrefixListReference +func (c *EC2) DeleteTransitGatewayPrefixListReferenceRequest(input *DeleteTransitGatewayPrefixListReferenceInput) (req *request.Request, output *DeleteTransitGatewayPrefixListReferenceOutput) { + op := &request.Operation{ + Name: opDeleteTransitGatewayPrefixListReference, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTransitGatewayPrefixListReferenceInput{} + } + + output = &DeleteTransitGatewayPrefixListReferenceOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteTransitGatewayPrefixListReference API operation for Amazon Elastic Compute Cloud. +// +// Deletes a reference (route) to a prefix list in a specified transit gateway +// route table. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteTransitGatewayPrefixListReference for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteTransitGatewayPrefixListReference +func (c *EC2) DeleteTransitGatewayPrefixListReference(input *DeleteTransitGatewayPrefixListReferenceInput) (*DeleteTransitGatewayPrefixListReferenceOutput, error) { + req, out := c.DeleteTransitGatewayPrefixListReferenceRequest(input) + return out, req.Send() +} + +// DeleteTransitGatewayPrefixListReferenceWithContext is the same as DeleteTransitGatewayPrefixListReference with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteTransitGatewayPrefixListReference for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteTransitGatewayPrefixListReferenceWithContext(ctx aws.Context, input *DeleteTransitGatewayPrefixListReferenceInput, opts ...request.Option) (*DeleteTransitGatewayPrefixListReferenceOutput, error) { + req, out := c.DeleteTransitGatewayPrefixListReferenceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteTransitGatewayRoute = "DeleteTransitGatewayRoute" // DeleteTransitGatewayRouteRequest generates a "aws/request.Request" representing the @@ -10749,7 +11330,7 @@ func (c *EC2) DeleteVolumeRequest(input *DeleteVolumeInput) (req *request.Reques // // The volume can remain in the deleting state for several minutes. // -// For more information, see Deleting an Amazon EBS Volume (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-deleting-volume.html) +// For more information, see Deleting an Amazon EBS volume (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-deleting-volume.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions @@ -11218,9 +11799,13 @@ func (c *EC2) DeleteVpnConnectionRequest(input *DeleteVpnConnectionInput) (req * // your VPN connection have been compromised, you can delete the VPN connection // and create a new one that has new keys, without needing to delete the VPC // or virtual private gateway. If you create a new VPN connection, you must -// reconfigure the customer gateway using the new configuration information +// reconfigure the customer gateway device using the new configuration information // returned with the new VPN connection ID. // +// For certificate-based authentication, delete all AWS Certificate Manager +// (ACM) private certificates used for the AWS-side tunnel endpoints for the +// VPN connection before deleting the VPN connection. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -11372,11 +11957,10 @@ func (c *EC2) DeleteVpnGatewayRequest(input *DeleteVpnGatewayInput) (req *reques // DeleteVpnGateway API operation for Amazon Elastic Compute Cloud. // -// Deletes the specified virtual private gateway. We recommend that before you -// delete a virtual private gateway, you detach it from the VPC and delete the -// VPN connection. Note that you don't need to delete the virtual private gateway -// if you plan to delete and recreate the VPN connection between your VPC and -// your network. +// Deletes the specified virtual private gateway. You must first detach the +// virtual private gateway from the VPC. Note that you don't need to delete +// the virtual private gateway if you plan to delete and recreate the VPN connection +// between your VPC and your network. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -11569,6 +12153,81 @@ func (c *EC2) DeregisterImageWithContext(ctx aws.Context, input *DeregisterImage return out, req.Send() } +const opDeregisterInstanceEventNotificationAttributes = "DeregisterInstanceEventNotificationAttributes" + +// DeregisterInstanceEventNotificationAttributesRequest generates a "aws/request.Request" representing the +// client's request for the DeregisterInstanceEventNotificationAttributes operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeregisterInstanceEventNotificationAttributes for more information on using the DeregisterInstanceEventNotificationAttributes +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeregisterInstanceEventNotificationAttributesRequest method. 
+// req, resp := client.DeregisterInstanceEventNotificationAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeregisterInstanceEventNotificationAttributes +func (c *EC2) DeregisterInstanceEventNotificationAttributesRequest(input *DeregisterInstanceEventNotificationAttributesInput) (req *request.Request, output *DeregisterInstanceEventNotificationAttributesOutput) { + op := &request.Operation{ + Name: opDeregisterInstanceEventNotificationAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeregisterInstanceEventNotificationAttributesInput{} + } + + output = &DeregisterInstanceEventNotificationAttributesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeregisterInstanceEventNotificationAttributes API operation for Amazon Elastic Compute Cloud. +// +// Deregisters tag keys to prevent tags that have the specified tag keys from +// being included in scheduled event notifications for resources in the Region. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeregisterInstanceEventNotificationAttributes for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeregisterInstanceEventNotificationAttributes +func (c *EC2) DeregisterInstanceEventNotificationAttributes(input *DeregisterInstanceEventNotificationAttributesInput) (*DeregisterInstanceEventNotificationAttributesOutput, error) { + req, out := c.DeregisterInstanceEventNotificationAttributesRequest(input) + return out, req.Send() +} + +// DeregisterInstanceEventNotificationAttributesWithContext is the same as DeregisterInstanceEventNotificationAttributes with the addition of +// the ability to pass a context and additional request options. +// +// See DeregisterInstanceEventNotificationAttributes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeregisterInstanceEventNotificationAttributesWithContext(ctx aws.Context, input *DeregisterInstanceEventNotificationAttributesInput, opts ...request.Option) (*DeregisterInstanceEventNotificationAttributesOutput, error) { + req, out := c.DeregisterInstanceEventNotificationAttributesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeregisterTransitGatewayMulticastGroupMembers = "DeregisterTransitGatewayMulticastGroupMembers" // DeregisterTransitGatewayMulticastGroupMembersRequest generates a "aws/request.Request" representing the @@ -12023,13 +12682,12 @@ func (c *EC2) DescribeAvailabilityZonesRequest(input *DescribeAvailabilityZonesI // DescribeAvailabilityZones API operation for Amazon Elastic Compute Cloud. // -// Describes the Availability Zones and Local Zones that are available to you. -// If there is an event impacting an Availability Zone or Local Zone, you can -// use this request to view the state and any provided messages for that Availability -// Zone or Local Zone. 
+// Describes the Availability Zones, Local Zones, and Wavelength Zones that +// are available to you. If there is an event impacting a zone, you can use +// this request to view the state and any provided messages for that zone. // -// For more information about Availability Zones and Local Zones, see Regions -// and Availability Zones (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html) +// For more information about Availability Zones, Local Zones, and Wavelength +// Zones, see Regions, Zones and Outposts (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -12192,7 +12850,7 @@ func (c *EC2) DescribeByoipCidrsRequest(input *DescribeByoipCidrsInput) (req *re // Describes the IP address ranges that were specified in calls to ProvisionByoipCidr. // // To describe the address pools that were created when you provisioned the -// address ranges, use DescribePublicIpv4Pools. +// address ranges, use DescribePublicIpv4Pools or DescribeIpv6Pools. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -12407,6 +13065,138 @@ func (c *EC2) DescribeCapacityReservationsPagesWithContext(ctx aws.Context, inpu return p.Err() } +const opDescribeCarrierGateways = "DescribeCarrierGateways" + +// DescribeCarrierGatewaysRequest generates a "aws/request.Request" representing the +// client's request for the DescribeCarrierGateways operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeCarrierGateways for more information on using the DescribeCarrierGateways +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeCarrierGatewaysRequest method. +// req, resp := client.DescribeCarrierGatewaysRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeCarrierGateways +func (c *EC2) DescribeCarrierGatewaysRequest(input *DescribeCarrierGatewaysInput) (req *request.Request, output *DescribeCarrierGatewaysOutput) { + op := &request.Operation{ + Name: opDescribeCarrierGateways, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeCarrierGatewaysInput{} + } + + output = &DescribeCarrierGatewaysOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeCarrierGateways API operation for Amazon Elastic Compute Cloud. +// +// Describes one or more of your carrier gateways. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
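+//
+// A hedged sketch of a filtered call (the vpc-id filter name follows the
+// usual EC2 filter conventions but is an assumption here; the VPC ID is a
+// placeholder):
+//
+//    out, err := client.DescribeCarrierGateways(&ec2.DescribeCarrierGatewaysInput{
+//        Filters: []*ec2.Filter{{
+//            Name:   aws.String("vpc-id"),
+//            Values: []*string{aws.String("vpc-0123456789abcdef0")},
+//        }},
+//    })
+//    if err == nil {
+//        for _, gw := range out.CarrierGateways {
+//            fmt.Println(aws.StringValue(gw.CarrierGatewayId))
+//        }
+//    }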
+// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeCarrierGateways for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeCarrierGateways +func (c *EC2) DescribeCarrierGateways(input *DescribeCarrierGatewaysInput) (*DescribeCarrierGatewaysOutput, error) { + req, out := c.DescribeCarrierGatewaysRequest(input) + return out, req.Send() +} + +// DescribeCarrierGatewaysWithContext is the same as DescribeCarrierGateways with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeCarrierGateways for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeCarrierGatewaysWithContext(ctx aws.Context, input *DescribeCarrierGatewaysInput, opts ...request.Option) (*DescribeCarrierGatewaysOutput, error) { + req, out := c.DescribeCarrierGatewaysRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeCarrierGatewaysPages iterates over the pages of a DescribeCarrierGateways operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeCarrierGateways method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeCarrierGateways operation. +// pageNum := 0 +// err := client.DescribeCarrierGatewaysPages(params, +// func(page *ec2.DescribeCarrierGatewaysOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeCarrierGatewaysPages(input *DescribeCarrierGatewaysInput, fn func(*DescribeCarrierGatewaysOutput, bool) bool) error { + return c.DescribeCarrierGatewaysPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeCarrierGatewaysPagesWithContext same as DescribeCarrierGatewaysPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeCarrierGatewaysPagesWithContext(ctx aws.Context, input *DescribeCarrierGatewaysInput, fn func(*DescribeCarrierGatewaysOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeCarrierGatewaysInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeCarrierGatewaysRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
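+ // Each page re-issues the request from a shallow copy of the caller's
+ // input, so the paginator can advance NextToken without mutating the
+ // original input struct.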
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeCarrierGatewaysOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeClassicLinkInstances = "DescribeClassicLinkInstances" // DescribeClassicLinkInstancesRequest generates a "aws/request.Request" representing the @@ -13234,6 +14024,12 @@ func (c *EC2) DescribeCoipPoolsRequest(input *DescribeCoipPoolsInput) (req *requ Name: opDescribeCoipPools, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -13278,6 +14074,58 @@ func (c *EC2) DescribeCoipPoolsWithContext(ctx aws.Context, input *DescribeCoipP return out, req.Send() } +// DescribeCoipPoolsPages iterates over the pages of a DescribeCoipPools operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeCoipPools method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeCoipPools operation. +// pageNum := 0 +// err := client.DescribeCoipPoolsPages(params, +// func(page *ec2.DescribeCoipPoolsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeCoipPoolsPages(input *DescribeCoipPoolsInput, fn func(*DescribeCoipPoolsOutput, bool) bool) error { + return c.DescribeCoipPoolsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeCoipPoolsPagesWithContext same as DescribeCoipPoolsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeCoipPoolsPagesWithContext(ctx aws.Context, input *DescribeCoipPoolsInput, fn func(*DescribeCoipPoolsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeCoipPoolsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeCoipPoolsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeCoipPoolsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeConversionTasks = "DescribeConversionTasks" // DescribeConversionTasksRequest generates a "aws/request.Request" representing the @@ -13826,7 +14674,7 @@ func (c *EC2) DescribeExportImageTasksRequest(input *DescribeExportImageTasksInp // DescribeExportImageTasks API operation for Amazon Elastic Compute Cloud. // -// Describes the specified export image tasks or all your export image tasks. +// Describes the specified export image tasks or all of your export image tasks. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -13952,7 +14800,7 @@ func (c *EC2) DescribeExportTasksRequest(input *DescribeExportTasksInput) (req * // DescribeExportTasks API operation for Amazon Elastic Compute Cloud. 
// -// Describes the specified export instance tasks or all your export instance +// Describes the specified export instance tasks or all of your export instance // tasks. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -16037,7 +16885,7 @@ func (c *EC2) DescribeInstanceCreditSpecificationsRequest(input *DescribeInstanc // all, the call fails. If you specify only instance IDs in an unaffected zone, // the call works normally. // -// For more information, see Burstable Performance Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html) +// For more information, see Burstable performance instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -16120,6 +16968,81 @@ func (c *EC2) DescribeInstanceCreditSpecificationsPagesWithContext(ctx aws.Conte return p.Err() } +const opDescribeInstanceEventNotificationAttributes = "DescribeInstanceEventNotificationAttributes" + +// DescribeInstanceEventNotificationAttributesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeInstanceEventNotificationAttributes operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeInstanceEventNotificationAttributes for more information on using the DescribeInstanceEventNotificationAttributes +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeInstanceEventNotificationAttributesRequest method. +// req, resp := client.DescribeInstanceEventNotificationAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInstanceEventNotificationAttributes +func (c *EC2) DescribeInstanceEventNotificationAttributesRequest(input *DescribeInstanceEventNotificationAttributesInput) (req *request.Request, output *DescribeInstanceEventNotificationAttributesOutput) { + op := &request.Operation{ + Name: opDescribeInstanceEventNotificationAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeInstanceEventNotificationAttributesInput{} + } + + output = &DescribeInstanceEventNotificationAttributesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeInstanceEventNotificationAttributes API operation for Amazon Elastic Compute Cloud. +// +// Describes the tag keys that are registered to appear in scheduled event notifications +// for resources in the current Region. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeInstanceEventNotificationAttributes for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInstanceEventNotificationAttributes +func (c *EC2) DescribeInstanceEventNotificationAttributes(input *DescribeInstanceEventNotificationAttributesInput) (*DescribeInstanceEventNotificationAttributesOutput, error) { + req, out := c.DescribeInstanceEventNotificationAttributesRequest(input) + return out, req.Send() +} + +// DescribeInstanceEventNotificationAttributesWithContext is the same as DescribeInstanceEventNotificationAttributes with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeInstanceEventNotificationAttributes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeInstanceEventNotificationAttributesWithContext(ctx aws.Context, input *DescribeInstanceEventNotificationAttributesInput, opts ...request.Option) (*DescribeInstanceEventNotificationAttributesOutput, error) { + req, out := c.DescribeInstanceEventNotificationAttributesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeInstanceStatus = "DescribeInstanceStatus" // DescribeInstanceStatusRequest generates a "aws/request.Request" representing the @@ -16178,18 +17101,18 @@ func (c *EC2) DescribeInstanceStatusRequest(input *DescribeInstanceStatusInput) // // * Status checks - Amazon EC2 performs status checks on running EC2 instances // to identify hardware and software issues. For more information, see Status -// Checks for Your Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-system-instance-status-check.html) -// and Troubleshooting Instances with Failed Status Checks (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstances.html) +// checks for your instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-system-instance-status-check.html) +// and Troubleshooting instances with failed status checks (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstances.html) // in the Amazon Elastic Compute Cloud User Guide. // // * Scheduled events - Amazon EC2 can schedule events (such as reboot, stop, // or terminate) for your instances related to hardware issues, software -// updates, or system maintenance. For more information, see Scheduled Events -// for Your Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-instances-status-check_sched.html) +// updates, or system maintenance. For more information, see Scheduled events +// for your instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-instances-status-check_sched.html) // in the Amazon Elastic Compute Cloud User Guide. // // * Instance state - You can manage your instances from the moment you launch -// them through their termination. For more information, see Instance Lifecycle +// them through their termination. For more information, see Instance lifecycle // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html) // in the Amazon Elastic Compute Cloud User Guide. 
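The WithContext variants above take an aws.Context, which the standard context.Context satisfies, so calls can be bounded with a deadline. A small sketch, assuming an existing *ec2.EC2 client (imports: context, fmt, time, plus the aws and ec2 packages from the first sketch); the 30-second timeout and the IncludeAllInstances flag are illustrative choices:

// describeStatuses bounds the API call with a timeout; if the context
// expires first, Send returns an error instead of blocking.
func describeStatuses(client *ec2.EC2) error {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	out, err := client.DescribeInstanceStatusWithContext(ctx, &ec2.DescribeInstanceStatusInput{
		IncludeAllInstances: aws.Bool(true), // report stopped instances too
	})
	if err != nil {
		return err
	}
	for _, s := range out.InstanceStatuses {
		fmt.Println(aws.StringValue(s.InstanceId), aws.StringValue(s.InstanceState.Name))
	}
	return nil
}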
// @@ -16304,6 +17227,12 @@ func (c *EC2) DescribeInstanceTypeOfferingsRequest(input *DescribeInstanceTypeOf Name: opDescribeInstanceTypeOfferings, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -16349,6 +17278,58 @@ func (c *EC2) DescribeInstanceTypeOfferingsWithContext(ctx aws.Context, input *D return out, req.Send() } +// DescribeInstanceTypeOfferingsPages iterates over the pages of a DescribeInstanceTypeOfferings operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeInstanceTypeOfferings method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeInstanceTypeOfferings operation. +// pageNum := 0 +// err := client.DescribeInstanceTypeOfferingsPages(params, +// func(page *ec2.DescribeInstanceTypeOfferingsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeInstanceTypeOfferingsPages(input *DescribeInstanceTypeOfferingsInput, fn func(*DescribeInstanceTypeOfferingsOutput, bool) bool) error { + return c.DescribeInstanceTypeOfferingsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeInstanceTypeOfferingsPagesWithContext same as DescribeInstanceTypeOfferingsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeInstanceTypeOfferingsPagesWithContext(ctx aws.Context, input *DescribeInstanceTypeOfferingsInput, fn func(*DescribeInstanceTypeOfferingsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeInstanceTypeOfferingsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeInstanceTypeOfferingsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeInstanceTypeOfferingsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeInstanceTypes = "DescribeInstanceTypes" // DescribeInstanceTypesRequest generates a "aws/request.Request" representing the @@ -16380,6 +17361,12 @@ func (c *EC2) DescribeInstanceTypesRequest(input *DescribeInstanceTypesInput) (r Name: opDescribeInstanceTypes, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -16393,7 +17380,7 @@ func (c *EC2) DescribeInstanceTypesRequest(input *DescribeInstanceTypesInput) (r // DescribeInstanceTypes API operation for Amazon Elastic Compute Cloud. // -// Returns a list of all instance types offered in your current AWS Region. +// Describes the details of the instance types that are offered in a location. // The results can be filtered by the attributes of the instance types. 
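Because the operation now carries Paginator metadata, the generated DescribeInstanceTypesPages helper can walk those filtered results page by page. A sketch reusing the client and imports from the sketches above; the free-tier-eligible filter name is taken from the EC2 API reference, and the rest is illustrative:

// listFreeTierTypes prints every instance type matching a server-side filter.
func listFreeTierTypes(client *ec2.EC2) error {
	input := &ec2.DescribeInstanceTypesInput{
		Filters: []*ec2.Filter{{
			Name:   aws.String("free-tier-eligible"),
			Values: []*string{aws.String("true")},
		}},
	}
	return client.DescribeInstanceTypesPages(input,
		func(page *ec2.DescribeInstanceTypesOutput, lastPage bool) bool {
			for _, it := range page.InstanceTypes {
				fmt.Println(aws.StringValue(it.InstanceType))
			}
			return true // keep going until the service stops returning NextToken
		})
}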
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -16424,6 +17411,58 @@ func (c *EC2) DescribeInstanceTypesWithContext(ctx aws.Context, input *DescribeI return out, req.Send() } +// DescribeInstanceTypesPages iterates over the pages of a DescribeInstanceTypes operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeInstanceTypes method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeInstanceTypes operation. +// pageNum := 0 +// err := client.DescribeInstanceTypesPages(params, +// func(page *ec2.DescribeInstanceTypesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeInstanceTypesPages(input *DescribeInstanceTypesInput, fn func(*DescribeInstanceTypesOutput, bool) bool) error { + return c.DescribeInstanceTypesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeInstanceTypesPagesWithContext same as DescribeInstanceTypesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeInstanceTypesPagesWithContext(ctx aws.Context, input *DescribeInstanceTypesInput, fn func(*DescribeInstanceTypesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeInstanceTypesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeInstanceTypesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeInstanceTypesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeInstances = "DescribeInstances" // DescribeInstancesRequest generates a "aws/request.Request" representing the @@ -16474,13 +17513,17 @@ func (c *EC2) DescribeInstancesRequest(input *DescribeInstancesInput) (req *requ // DescribeInstances API operation for Amazon Elastic Compute Cloud. // -// Describes the specified instances or all of AWS account's instances. +// Describes the specified instances or all instances. +// +// If you specify instance IDs, the output includes information for only the +// specified instances. If you specify filters, the output includes information +// for only those instances that meet the filter criteria. If you do not specify +// instance IDs or filters, the output includes information for all instances, +// which can affect performance. We recommend that you use pagination to ensure +// that the operation returns quickly and successfully. // -// If you specify one or more instance IDs, Amazon EC2 returns information for -// those instances. If you do not specify instance IDs, Amazon EC2 returns information -// for all relevant instances. If you specify an instance ID that is not valid, -// an error is returned. If you specify an instance that you do not own, it -// is not included in the returned results. +// If you specify an instance ID that is not valid, an error is returned. 
If +// you specify an instance that you do not own, it is not included in the output. // // Recently terminated instances might appear in the returned results. This // interval is usually less than one hour. @@ -16703,6 +17746,138 @@ func (c *EC2) DescribeInternetGatewaysPagesWithContext(ctx aws.Context, input *D return p.Err() } +const opDescribeIpv6Pools = "DescribeIpv6Pools" + +// DescribeIpv6PoolsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeIpv6Pools operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeIpv6Pools for more information on using the DescribeIpv6Pools +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeIpv6PoolsRequest method. +// req, resp := client.DescribeIpv6PoolsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeIpv6Pools +func (c *EC2) DescribeIpv6PoolsRequest(input *DescribeIpv6PoolsInput) (req *request.Request, output *DescribeIpv6PoolsOutput) { + op := &request.Operation{ + Name: opDescribeIpv6Pools, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeIpv6PoolsInput{} + } + + output = &DescribeIpv6PoolsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeIpv6Pools API operation for Amazon Elastic Compute Cloud. +// +// Describes your IPv6 address pools. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeIpv6Pools for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeIpv6Pools +func (c *EC2) DescribeIpv6Pools(input *DescribeIpv6PoolsInput) (*DescribeIpv6PoolsOutput, error) { + req, out := c.DescribeIpv6PoolsRequest(input) + return out, req.Send() +} + +// DescribeIpv6PoolsWithContext is the same as DescribeIpv6Pools with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeIpv6Pools for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeIpv6PoolsWithContext(ctx aws.Context, input *DescribeIpv6PoolsInput, opts ...request.Option) (*DescribeIpv6PoolsOutput, error) { + req, out := c.DescribeIpv6PoolsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +// DescribeIpv6PoolsPages iterates over the pages of a DescribeIpv6Pools operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeIpv6Pools method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeIpv6Pools operation. +// pageNum := 0 +// err := client.DescribeIpv6PoolsPages(params, +// func(page *ec2.DescribeIpv6PoolsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeIpv6PoolsPages(input *DescribeIpv6PoolsInput, fn func(*DescribeIpv6PoolsOutput, bool) bool) error { + return c.DescribeIpv6PoolsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeIpv6PoolsPagesWithContext same as DescribeIpv6PoolsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeIpv6PoolsPagesWithContext(ctx aws.Context, input *DescribeIpv6PoolsInput, fn func(*DescribeIpv6PoolsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeIpv6PoolsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeIpv6PoolsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeIpv6PoolsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeKeyPairs = "DescribeKeyPairs" // DescribeKeyPairsRequest generates a "aws/request.Request" representing the @@ -16831,7 +18006,9 @@ func (c *EC2) DescribeLaunchTemplateVersionsRequest(input *DescribeLaunchTemplat // DescribeLaunchTemplateVersions API operation for Amazon Elastic Compute Cloud. // // Describes one or more versions of a specified launch template. You can describe -// all versions, individual versions, or a range of versions. +// all versions, individual versions, or a range of versions. You can also describe +// all the latest versions or all the default versions of all the launch templates +// in your account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -17076,6 +18253,12 @@ func (c *EC2) DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsReq Name: opDescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -17120,6 +18303,58 @@ func (c *EC2) DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsWit return out, req.Send() } +// DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsPages iterates over the pages of a DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations operation, +// calling the "fn" function with the response data for each page. 
To stop +// iterating, return false from the fn function. +// +// See DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations operation. +// pageNum := 0 +// err := client.DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsPages(params, +// func(page *ec2.DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsPages(input *DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsInput, fn func(*DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsOutput, bool) bool) error { + return c.DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsPagesWithContext same as DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsPagesWithContext(ctx aws.Context, input *DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsInput, fn func(*DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeLocalGatewayRouteTableVpcAssociations = "DescribeLocalGatewayRouteTableVpcAssociations" // DescribeLocalGatewayRouteTableVpcAssociationsRequest generates a "aws/request.Request" representing the @@ -17151,6 +18386,12 @@ func (c *EC2) DescribeLocalGatewayRouteTableVpcAssociationsRequest(input *Descri Name: opDescribeLocalGatewayRouteTableVpcAssociations, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -17195,6 +18436,58 @@ func (c *EC2) DescribeLocalGatewayRouteTableVpcAssociationsWithContext(ctx aws.C return out, req.Send() } +// DescribeLocalGatewayRouteTableVpcAssociationsPages iterates over the pages of a DescribeLocalGatewayRouteTableVpcAssociations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. 
+// +// See DescribeLocalGatewayRouteTableVpcAssociations method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeLocalGatewayRouteTableVpcAssociations operation. +// pageNum := 0 +// err := client.DescribeLocalGatewayRouteTableVpcAssociationsPages(params, +// func(page *ec2.DescribeLocalGatewayRouteTableVpcAssociationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeLocalGatewayRouteTableVpcAssociationsPages(input *DescribeLocalGatewayRouteTableVpcAssociationsInput, fn func(*DescribeLocalGatewayRouteTableVpcAssociationsOutput, bool) bool) error { + return c.DescribeLocalGatewayRouteTableVpcAssociationsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeLocalGatewayRouteTableVpcAssociationsPagesWithContext same as DescribeLocalGatewayRouteTableVpcAssociationsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeLocalGatewayRouteTableVpcAssociationsPagesWithContext(ctx aws.Context, input *DescribeLocalGatewayRouteTableVpcAssociationsInput, fn func(*DescribeLocalGatewayRouteTableVpcAssociationsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeLocalGatewayRouteTableVpcAssociationsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeLocalGatewayRouteTableVpcAssociationsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeLocalGatewayRouteTableVpcAssociationsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeLocalGatewayRouteTables = "DescribeLocalGatewayRouteTables" // DescribeLocalGatewayRouteTablesRequest generates a "aws/request.Request" representing the @@ -17226,6 +18519,12 @@ func (c *EC2) DescribeLocalGatewayRouteTablesRequest(input *DescribeLocalGateway Name: opDescribeLocalGatewayRouteTables, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -17270,6 +18569,58 @@ func (c *EC2) DescribeLocalGatewayRouteTablesWithContext(ctx aws.Context, input return out, req.Send() } +// DescribeLocalGatewayRouteTablesPages iterates over the pages of a DescribeLocalGatewayRouteTables operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeLocalGatewayRouteTables method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeLocalGatewayRouteTables operation. 
+// pageNum := 0 +// err := client.DescribeLocalGatewayRouteTablesPages(params, +// func(page *ec2.DescribeLocalGatewayRouteTablesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeLocalGatewayRouteTablesPages(input *DescribeLocalGatewayRouteTablesInput, fn func(*DescribeLocalGatewayRouteTablesOutput, bool) bool) error { + return c.DescribeLocalGatewayRouteTablesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeLocalGatewayRouteTablesPagesWithContext same as DescribeLocalGatewayRouteTablesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeLocalGatewayRouteTablesPagesWithContext(ctx aws.Context, input *DescribeLocalGatewayRouteTablesInput, fn func(*DescribeLocalGatewayRouteTablesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeLocalGatewayRouteTablesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeLocalGatewayRouteTablesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeLocalGatewayRouteTablesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeLocalGatewayVirtualInterfaceGroups = "DescribeLocalGatewayVirtualInterfaceGroups" // DescribeLocalGatewayVirtualInterfaceGroupsRequest generates a "aws/request.Request" representing the @@ -17301,6 +18652,12 @@ func (c *EC2) DescribeLocalGatewayVirtualInterfaceGroupsRequest(input *DescribeL Name: opDescribeLocalGatewayVirtualInterfaceGroups, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -17344,6 +18701,58 @@ func (c *EC2) DescribeLocalGatewayVirtualInterfaceGroupsWithContext(ctx aws.Cont return out, req.Send() } +// DescribeLocalGatewayVirtualInterfaceGroupsPages iterates over the pages of a DescribeLocalGatewayVirtualInterfaceGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeLocalGatewayVirtualInterfaceGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeLocalGatewayVirtualInterfaceGroups operation. 
+// pageNum := 0 +// err := client.DescribeLocalGatewayVirtualInterfaceGroupsPages(params, +// func(page *ec2.DescribeLocalGatewayVirtualInterfaceGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeLocalGatewayVirtualInterfaceGroupsPages(input *DescribeLocalGatewayVirtualInterfaceGroupsInput, fn func(*DescribeLocalGatewayVirtualInterfaceGroupsOutput, bool) bool) error { + return c.DescribeLocalGatewayVirtualInterfaceGroupsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeLocalGatewayVirtualInterfaceGroupsPagesWithContext same as DescribeLocalGatewayVirtualInterfaceGroupsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeLocalGatewayVirtualInterfaceGroupsPagesWithContext(ctx aws.Context, input *DescribeLocalGatewayVirtualInterfaceGroupsInput, fn func(*DescribeLocalGatewayVirtualInterfaceGroupsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeLocalGatewayVirtualInterfaceGroupsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeLocalGatewayVirtualInterfaceGroupsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeLocalGatewayVirtualInterfaceGroupsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeLocalGatewayVirtualInterfaces = "DescribeLocalGatewayVirtualInterfaces" // DescribeLocalGatewayVirtualInterfacesRequest generates a "aws/request.Request" representing the @@ -17375,6 +18784,12 @@ func (c *EC2) DescribeLocalGatewayVirtualInterfacesRequest(input *DescribeLocalG Name: opDescribeLocalGatewayVirtualInterfaces, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -17418,6 +18833,58 @@ func (c *EC2) DescribeLocalGatewayVirtualInterfacesWithContext(ctx aws.Context, return out, req.Send() } +// DescribeLocalGatewayVirtualInterfacesPages iterates over the pages of a DescribeLocalGatewayVirtualInterfaces operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeLocalGatewayVirtualInterfaces method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeLocalGatewayVirtualInterfaces operation. 
+// pageNum := 0 +// err := client.DescribeLocalGatewayVirtualInterfacesPages(params, +// func(page *ec2.DescribeLocalGatewayVirtualInterfacesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeLocalGatewayVirtualInterfacesPages(input *DescribeLocalGatewayVirtualInterfacesInput, fn func(*DescribeLocalGatewayVirtualInterfacesOutput, bool) bool) error { + return c.DescribeLocalGatewayVirtualInterfacesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeLocalGatewayVirtualInterfacesPagesWithContext same as DescribeLocalGatewayVirtualInterfacesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeLocalGatewayVirtualInterfacesPagesWithContext(ctx aws.Context, input *DescribeLocalGatewayVirtualInterfacesInput, fn func(*DescribeLocalGatewayVirtualInterfacesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeLocalGatewayVirtualInterfacesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeLocalGatewayVirtualInterfacesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeLocalGatewayVirtualInterfacesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeLocalGateways = "DescribeLocalGateways" // DescribeLocalGatewaysRequest generates a "aws/request.Request" representing the @@ -17449,6 +18916,12 @@ func (c *EC2) DescribeLocalGatewaysRequest(input *DescribeLocalGatewaysInput) (r Name: opDescribeLocalGateways, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -17493,6 +18966,192 @@ func (c *EC2) DescribeLocalGatewaysWithContext(ctx aws.Context, input *DescribeL return out, req.Send() } +// DescribeLocalGatewaysPages iterates over the pages of a DescribeLocalGateways operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeLocalGateways method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeLocalGateways operation. +// pageNum := 0 +// err := client.DescribeLocalGatewaysPages(params, +// func(page *ec2.DescribeLocalGatewaysOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeLocalGatewaysPages(input *DescribeLocalGatewaysInput, fn func(*DescribeLocalGatewaysOutput, bool) bool) error { + return c.DescribeLocalGatewaysPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeLocalGatewaysPagesWithContext same as DescribeLocalGatewaysPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeLocalGatewaysPagesWithContext(ctx aws.Context, input *DescribeLocalGatewaysInput, fn func(*DescribeLocalGatewaysOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeLocalGatewaysInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeLocalGatewaysRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeLocalGatewaysOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeManagedPrefixLists = "DescribeManagedPrefixLists" + +// DescribeManagedPrefixListsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeManagedPrefixLists operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeManagedPrefixLists for more information on using the DescribeManagedPrefixLists +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeManagedPrefixListsRequest method. +// req, resp := client.DescribeManagedPrefixListsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeManagedPrefixLists +func (c *EC2) DescribeManagedPrefixListsRequest(input *DescribeManagedPrefixListsInput) (req *request.Request, output *DescribeManagedPrefixListsOutput) { + op := &request.Operation{ + Name: opDescribeManagedPrefixLists, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeManagedPrefixListsInput{} + } + + output = &DescribeManagedPrefixListsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeManagedPrefixLists API operation for Amazon Elastic Compute Cloud. +// +// Describes your managed prefix lists and any AWS-managed prefix lists. +// +// To view the entries for your prefix list, use GetManagedPrefixListEntries. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeManagedPrefixLists for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeManagedPrefixLists +func (c *EC2) DescribeManagedPrefixLists(input *DescribeManagedPrefixListsInput) (*DescribeManagedPrefixListsOutput, error) { + req, out := c.DescribeManagedPrefixListsRequest(input) + return out, req.Send() +} + +// DescribeManagedPrefixListsWithContext is the same as DescribeManagedPrefixLists with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeManagedPrefixLists for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeManagedPrefixListsWithContext(ctx aws.Context, input *DescribeManagedPrefixListsInput, opts ...request.Option) (*DescribeManagedPrefixListsOutput, error) { + req, out := c.DescribeManagedPrefixListsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeManagedPrefixListsPages iterates over the pages of a DescribeManagedPrefixLists operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeManagedPrefixLists method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeManagedPrefixLists operation. +// pageNum := 0 +// err := client.DescribeManagedPrefixListsPages(params, +// func(page *ec2.DescribeManagedPrefixListsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeManagedPrefixListsPages(input *DescribeManagedPrefixListsInput, fn func(*DescribeManagedPrefixListsOutput, bool) bool) error { + return c.DescribeManagedPrefixListsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeManagedPrefixListsPagesWithContext same as DescribeManagedPrefixListsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeManagedPrefixListsPagesWithContext(ctx aws.Context, input *DescribeManagedPrefixListsInput, fn func(*DescribeManagedPrefixListsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeManagedPrefixListsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeManagedPrefixListsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeManagedPrefixListsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeMovingAddresses = "DescribeMovingAddresses" // DescribeMovingAddressesRequest generates a "aws/request.Request" representing the @@ -18278,7 +19937,7 @@ func (c *EC2) DescribePlacementGroupsRequest(input *DescribePlacementGroupsInput // DescribePlacementGroups API operation for Amazon Elastic Compute Cloud. 
// // Describes the specified placement groups or all of your placement groups. -// For more information, see Placement Groups (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html) +// For more information, see Placement groups (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -18361,10 +20020,9 @@ func (c *EC2) DescribePrefixListsRequest(input *DescribePrefixListsInput) (req * // // Describes available AWS services in a prefix list format, which includes // the prefix list name and prefix list ID of the service and the IP address -// range for the service. A prefix list ID is required for creating an outbound -// security group rule that allows traffic from a VPC to access an AWS service -// through a gateway VPC endpoint. Currently, the services that support this -// action are Amazon S3 and Amazon DynamoDB. +// range for the service. +// +// We recommend that you use DescribeManagedPrefixLists instead. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -20031,7 +21689,7 @@ func (c *EC2) DescribeSnapshotsRequest(input *DescribeSnapshotsInput) (req *requ // * implicit: An AWS account has implicit create volume permissions for // all snapshots it owns. // -// The list of snapshots returned can be modified by specifying snapshot IDs, +// The list of snapshots returned can be filtered by specifying snapshot IDs, // snapshot owners, or AWS accounts with create volume permissions. If no options // are specified, Amazon EC2 returns all snapshots for which you have create // volume permissions. @@ -20051,12 +21709,14 @@ func (c *EC2) DescribeSnapshotsRequest(input *DescribeSnapshotsInput) (req *requ // (if you own the snapshots), self for snapshots for which you own or have // explicit permissions, or all for public snapshots. // -// If you are describing a long list of snapshots, you can paginate the output -// to make the list more manageable. The MaxResults parameter sets the maximum -// number of results returned in a single page. If the list of results exceeds -// your MaxResults value, then that number of results is returned along with -// a NextToken value that can be passed to a subsequent DescribeSnapshots request -// to retrieve the remaining results. +// If you are describing a long list of snapshots, we recommend that you paginate +// the output to make the list more manageable. The MaxResults parameter sets +// the maximum number of results returned in a single page. If the list of results +// exceeds your MaxResults value, then that number of results is returned along +// with a NextToken value that can be passed to a subsequent DescribeSnapshots +// request to retrieve the remaining results. +// +// To get the state of fast snapshot restores for a snapshot, use DescribeFastSnapshotRestores. // // For more information about EBS snapshots, see Amazon EBS Snapshots (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSSnapshots.html) // in the Amazon Elastic Compute Cloud User Guide. @@ -20186,7 +21846,7 @@ func (c *EC2) DescribeSpotDatafeedSubscriptionRequest(input *DescribeSpotDatafee // DescribeSpotDatafeedSubscription API operation for Amazon Elastic Compute Cloud. // // Describes the data feed for Spot Instances. 
For more information, see Spot -// Instance Data Feed (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-data-feeds.html) +// Instance data feed (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-data-feeds.html) // in the Amazon EC2 User Guide for Linux Instances. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -20560,8 +22220,8 @@ func (c *EC2) DescribeSpotInstanceRequestsRequest(input *DescribeSpotInstanceReq // You can use DescribeSpotInstanceRequests to find a running Spot Instance // by examining the response. If the status of the Spot Instance is fulfilled, // the instance ID appears in the response and contains the identifier of the -// instance. Alternatively, you can use DescribeInstances with a filter to look -// for instances where the instance lifecycle is spot. +// instance. Alternatively, you can use DescribeInstances (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances) +// with a filter to look for instances where the instance lifecycle is spot. // // We recommend that you set MaxResults to a value between 5 and 1000 to limit // the number of results returned. This paginates the output, which makes the @@ -20704,7 +22364,7 @@ func (c *EC2) DescribeSpotPriceHistoryRequest(input *DescribeSpotPriceHistoryInp // DescribeSpotPriceHistory API operation for Amazon Elastic Compute Cloud. // // Describes the Spot price history. For more information, see Spot Instance -// Pricing History (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-spot-instances-history.html) +// pricing history (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-spot-instances-history.html) // in the Amazon EC2 User Guide for Linux Instances. // // When you specify a start and end time, this operation returns the prices @@ -21760,6 +23420,12 @@ func (c *EC2) DescribeTransitGatewayMulticastDomainsRequest(input *DescribeTrans Name: opDescribeTransitGatewayMulticastDomains, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -21803,6 +23469,58 @@ func (c *EC2) DescribeTransitGatewayMulticastDomainsWithContext(ctx aws.Context, return out, req.Send() } +// DescribeTransitGatewayMulticastDomainsPages iterates over the pages of a DescribeTransitGatewayMulticastDomains operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeTransitGatewayMulticastDomains method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeTransitGatewayMulticastDomains operation. 
+// pageNum := 0 +// err := client.DescribeTransitGatewayMulticastDomainsPages(params, +// func(page *ec2.DescribeTransitGatewayMulticastDomainsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeTransitGatewayMulticastDomainsPages(input *DescribeTransitGatewayMulticastDomainsInput, fn func(*DescribeTransitGatewayMulticastDomainsOutput, bool) bool) error { + return c.DescribeTransitGatewayMulticastDomainsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeTransitGatewayMulticastDomainsPagesWithContext same as DescribeTransitGatewayMulticastDomainsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeTransitGatewayMulticastDomainsPagesWithContext(ctx aws.Context, input *DescribeTransitGatewayMulticastDomainsInput, fn func(*DescribeTransitGatewayMulticastDomainsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeTransitGatewayMulticastDomainsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeTransitGatewayMulticastDomainsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeTransitGatewayMulticastDomainsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeTransitGatewayPeeringAttachments = "DescribeTransitGatewayPeeringAttachments" // DescribeTransitGatewayPeeringAttachmentsRequest generates a "aws/request.Request" representing the @@ -21834,6 +23552,12 @@ func (c *EC2) DescribeTransitGatewayPeeringAttachmentsRequest(input *DescribeTra Name: opDescribeTransitGatewayPeeringAttachments, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -21877,6 +23601,58 @@ func (c *EC2) DescribeTransitGatewayPeeringAttachmentsWithContext(ctx aws.Contex return out, req.Send() } +// DescribeTransitGatewayPeeringAttachmentsPages iterates over the pages of a DescribeTransitGatewayPeeringAttachments operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeTransitGatewayPeeringAttachments method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeTransitGatewayPeeringAttachments operation. 
+// pageNum := 0 +// err := client.DescribeTransitGatewayPeeringAttachmentsPages(params, +// func(page *ec2.DescribeTransitGatewayPeeringAttachmentsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeTransitGatewayPeeringAttachmentsPages(input *DescribeTransitGatewayPeeringAttachmentsInput, fn func(*DescribeTransitGatewayPeeringAttachmentsOutput, bool) bool) error { + return c.DescribeTransitGatewayPeeringAttachmentsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeTransitGatewayPeeringAttachmentsPagesWithContext same as DescribeTransitGatewayPeeringAttachmentsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeTransitGatewayPeeringAttachmentsPagesWithContext(ctx aws.Context, input *DescribeTransitGatewayPeeringAttachmentsInput, fn func(*DescribeTransitGatewayPeeringAttachmentsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeTransitGatewayPeeringAttachmentsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeTransitGatewayPeeringAttachmentsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeTransitGatewayPeeringAttachmentsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeTransitGatewayRouteTables = "DescribeTransitGatewayRouteTables" // DescribeTransitGatewayRouteTablesRequest generates a "aws/request.Request" representing the @@ -22421,7 +24197,7 @@ func (c *EC2) DescribeVolumeStatusRequest(input *DescribeVolumeStatusInput) (req // status of the volume is ok. If the check fails, the overall status is impaired. // If the status is insufficient-data, then the checks may still be taking place // on your volume at the time. We recommend that you retry the request. For -// more information about volume status, see Monitoring the Status of Your Volumes +// more information about volume status, see Monitoring the status of your volumes // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-volume-status.html) // in the Amazon Elastic Compute Cloud User Guide. // @@ -22573,12 +24349,12 @@ func (c *EC2) DescribeVolumesRequest(input *DescribeVolumesInput) (req *request. // // Describes the specified EBS volumes or all of your EBS volumes. // -// If you are describing a long list of volumes, you can paginate the output -// to make the list more manageable. The MaxResults parameter sets the maximum -// number of results returned in a single page. If the list of results exceeds -// your MaxResults value, then that number of results is returned along with -// a NextToken value that can be passed to a subsequent DescribeVolumes request -// to retrieve the remaining results. +// If you are describing a long list of volumes, we recommend that you paginate +// the output to make the list more manageable. The MaxResults parameter sets +// the maximum number of results returned in a single page. 
If the list of results
+// exceeds your MaxResults value, then that number of results is returned along
+// with a NextToken value that can be passed to a subsequent DescribeVolumes
+// request to retrieve the remaining results.
 //
 // For more information about EBS volumes, see Amazon EBS Volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumes.html)
 // in the Amazon Elastic Compute Cloud User Guide.
@@ -22713,19 +24489,17 @@ func (c *EC2) DescribeVolumesModificationsRequest(input *DescribeVolumesModifica
 // DescribeVolumesModifications API operation for Amazon Elastic Compute Cloud.
 //
-// Reports the current modification status of EBS volumes.
+// Describes the most recent volume modification request for the specified EBS
+// volumes.
 //
-// Current-generation EBS volumes support modification of attributes including
-// type, size, and (for io1 volumes) IOPS provisioning while either attached
-// to or detached from an instance. Following an action from the API or the
-// console to modify a volume, the status of the modification may be modifying,
-// optimizing, completed, or failed. If a volume has never been modified, then
-// certain elements of the returned VolumeModification objects are null.
+// If a volume has never been modified, some information in the output will
+// be null. If a volume has been modified more than once, the output includes
+// only the most recent modification request.
 //
 // You can also use CloudWatch Events to check the status of a modification
 // to an EBS volume. For information about CloudWatch Events, see the Amazon
 // CloudWatch Events User Guide (https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/).
-// For more information, see Monitoring Volume Modifications" (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-expand-volume.html#monitoring_mods)
+// For more information, see Monitoring volume modifications (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-expand-volume.html#monitoring_mods)
 // in the Amazon Elastic Compute Cloud User Guide.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -23672,6 +25446,13 @@ func (c *EC2) DescribeVpcEndpointServicesRequest(input *DescribeVpcEndpointServi
 //
 // Describes available services to which you can create a VPC endpoint.
 //
+// When the service provider and the consumer have different accounts and multiple
+// Availability Zones, and the consumer views the VPC endpoint service information,
+// the response only includes the common Availability Zones. For example, when
+// the service provider account uses us-east-1a and us-east-1c and the consumer
+// uses us-east-1a and us-east-1b, the response includes the
+// VPC endpoint services in the common Availability Zone, us-east-1a.
+//
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
@@ -24534,7 +26315,7 @@ func (c *EC2) DetachVolumeRequest(input *DetachVolumeInput) (req *request.Reques
 // When a volume with an AWS Marketplace product code is detached from an instance,
 // the product code is no longer associated with the instance.
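The MaxResults/NextToken contract described for DescribeVolumes above can also be driven by hand when the Pages helper is not a good fit (for example, when pages feed a channel). A minimal sketch with an arbitrary page size of 100; client setup and imports as in the earlier sketches:

// listVolumeIds collects every volume ID, one page per request.
func listVolumeIds(client *ec2.EC2) ([]string, error) {
	var ids []string
	input := &ec2.DescribeVolumesInput{MaxResults: aws.Int64(100)}
	for {
		out, err := client.DescribeVolumes(input)
		if err != nil {
			return nil, err
		}
		for _, v := range out.Volumes {
			ids = append(ids, aws.StringValue(v.VolumeId))
		}
		if aws.StringValue(out.NextToken) == "" {
			return ids, nil // an empty token means this was the last page
		}
		input.NextToken = out.NextToken
	}
}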
// -// For more information, see Detaching an Amazon EBS Volume (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-detaching-volume.html) +// For more information, see Detaching an Amazon EBS volume (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-detaching-volume.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -25081,6 +26862,8 @@ func (c *EC2) DisableVpcClassicLinkDnsSupportRequest(input *DisableVpcClassicLin // ClassicLink (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html) // in the Amazon Elastic Compute Cloud User Guide. // +// You must specify a VPC ID in the request. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -25276,6 +27059,86 @@ func (c *EC2) DisassociateClientVpnTargetNetworkWithContext(ctx aws.Context, inp return out, req.Send() } +const opDisassociateEnclaveCertificateIamRole = "DisassociateEnclaveCertificateIamRole" + +// DisassociateEnclaveCertificateIamRoleRequest generates a "aws/request.Request" representing the +// client's request for the DisassociateEnclaveCertificateIamRole operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DisassociateEnclaveCertificateIamRole for more information on using the DisassociateEnclaveCertificateIamRole +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DisassociateEnclaveCertificateIamRoleRequest method. +// req, resp := client.DisassociateEnclaveCertificateIamRoleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisassociateEnclaveCertificateIamRole +func (c *EC2) DisassociateEnclaveCertificateIamRoleRequest(input *DisassociateEnclaveCertificateIamRoleInput) (req *request.Request, output *DisassociateEnclaveCertificateIamRoleOutput) { + op := &request.Operation{ + Name: opDisassociateEnclaveCertificateIamRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisassociateEnclaveCertificateIamRoleInput{} + } + + output = &DisassociateEnclaveCertificateIamRoleOutput{} + req = c.newRequest(op, input, output) + return +} + +// DisassociateEnclaveCertificateIamRole API operation for Amazon Elastic Compute Cloud. +// +// Disassociates an IAM role from an AWS Certificate Manager (ACM) certificate. +// Disassociating an IAM role from an ACM certificate removes the Amazon S3 +// object that contains the certificate, certificate chain, and encrypted private +// key from the Amazon S3 bucket. It also revokes the IAM role's permission +// to use the AWS Key Management Service (KMS) customer master key (CMK) used +// to encrypt the private key. This effectively revokes the role's permission +// to use the certificate. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DisassociateEnclaveCertificateIamRole for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisassociateEnclaveCertificateIamRole +func (c *EC2) DisassociateEnclaveCertificateIamRole(input *DisassociateEnclaveCertificateIamRoleInput) (*DisassociateEnclaveCertificateIamRoleOutput, error) { + req, out := c.DisassociateEnclaveCertificateIamRoleRequest(input) + return out, req.Send() +} + +// DisassociateEnclaveCertificateIamRoleWithContext is the same as DisassociateEnclaveCertificateIamRole with the addition of +// the ability to pass a context and additional request options. +// +// See DisassociateEnclaveCertificateIamRole for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DisassociateEnclaveCertificateIamRoleWithContext(ctx aws.Context, input *DisassociateEnclaveCertificateIamRoleInput, opts ...request.Option) (*DisassociateEnclaveCertificateIamRoleOutput, error) { + req, out := c.DisassociateEnclaveCertificateIamRoleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDisassociateIamInstanceProfile = "DisassociateIamInstanceProfile" // DisassociateIamInstanceProfileRequest generates a "aws/request.Request" representing the @@ -25397,7 +27260,7 @@ func (c *EC2) DisassociateRouteTableRequest(input *DisassociateRouteTableInput) // DisassociateRouteTable API operation for Amazon Elastic Compute Cloud. // -// Disassociates a subnet from a route table. +// Disassociates a subnet or gateway from a route table. // // After you perform this action, the subnet no longer uses the routes in the // route table. Instead, it uses the routes in the VPC's main route table. For @@ -25796,7 +27659,7 @@ func (c *EC2) EnableEbsEncryptionByDefaultRequest(input *EnableEbsEncryptionByDe // // After you enable encryption by default, you can no longer launch instances // using instance types that do not support encryption. For more information, -// see Supported Instance Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#EBSEncryption_supported_instances). +// see Supported instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#EBSEncryption_supported_instances). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -25877,6 +27740,9 @@ func (c *EC2) EnableFastSnapshotRestoresRequest(input *EnableFastSnapshotRestore // state. To get the current state of fast snapshot restores, use DescribeFastSnapshotRestores. // To disable fast snapshot restores, use DisableFastSnapshotRestores. // +// For more information, see Amazon EBS fast snapshot restore (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-fast-snapshot-restore.html) +// in the Amazon Elastic Compute Cloud User Guide. +// // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -26264,6 +28130,8 @@ func (c *EC2) EnableVpcClassicLinkDnsSupportRequest(input *EnableVpcClassicLinkD // see ClassicLink (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html) // in the Amazon Elastic Compute Cloud User Guide. // +// You must specify a VPC ID in the request. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -26568,6 +28436,10 @@ func (c *EC2) ExportTransitGatewayRoutesRequest(input *ExportTransitGatewayRoute // S3 bucket. By default, all routes are exported. Alternatively, you can filter // by CIDR range. // +// The routes are saved to the specified bucket in a JSON file. For more information, +// see Export Route Tables to Amazon S3 (https://docs.aws.amazon.com/vpc/latest/tgw/tgw-route-tables.html#tgw-export-route-tables) +// in Transit Gateways. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -26596,6 +28468,218 @@ func (c *EC2) ExportTransitGatewayRoutesWithContext(ctx aws.Context, input *Expo return out, req.Send() } +const opGetAssociatedEnclaveCertificateIamRoles = "GetAssociatedEnclaveCertificateIamRoles" + +// GetAssociatedEnclaveCertificateIamRolesRequest generates a "aws/request.Request" representing the +// client's request for the GetAssociatedEnclaveCertificateIamRoles operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetAssociatedEnclaveCertificateIamRoles for more information on using the GetAssociatedEnclaveCertificateIamRoles +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetAssociatedEnclaveCertificateIamRolesRequest method. +// req, resp := client.GetAssociatedEnclaveCertificateIamRolesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetAssociatedEnclaveCertificateIamRoles +func (c *EC2) GetAssociatedEnclaveCertificateIamRolesRequest(input *GetAssociatedEnclaveCertificateIamRolesInput) (req *request.Request, output *GetAssociatedEnclaveCertificateIamRolesOutput) { + op := &request.Operation{ + Name: opGetAssociatedEnclaveCertificateIamRoles, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetAssociatedEnclaveCertificateIamRolesInput{} + } + + output = &GetAssociatedEnclaveCertificateIamRolesOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetAssociatedEnclaveCertificateIamRoles API operation for Amazon Elastic Compute Cloud. +// +// Returns the IAM roles that are associated with the specified AWS Certificate +// Manager (ACM) certificate. 
It also returns the name of the Amazon S3 bucket +// and the Amazon S3 object key where the certificate, certificate chain, and +// encrypted private key bundle are stored, and the ARN of the AWS Key Management +// Service (KMS) customer master key (CMK) that's used to encrypt the private +// key. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation GetAssociatedEnclaveCertificateIamRoles for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetAssociatedEnclaveCertificateIamRoles +func (c *EC2) GetAssociatedEnclaveCertificateIamRoles(input *GetAssociatedEnclaveCertificateIamRolesInput) (*GetAssociatedEnclaveCertificateIamRolesOutput, error) { + req, out := c.GetAssociatedEnclaveCertificateIamRolesRequest(input) + return out, req.Send() +} + +// GetAssociatedEnclaveCertificateIamRolesWithContext is the same as GetAssociatedEnclaveCertificateIamRoles with the addition of +// the ability to pass a context and additional request options. +// +// See GetAssociatedEnclaveCertificateIamRoles for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetAssociatedEnclaveCertificateIamRolesWithContext(ctx aws.Context, input *GetAssociatedEnclaveCertificateIamRolesInput, opts ...request.Option) (*GetAssociatedEnclaveCertificateIamRolesOutput, error) { + req, out := c.GetAssociatedEnclaveCertificateIamRolesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetAssociatedIpv6PoolCidrs = "GetAssociatedIpv6PoolCidrs" + +// GetAssociatedIpv6PoolCidrsRequest generates a "aws/request.Request" representing the +// client's request for the GetAssociatedIpv6PoolCidrs operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetAssociatedIpv6PoolCidrs for more information on using the GetAssociatedIpv6PoolCidrs +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetAssociatedIpv6PoolCidrsRequest method. 
+// req, resp := client.GetAssociatedIpv6PoolCidrsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetAssociatedIpv6PoolCidrs +func (c *EC2) GetAssociatedIpv6PoolCidrsRequest(input *GetAssociatedIpv6PoolCidrsInput) (req *request.Request, output *GetAssociatedIpv6PoolCidrsOutput) { + op := &request.Operation{ + Name: opGetAssociatedIpv6PoolCidrs, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetAssociatedIpv6PoolCidrsInput{} + } + + output = &GetAssociatedIpv6PoolCidrsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetAssociatedIpv6PoolCidrs API operation for Amazon Elastic Compute Cloud. +// +// Gets information about the IPv6 CIDR block associations for a specified IPv6 +// address pool. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation GetAssociatedIpv6PoolCidrs for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetAssociatedIpv6PoolCidrs +func (c *EC2) GetAssociatedIpv6PoolCidrs(input *GetAssociatedIpv6PoolCidrsInput) (*GetAssociatedIpv6PoolCidrsOutput, error) { + req, out := c.GetAssociatedIpv6PoolCidrsRequest(input) + return out, req.Send() +} + +// GetAssociatedIpv6PoolCidrsWithContext is the same as GetAssociatedIpv6PoolCidrs with the addition of +// the ability to pass a context and additional request options. +// +// See GetAssociatedIpv6PoolCidrs for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetAssociatedIpv6PoolCidrsWithContext(ctx aws.Context, input *GetAssociatedIpv6PoolCidrsInput, opts ...request.Option) (*GetAssociatedIpv6PoolCidrsOutput, error) { + req, out := c.GetAssociatedIpv6PoolCidrsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// GetAssociatedIpv6PoolCidrsPages iterates over the pages of a GetAssociatedIpv6PoolCidrs operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetAssociatedIpv6PoolCidrs method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetAssociatedIpv6PoolCidrs operation. 
+// pageNum := 0 +// err := client.GetAssociatedIpv6PoolCidrsPages(params, +// func(page *ec2.GetAssociatedIpv6PoolCidrsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) GetAssociatedIpv6PoolCidrsPages(input *GetAssociatedIpv6PoolCidrsInput, fn func(*GetAssociatedIpv6PoolCidrsOutput, bool) bool) error { + return c.GetAssociatedIpv6PoolCidrsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// GetAssociatedIpv6PoolCidrsPagesWithContext same as GetAssociatedIpv6PoolCidrsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetAssociatedIpv6PoolCidrsPagesWithContext(ctx aws.Context, input *GetAssociatedIpv6PoolCidrsInput, fn func(*GetAssociatedIpv6PoolCidrsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetAssociatedIpv6PoolCidrsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetAssociatedIpv6PoolCidrsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*GetAssociatedIpv6PoolCidrsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opGetCapacityReservationUsage = "GetCapacityReservationUsage" // GetCapacityReservationUsageRequest generates a "aws/request.Request" representing the @@ -26961,7 +29045,7 @@ func (c *EC2) GetDefaultCreditSpecificationRequest(input *GetDefaultCreditSpecif // Describes the default credit option for CPU usage of a burstable performance // instance family. // -// For more information, see Burstable Performance Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html) +// For more information, see Burstable performance instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -27149,6 +29233,138 @@ func (c *EC2) GetEbsEncryptionByDefaultWithContext(ctx aws.Context, input *GetEb return out, req.Send() } +const opGetGroupsForCapacityReservation = "GetGroupsForCapacityReservation" + +// GetGroupsForCapacityReservationRequest generates a "aws/request.Request" representing the +// client's request for the GetGroupsForCapacityReservation operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetGroupsForCapacityReservation for more information on using the GetGroupsForCapacityReservation +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetGroupsForCapacityReservationRequest method. 
+// req, resp := client.GetGroupsForCapacityReservationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetGroupsForCapacityReservation +func (c *EC2) GetGroupsForCapacityReservationRequest(input *GetGroupsForCapacityReservationInput) (req *request.Request, output *GetGroupsForCapacityReservationOutput) { + op := &request.Operation{ + Name: opGetGroupsForCapacityReservation, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetGroupsForCapacityReservationInput{} + } + + output = &GetGroupsForCapacityReservationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetGroupsForCapacityReservation API operation for Amazon Elastic Compute Cloud. +// +// Lists the resource groups to which a Capacity Reservation has been added. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation GetGroupsForCapacityReservation for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetGroupsForCapacityReservation +func (c *EC2) GetGroupsForCapacityReservation(input *GetGroupsForCapacityReservationInput) (*GetGroupsForCapacityReservationOutput, error) { + req, out := c.GetGroupsForCapacityReservationRequest(input) + return out, req.Send() +} + +// GetGroupsForCapacityReservationWithContext is the same as GetGroupsForCapacityReservation with the addition of +// the ability to pass a context and additional request options. +// +// See GetGroupsForCapacityReservation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetGroupsForCapacityReservationWithContext(ctx aws.Context, input *GetGroupsForCapacityReservationInput, opts ...request.Option) (*GetGroupsForCapacityReservationOutput, error) { + req, out := c.GetGroupsForCapacityReservationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// GetGroupsForCapacityReservationPages iterates over the pages of a GetGroupsForCapacityReservation operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetGroupsForCapacityReservation method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetGroupsForCapacityReservation operation. 
+// pageNum := 0 +// err := client.GetGroupsForCapacityReservationPages(params, +// func(page *ec2.GetGroupsForCapacityReservationOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) GetGroupsForCapacityReservationPages(input *GetGroupsForCapacityReservationInput, fn func(*GetGroupsForCapacityReservationOutput, bool) bool) error { + return c.GetGroupsForCapacityReservationPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// GetGroupsForCapacityReservationPagesWithContext same as GetGroupsForCapacityReservationPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetGroupsForCapacityReservationPagesWithContext(ctx aws.Context, input *GetGroupsForCapacityReservationInput, fn func(*GetGroupsForCapacityReservationOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetGroupsForCapacityReservationInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetGroupsForCapacityReservationRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*GetGroupsForCapacityReservationOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opGetHostReservationPurchasePreview = "GetHostReservationPurchasePreview" // GetHostReservationPurchasePreviewRequest generates a "aws/request.Request" representing the @@ -27275,6 +29491,12 @@ func (c *EC2) GetLaunchTemplateDataRequest(input *GetLaunchTemplateDataInput) (r // Retrieves the configuration data of the specified instance. You can use this // data to create a launch template. // +// This action calls on other describe actions to get instance information. +// Depending on your instance configuration, you may need to allow the following +// actions in your IAM policy: DescribeSpotInstanceRequests, DescribeInstanceCreditSpecifications, +// DescribeVolumes, DescribeInstanceAttribute, and DescribeElasticGpus. Or, +// you can allow describe* depending on your instance requirements. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -27303,6 +29525,271 @@ func (c *EC2) GetLaunchTemplateDataWithContext(ctx aws.Context, input *GetLaunch return out, req.Send() } +const opGetManagedPrefixListAssociations = "GetManagedPrefixListAssociations" + +// GetManagedPrefixListAssociationsRequest generates a "aws/request.Request" representing the +// client's request for the GetManagedPrefixListAssociations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetManagedPrefixListAssociations for more information on using the GetManagedPrefixListAssociations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetManagedPrefixListAssociationsRequest method. +// req, resp := client.GetManagedPrefixListAssociationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetManagedPrefixListAssociations +func (c *EC2) GetManagedPrefixListAssociationsRequest(input *GetManagedPrefixListAssociationsInput) (req *request.Request, output *GetManagedPrefixListAssociationsOutput) { + op := &request.Operation{ + Name: opGetManagedPrefixListAssociations, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetManagedPrefixListAssociationsInput{} + } + + output = &GetManagedPrefixListAssociationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetManagedPrefixListAssociations API operation for Amazon Elastic Compute Cloud. +// +// Gets information about the resources that are associated with the specified +// managed prefix list. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation GetManagedPrefixListAssociations for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetManagedPrefixListAssociations +func (c *EC2) GetManagedPrefixListAssociations(input *GetManagedPrefixListAssociationsInput) (*GetManagedPrefixListAssociationsOutput, error) { + req, out := c.GetManagedPrefixListAssociationsRequest(input) + return out, req.Send() +} + +// GetManagedPrefixListAssociationsWithContext is the same as GetManagedPrefixListAssociations with the addition of +// the ability to pass a context and additional request options. +// +// See GetManagedPrefixListAssociations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetManagedPrefixListAssociationsWithContext(ctx aws.Context, input *GetManagedPrefixListAssociationsInput, opts ...request.Option) (*GetManagedPrefixListAssociationsOutput, error) { + req, out := c.GetManagedPrefixListAssociationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// GetManagedPrefixListAssociationsPages iterates over the pages of a GetManagedPrefixListAssociations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetManagedPrefixListAssociations method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetManagedPrefixListAssociations operation. 
+// pageNum := 0 +// err := client.GetManagedPrefixListAssociationsPages(params, +// func(page *ec2.GetManagedPrefixListAssociationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) GetManagedPrefixListAssociationsPages(input *GetManagedPrefixListAssociationsInput, fn func(*GetManagedPrefixListAssociationsOutput, bool) bool) error { + return c.GetManagedPrefixListAssociationsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// GetManagedPrefixListAssociationsPagesWithContext same as GetManagedPrefixListAssociationsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetManagedPrefixListAssociationsPagesWithContext(ctx aws.Context, input *GetManagedPrefixListAssociationsInput, fn func(*GetManagedPrefixListAssociationsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetManagedPrefixListAssociationsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetManagedPrefixListAssociationsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*GetManagedPrefixListAssociationsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opGetManagedPrefixListEntries = "GetManagedPrefixListEntries" + +// GetManagedPrefixListEntriesRequest generates a "aws/request.Request" representing the +// client's request for the GetManagedPrefixListEntries operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetManagedPrefixListEntries for more information on using the GetManagedPrefixListEntries +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetManagedPrefixListEntriesRequest method. +// req, resp := client.GetManagedPrefixListEntriesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetManagedPrefixListEntries +func (c *EC2) GetManagedPrefixListEntriesRequest(input *GetManagedPrefixListEntriesInput) (req *request.Request, output *GetManagedPrefixListEntriesOutput) { + op := &request.Operation{ + Name: opGetManagedPrefixListEntries, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetManagedPrefixListEntriesInput{} + } + + output = &GetManagedPrefixListEntriesOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetManagedPrefixListEntries API operation for Amazon Elastic Compute Cloud. 
+// +// Gets information about the entries for a specified managed prefix list. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation GetManagedPrefixListEntries for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetManagedPrefixListEntries +func (c *EC2) GetManagedPrefixListEntries(input *GetManagedPrefixListEntriesInput) (*GetManagedPrefixListEntriesOutput, error) { + req, out := c.GetManagedPrefixListEntriesRequest(input) + return out, req.Send() +} + +// GetManagedPrefixListEntriesWithContext is the same as GetManagedPrefixListEntries with the addition of +// the ability to pass a context and additional request options. +// +// See GetManagedPrefixListEntries for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetManagedPrefixListEntriesWithContext(ctx aws.Context, input *GetManagedPrefixListEntriesInput, opts ...request.Option) (*GetManagedPrefixListEntriesOutput, error) { + req, out := c.GetManagedPrefixListEntriesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// GetManagedPrefixListEntriesPages iterates over the pages of a GetManagedPrefixListEntries operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetManagedPrefixListEntries method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetManagedPrefixListEntries operation. +// pageNum := 0 +// err := client.GetManagedPrefixListEntriesPages(params, +// func(page *ec2.GetManagedPrefixListEntriesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) GetManagedPrefixListEntriesPages(input *GetManagedPrefixListEntriesInput, fn func(*GetManagedPrefixListEntriesOutput, bool) bool) error { + return c.GetManagedPrefixListEntriesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// GetManagedPrefixListEntriesPagesWithContext same as GetManagedPrefixListEntriesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetManagedPrefixListEntriesPagesWithContext(ctx aws.Context, input *GetManagedPrefixListEntriesInput, fn func(*GetManagedPrefixListEntriesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetManagedPrefixListEntriesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetManagedPrefixListEntriesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*GetManagedPrefixListEntriesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opGetPasswordData = "GetPasswordData" // GetPasswordDataRequest generates a "aws/request.Request" representing the @@ -27635,6 +30122,12 @@ func (c *EC2) GetTransitGatewayMulticastDomainAssociationsRequest(input *GetTran Name: opGetTransitGatewayMulticastDomainAssociations, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -27679,6 +30172,191 @@ func (c *EC2) GetTransitGatewayMulticastDomainAssociationsWithContext(ctx aws.Co return out, req.Send() } +// GetTransitGatewayMulticastDomainAssociationsPages iterates over the pages of a GetTransitGatewayMulticastDomainAssociations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetTransitGatewayMulticastDomainAssociations method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetTransitGatewayMulticastDomainAssociations operation. +// pageNum := 0 +// err := client.GetTransitGatewayMulticastDomainAssociationsPages(params, +// func(page *ec2.GetTransitGatewayMulticastDomainAssociationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) GetTransitGatewayMulticastDomainAssociationsPages(input *GetTransitGatewayMulticastDomainAssociationsInput, fn func(*GetTransitGatewayMulticastDomainAssociationsOutput, bool) bool) error { + return c.GetTransitGatewayMulticastDomainAssociationsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// GetTransitGatewayMulticastDomainAssociationsPagesWithContext same as GetTransitGatewayMulticastDomainAssociationsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetTransitGatewayMulticastDomainAssociationsPagesWithContext(ctx aws.Context, input *GetTransitGatewayMulticastDomainAssociationsInput, fn func(*GetTransitGatewayMulticastDomainAssociationsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetTransitGatewayMulticastDomainAssociationsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetTransitGatewayMulticastDomainAssociationsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*GetTransitGatewayMulticastDomainAssociationsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opGetTransitGatewayPrefixListReferences = "GetTransitGatewayPrefixListReferences" + +// GetTransitGatewayPrefixListReferencesRequest generates a "aws/request.Request" representing the +// client's request for the GetTransitGatewayPrefixListReferences operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetTransitGatewayPrefixListReferences for more information on using the GetTransitGatewayPrefixListReferences +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetTransitGatewayPrefixListReferencesRequest method. +// req, resp := client.GetTransitGatewayPrefixListReferencesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetTransitGatewayPrefixListReferences +func (c *EC2) GetTransitGatewayPrefixListReferencesRequest(input *GetTransitGatewayPrefixListReferencesInput) (req *request.Request, output *GetTransitGatewayPrefixListReferencesOutput) { + op := &request.Operation{ + Name: opGetTransitGatewayPrefixListReferences, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetTransitGatewayPrefixListReferencesInput{} + } + + output = &GetTransitGatewayPrefixListReferencesOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetTransitGatewayPrefixListReferences API operation for Amazon Elastic Compute Cloud. +// +// Gets information about the prefix list references in a specified transit +// gateway route table. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation GetTransitGatewayPrefixListReferences for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetTransitGatewayPrefixListReferences +func (c *EC2) GetTransitGatewayPrefixListReferences(input *GetTransitGatewayPrefixListReferencesInput) (*GetTransitGatewayPrefixListReferencesOutput, error) { + req, out := c.GetTransitGatewayPrefixListReferencesRequest(input) + return out, req.Send() +} + +// GetTransitGatewayPrefixListReferencesWithContext is the same as GetTransitGatewayPrefixListReferences with the addition of +// the ability to pass a context and additional request options. +// +// See GetTransitGatewayPrefixListReferences for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetTransitGatewayPrefixListReferencesWithContext(ctx aws.Context, input *GetTransitGatewayPrefixListReferencesInput, opts ...request.Option) (*GetTransitGatewayPrefixListReferencesOutput, error) { + req, out := c.GetTransitGatewayPrefixListReferencesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +// GetTransitGatewayPrefixListReferencesPages iterates over the pages of a GetTransitGatewayPrefixListReferences operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetTransitGatewayPrefixListReferences method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetTransitGatewayPrefixListReferences operation. +// pageNum := 0 +// err := client.GetTransitGatewayPrefixListReferencesPages(params, +// func(page *ec2.GetTransitGatewayPrefixListReferencesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) GetTransitGatewayPrefixListReferencesPages(input *GetTransitGatewayPrefixListReferencesInput, fn func(*GetTransitGatewayPrefixListReferencesOutput, bool) bool) error { + return c.GetTransitGatewayPrefixListReferencesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// GetTransitGatewayPrefixListReferencesPagesWithContext same as GetTransitGatewayPrefixListReferencesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetTransitGatewayPrefixListReferencesPagesWithContext(ctx aws.Context, input *GetTransitGatewayPrefixListReferencesInput, fn func(*GetTransitGatewayPrefixListReferencesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetTransitGatewayPrefixListReferencesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetTransitGatewayPrefixListReferencesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*GetTransitGatewayPrefixListReferencesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opGetTransitGatewayRouteTableAssociations = "GetTransitGatewayRouteTableAssociations" // GetTransitGatewayRouteTableAssociationsRequest generates a "aws/request.Request" representing the @@ -28413,6 +31091,84 @@ func (c *EC2) ImportVolumeWithContext(ctx aws.Context, input *ImportVolumeInput, return out, req.Send() } +const opModifyAvailabilityZoneGroup = "ModifyAvailabilityZoneGroup" + +// ModifyAvailabilityZoneGroupRequest generates a "aws/request.Request" representing the +// client's request for the ModifyAvailabilityZoneGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyAvailabilityZoneGroup for more information on using the ModifyAvailabilityZoneGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyAvailabilityZoneGroupRequest method. 
+// req, resp := client.ModifyAvailabilityZoneGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyAvailabilityZoneGroup +func (c *EC2) ModifyAvailabilityZoneGroupRequest(input *ModifyAvailabilityZoneGroupInput) (req *request.Request, output *ModifyAvailabilityZoneGroupOutput) { + op := &request.Operation{ + Name: opModifyAvailabilityZoneGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyAvailabilityZoneGroupInput{} + } + + output = &ModifyAvailabilityZoneGroupOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyAvailabilityZoneGroup API operation for Amazon Elastic Compute Cloud. +// +// Changes the opt-in status of the Local Zone and Wavelength Zone group for +// your account. +// +// Use DescribeAvailabilityZones (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html) +// to view the value for GroupName. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyAvailabilityZoneGroup for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyAvailabilityZoneGroup +func (c *EC2) ModifyAvailabilityZoneGroup(input *ModifyAvailabilityZoneGroupInput) (*ModifyAvailabilityZoneGroupOutput, error) { + req, out := c.ModifyAvailabilityZoneGroupRequest(input) + return out, req.Send() +} + +// ModifyAvailabilityZoneGroupWithContext is the same as ModifyAvailabilityZoneGroup with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyAvailabilityZoneGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyAvailabilityZoneGroupWithContext(ctx aws.Context, input *ModifyAvailabilityZoneGroupInput, opts ...request.Option) (*ModifyAvailabilityZoneGroupOutput, error) { + req, out := c.ModifyAvailabilityZoneGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opModifyCapacityReservation = "ModifyCapacityReservation" // ModifyCapacityReservationRequest generates a "aws/request.Request" representing the @@ -28536,10 +31292,8 @@ func (c *EC2) ModifyClientVpnEndpointRequest(input *ModifyClientVpnEndpointInput // ModifyClientVpnEndpoint API operation for Amazon Elastic Compute Cloud. // -// Modifies the specified Client VPN endpoint. You can only modify an endpoint's -// server certificate information, client connection logging information, DNS -// server, and description. Modifying the DNS server resets existing client -// connections. +// Modifies the specified Client VPN endpoint. Modifying the DNS server resets +// existing client connections. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -28626,7 +31380,7 @@ func (c *EC2) ModifyDefaultCreditSpecificationRequest(input *ModifyDefaultCredit // can call GetDefaultCreditSpecification and check DefaultCreditSpecification // for updates. // -// For more information, see Burstable Performance Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html) +// For more information, see Burstable performance instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -29338,7 +32092,7 @@ func (c *EC2) ModifyInstanceAttributeRequest(input *ModifyInstanceAttributeInput // we recommend that you use the ModifyNetworkInterfaceAttribute action. // // To modify some attributes, the instance must be stopped. For more information, -// see Modifying Attributes of a Stopped Instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_ChangingAttributesWhileInstanceStopped.html) +// see Modifying attributes of a stopped instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_ChangingAttributesWhileInstanceStopped.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -29493,7 +32247,7 @@ func (c *EC2) ModifyInstanceCreditSpecificationRequest(input *ModifyInstanceCred // Modifies the credit option for CPU usage on a running or stopped burstable // performance instance. The credit options are standard and unlimited. // -// For more information, see Burstable Performance Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html) +// For more information, see Burstable performance instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -29648,7 +32402,7 @@ func (c *EC2) ModifyInstanceMetadataOptionsRequest(input *ModifyInstanceMetadata // the API responds with a state of “pending”. After the parameter modifications // are successfully applied to the instance, the state of the modifications // changes from “pending” to “applied” in subsequent describe-instances -// API calls. For more information, see Instance Metadata and User Data (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html). +// API calls. For more information, see Instance metadata and user data (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -29849,6 +32603,86 @@ func (c *EC2) ModifyLaunchTemplateWithContext(ctx aws.Context, input *ModifyLaun return out, req.Send() } +const opModifyManagedPrefixList = "ModifyManagedPrefixList" + +// ModifyManagedPrefixListRequest generates a "aws/request.Request" representing the +// client's request for the ModifyManagedPrefixList operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See ModifyManagedPrefixList for more information on using the ModifyManagedPrefixList +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyManagedPrefixListRequest method. +// req, resp := client.ModifyManagedPrefixListRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyManagedPrefixList +func (c *EC2) ModifyManagedPrefixListRequest(input *ModifyManagedPrefixListInput) (req *request.Request, output *ModifyManagedPrefixListOutput) { + op := &request.Operation{ + Name: opModifyManagedPrefixList, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyManagedPrefixListInput{} + } + + output = &ModifyManagedPrefixListOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyManagedPrefixList API operation for Amazon Elastic Compute Cloud. +// +// Modifies the specified managed prefix list. +// +// Adding or removing entries in a prefix list creates a new version of the +// prefix list. Changing the name of the prefix list does not affect the version. +// +// If you specify a current version number that does not match the true current +// version number, the request fails. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyManagedPrefixList for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyManagedPrefixList +func (c *EC2) ModifyManagedPrefixList(input *ModifyManagedPrefixListInput) (*ModifyManagedPrefixListOutput, error) { + req, out := c.ModifyManagedPrefixListRequest(input) + return out, req.Send() +} + +// ModifyManagedPrefixListWithContext is the same as ModifyManagedPrefixList with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyManagedPrefixList for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyManagedPrefixListWithContext(ctx aws.Context, input *ModifyManagedPrefixListInput, opts ...request.Option) (*ModifyManagedPrefixListOutput, error) { + req, out := c.ModifyManagedPrefixListRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opModifyNetworkInterfaceAttribute = "ModifyNetworkInterfaceAttribute" // ModifyNetworkInterfaceAttributeRequest generates a "aws/request.Request" representing the @@ -30061,7 +32895,7 @@ func (c *EC2) ModifySnapshotAttributeRequest(input *ModifySnapshotAttributeInput // be made public. Snapshots encrypted with your default CMK cannot be shared // with other accounts. 
// -// For more information about modifying snapshot permissions, see Sharing Snapshots +// For more information about modifying snapshot permissions, see Sharing snapshots // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-modifying-snapshot-permissions.html) // in the Amazon Elastic Compute Cloud User Guide. // @@ -30506,6 +33340,157 @@ func (c *EC2) ModifyTrafficMirrorSessionWithContext(ctx aws.Context, input *Modi return out, req.Send() } +const opModifyTransitGateway = "ModifyTransitGateway" + +// ModifyTransitGatewayRequest generates a "aws/request.Request" representing the +// client's request for the ModifyTransitGateway operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyTransitGateway for more information on using the ModifyTransitGateway +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyTransitGatewayRequest method. +// req, resp := client.ModifyTransitGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyTransitGateway +func (c *EC2) ModifyTransitGatewayRequest(input *ModifyTransitGatewayInput) (req *request.Request, output *ModifyTransitGatewayOutput) { + op := &request.Operation{ + Name: opModifyTransitGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyTransitGatewayInput{} + } + + output = &ModifyTransitGatewayOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyTransitGateway API operation for Amazon Elastic Compute Cloud. +// +// Modifies the specified transit gateway. When you modify a transit gateway, +// the modified options are applied to new transit gateway attachments only. +// Your existing transit gateway attachments are not modified. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyTransitGateway for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyTransitGateway +func (c *EC2) ModifyTransitGateway(input *ModifyTransitGatewayInput) (*ModifyTransitGatewayOutput, error) { + req, out := c.ModifyTransitGatewayRequest(input) + return out, req.Send() +} + +// ModifyTransitGatewayWithContext is the same as ModifyTransitGateway with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyTransitGateway for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) ModifyTransitGatewayWithContext(ctx aws.Context, input *ModifyTransitGatewayInput, opts ...request.Option) (*ModifyTransitGatewayOutput, error) { + req, out := c.ModifyTransitGatewayRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyTransitGatewayPrefixListReference = "ModifyTransitGatewayPrefixListReference" + +// ModifyTransitGatewayPrefixListReferenceRequest generates a "aws/request.Request" representing the +// client's request for the ModifyTransitGatewayPrefixListReference operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyTransitGatewayPrefixListReference for more information on using the ModifyTransitGatewayPrefixListReference +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyTransitGatewayPrefixListReferenceRequest method. +// req, resp := client.ModifyTransitGatewayPrefixListReferenceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyTransitGatewayPrefixListReference +func (c *EC2) ModifyTransitGatewayPrefixListReferenceRequest(input *ModifyTransitGatewayPrefixListReferenceInput) (req *request.Request, output *ModifyTransitGatewayPrefixListReferenceOutput) { + op := &request.Operation{ + Name: opModifyTransitGatewayPrefixListReference, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyTransitGatewayPrefixListReferenceInput{} + } + + output = &ModifyTransitGatewayPrefixListReferenceOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyTransitGatewayPrefixListReference API operation for Amazon Elastic Compute Cloud. +// +// Modifies a reference (route) to a prefix list in a specified transit gateway +// route table. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyTransitGatewayPrefixListReference for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyTransitGatewayPrefixListReference +func (c *EC2) ModifyTransitGatewayPrefixListReference(input *ModifyTransitGatewayPrefixListReferenceInput) (*ModifyTransitGatewayPrefixListReferenceOutput, error) { + req, out := c.ModifyTransitGatewayPrefixListReferenceRequest(input) + return out, req.Send() +} + +// ModifyTransitGatewayPrefixListReferenceWithContext is the same as ModifyTransitGatewayPrefixListReference with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyTransitGatewayPrefixListReference for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyTransitGatewayPrefixListReferenceWithContext(ctx aws.Context, input *ModifyTransitGatewayPrefixListReferenceInput, opts ...request.Option) (*ModifyTransitGatewayPrefixListReferenceOutput, error) { + req, out := c.ModifyTransitGatewayPrefixListReferenceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opModifyTransitGatewayVpcAttachment = "ModifyTransitGatewayVpcAttachment" // ModifyTransitGatewayVpcAttachmentRequest generates a "aws/request.Request" representing the @@ -30628,30 +33613,30 @@ func (c *EC2) ModifyVolumeRequest(input *ModifyVolumeInput) (req *request.Reques // size, volume type, and IOPS capacity. If your EBS volume is attached to a // current-generation EC2 instance type, you may be able to apply these changes // without stopping the instance or detaching the volume from it. For more information -// about modifying an EBS volume running Linux, see Modifying the Size, IOPS, -// or Type of an EBS Volume on Linux (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-expand-volume.html). +// about modifying an EBS volume running Linux, see Modifying the size, IOPS, +// or type of an EBS volume on Linux (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-expand-volume.html). // For more information about modifying an EBS volume running Windows, see Modifying -// the Size, IOPS, or Type of an EBS Volume on Windows (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ebs-expand-volume.html). +// the size, IOPS, or type of an EBS volume on Windows (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ebs-expand-volume.html). // // When you complete a resize operation on your volume, you need to extend the // volume's file-system size to take advantage of the new storage capacity. // For information about extending a Linux file system, see Extending a Linux -// File System (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-expand-volume.html#recognize-expanded-volume-linux). +// file system (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-expand-volume.html#recognize-expanded-volume-linux). // For information about extending a Windows file system, see Extending a Windows -// File System (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ebs-expand-volume.html#recognize-expanded-volume-windows). +// file system (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ebs-expand-volume.html#recognize-expanded-volume-windows). // // You can use CloudWatch Events to check the status of a modification to an // EBS volume. For information about CloudWatch Events, see the Amazon CloudWatch // Events User Guide (https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/). // You can also track the status of a modification using DescribeVolumesModifications. // For information about tracking status changes using either method, see Monitoring -// Volume Modifications (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-expand-volume.html#monitoring_mods). +// volume modifications (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-expand-volume.html#monitoring_mods). // // With previous-generation instance types, resizing an EBS volume may require // detaching and reattaching the volume or stopping and restarting the instance. 
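//
// Editor's sketch (not part of the upstream documentation): growing a volume
// to 200 GiB might look like the following; the volume ID is a placeholder.
//
//    out, err := client.ModifyVolume(&ec2.ModifyVolumeInput{
//        VolumeId: aws.String("vol-1234567890abcdef0"),
//        Size:     aws.Int64(200), // target size, in GiB
//    })
//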
-// For more information, see Modifying the Size, IOPS, or Type of an EBS Volume +// For more information, see Modifying the size, IOPS, or type of an EBS volume // on Linux (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-expand-volume.html) -// and Modifying the Size, IOPS, or Type of an EBS Volume on Windows (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ebs-expand-volume.html). +// and Modifying the size, IOPS, or type of an EBS volume on Windows (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ebs-expand-volume.html). // // If you reach the maximum volume modification rate per volume limit, you will // need to wait at least six hours before applying further modifications to @@ -31384,8 +34369,9 @@ func (c *EC2) ModifyVpnConnectionRequest(input *ModifyVpnConnectionInput) (req * // ModifyVpnConnection API operation for Amazon Elastic Compute Cloud. // -// Modifies the target gateway of an AWS Site-to-Site VPN connection. The following -// migration options are available: +// Modifies the customer gateway or the target gateway of an AWS Site-to-Site +// VPN connection. To modify the target gateway, the following migration options +// are available: // // * An existing virtual private gateway to a new virtual private gateway // @@ -31421,9 +34407,9 @@ func (c *EC2) ModifyVpnConnectionRequest(input *ModifyVpnConnectionInput) (req * // gateway route table. // // After you perform this operation, the AWS VPN endpoint's IP addresses on -// the AWS side and the tunnel options remain intact. Your s2slong; connection -// will be temporarily unavailable for approximately 10 minutes while we provision -// the new endpoints +// the AWS side and the tunnel options remain intact. Your AWS Site-to-Site +// VPN connection will be temporarily unavailable for a brief period while we +// provision the new endpoints. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -31453,6 +34439,85 @@ func (c *EC2) ModifyVpnConnectionWithContext(ctx aws.Context, input *ModifyVpnCo return out, req.Send() } +const opModifyVpnConnectionOptions = "ModifyVpnConnectionOptions" + +// ModifyVpnConnectionOptionsRequest generates a "aws/request.Request" representing the +// client's request for the ModifyVpnConnectionOptions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyVpnConnectionOptions for more information on using the ModifyVpnConnectionOptions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyVpnConnectionOptionsRequest method. 
+// req, resp := client.ModifyVpnConnectionOptionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpnConnectionOptions +func (c *EC2) ModifyVpnConnectionOptionsRequest(input *ModifyVpnConnectionOptionsInput) (req *request.Request, output *ModifyVpnConnectionOptionsOutput) { + op := &request.Operation{ + Name: opModifyVpnConnectionOptions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyVpnConnectionOptionsInput{} + } + + output = &ModifyVpnConnectionOptionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyVpnConnectionOptions API operation for Amazon Elastic Compute Cloud. +// +// Modifies the connection options for your Site-to-Site VPN connection. +// +// When you modify the VPN connection options, the VPN endpoint IP addresses +// on the AWS side do not change, and the tunnel options do not change. Your +// VPN connection will be temporarily unavailable for a brief period while the +// VPN connection is updated. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyVpnConnectionOptions for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpnConnectionOptions +func (c *EC2) ModifyVpnConnectionOptions(input *ModifyVpnConnectionOptionsInput) (*ModifyVpnConnectionOptionsOutput, error) { + req, out := c.ModifyVpnConnectionOptionsRequest(input) + return out, req.Send() +} + +// ModifyVpnConnectionOptionsWithContext is the same as ModifyVpnConnectionOptions with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyVpnConnectionOptions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyVpnConnectionOptionsWithContext(ctx aws.Context, input *ModifyVpnConnectionOptionsInput, opts ...request.Option) (*ModifyVpnConnectionOptionsOutput, error) { + req, out := c.ModifyVpnConnectionOptionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opModifyVpnTunnelCertificate = "ModifyVpnTunnelCertificate" // ModifyVpnTunnelCertificateRequest generates a "aws/request.Request" representing the @@ -31650,7 +34715,7 @@ func (c *EC2) MonitorInstancesRequest(input *MonitorInstancesInput) (req *reques // MonitorInstances API operation for Amazon Elastic Compute Cloud. // // Enables detailed monitoring for a running instance. Otherwise, basic monitoring -// is enabled. For more information, see Monitoring Your Instances and Volumes +// is enabled. For more information, see Monitoring your instances and volumes // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-cloudwatch.html) // in the Amazon Elastic Compute Cloud User Guide. // @@ -31808,9 +34873,10 @@ func (c *EC2) ProvisionByoipCidrRequest(input *ProvisionByoipCidrInput) (req *re // ProvisionByoipCidr API operation for Amazon Elastic Compute Cloud. 
// -// Provisions an address range for use with your AWS resources through bring -// your own IP addresses (BYOIP) and creates a corresponding address pool. After -// the address range is provisioned, it is ready to be advertised using AdvertiseByoipCidr. +// Provisions an IPv4 or IPv6 address range for use with your AWS resources +// through bring your own IP addresses (BYOIP) and creates a corresponding address +// pool. After the address range is provisioned, it is ready to be advertised +// using AdvertiseByoipCidr. // // AWS verifies that you own the address range and are authorized to advertise // it. You must ensure that the address range is registered to you and that @@ -31823,8 +34889,8 @@ func (c *EC2) ProvisionByoipCidrRequest(input *ProvisionByoipCidrInput) (req *re // immediately, but the address range is not ready to use until its status changes // from pending-provision to provisioned. To monitor the status of an address // range, use DescribeByoipCidrs. To allocate an Elastic IP address from your -// address pool, use AllocateAddress with either the specific address from the -// address pool or the ID of the address pool. +// IPv4 address pool, use AllocateAddress with either the specific address from +// the address pool or the ID of the address pool. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -32150,11 +35216,11 @@ func (c *EC2) RebootInstancesRequest(input *RebootInstancesInput) (req *request. // succeeds if the instances are valid and belong to you. Requests to reboot // terminated instances are ignored. // -// If an instance does not cleanly shut down within four minutes, Amazon EC2 +// If an instance does not cleanly shut down within a few minutes, Amazon EC2 // performs a hard reboot. // -// For more information about troubleshooting, see Getting Console Output and -// Rebooting Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-console.html) +// For more information about troubleshooting, see Getting console output and +// rebooting instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-console.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -32231,7 +35297,7 @@ func (c *EC2) RegisterImageRequest(input *RegisterImageInput) (req *request.Requ // // Registers an AMI. When you're creating an AMI, this is the final step you // must complete before you can launch an instance from the AMI. For more information -// about creating AMIs, see Creating Your Own AMIs (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/creating-an-ami.html) +// about creating AMIs, see Creating your own AMIs (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/creating-an-ami.html) // in the Amazon Elastic Compute Cloud User Guide. // // For Amazon EBS-backed instances, CreateImage creates and registers the AMI @@ -32239,31 +35305,34 @@ func (c *EC2) RegisterImageRequest(input *RegisterImageInput) (req *request.Requ // // You can also use RegisterImage to create an Amazon EBS-backed Linux AMI from // a snapshot of a root device volume. You specify the snapshot using the block -// device mapping. For more information, see Launching a Linux Instance from -// a Backup (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-launch-snapshot.html) +// device mapping. 
For more information, see Launching a Linux instance from +// a backup (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-launch-snapshot.html) // in the Amazon Elastic Compute Cloud User Guide. // -// You can't register an image where a secondary (non-root) snapshot has AWS -// Marketplace product codes. +// If any snapshots have AWS Marketplace product codes, they are copied to the +// new AMI. // // Windows and some Linux distributions, such as Red Hat Enterprise Linux (RHEL) // and SUSE Linux Enterprise Server (SLES), use the EC2 billing product code // associated with an AMI to verify the subscription status for package updates. // To create a new AMI for operating systems that require a billing product -// code, do the following: +// code, instead of registering the AMI, do the following to preserve the billing +// product code association: // // Launch an instance from an existing AMI with that billing product code. // // Customize the instance. // -// Create a new AMI from the instance using CreateImage to preserve the billing -// product code association. +// Create an AMI from the instance using CreateImage. // // If you purchase a Reserved Instance to apply to an On-Demand Instance that // was launched from an AMI with a billing product code, make sure that the // Reserved Instance has the matching billing product code. If you purchase // a Reserved Instance without the matching billing product code, the Reserved -// Instance will not be applied to the On-Demand Instance. +// Instance will not be applied to the On-Demand Instance. For information about +// how to obtain the platform details and billing information of an AMI, see +// Obtaining billing information (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-billing-info.html) +// in the Amazon Elastic Compute Cloud User Guide. // // If needed, you can deregister an AMI at any time. Any modifications you make // to an AMI backed by an instance store volume invalidates its registration. @@ -32298,6 +35367,83 @@ func (c *EC2) RegisterImageWithContext(ctx aws.Context, input *RegisterImageInpu return out, req.Send() } +const opRegisterInstanceEventNotificationAttributes = "RegisterInstanceEventNotificationAttributes" + +// RegisterInstanceEventNotificationAttributesRequest generates a "aws/request.Request" representing the +// client's request for the RegisterInstanceEventNotificationAttributes operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RegisterInstanceEventNotificationAttributes for more information on using the RegisterInstanceEventNotificationAttributes +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RegisterInstanceEventNotificationAttributesRequest method. 
+//    req, resp := client.RegisterInstanceEventNotificationAttributesRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RegisterInstanceEventNotificationAttributes
+func (c *EC2) RegisterInstanceEventNotificationAttributesRequest(input *RegisterInstanceEventNotificationAttributesInput) (req *request.Request, output *RegisterInstanceEventNotificationAttributesOutput) {
+	op := &request.Operation{
+		Name:       opRegisterInstanceEventNotificationAttributes,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &RegisterInstanceEventNotificationAttributesInput{}
+	}
+
+	output = &RegisterInstanceEventNotificationAttributesOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// RegisterInstanceEventNotificationAttributes API operation for Amazon Elastic Compute Cloud.
+//
+// Registers a set of tag keys to include in scheduled event notifications for
+// your resources.
+//
+// To remove tags, use DeregisterInstanceEventNotificationAttributes.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Elastic Compute Cloud's
+// API operation RegisterInstanceEventNotificationAttributes for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RegisterInstanceEventNotificationAttributes
+func (c *EC2) RegisterInstanceEventNotificationAttributes(input *RegisterInstanceEventNotificationAttributesInput) (*RegisterInstanceEventNotificationAttributesOutput, error) {
+	req, out := c.RegisterInstanceEventNotificationAttributesRequest(input)
+	return out, req.Send()
+}
+
+// RegisterInstanceEventNotificationAttributesWithContext is the same as RegisterInstanceEventNotificationAttributes with the addition of
+// the ability to pass a context and additional request options.
+//
+// See RegisterInstanceEventNotificationAttributes for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *EC2) RegisterInstanceEventNotificationAttributesWithContext(ctx aws.Context, input *RegisterInstanceEventNotificationAttributesInput, opts ...request.Option) (*RegisterInstanceEventNotificationAttributesOutput, error) {
+	req, out := c.RegisterInstanceEventNotificationAttributesRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
const opRegisterTransitGatewayMulticastGroupMembers = "RegisterTransitGatewayMulticastGroupMembers"

// RegisterTransitGatewayMulticastGroupMembersRequest generates a "aws/request.Request" representing the
@@ -33559,11 +36705,11 @@ func (c *EC2) RequestSpotFleetRequest(input *RequestSpotFleetInput) (req *reques
// ensuring that the Spot Instances in your Spot Fleet are in different Spot
// pools, you can improve the availability of your fleet.
//
-// You can specify tags for the Spot Instances. You cannot tag other resource
-// types in a Spot Fleet request because only the instance resource type is
-// supported.
+// You can specify tags for the Spot Fleet request and instances launched by
+// the fleet.
You cannot tag other resource types in a Spot Fleet request because +// only the spot-fleet-request and instance resource types are supported. // -// For more information, see Spot Fleet Requests (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet-requests.html) +// For more information, see Spot Fleet requests (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet-requests.html) // in the Amazon EC2 User Guide for Linux Instances. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -33640,7 +36786,7 @@ func (c *EC2) RequestSpotInstancesRequest(input *RequestSpotInstancesInput) (req // // Creates a Spot Instance request. // -// For more information, see Spot Instance Requests (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-requests.html) +// For more information, see Spot Instance requests (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-requests.html) // in the Amazon EC2 User Guide for Linux Instances. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -34109,7 +37255,7 @@ func (c *EC2) ResetSnapshotAttributeRequest(input *ResetSnapshotAttributeInput) // // Resets permission settings for the specified snapshot. // -// For more information about modifying snapshot permissions, see Sharing Snapshots +// For more information about modifying snapshot permissions, see Sharing snapshots // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-modifying-snapshot-permissions.html) // in the Amazon Elastic Compute Cloud User Guide. // @@ -34218,6 +37364,81 @@ func (c *EC2) RestoreAddressToClassicWithContext(ctx aws.Context, input *Restore return out, req.Send() } +const opRestoreManagedPrefixListVersion = "RestoreManagedPrefixListVersion" + +// RestoreManagedPrefixListVersionRequest generates a "aws/request.Request" representing the +// client's request for the RestoreManagedPrefixListVersion operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RestoreManagedPrefixListVersion for more information on using the RestoreManagedPrefixListVersion +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RestoreManagedPrefixListVersionRequest method. +// req, resp := client.RestoreManagedPrefixListVersionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RestoreManagedPrefixListVersion +func (c *EC2) RestoreManagedPrefixListVersionRequest(input *RestoreManagedPrefixListVersionInput) (req *request.Request, output *RestoreManagedPrefixListVersionOutput) { + op := &request.Operation{ + Name: opRestoreManagedPrefixListVersion, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RestoreManagedPrefixListVersionInput{} + } + + output = &RestoreManagedPrefixListVersionOutput{} + req = c.newRequest(op, input, output) + return +} + +// RestoreManagedPrefixListVersion API operation for Amazon Elastic Compute Cloud. 
+// +// Restores the entries from a previous version of a managed prefix list to +// a new version of the prefix list. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation RestoreManagedPrefixListVersion for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RestoreManagedPrefixListVersion +func (c *EC2) RestoreManagedPrefixListVersion(input *RestoreManagedPrefixListVersionInput) (*RestoreManagedPrefixListVersionOutput, error) { + req, out := c.RestoreManagedPrefixListVersionRequest(input) + return out, req.Send() +} + +// RestoreManagedPrefixListVersionWithContext is the same as RestoreManagedPrefixListVersion with the addition of +// the ability to pass a context and additional request options. +// +// See RestoreManagedPrefixListVersion for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) RestoreManagedPrefixListVersionWithContext(ctx aws.Context, input *RestoreManagedPrefixListVersionInput, opts ...request.Option) (*RestoreManagedPrefixListVersionOutput, error) { + req, out := c.RestoreManagedPrefixListVersionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opRevokeClientVpnIngress = "RevokeClientVpnIngress" // RevokeClientVpnIngressRequest generates a "aws/request.Request" representing the @@ -34331,16 +37552,22 @@ func (c *EC2) RevokeSecurityGroupEgressRequest(input *RevokeSecurityGroupEgressI output = &RevokeSecurityGroupEgressOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // RevokeSecurityGroupEgress API operation for Amazon Elastic Compute Cloud. // // [VPC only] Removes the specified egress rules from a security group for EC2-VPC. -// This action doesn't apply to security groups for use in EC2-Classic. To remove -// a rule, the values that you specify (for example, ports) must match the existing -// rule's values exactly. +// This action does not apply to security groups for use in EC2-Classic. To +// remove a rule, the values that you specify (for example, ports) must match +// the existing rule's values exactly. +// +// [Default VPC] If the values you specify do not match the existing rule's +// values, no error is returned, and the output describes the security group +// rules that were not revoked. +// +// AWS recommends that you use DescribeSecurityGroups to verify that the rule +// has been removed. // // Each rule consists of the protocol and the IPv4 or IPv6 CIDR range or source // security group. 
For the TCP and UDP protocols, you must also specify the @@ -34418,7 +37645,6 @@ func (c *EC2) RevokeSecurityGroupIngressRequest(input *RevokeSecurityGroupIngres output = &RevokeSecurityGroupIngressOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -34428,9 +37654,12 @@ func (c *EC2) RevokeSecurityGroupIngressRequest(input *RevokeSecurityGroupIngres // the values that you specify (for example, ports) must match the existing // rule's values exactly. // -// [EC2-Classic only] If the values you specify do not match the existing rule's -// values, no error is returned. Use DescribeSecurityGroups to verify that the -// rule has been removed. +// [EC2-Classic , default VPC] If the values you specify do not match the existing +// rule's values, no error is returned, and the output describes the security +// group rules that were not revoked. +// +// AWS recommends that you use DescribeSecurityGroups to verify that the rule +// has been removed. // // Each rule consists of the protocol and the CIDR range or source security // group. For the TCP and UDP protocols, you must also specify the destination @@ -34528,17 +37757,17 @@ func (c *EC2) RunInstancesRequest(input *RunInstancesInput) (req *request.Reques // // * Some instance types must be launched into a VPC. If you do not have // a default VPC, or if you do not specify a subnet ID, the request fails. -// For more information, see Instance Types Available Only in a VPC (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-vpc.html#vpc-only-instance-types). +// For more information, see Instance types available only in a VPC (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-vpc.html#vpc-only-instance-types). // // * [EC2-VPC] All instances have a network interface with a primary private // IPv4 address. If you don't specify this address, we choose one from the // IPv4 range of your subnet. // // * Not all instance types support IPv6 addresses. For more information, -// see Instance Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html). +// see Instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html). // // * If you don't specify a security group ID, we use the default security -// group. For more information, see Security Groups (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html). +// group. For more information, see Security groups (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html). // // * If any of the AMIs have a product code attached for which the user has // not subscribed, the request fails. @@ -34555,17 +37784,17 @@ func (c *EC2) RunInstancesRequest(input *RunInstancesInput) (req *request.Reques // An instance is ready for you to use when it's in the running state. You can // check the state of your instance using DescribeInstances. You can tag instances // and EBS volumes during launch, after launch, or both. For more information, -// see CreateTags and Tagging Your Amazon EC2 Resources (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html). +// see CreateTags and Tagging your Amazon EC2 resources (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html). // // Linux instances have access to the public key of the key pair at boot. You // can use this key to provide secure access to the instance. 
Amazon EC2 public // images use this feature to provide secure access without passwords. For more -// information, see Key Pairs (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) +// information, see Key pairs (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) // in the Amazon Elastic Compute Cloud User Guide. // -// For troubleshooting, see What To Do If An Instance Immediately Terminates +// For troubleshooting, see What to do if an instance immediately terminates // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_InstanceStraightToTerminated.html), -// and Troubleshooting Connecting to Your Instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstancesConnecting.html) +// and Troubleshooting connecting to your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstancesConnecting.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -34711,6 +37940,12 @@ func (c *EC2) SearchLocalGatewayRoutesRequest(input *SearchLocalGatewayRoutesInp Name: opSearchLocalGatewayRoutes, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -34754,6 +37989,58 @@ func (c *EC2) SearchLocalGatewayRoutesWithContext(ctx aws.Context, input *Search return out, req.Send() } +// SearchLocalGatewayRoutesPages iterates over the pages of a SearchLocalGatewayRoutes operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See SearchLocalGatewayRoutes method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a SearchLocalGatewayRoutes operation. +// pageNum := 0 +// err := client.SearchLocalGatewayRoutesPages(params, +// func(page *ec2.SearchLocalGatewayRoutesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) SearchLocalGatewayRoutesPages(input *SearchLocalGatewayRoutesInput, fn func(*SearchLocalGatewayRoutesOutput, bool) bool) error { + return c.SearchLocalGatewayRoutesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// SearchLocalGatewayRoutesPagesWithContext same as SearchLocalGatewayRoutesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) SearchLocalGatewayRoutesPagesWithContext(ctx aws.Context, input *SearchLocalGatewayRoutesInput, fn func(*SearchLocalGatewayRoutesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *SearchLocalGatewayRoutesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.SearchLocalGatewayRoutesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*SearchLocalGatewayRoutesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opSearchTransitGatewayMulticastGroups = "SearchTransitGatewayMulticastGroups" // SearchTransitGatewayMulticastGroupsRequest generates a "aws/request.Request" representing the @@ -34785,6 +38072,12 @@ func (c *EC2) SearchTransitGatewayMulticastGroupsRequest(input *SearchTransitGat Name: opSearchTransitGatewayMulticastGroups, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -34829,6 +38122,58 @@ func (c *EC2) SearchTransitGatewayMulticastGroupsWithContext(ctx aws.Context, in return out, req.Send() } +// SearchTransitGatewayMulticastGroupsPages iterates over the pages of a SearchTransitGatewayMulticastGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See SearchTransitGatewayMulticastGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a SearchTransitGatewayMulticastGroups operation. +// pageNum := 0 +// err := client.SearchTransitGatewayMulticastGroupsPages(params, +// func(page *ec2.SearchTransitGatewayMulticastGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) SearchTransitGatewayMulticastGroupsPages(input *SearchTransitGatewayMulticastGroupsInput, fn func(*SearchTransitGatewayMulticastGroupsOutput, bool) bool) error { + return c.SearchTransitGatewayMulticastGroupsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// SearchTransitGatewayMulticastGroupsPagesWithContext same as SearchTransitGatewayMulticastGroupsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) SearchTransitGatewayMulticastGroupsPagesWithContext(ctx aws.Context, input *SearchTransitGatewayMulticastGroupsInput, fn func(*SearchTransitGatewayMulticastGroupsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *SearchTransitGatewayMulticastGroupsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.SearchTransitGatewayMulticastGroupsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*SearchTransitGatewayMulticastGroupsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opSearchTransitGatewayRoutes = "SearchTransitGatewayRoutes" // SearchTransitGatewayRoutesRequest generates a "aws/request.Request" representing the @@ -34962,8 +38307,8 @@ func (c *EC2) SendDiagnosticInterruptRequest(input *SendDiagnosticInterruptInput // system is configured to perform the required diagnostic tasks. 
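//
// Editor's sketch (not part of the upstream documentation): triggering the
// diagnostic interrupt on a stuck instance might look like the following; the
// instance ID is a placeholder.
//
//    _, err := client.SendDiagnosticInterrupt(&ec2.SendDiagnosticInterruptInput{
//        InstanceId: aws.String("i-1234567890abcdef0"),
//    })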
// // For more information about configuring your operating system to generate -// a crash dump when a kernel panic or stop error occurs, see Send a Diagnostic -// Interrupt (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/diagnostic-interrupt.html) +// a crash dump when a kernel panic or stop error occurs, see Send a diagnostic +// interrupt (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/diagnostic-interrupt.html) // (Linux instances) or Send a Diagnostic Interrupt (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/diagnostic-interrupt.html) // (Windows instances). // @@ -35060,7 +38405,7 @@ func (c *EC2) StartInstancesRequest(input *StartInstancesInput) (req *request.Re // Performing this operation on an instance that uses an instance store as its // root device returns an error. // -// For more information, see Stopping Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Stop_Start.html) +// For more information, see Stopping instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Stop_Start.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -35143,7 +38488,7 @@ func (c *EC2) StartVpcEndpointServicePrivateDnsVerificationRequest(input *StartV // // Before the service provider runs this command, they must add a record to // the DNS server. For more information, see Adding a TXT Record to Your Domain's -// DNS Server (https://docs.aws.amazon.com/vpc/latest/userguide/ndpoint-services-dns-validation.html#add-dns-txt-record) +// DNS Server (https://docs.aws.amazon.com/vpc/latest/userguide/endpoint-services-dns-validation.html#add-dns-txt-record) // in the Amazon VPC User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -35223,7 +38568,7 @@ func (c *EC2) StopInstancesRequest(input *StopInstancesInput) (req *request.Requ // You can use the Stop action to hibernate an instance if the instance is enabled // for hibernation (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html#enabling-hibernation) // and it meets the hibernation prerequisites (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html#hibernating-prerequisites). -// For more information, see Hibernate Your Instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) +// For more information, see Hibernate your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) // in the Amazon Elastic Compute Cloud User Guide. // // We don't charge usage for a stopped instance, or data transfer fees; however, @@ -35236,9 +38581,10 @@ func (c *EC2) StopInstancesRequest(input *StopInstancesInput) (req *request.Requ // your Linux instance, Amazon EC2 charges a one-minute minimum for instance // usage, and thereafter charges per second for instance usage. // -// You can't start, stop, or hibernate Spot Instances, and you can't stop or -// hibernate instance store-backed instances. For information about using hibernation -// for Spot Instances, see Hibernating Interrupted Spot Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-interruptions.html#hibernate-spot-instances) +// You can't stop or hibernate instance store-backed instances. You can't use +// the Stop action to hibernate Spot Instances, but you can specify that Amazon +// EC2 should hibernate Spot Instances when they are interrupted. 
For more information, +// see Hibernating interrupted Spot Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-interruptions.html#hibernate-spot-instances) // in the Amazon Elastic Compute Cloud User Guide. // // When you stop or hibernate an instance, we shut it down. You can restart @@ -35254,13 +38600,13 @@ func (c *EC2) StopInstancesRequest(input *StopInstancesInput) (req *request.Requ // an instance, the root device and any other devices attached during the instance // launch are automatically deleted. For more information about the differences // between rebooting, stopping, hibernating, and terminating instances, see -// Instance Lifecycle (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html) +// Instance lifecycle (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html) // in the Amazon Elastic Compute Cloud User Guide. // // When you stop an instance, we attempt to shut it down forcibly after a short // while. If your instance appears stuck in the stopping state after a period // of time, there may be an issue with the underlying host computer. For more -// information, see Troubleshooting Stopping Your Instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstancesStopping.html) +// information, see Troubleshooting stopping your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstancesStopping.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -35430,11 +38776,11 @@ func (c *EC2) TerminateInstancesRequest(input *TerminateInstancesInput) (req *re // an instance, any attached EBS volumes with the DeleteOnTermination block // device mapping parameter set to true are automatically deleted. For more // information about the differences between stopping and terminating instances, -// see Instance Lifecycle (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html) +// see Instance lifecycle (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html) // in the Amazon Elastic Compute Cloud User Guide. // -// For more information about troubleshooting, see Troubleshooting Terminating -// Your Instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstancesShuttingDown.html) +// For more information about troubleshooting, see Troubleshooting terminating +// your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstancesShuttingDown.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -35659,7 +39005,7 @@ func (c *EC2) UnmonitorInstancesRequest(input *UnmonitorInstancesInput) (req *re // UnmonitorInstances API operation for Amazon Elastic Compute Cloud. // // Disables detailed monitoring for a running instance. For more information, -// see Monitoring Your Instances and Volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-cloudwatch.html) +// see Monitoring your instances and volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-cloudwatch.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -35894,8 +39240,7 @@ func (c *EC2) WithdrawByoipCidrRequest(input *WithdrawByoipCidrInput) (req *requ // WithdrawByoipCidr API operation for Amazon Elastic Compute Cloud. 
// -// Stops advertising an IPv4 address range that is provisioned as an address -// pool. +// Stops advertising an address range that is provisioned as an address pool. // // You can perform this operation at most once every 10 seconds, even if you // specify different address ranges each time. @@ -36429,7 +39774,57 @@ func (s *ActiveInstance) SetSpotInstanceRequestId(v string) *ActiveInstance { return s } -// Describes an Elastic IP address. +// An entry for a prefix list. +type AddPrefixListEntry struct { + _ struct{} `type:"structure"` + + // The CIDR block. + // + // Cidr is a required field + Cidr *string `type:"string" required:"true"` + + // A description for the entry. + // + // Constraints: Up to 255 characters in length. + Description *string `type:"string"` +} + +// String returns the string representation +func (s AddPrefixListEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddPrefixListEntry) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AddPrefixListEntry) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddPrefixListEntry"} + if s.Cidr == nil { + invalidParams.Add(request.NewErrParamRequired("Cidr")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCidr sets the Cidr field's value. +func (s *AddPrefixListEntry) SetCidr(v string) *AddPrefixListEntry { + s.Cidr = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *AddPrefixListEntry) SetDescription(v string) *AddPrefixListEntry { + s.Description = &v + return s +} + +// Describes an Elastic IP address, or a carrier IP address. type Address struct { _ struct{} `type:"structure"` @@ -36440,6 +39835,11 @@ type Address struct { // VPC. AssociationId *string `locationName:"associationId" type:"string"` + // The carrier IP address associated. This option is only available for network + // interfaces which reside in a subnet in a Wavelength Zone (for example an + // EC2 instance). + CarrierIp *string `locationName:"carrierIp" type:"string"` + // The customer-owned IP address. CustomerOwnedIp *string `locationName:"customerOwnedIp" type:"string"` @@ -36453,7 +39853,8 @@ type Address struct { // The ID of the instance that the address is associated with (if any). InstanceId *string `locationName:"instanceId" type:"string"` - // The name of the location from which the IP address is advertised. + // The name of the unique set of Availability Zones, Local Zones, or Wavelength + // Zones from which AWS advertises IP addresses. NetworkBorderGroup *string `locationName:"networkBorderGroup" type:"string"` // The ID of the network interface. @@ -36497,6 +39898,12 @@ func (s *Address) SetAssociationId(v string) *Address { return s } +// SetCarrierIp sets the CarrierIp field's value. +func (s *Address) SetCarrierIp(v string) *Address { + s.CarrierIp = &v + return s +} + // SetCustomerOwnedIp sets the CustomerOwnedIp field's value. func (s *Address) SetCustomerOwnedIp(v string) *Address { s.CustomerOwnedIp = &v @@ -36566,8 +39973,8 @@ func (s *Address) SetTags(v []*Tag) *Address { type AdvertiseByoipCidrInput struct { _ struct{} `type:"structure"` - // The IPv4 address range, in CIDR notation. This must be the exact range that - // you provisioned. You can't advertise only a portion of the provisioned range. + // The address range, in CIDR notation. 
This must be the exact range that you + // provisioned. You can't advertise only a portion of the provisioned range. // // Cidr is a required field Cidr *string `type:"string" required:"true"` @@ -36649,9 +40056,11 @@ type AllocateAddressInput struct { // address from the address pool. CustomerOwnedIpv4Pool *string `type:"string"` - // Set to vpc to allocate the address for use with instances in a VPC. + // Indicates whether the Elastic IP address is for use with instances in a VPC + // or instances in EC2-Classic. // - // Default: The address is for use with instances in EC2-Classic. + // Default: If the Region supports EC2-Classic, the default is standard. Otherwise, + // the default is vpc. Domain *string `type:"string" enum:"DomainType"` // Checks whether you have the required permissions for the action, without @@ -36660,10 +40069,11 @@ type AllocateAddressInput struct { // it is UnauthorizedOperation. DryRun *bool `locationName:"dryRun" type:"boolean"` - // The location from which the IP address is advertised. Use this parameter - // to limit the address to this location. + // A unique set of Availability Zones, Local Zones, or Wavelength Zones from + // which AWS advertises IP addresses. Use this parameter to limit the IP address + // to this location. IP addresses cannot move between network border groups. // - // Use DescribeVpcs (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcs.html) + // Use DescribeAvailabilityZones (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html) // to view the network border groups. // // You cannot use a network border group with EC2 Classic. If you attempt this @@ -36730,17 +40140,22 @@ type AllocateAddressOutput struct { // IP address for use with instances in a VPC. AllocationId *string `locationName:"allocationId" type:"string"` + // The carrier IP address. This option is only available for network interfaces + // which reside in a subnet in a Wavelength Zone (for example an EC2 instance). + CarrierIp *string `locationName:"carrierIp" type:"string"` + // The customer-owned IP address. CustomerOwnedIp *string `locationName:"customerOwnedIp" type:"string"` // The ID of the customer-owned address pool. CustomerOwnedIpv4Pool *string `locationName:"customerOwnedIpv4Pool" type:"string"` - // Indicates whether this Elastic IP address is for use with instances in EC2-Classic - // (standard) or instances in a VPC (vpc). + // Indicates whether the Elastic IP address is for use with instances in a VPC + // (vpc) or instances in EC2-Classic (standard). Domain *string `locationName:"domain" type:"string" enum:"DomainType"` - // The location from which the IP address is advertised. + // The set of Availability Zones, Local Zones, or Wavelength Zones from which + // AWS advertises IP addresses. NetworkBorderGroup *string `locationName:"networkBorderGroup" type:"string"` // The Elastic IP address. @@ -36766,6 +40181,12 @@ func (s *AllocateAddressOutput) SetAllocationId(v string) *AllocateAddressOutput return s } +// SetCarrierIp sets the CarrierIp field's value. +func (s *AllocateAddressOutput) SetCarrierIp(v string) *AllocateAddressOutput { + s.CarrierIp = &v + return s +} + // SetCustomerOwnedIp sets the CustomerOwnedIp field's value. 
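//
// Editor's sketch (placed between generated setters; not upstream text):
// allocating a VPC Elastic IP address, as described for AllocateAddress above,
// might look like the following.
//
//    out, err := client.AllocateAddress(&ec2.AllocateAddressInput{
//        Domain: aws.String("vpc"),
//    })
//    if err == nil {
//        fmt.Println(aws.StringValue(out.PublicIp))
//    }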
func (s *AllocateAddressOutput) SetCustomerOwnedIp(v string) *AllocateAddressOutput { s.CustomerOwnedIp = &v @@ -37616,6 +41037,109 @@ func (s AssociateDhcpOptionsOutput) GoString() string { return s.String() } +type AssociateEnclaveCertificateIamRoleInput struct { + _ struct{} `type:"structure"` + + // The ARN of the ACM certificate with which to associate the IAM role. + CertificateArn *string `min:"1" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ARN of the IAM role to associate with the ACM certificate. You can associate + // up to 16 IAM roles with an ACM certificate. + RoleArn *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s AssociateEnclaveCertificateIamRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateEnclaveCertificateIamRoleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssociateEnclaveCertificateIamRoleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssociateEnclaveCertificateIamRoleInput"} + if s.CertificateArn != nil && len(*s.CertificateArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CertificateArn", 1)) + } + if s.RoleArn != nil && len(*s.RoleArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCertificateArn sets the CertificateArn field's value. +func (s *AssociateEnclaveCertificateIamRoleInput) SetCertificateArn(v string) *AssociateEnclaveCertificateIamRoleInput { + s.CertificateArn = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *AssociateEnclaveCertificateIamRoleInput) SetDryRun(v bool) *AssociateEnclaveCertificateIamRoleInput { + s.DryRun = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AssociateEnclaveCertificateIamRoleInput) SetRoleArn(v string) *AssociateEnclaveCertificateIamRoleInput { + s.RoleArn = &v + return s +} + +type AssociateEnclaveCertificateIamRoleOutput struct { + _ struct{} `type:"structure"` + + // The name of the Amazon S3 bucket to which the certificate was uploaded. + CertificateS3BucketName *string `locationName:"certificateS3BucketName" type:"string"` + + // The Amazon S3 object key where the certificate, certificate chain, and encrypted + // private key bundle are stored. The object key is formatted as follows: certificate_arn/role_arn. + CertificateS3ObjectKey *string `locationName:"certificateS3ObjectKey" type:"string"` + + // The ID of the AWS KMS CMK used to encrypt the private key of the certificate. + EncryptionKmsKeyId *string `locationName:"encryptionKmsKeyId" type:"string"` +} + +// String returns the string representation +func (s AssociateEnclaveCertificateIamRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateEnclaveCertificateIamRoleOutput) GoString() string { + return s.String() +} + +// SetCertificateS3BucketName sets the CertificateS3BucketName field's value. 
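//
// Editor's sketch (placed between generated setters; not upstream text):
// associating an IAM role with an ACM certificate for use from an enclave
// might look like the following; both ARNs are placeholders.
//
//    out, err := client.AssociateEnclaveCertificateIamRole(&ec2.AssociateEnclaveCertificateIamRoleInput{
//        CertificateArn: aws.String("arn:aws:acm:us-east-1:123456789012:certificate/example"),
//        RoleArn:        aws.String("arn:aws:iam::123456789012:role/example-role"),
//    })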
+func (s *AssociateEnclaveCertificateIamRoleOutput) SetCertificateS3BucketName(v string) *AssociateEnclaveCertificateIamRoleOutput {
+ s.CertificateS3BucketName = &v
+ return s
+}
+
+// SetCertificateS3ObjectKey sets the CertificateS3ObjectKey field's value.
+func (s *AssociateEnclaveCertificateIamRoleOutput) SetCertificateS3ObjectKey(v string) *AssociateEnclaveCertificateIamRoleOutput {
+ s.CertificateS3ObjectKey = &v
+ return s
+}
+
+// SetEncryptionKmsKeyId sets the EncryptionKmsKeyId field's value.
+func (s *AssociateEnclaveCertificateIamRoleOutput) SetEncryptionKmsKeyId(v string) *AssociateEnclaveCertificateIamRoleOutput {
+ s.EncryptionKmsKeyId = &v
+ return s
+}
+
type AssociateIamInstanceProfileInput struct {
_ struct{} `type:"structure"`
@@ -38051,14 +41575,23 @@ type AssociateVpcCidrBlockInput struct {
// An IPv4 CIDR block to associate with the VPC.
CidrBlock *string `type:"string"`
+ // An IPv6 CIDR block from the IPv6 address pool. You must also specify Ipv6Pool
+ // in the request.
+ //
+ // To let Amazon choose the IPv6 CIDR block for you, omit this parameter.
+ Ipv6CidrBlock *string `type:"string"`
+
// The name of the location from which we advertise the IPV6 CIDR block. Use
- // this parameter to limit the CiDR block to this location.
+ // this parameter to limit the CIDR block to this location.
//
// You must set AmazonProvidedIpv6CidrBlock to true to use this parameter.
//
// You can have one IPv6 CIDR block association per network border group.
Ipv6CidrBlockNetworkBorderGroup *string `type:"string"`
+ // The ID of an IPv6 address pool from which to allocate the IPv6 CIDR block.
+ Ipv6Pool *string `type:"string"`
+
// The ID of the VPC.
//
// VpcId is a required field
@@ -38100,12 +41633,24 @@ func (s *AssociateVpcCidrBlockInput) SetCidrBlock(v string) *AssociateVpcCidrBlo
return s
}
+// SetIpv6CidrBlock sets the Ipv6CidrBlock field's value.
+func (s *AssociateVpcCidrBlockInput) SetIpv6CidrBlock(v string) *AssociateVpcCidrBlockInput {
+ s.Ipv6CidrBlock = &v
+ return s
+}
+
// SetIpv6CidrBlockNetworkBorderGroup sets the Ipv6CidrBlockNetworkBorderGroup field's value.
func (s *AssociateVpcCidrBlockInput) SetIpv6CidrBlockNetworkBorderGroup(v string) *AssociateVpcCidrBlockInput {
s.Ipv6CidrBlockNetworkBorderGroup = &v
return s
}
+// SetIpv6Pool sets the Ipv6Pool field's value.
+func (s *AssociateVpcCidrBlockInput) SetIpv6Pool(v string) *AssociateVpcCidrBlockInput {
+ s.Ipv6Pool = &v
+ return s
+}
+
// SetVpcId sets the VpcId field's value.
func (s *AssociateVpcCidrBlockInput) SetVpcId(v string) *AssociateVpcCidrBlockInput {
s.VpcId = &v
@@ -38153,6 +41698,59 @@ func (s *AssociateVpcCidrBlockOutput) SetVpcId(v string) *AssociateVpcCidrBlockO
return s
}
+// Information about the associated IAM roles.
+type AssociatedRole struct {
+ _ struct{} `type:"structure"`
+
+ // The ARN of the associated IAM role.
+ AssociatedRoleArn *string `locationName:"associatedRoleArn" min:"1" type:"string"`
+
+ // The name of the Amazon S3 bucket in which the Amazon S3 object is stored.
+ CertificateS3BucketName *string `locationName:"certificateS3BucketName" type:"string"`
+
+ // The key of the Amazon S3 object where the certificate, certificate chain,
+ // and encrypted private key bundle are stored. The object key is formatted as
+ // follows: certificate_arn/role_arn.
+ CertificateS3ObjectKey *string `locationName:"certificateS3ObjectKey" type:"string"`
+
+ // The ID of the KMS customer master key (CMK) used to encrypt the private key.
+ EncryptionKmsKeyId *string `locationName:"encryptionKmsKeyId" type:"string"` +} + +// String returns the string representation +func (s AssociatedRole) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociatedRole) GoString() string { + return s.String() +} + +// SetAssociatedRoleArn sets the AssociatedRoleArn field's value. +func (s *AssociatedRole) SetAssociatedRoleArn(v string) *AssociatedRole { + s.AssociatedRoleArn = &v + return s +} + +// SetCertificateS3BucketName sets the CertificateS3BucketName field's value. +func (s *AssociatedRole) SetCertificateS3BucketName(v string) *AssociatedRole { + s.CertificateS3BucketName = &v + return s +} + +// SetCertificateS3ObjectKey sets the CertificateS3ObjectKey field's value. +func (s *AssociatedRole) SetCertificateS3ObjectKey(v string) *AssociatedRole { + s.CertificateS3ObjectKey = &v + return s +} + +// SetEncryptionKmsKeyId sets the EncryptionKmsKeyId field's value. +func (s *AssociatedRole) SetEncryptionKmsKeyId(v string) *AssociatedRole { + s.EncryptionKmsKeyId = &v + return s +} + // Describes a target network that is associated with a Client VPN endpoint. // A target network is a subnet in a VPC. type AssociatedTargetNetwork struct { @@ -38420,6 +42018,11 @@ type AttachNetworkInterfaceInput struct { // InstanceId is a required field InstanceId *string `locationName:"instanceId" type:"string" required:"true"` + // The index of the network card. Some instance types support multiple network + // cards. The primary network interface must be assigned to network card index + // 0. The default is network card index 0. + NetworkCardIndex *int64 `type:"integer"` + // The ID of the network interface. // // NetworkInterfaceId is a required field @@ -38473,6 +42076,12 @@ func (s *AttachNetworkInterfaceInput) SetInstanceId(v string) *AttachNetworkInte return s } +// SetNetworkCardIndex sets the NetworkCardIndex field's value. +func (s *AttachNetworkInterfaceInput) SetNetworkCardIndex(v int64) *AttachNetworkInterfaceInput { + s.NetworkCardIndex = &v + return s +} + // SetNetworkInterfaceId sets the NetworkInterfaceId field's value. func (s *AttachNetworkInterfaceInput) SetNetworkInterfaceId(v string) *AttachNetworkInterfaceInput { s.NetworkInterfaceId = &v @@ -38485,6 +42094,9 @@ type AttachNetworkInterfaceOutput struct { // The ID of the network interface attachment. AttachmentId *string `locationName:"attachmentId" type:"string"` + + // The index of the network card. + NetworkCardIndex *int64 `locationName:"networkCardIndex" type:"integer"` } // String returns the string representation @@ -38503,6 +42115,12 @@ func (s *AttachNetworkInterfaceOutput) SetAttachmentId(v string) *AttachNetworkI return s } +// SetNetworkCardIndex sets the NetworkCardIndex field's value. +func (s *AttachNetworkInterfaceOutput) SetNetworkCardIndex(v int64) *AttachNetworkInterfaceOutput { + s.NetworkCardIndex = &v + return s +} + type AttachVolumeInput struct { _ struct{} `type:"structure"` @@ -38793,11 +42411,14 @@ func (s *AuthorizationRule) SetStatus(v *ClientVpnAuthorizationRuleStatus) *Auth type AuthorizeClientVpnIngressInput struct { _ struct{} `type:"structure"` - // The ID of the Active Directory group to grant access. + // The ID of the group to grant access to, for example, the Active Directory + // group or identity provider (IdP) group. Required if AuthorizeAllGroups is + // false or not specified. AccessGroupId *string `type:"string"` - // Indicates whether to grant access to all clients. 
Use true to grant all clients - // who successfully establish a VPN connection access to the network. + // Indicates whether to grant access to all clients. Specify true to grant all + // clients who successfully establish a VPN connection access to the network. + // Must be set to true if AccessGroupId is not specified. AuthorizeAllGroups *bool `type:"boolean"` // Unique, case-sensitive identifier that you provide to ensure the idempotency @@ -39205,38 +42826,52 @@ func (s AuthorizeSecurityGroupIngressOutput) GoString() string { return s.String() } -// Describes an Availability Zone or Local Zone. +// Describes Availability Zones, Local Zones, and Wavelength Zones. type AvailabilityZone struct { _ struct{} `type:"structure"` // For Availability Zones, this parameter has the same value as the Region name. // // For Local Zones, the name of the associated group, for example us-west-2-lax-1. + // + // For Wavelength Zones, the name of the associated group, for example us-east-1-wl1-bos-wlz-1. GroupName *string `locationName:"groupName" type:"string"` - // Any messages about the Availability Zone or Local Zone. + // Any messages about the Availability Zone, Local Zone, or Wavelength Zone. Messages []*AvailabilityZoneMessage `locationName:"messageSet" locationNameList:"item" type:"list"` - // The name of the location from which the address is advertised. + // The name of the network border group. NetworkBorderGroup *string `locationName:"networkBorderGroup" type:"string"` // For Availability Zones, this parameter always has the value of opt-in-not-required. // - // For Local Zones, this parameter is the opt in status. The possible values - // are opted-in, and not-opted-in. + // For Local Zones and Wavelength Zones, this parameter is the opt-in status. + // The possible values are opted-in, and not-opted-in. OptInStatus *string `locationName:"optInStatus" type:"string" enum:"AvailabilityZoneOptInStatus"` + // The ID of the zone that handles some of the Local Zone or Wavelength Zone + // control plane operations, such as API calls. + ParentZoneId *string `locationName:"parentZoneId" type:"string"` + + // The name of the zone that handles some of the Local Zone or Wavelength Zone + // control plane operations, such as API calls. + ParentZoneName *string `locationName:"parentZoneName" type:"string"` + // The name of the Region. RegionName *string `locationName:"regionName" type:"string"` - // The state of the Availability Zone or Local Zone. + // The state of the Availability Zone, Local Zone, or Wavelength Zone. State *string `locationName:"zoneState" type:"string" enum:"AvailabilityZoneState"` - // The ID of the Availability Zone or Local Zone. + // The ID of the Availability Zone, Local Zone, or Wavelength Zone. ZoneId *string `locationName:"zoneId" type:"string"` - // The name of the Availability Zone or Local Zone. + // The name of the Availability Zone, Local Zone, or Wavelength Zone. ZoneName *string `locationName:"zoneName" type:"string"` + + // The type of zone. The valid values are availability-zone, local-zone, and + // wavelength-zone. + ZoneType *string `locationName:"zoneType" type:"string"` } // String returns the string representation @@ -39273,6 +42908,18 @@ func (s *AvailabilityZone) SetOptInStatus(v string) *AvailabilityZone { return s } +// SetParentZoneId sets the ParentZoneId field's value. +func (s *AvailabilityZone) SetParentZoneId(v string) *AvailabilityZone { + s.ParentZoneId = &v + return s +} + +// SetParentZoneName sets the ParentZoneName field's value. 
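// Usage sketch (illustrative): listing Wavelength Zones and their parent zones
// with DescribeAvailabilityZones, which the AllocateAddress documentation above
// points to for network border groups. The "zone-type" filter name is an
// assumption based on the ZoneType values documented above.
//
//	zones, err := svc.DescribeAvailabilityZones(&ec2.DescribeAvailabilityZonesInput{
//		AllAvailabilityZones: aws.Bool(true),
//		Filters: []*ec2.Filter{{
//			Name:   aws.String("zone-type"),
//			Values: []*string{aws.String("wavelength-zone")},
//		}},
//	})
//	if err == nil {
//		for _, z := range zones.AvailabilityZones {
//			fmt.Println(aws.StringValue(z.ZoneName),
//				aws.StringValue(z.GroupName),
//				aws.StringValue(z.ParentZoneName))
//		}
//	}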
+func (s *AvailabilityZone) SetParentZoneName(v string) *AvailabilityZone { + s.ParentZoneName = &v + return s +} + // SetRegionName sets the RegionName field's value. func (s *AvailabilityZone) SetRegionName(v string) *AvailabilityZone { s.RegionName = &v @@ -39297,11 +42944,18 @@ func (s *AvailabilityZone) SetZoneName(v string) *AvailabilityZone { return s } -// Describes a message about an Availability Zone or Local Zone. +// SetZoneType sets the ZoneType field's value. +func (s *AvailabilityZone) SetZoneType(v string) *AvailabilityZone { + s.ZoneType = &v + return s +} + +// Describes a message about an Availability Zone, Local Zone, or Wavelength +// Zone. type AvailabilityZoneMessage struct { _ struct{} `type:"structure"` - // The message about the Availability Zone or Local Zone. + // The message about the Availability Zone, Local Zone, or Wavelength Zone. Message *string `locationName:"message" type:"string"` } @@ -39669,7 +43323,7 @@ func (s *BundleTaskError) SetMessage(v string) *BundleTaskError { type ByoipCidr struct { _ struct{} `type:"structure"` - // The public IPv4 address range, in CIDR notation. + // The address range, in CIDR notation. Cidr *string `locationName:"cidr" type:"string"` // The description of the address range. @@ -40684,6 +44338,39 @@ func (s *CapacityReservation) SetTotalInstanceCount(v int64) *CapacityReservatio return s } +// Describes a resource group to which a Capacity Reservation has been added. +type CapacityReservationGroup struct { + _ struct{} `type:"structure"` + + // The ARN of the resource group. + GroupArn *string `locationName:"groupArn" type:"string"` + + // The ID of the AWS account that owns the resource group. + OwnerId *string `locationName:"ownerId" type:"string"` +} + +// String returns the string representation +func (s CapacityReservationGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CapacityReservationGroup) GoString() string { + return s.String() +} + +// SetGroupArn sets the GroupArn field's value. +func (s *CapacityReservationGroup) SetGroupArn(v string) *CapacityReservationGroup { + s.GroupArn = &v + return s +} + +// SetOwnerId sets the OwnerId field's value. +func (s *CapacityReservationGroup) SetOwnerId(v string) *CapacityReservationGroup { + s.OwnerId = &v + return s +} + // Describes the strategy for using unused Capacity Reservations for fulfilling // On-Demand capacity. // @@ -40692,7 +44379,7 @@ func (s *CapacityReservation) SetTotalInstanceCount(v int64) *CapacityReservatio // For more information about Capacity Reservations, see On-Demand Capacity // Reservations (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-capacity-reservations.html) // in the Amazon Elastic Compute Cloud User Guide. For examples of using Capacity -// Reservations in an EC2 Fleet, see EC2 Fleet Example Configurations (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-examples.html) +// Reservations in an EC2 Fleet, see EC2 Fleet example configurations (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-examples.html) // in the Amazon Elastic Compute Cloud User Guide. 
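// Usage sketch (illustrative): opting an EC2 Fleet into consuming unused
// Capacity Reservations first via the UsageStrategy described above. Only the
// relevant On-Demand options are shown; a complete CreateFleetInput also needs
// launch template configs and a target capacity.
//
//	onDemand := &ec2.OnDemandOptionsRequest{
//		CapacityReservationOptions: &ec2.CapacityReservationOptionsRequest{
//			UsageStrategy: aws.String("use-capacity-reservations-first"),
//		},
//	}
//	// Assign onDemand to the OnDemandOptions field of ec2.CreateFleetInput.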
type CapacityReservationOptions struct {
_ struct{} `type:"structure"`
@@ -40737,7 +44424,7 @@ func (s *CapacityReservationOptions) SetUsageStrategy(v string) *CapacityReserva
// For more information about Capacity Reservations, see On-Demand Capacity
// Reservations (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-capacity-reservations.html)
// in the Amazon Elastic Compute Cloud User Guide. For examples of using Capacity
-// Reservations in an EC2 Fleet, see EC2 Fleet Example Configurations (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-examples.html)
+// Reservations in an EC2 Fleet, see EC2 Fleet example configurations (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-examples.html)
// in the Amazon Elastic Compute Cloud User Guide.
type CapacityReservationOptionsRequest struct {
_ struct{} `type:"structure"`
@@ -40782,7 +44469,7 @@ func (s *CapacityReservationOptionsRequest) SetUsageStrategy(v string) *Capacity
// to run as an On-Demand Instance or to run in any open Capacity Reservation
// that has matching attributes (instance type, platform, Availability Zone).
// Use the CapacityReservationTarget parameter to explicitly target a specific
-// Capacity Reservation.
+// Capacity Reservation or a Capacity Reservation group.
type CapacityReservationSpecification struct {
_ struct{} `type:"structure"`
@@ -40796,7 +44483,8 @@ type CapacityReservationSpecification struct {
// one is available. The instance runs as an On-Demand Instance.
CapacityReservationPreference *string `type:"string" enum:"CapacityReservationPreference"`
- // Information about the target Capacity Reservation.
+ // Information about the target Capacity Reservation or Capacity Reservation
+ // group.
CapacityReservationTarget *CapacityReservationTarget `type:"structure"`
}
@@ -40827,7 +44515,8 @@ func (s *CapacityReservationSpecification) SetCapacityReservationTarget(v *Capac
// instance is configured to run in On-Demand capacity, or if it is configured
// in run in any open Capacity Reservation that has matching attributes (instance
// type, platform, Availability Zone). The action returns the capacityReservationTarget
-// response element if the instance explicily targets a specific Capacity Reservation.
+// response element if the instance explicitly targets a specific Capacity Reservation
+// or Capacity Reservation group.
type CapacityReservationSpecificationResponse struct {
_ struct{} `type:"structure"`
@@ -40841,7 +44530,8 @@ type CapacityReservationSpecificationResponse struct {
// one is available. The instance runs in On-Demand capacity.
CapacityReservationPreference *string `locationName:"capacityReservationPreference" type:"string" enum:"CapacityReservationPreference"`
- // Information about the targeted Capacity Reservation.
+ // Information about the targeted Capacity Reservation or Capacity Reservation
+ // group.
CapacityReservationTarget *CapacityReservationTargetResponse `locationName:"capacityReservationTarget" type:"structure"`
}
@@ -40867,12 +44557,15 @@ func (s *CapacityReservationSpecificationResponse) SetCapacityReservationTarget(
return s
}
-// Describes a target Capacity Reservation.
+// Describes a target Capacity Reservation or Capacity Reservation group.
type CapacityReservationTarget struct {
_ struct{} `type:"structure"`
- // The ID of the Capacity Reservation.
+ // The ID of the Capacity Reservation in which to run the instance.
CapacityReservationId *string `type:"string"` + + // The ARN of the Capacity Reservation resource group in which to run the instance. + CapacityReservationResourceGroupArn *string `type:"string"` } // String returns the string representation @@ -40891,12 +44584,21 @@ func (s *CapacityReservationTarget) SetCapacityReservationId(v string) *Capacity return s } -// Describes a target Capacity Reservation. +// SetCapacityReservationResourceGroupArn sets the CapacityReservationResourceGroupArn field's value. +func (s *CapacityReservationTarget) SetCapacityReservationResourceGroupArn(v string) *CapacityReservationTarget { + s.CapacityReservationResourceGroupArn = &v + return s +} + +// Describes a target Capacity Reservation or Capacity Reservation group. type CapacityReservationTargetResponse struct { _ struct{} `type:"structure"` - // The ID of the Capacity Reservation. + // The ID of the targeted Capacity Reservation. CapacityReservationId *string `locationName:"capacityReservationId" type:"string"` + + // The ARN of the targeted Capacity Reservation group. + CapacityReservationResourceGroupArn *string `locationName:"capacityReservationResourceGroupArn" type:"string"` } // String returns the string representation @@ -40915,6 +44617,72 @@ func (s *CapacityReservationTargetResponse) SetCapacityReservationId(v string) * return s } +// SetCapacityReservationResourceGroupArn sets the CapacityReservationResourceGroupArn field's value. +func (s *CapacityReservationTargetResponse) SetCapacityReservationResourceGroupArn(v string) *CapacityReservationTargetResponse { + s.CapacityReservationResourceGroupArn = &v + return s +} + +// Describes a carrier gateway. +type CarrierGateway struct { + _ struct{} `type:"structure"` + + // The ID of the carrier gateway. + CarrierGatewayId *string `locationName:"carrierGatewayId" type:"string"` + + // The AWS account ID of the owner of the carrier gateway. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The state of the carrier gateway. + State *string `locationName:"state" type:"string" enum:"CarrierGatewayState"` + + // The tags assigned to the carrier gateway. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the VPC associated with the carrier gateway. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s CarrierGateway) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CarrierGateway) GoString() string { + return s.String() +} + +// SetCarrierGatewayId sets the CarrierGatewayId field's value. +func (s *CarrierGateway) SetCarrierGatewayId(v string) *CarrierGateway { + s.CarrierGatewayId = &v + return s +} + +// SetOwnerId sets the OwnerId field's value. +func (s *CarrierGateway) SetOwnerId(v string) *CarrierGateway { + s.OwnerId = &v + return s +} + +// SetState sets the State field's value. +func (s *CarrierGateway) SetState(v string) *CarrierGateway { + s.State = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CarrierGateway) SetTags(v []*Tag) *CarrierGateway { + s.Tags = v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *CarrierGateway) SetVpcId(v string) *CarrierGateway { + s.VpcId = &v + return s +} + // Information about the client certificate used for authentication. 
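// Usage sketch (illustrative): targeting a Capacity Reservation group at launch
// through the CapacityReservationSpecification and CapacityReservationTarget
// shapes above. The AMI ID and group ARN are placeholders.
//
//	_, err := svc.RunInstances(&ec2.RunInstancesInput{
//		ImageId:      aws.String("ami-0123456789abcdef0"), // placeholder
//		InstanceType: aws.String("m5.large"),
//		MinCount:     aws.Int64(1),
//		MaxCount:     aws.Int64(1),
//		CapacityReservationSpecification: &ec2.CapacityReservationSpecification{
//			CapacityReservationTarget: &ec2.CapacityReservationTarget{
//				CapacityReservationResourceGroupArn: aws.String("arn:aws:resource-groups:us-east-1:123456789012:group/example"),
//			},
//		},
//	})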
type CertificateAuthentication struct { _ struct{} `type:"structure"` @@ -41225,6 +44993,84 @@ func (s *ClientCertificateRevocationListStatus) SetMessage(v string) *ClientCert return s } +// The options for managing connection authorization for new client connections. +type ClientConnectOptions struct { + _ struct{} `type:"structure"` + + // Indicates whether client connect options are enabled. The default is false + // (not enabled). + Enabled *bool `type:"boolean"` + + // The Amazon Resource Name (ARN) of the AWS Lambda function used for connection + // authorization. + LambdaFunctionArn *string `type:"string"` +} + +// String returns the string representation +func (s ClientConnectOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClientConnectOptions) GoString() string { + return s.String() +} + +// SetEnabled sets the Enabled field's value. +func (s *ClientConnectOptions) SetEnabled(v bool) *ClientConnectOptions { + s.Enabled = &v + return s +} + +// SetLambdaFunctionArn sets the LambdaFunctionArn field's value. +func (s *ClientConnectOptions) SetLambdaFunctionArn(v string) *ClientConnectOptions { + s.LambdaFunctionArn = &v + return s +} + +// The options for managing connection authorization for new client connections. +type ClientConnectResponseOptions struct { + _ struct{} `type:"structure"` + + // Indicates whether client connect options are enabled. + Enabled *bool `locationName:"enabled" type:"boolean"` + + // The Amazon Resource Name (ARN) of the AWS Lambda function used for connection + // authorization. + LambdaFunctionArn *string `locationName:"lambdaFunctionArn" type:"string"` + + // The status of any updates to the client connect options. + Status *ClientVpnEndpointAttributeStatus `locationName:"status" type:"structure"` +} + +// String returns the string representation +func (s ClientConnectResponseOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClientConnectResponseOptions) GoString() string { + return s.String() +} + +// SetEnabled sets the Enabled field's value. +func (s *ClientConnectResponseOptions) SetEnabled(v bool) *ClientConnectResponseOptions { + s.Enabled = &v + return s +} + +// SetLambdaFunctionArn sets the LambdaFunctionArn field's value. +func (s *ClientConnectResponseOptions) SetLambdaFunctionArn(v string) *ClientConnectResponseOptions { + s.LambdaFunctionArn = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ClientConnectResponseOptions) SetStatus(v *ClientVpnEndpointAttributeStatus) *ClientConnectResponseOptions { + s.Status = v + return s +} + // Describes the client-specific data. type ClientData struct { _ struct{} `type:"structure"` @@ -41276,9 +45122,8 @@ func (s *ClientData) SetUploadStart(v time.Time) *ClientData { return s } -// Describes the authentication methods used by a Client VPN endpoint. Client -// VPN supports Active Directory and mutual authentication. For more information, -// see Authentication (https://docs.aws.amazon.com/vpn/latest/clientvpn-admin/authentication-authrization.html#client-authentication) +// Describes the authentication methods used by a Client VPN endpoint. For more +// information, see Authentication (https://docs.aws.amazon.com/vpn/latest/clientvpn-admin/client-authentication.html) // in the AWS Client VPN Administrator Guide. 
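// Usage sketch (illustrative): enabling a client connect handler. The resulting
// value plugs into the ClientConnectOptions field of CreateClientVpnEndpointInput
// (defined later in this diff); the Lambda function ARN is a placeholder.
//
//	connectOpts := &ec2.ClientConnectOptions{
//		Enabled:           aws.Bool(true),
//		LambdaFunctionArn: aws.String("arn:aws:lambda:us-east-1:123456789012:function:AWSClientVPN-authorize"), // placeholder
//	}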
type ClientVpnAuthentication struct { _ struct{} `type:"structure"` @@ -41286,6 +45131,9 @@ type ClientVpnAuthentication struct { // Information about the Active Directory, if applicable. ActiveDirectory *DirectoryServiceAuthentication `locationName:"activeDirectory" type:"structure"` + // Information about the IAM SAML identity provider, if applicable. + FederatedAuthentication *FederatedAuthentication `locationName:"federatedAuthentication" type:"structure"` + // Information about the authentication certificates, if applicable. MutualAuthentication *CertificateAuthentication `locationName:"mutualAuthentication" type:"structure"` @@ -41309,6 +45157,12 @@ func (s *ClientVpnAuthentication) SetActiveDirectory(v *DirectoryServiceAuthenti return s } +// SetFederatedAuthentication sets the FederatedAuthentication field's value. +func (s *ClientVpnAuthentication) SetFederatedAuthentication(v *FederatedAuthentication) *ClientVpnAuthentication { + s.FederatedAuthentication = v + return s +} + // SetMutualAuthentication sets the MutualAuthentication field's value. func (s *ClientVpnAuthentication) SetMutualAuthentication(v *CertificateAuthentication) *ClientVpnAuthentication { s.MutualAuthentication = v @@ -41322,8 +45176,7 @@ func (s *ClientVpnAuthentication) SetType(v string) *ClientVpnAuthentication { } // Describes the authentication method to be used by a Client VPN endpoint. -// Client VPN supports Active Directory and mutual authentication. For more -// information, see Authentication (https://docs.aws.amazon.com/vpn/latest/clientvpn-admin/authentication-authrization.html#client-authentication) +// For more information, see Authentication (https://docs.aws.amazon.com/vpn/latest/clientvpn-admin/authentication-authrization.html#client-authentication) // in the AWS Client VPN Administrator Guide. type ClientVpnAuthenticationRequest struct { _ struct{} `type:"structure"` @@ -41332,13 +45185,15 @@ type ClientVpnAuthenticationRequest struct { // provide this information if Type is directory-service-authentication. ActiveDirectory *DirectoryServiceAuthenticationRequest `type:"structure"` + // Information about the IAM SAML identity provider to be used, if applicable. + // You must provide this information if Type is federated-authentication. + FederatedAuthentication *FederatedAuthenticationRequest `type:"structure"` + // Information about the authentication certificates to be used, if applicable. // You must provide this information if Type is certificate-authentication. MutualAuthentication *CertificateAuthenticationRequest `type:"structure"` - // The type of client authentication to be used. Specify certificate-authentication - // to use certificate-based authentication, or directory-service-authentication - // to use Active Directory authentication. + // The type of client authentication to be used. Type *string `type:"string" enum:"ClientVpnAuthenticationType"` } @@ -41358,6 +45213,12 @@ func (s *ClientVpnAuthenticationRequest) SetActiveDirectory(v *DirectoryServiceA return s } +// SetFederatedAuthentication sets the FederatedAuthentication field's value. +func (s *ClientVpnAuthenticationRequest) SetFederatedAuthentication(v *FederatedAuthenticationRequest) *ClientVpnAuthenticationRequest { + s.FederatedAuthentication = v + return s +} + // SetMutualAuthentication sets the MutualAuthentication field's value. 
func (s *ClientVpnAuthenticationRequest) SetMutualAuthentication(v *CertificateAuthenticationRequest) *ClientVpnAuthenticationRequest { s.MutualAuthentication = v @@ -41438,6 +45299,10 @@ type ClientVpnConnection struct { // The number of packets sent by the client. IngressPackets *string `locationName:"ingressPackets" type:"string"` + // The statuses returned by the client connect handler for posture compliance, + // if applicable. + PostureComplianceStatuses []*string `locationName:"postureComplianceStatusSet" locationNameList:"item" type:"list"` + // The current state of the client connection. Status *ClientVpnConnectionStatus `locationName:"status" type:"structure"` @@ -41519,6 +45384,12 @@ func (s *ClientVpnConnection) SetIngressPackets(v string) *ClientVpnConnection { return s } +// SetPostureComplianceStatuses sets the PostureComplianceStatuses field's value. +func (s *ClientVpnConnection) SetPostureComplianceStatuses(v []*string) *ClientVpnConnection { + s.PostureComplianceStatuses = v + return s +} + // SetStatus sets the Status field's value. func (s *ClientVpnConnection) SetStatus(v *ClientVpnConnectionStatus) *ClientVpnConnection { s.Status = v @@ -41587,6 +45458,9 @@ type ClientVpnEndpoint struct { // are assigned. ClientCidrBlock *string `locationName:"clientCidrBlock" type:"string"` + // The options for managing connection authorization for new client connections. + ClientConnectOptions *ClientConnectResponseOptions `locationName:"clientConnectOptions" type:"structure"` + // The ID of the Client VPN endpoint. ClientVpnEndpointId *string `locationName:"clientVpnEndpointId" type:"string"` @@ -41609,6 +45483,12 @@ type ClientVpnEndpoint struct { // Information about the DNS servers to be used for DNS resolution. DnsServers []*string `locationName:"dnsServer" locationNameList:"item" type:"list"` + // The IDs of the security groups for the target network. + SecurityGroupIds []*string `locationName:"securityGroupIdSet" locationNameList:"item" type:"list"` + + // The URL of the self-service portal. + SelfServicePortalUrl *string `locationName:"selfServicePortalUrl" type:"string"` + // The ARN of the server certificate. ServerCertificateArn *string `locationName:"serverCertificateArn" type:"string"` @@ -41628,6 +45508,12 @@ type ClientVpnEndpoint struct { // The transport protocol used by the Client VPN endpoint. TransportProtocol *string `locationName:"transportProtocol" type:"string" enum:"TransportProtocol"` + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string"` + + // The port number for the Client VPN endpoint. + VpnPort *int64 `locationName:"vpnPort" type:"integer"` + // The protocol used by the VPN session. VpnProtocol *string `locationName:"vpnProtocol" type:"string" enum:"VpnProtocol"` } @@ -41660,6 +45546,12 @@ func (s *ClientVpnEndpoint) SetClientCidrBlock(v string) *ClientVpnEndpoint { return s } +// SetClientConnectOptions sets the ClientConnectOptions field's value. +func (s *ClientVpnEndpoint) SetClientConnectOptions(v *ClientConnectResponseOptions) *ClientVpnEndpoint { + s.ClientConnectOptions = v + return s +} + // SetClientVpnEndpointId sets the ClientVpnEndpointId field's value. func (s *ClientVpnEndpoint) SetClientVpnEndpointId(v string) *ClientVpnEndpoint { s.ClientVpnEndpointId = &v @@ -41702,6 +45594,18 @@ func (s *ClientVpnEndpoint) SetDnsServers(v []*string) *ClientVpnEndpoint { return s } +// SetSecurityGroupIds sets the SecurityGroupIds field's value. 
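// Usage sketch (illustrative): creating a Client VPN endpoint that uses the
// VPC-level fields surfaced above (VpcId, SecurityGroupIds, VpnPort, and the
// self-service portal). ARNs and IDs are placeholders; mutual authentication
// and disabled connection logging are shown minimally.
//
//	_, err := svc.CreateClientVpnEndpoint(&ec2.CreateClientVpnEndpointInput{
//		ClientCidrBlock:      aws.String("10.100.0.0/16"),
//		ServerCertificateArn: aws.String("arn:aws:acm:us-east-1:123456789012:certificate/server"),
//		AuthenticationOptions: []*ec2.ClientVpnAuthenticationRequest{{
//			Type: aws.String("certificate-authentication"),
//			MutualAuthentication: &ec2.CertificateAuthenticationRequest{
//				ClientRootCertificateChainArn: aws.String("arn:aws:acm:us-east-1:123456789012:certificate/root"),
//			},
//		}},
//		ConnectionLogOptions: &ec2.ConnectionLogOptions{Enabled: aws.Bool(false)},
//		VpcId:                aws.String("vpc-0123456789abcdef0"),
//		SecurityGroupIds:     []*string{aws.String("sg-0123456789abcdef0")},
//		VpnPort:              aws.Int64(443),
//		SelfServicePortal:    aws.String("enabled"),
//	})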
+func (s *ClientVpnEndpoint) SetSecurityGroupIds(v []*string) *ClientVpnEndpoint { + s.SecurityGroupIds = v + return s +} + +// SetSelfServicePortalUrl sets the SelfServicePortalUrl field's value. +func (s *ClientVpnEndpoint) SetSelfServicePortalUrl(v string) *ClientVpnEndpoint { + s.SelfServicePortalUrl = &v + return s +} + // SetServerCertificateArn sets the ServerCertificateArn field's value. func (s *ClientVpnEndpoint) SetServerCertificateArn(v string) *ClientVpnEndpoint { s.ServerCertificateArn = &v @@ -41732,12 +45636,57 @@ func (s *ClientVpnEndpoint) SetTransportProtocol(v string) *ClientVpnEndpoint { return s } +// SetVpcId sets the VpcId field's value. +func (s *ClientVpnEndpoint) SetVpcId(v string) *ClientVpnEndpoint { + s.VpcId = &v + return s +} + +// SetVpnPort sets the VpnPort field's value. +func (s *ClientVpnEndpoint) SetVpnPort(v int64) *ClientVpnEndpoint { + s.VpnPort = &v + return s +} + // SetVpnProtocol sets the VpnProtocol field's value. func (s *ClientVpnEndpoint) SetVpnProtocol(v string) *ClientVpnEndpoint { s.VpnProtocol = &v return s } +// Describes the status of the Client VPN endpoint attribute. +type ClientVpnEndpointAttributeStatus struct { + _ struct{} `type:"structure"` + + // The status code. + Code *string `locationName:"code" type:"string" enum:"ClientVpnEndpointAttributeStatusCode"` + + // The status message. + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ClientVpnEndpointAttributeStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClientVpnEndpointAttributeStatus) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *ClientVpnEndpointAttributeStatus) SetCode(v string) *ClientVpnEndpointAttributeStatus { + s.Code = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *ClientVpnEndpointAttributeStatus) SetMessage(v string) *ClientVpnEndpointAttributeStatus { + s.Message = &v + return s +} + // Describes the state of a Client VPN endpoint. type ClientVpnEndpointStatus struct { _ struct{} `type:"structure"` @@ -41956,6 +45905,9 @@ type CoipPool struct { // The ID of the local gateway route table. LocalGatewayRouteTableId *string `locationName:"localGatewayRouteTableId" type:"string"` + // The ARN of the address pool. + PoolArn *string `locationName:"poolArn" min:"1" type:"string"` + // The address ranges of the address pool. PoolCidrs []*string `locationName:"poolCidrSet" locationNameList:"item" type:"list"` @@ -41982,6 +45934,12 @@ func (s *CoipPool) SetLocalGatewayRouteTableId(v string) *CoipPool { return s } +// SetPoolArn sets the PoolArn field's value. +func (s *CoipPool) SetPoolArn(v string) *CoipPool { + s.PoolArn = &v + return s +} + // SetPoolCidrs sets the PoolCidrs field's value. func (s *CoipPool) SetPoolCidrs(v []*string) *CoipPool { s.PoolCidrs = v @@ -42102,7 +46060,8 @@ func (s *ConfirmProductInstanceOutput) SetReturn(v bool) *ConfirmProductInstance type ConnectionLogOptions struct { _ struct{} `type:"structure"` - // The name of the CloudWatch Logs log group. + // The name of the CloudWatch Logs log group. Required if connection logging + // is enabled. CloudwatchLogGroup *string `type:"string"` // The name of the CloudWatch Logs log stream to which the connection data is @@ -42489,29 +46448,25 @@ type CopyImageInput struct { // in the Amazon Elastic Compute Cloud User Guide. 
Encrypted *bool `locationName:"encrypted" type:"boolean"` - // An identifier for the symmetric AWS Key Management Service (AWS KMS) customer - // master key (CMK) to use when creating the encrypted volume. This parameter - // is only required if you want to use a non-default CMK; if this parameter - // is not specified, the default CMK for EBS is used. If a KmsKeyId is specified, - // the Encrypted flag must also be set. + // The identifier of the symmetric AWS Key Management Service (AWS KMS) customer + // master key (CMK) to use when creating encrypted volumes. If this parameter + // is not specified, your AWS managed CMK for EBS is used. If you specify a + // CMK, you must also set the encrypted state to true. // - // To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, - // or alias ARN. When using an alias name, prefix it with "alias/". For example: + // You can specify a CMK using any of the following: // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // * Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab. // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // * Key alias. For example, alias/ExampleAlias. // - // * Alias name: alias/ExampleAlias + // * Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab. // - // * Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias + // * Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. // - // AWS parses KmsKeyId asynchronously, meaning that the action you call may - // appear to complete even though you provided an invalid identifier. This action - // will eventually report failure. + // AWS authenticates the CMK asynchronously. Therefore, if you specify an identifier + // that is not valid, the action can appear to complete, but eventually fails. // - // The specified CMK must exist in the Region that the snapshot is being copied - // to. + // The specified CMK must exist in the destination Region. // // Amazon EBS does not support asymmetric CMKs. KmsKeyId *string `locationName:"kmsKeyId" type:"string"` @@ -42670,11 +46625,11 @@ type CopySnapshotInput struct { // // You can specify the CMK using any of the following: // - // * Key ID. For example, key/1234abcd-12ab-34cd-56ef-1234567890ab. + // * Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab. // // * Key alias. For example, alias/ExampleAlias. // - // * Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. + // * Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab. // // * Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. // @@ -42903,8 +46858,6 @@ type CreateCapacityReservationInput struct { // Unique, case-sensitive identifier that you provide to ensure the idempotency // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). - // - // Constraint: Maximum 64 ASCII characters. ClientToken *string `type:"string"` // Checks whether you have the required permissions for the action, without @@ -43132,6 +47085,98 @@ func (s *CreateCapacityReservationOutput) SetCapacityReservation(v *CapacityRese return s } +type CreateCarrierGatewayInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. 
For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html). + ClientToken *string `type:"string" idempotencyToken:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The tags to associate with the carrier gateway. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + + // The ID of the VPC to associate with the carrier gateway. + // + // VpcId is a required field + VpcId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateCarrierGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCarrierGatewayInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateCarrierGatewayInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateCarrierGatewayInput"} + if s.VpcId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *CreateCarrierGatewayInput) SetClientToken(v string) *CreateCarrierGatewayInput { + s.ClientToken = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateCarrierGatewayInput) SetDryRun(v bool) *CreateCarrierGatewayInput { + s.DryRun = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateCarrierGatewayInput) SetTagSpecifications(v []*TagSpecification) *CreateCarrierGatewayInput { + s.TagSpecifications = v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *CreateCarrierGatewayInput) SetVpcId(v string) *CreateCarrierGatewayInput { + s.VpcId = &v + return s +} + +type CreateCarrierGatewayOutput struct { + _ struct{} `type:"structure"` + + // Information about the carrier gateway. + CarrierGateway *CarrierGateway `locationName:"carrierGateway" type:"structure"` +} + +// String returns the string representation +func (s CreateCarrierGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCarrierGatewayOutput) GoString() string { + return s.String() +} + +// SetCarrierGateway sets the CarrierGateway field's value. +func (s *CreateCarrierGatewayOutput) SetCarrierGateway(v *CarrierGateway) *CreateCarrierGatewayOutput { + s.CarrierGateway = v + return s +} + type CreateClientVpnEndpointInput struct { _ struct{} `type:"structure"` @@ -43149,6 +47194,9 @@ type CreateClientVpnEndpointInput struct { // ClientCidrBlock is a required field ClientCidrBlock *string `type:"string" required:"true"` + // The options for managing connection authorization for new client connections. + ClientConnectOptions *ClientConnectOptions `type:"structure"` + // Unique, case-sensitive identifier that you provide to ensure the idempotency // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). 
ClientToken *string `type:"string" idempotencyToken:"true"` @@ -43183,6 +47231,15 @@ type CreateClientVpnEndpointInput struct { // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` + // The IDs of one or more security groups to apply to the target network. You + // must also specify the ID of the VPC that contains the security groups. + SecurityGroupIds []*string `locationName:"SecurityGroupId" locationNameList:"item" type:"list"` + + // Specify whether to enable the self-service portal for the Client VPN endpoint. + // + // Default Value: enabled + SelfServicePortal *string `type:"string" enum:"SelfServicePortal"` + // The ARN of the server certificate. For more information, see the AWS Certificate // Manager User Guide (https://docs.aws.amazon.com/acm/latest/userguide/). // @@ -43205,6 +47262,18 @@ type CreateClientVpnEndpointInput struct { // // Default value: udp TransportProtocol *string `type:"string" enum:"TransportProtocol"` + + // The ID of the VPC to associate with the Client VPN endpoint. If no security + // group IDs are specified in the request, the default security group for the + // VPC is applied. + VpcId *string `type:"string"` + + // The port number to assign to the Client VPN endpoint for TCP and UDP traffic. + // + // Valid Values: 443 | 1194 + // + // Default Value: 443 + VpnPort *int64 `type:"integer"` } // String returns the string representation @@ -43251,6 +47320,12 @@ func (s *CreateClientVpnEndpointInput) SetClientCidrBlock(v string) *CreateClien return s } +// SetClientConnectOptions sets the ClientConnectOptions field's value. +func (s *CreateClientVpnEndpointInput) SetClientConnectOptions(v *ClientConnectOptions) *CreateClientVpnEndpointInput { + s.ClientConnectOptions = v + return s +} + // SetClientToken sets the ClientToken field's value. func (s *CreateClientVpnEndpointInput) SetClientToken(v string) *CreateClientVpnEndpointInput { s.ClientToken = &v @@ -43281,6 +47356,18 @@ func (s *CreateClientVpnEndpointInput) SetDryRun(v bool) *CreateClientVpnEndpoin return s } +// SetSecurityGroupIds sets the SecurityGroupIds field's value. +func (s *CreateClientVpnEndpointInput) SetSecurityGroupIds(v []*string) *CreateClientVpnEndpointInput { + s.SecurityGroupIds = v + return s +} + +// SetSelfServicePortal sets the SelfServicePortal field's value. +func (s *CreateClientVpnEndpointInput) SetSelfServicePortal(v string) *CreateClientVpnEndpointInput { + s.SelfServicePortal = &v + return s +} + // SetServerCertificateArn sets the ServerCertificateArn field's value. func (s *CreateClientVpnEndpointInput) SetServerCertificateArn(v string) *CreateClientVpnEndpointInput { s.ServerCertificateArn = &v @@ -43305,6 +47392,18 @@ func (s *CreateClientVpnEndpointInput) SetTransportProtocol(v string) *CreateCli return s } +// SetVpcId sets the VpcId field's value. +func (s *CreateClientVpnEndpointInput) SetVpcId(v string) *CreateClientVpnEndpointInput { + s.VpcId = &v + return s +} + +// SetVpnPort sets the VpnPort field's value. +func (s *CreateClientVpnEndpointInput) SetVpnPort(v int64) *CreateClientVpnEndpointInput { + s.VpnPort = &v + return s +} + type CreateClientVpnEndpointOutput struct { _ struct{} `type:"structure"` @@ -43370,8 +47469,7 @@ type CreateClientVpnRouteInput struct { // * To add a route for an on-premises network, enter the AWS Site-to-Site // VPN connection's IPv4 CIDR range // - // Route address ranges cannot overlap with the CIDR range specified for client - // allocation. 
+ // * To add a route for the local network, enter the client CIDR range // // DestinationCidrBlock is a required field DestinationCidrBlock *string `type:"string" required:"true"` @@ -43385,6 +47483,8 @@ type CreateClientVpnRouteInput struct { // The ID of the subnet through which you want to route traffic. The specified // subnet must be an existing target network of the Client VPN endpoint. // + // Alternatively, if you're adding a route for the local network, specify local. + // // TargetVpcSubnetId is a required field TargetVpcSubnetId *string `type:"string" required:"true"` } @@ -43506,6 +47606,9 @@ type CreateCustomerGatewayInput struct { // The address must be static. PublicIp *string `locationName:"IpAddress" type:"string"` + // The tags to apply to the customer gateway. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + // The type of VPN connection that this customer gateway supports (ipsec.1). // // Type is a required field @@ -43568,6 +47671,12 @@ func (s *CreateCustomerGatewayInput) SetPublicIp(v string) *CreateCustomerGatewa return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateCustomerGatewayInput) SetTagSpecifications(v []*TagSpecification) *CreateCustomerGatewayInput { + s.TagSpecifications = v + return s +} + // SetType sets the Type field's value. func (s *CreateCustomerGatewayInput) SetType(v string) *CreateCustomerGatewayInput { s.Type = &v @@ -43733,6 +47842,9 @@ type CreateDhcpOptionsInput struct { // the required permissions, the error response is DryRunOperation. Otherwise, // it is UnauthorizedOperation. DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The tags to assign to the DHCP option. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` } // String returns the string representation @@ -43770,6 +47882,12 @@ func (s *CreateDhcpOptionsInput) SetDryRun(v bool) *CreateDhcpOptionsInput { return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateDhcpOptionsInput) SetTagSpecifications(v []*TagSpecification) *CreateDhcpOptionsInput { + s.TagSpecifications = v + return s +} + type CreateDhcpOptionsOutput struct { _ struct{} `type:"structure"` @@ -43806,6 +47924,9 @@ type CreateEgressOnlyInternetGatewayInput struct { // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` + // The tags to assign to the egress-only internet gateway. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + // The ID of the VPC for which to create the egress-only internet gateway. // // VpcId is a required field @@ -43847,6 +47968,12 @@ func (s *CreateEgressOnlyInternetGatewayInput) SetDryRun(v bool) *CreateEgressOn return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateEgressOnlyInternetGatewayInput) SetTagSpecifications(v []*TagSpecification) *CreateEgressOnlyInternetGatewayInput { + s.TagSpecifications = v + return s +} + // SetVpcId sets the VpcId field's value. func (s *CreateEgressOnlyInternetGatewayInput) SetVpcId(v string) *CreateEgressOnlyInternetGatewayInput { s.VpcId = &v @@ -43977,7 +48104,7 @@ type CreateFleetInput struct { // The key-value pair for tagging the EC2 Fleet request on creation. The value // for ResourceType must be fleet, otherwise the fleet request fails. 
To tag
// instances at launch, specify the tags in the launch template (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html#create-launch-template).
- // For information about tagging after launch, see Tagging Your Resources (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-resources).
+ // For information about tagging after launch, see Tagging your resources (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-resources).
TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"`
// The number of units to request.
@@ -43989,13 +48116,22 @@ type CreateFleetInput struct {
// expires.
TerminateInstancesWithExpiration *bool `type:"boolean"`
- // The type of the request. By default, the EC2 Fleet places an asynchronous
- // request for your desired capacity, and maintains it by replenishing interrupted
- // Spot Instances (maintain). A value of instant places a synchronous one-time
- // request, and returns errors for any instances that could not be launched.
- // A value of request places an asynchronous one-time request without maintaining
- // capacity or submitting requests in alternative capacity pools if capacity
- // is unavailable. For more information, see EC2 Fleet Request Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-configuration-strategies.html#ec2-fleet-request-type)
+ // The type of request. The default value is maintain.
+ //
+ // * maintain - The EC2 Fleet places an asynchronous request for your desired
+ // capacity, and continues to maintain your desired Spot capacity by replenishing
+ // interrupted Spot Instances.
+ //
+ // * request - The EC2 Fleet places an asynchronous one-time request for
+ // your desired capacity, but does not submit Spot requests in alternative capacity
+ // pools if Spot capacity is unavailable, and does not maintain Spot capacity
+ // if Spot Instances are interrupted.
+ //
+ // * instant - The EC2 Fleet places a synchronous one-time request for your
+ // desired capacity, and returns errors for any instances that could not
+ // be launched.
+ //
+ // For more information, see EC2 Fleet request types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-configuration-strategies.html#ec2-fleet-request-type)
// in the Amazon Elastic Compute Cloud User Guide.
Type *string `type:"string" enum:"FleetType"`
@@ -44290,8 +48426,6 @@ type CreateFlowLogsInput struct {
//
// Specify the fields using the ${field-id} format, separated by spaces. For
// the AWS CLI, use single quotation marks (' ') to surround the parameter value.
- //
- // Only applicable to flow logs that are published to an Amazon S3 bucket.
LogFormat *string `type:"string"`
// The name of a new or existing CloudWatch Logs log group where Amazon EC2
@@ -44301,6 +48435,17 @@ type CreateFlowLogsInput struct {
// or LogGroupName.
LogGroupName *string `type:"string"`
+ // The maximum interval of time during which a flow of packets is captured and
+ // aggregated into a flow log record. You can specify 60 seconds (1 minute)
+ // or 600 seconds (10 minutes).
+ //
+ // When a network interface is attached to a Nitro-based instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances),
+ // the aggregation interval is always 60 seconds or less, regardless of the
+ // value that you specify.
+ // + // Default: 600 + MaxAggregationInterval *int64 `type:"integer"` + // The ID of the subnet, network interface, or VPC for which you want to create // a flow log. // @@ -44315,6 +48460,9 @@ type CreateFlowLogsInput struct { // ResourceType is a required field ResourceType *string `type:"string" required:"true" enum:"FlowLogsResourceType"` + // The tags to apply to the flow logs. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + // The type of traffic to log. You can log traffic that the resource accepts // or rejects, or all traffic. // @@ -44393,6 +48541,12 @@ func (s *CreateFlowLogsInput) SetLogGroupName(v string) *CreateFlowLogsInput { return s } +// SetMaxAggregationInterval sets the MaxAggregationInterval field's value. +func (s *CreateFlowLogsInput) SetMaxAggregationInterval(v int64) *CreateFlowLogsInput { + s.MaxAggregationInterval = &v + return s +} + // SetResourceIds sets the ResourceIds field's value. func (s *CreateFlowLogsInput) SetResourceIds(v []*string) *CreateFlowLogsInput { s.ResourceIds = v @@ -44405,6 +48559,12 @@ func (s *CreateFlowLogsInput) SetResourceType(v string) *CreateFlowLogsInput { return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateFlowLogsInput) SetTagSpecifications(v []*TagSpecification) *CreateFlowLogsInput { + s.TagSpecifications = v + return s +} + // SetTrafficType sets the TrafficType field's value. func (s *CreateFlowLogsInput) SetTrafficType(v string) *CreateFlowLogsInput { s.TrafficType = &v @@ -44709,19 +48869,26 @@ type CreateInstanceExportTaskInput struct { _ struct{} `type:"structure"` // A description for the conversion task or the resource being exported. The - // maximum length is 255 bytes. + // maximum length is 255 characters. Description *string `locationName:"description" type:"string"` // The format and location for an instance export task. - ExportToS3Task *ExportToS3TaskSpecification `locationName:"exportToS3" type:"structure"` + // + // ExportToS3Task is a required field + ExportToS3Task *ExportToS3TaskSpecification `locationName:"exportToS3" type:"structure" required:"true"` // The ID of the instance. // // InstanceId is a required field InstanceId *string `locationName:"instanceId" type:"string" required:"true"` + // The tags to apply to the instance export task during creation. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + // The target virtualization environment. - TargetEnvironment *string `locationName:"targetEnvironment" type:"string" enum:"ExportEnvironment"` + // + // TargetEnvironment is a required field + TargetEnvironment *string `locationName:"targetEnvironment" type:"string" required:"true" enum:"ExportEnvironment"` } // String returns the string representation @@ -44737,9 +48904,15 @@ func (s CreateInstanceExportTaskInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. 
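// Usage sketch (illustrative): with ExportToS3Task and TargetEnvironment now
// required, a minimal instance export request looks like the following. The
// bucket name and instance ID are placeholders.
//
//	_, err := svc.CreateInstanceExportTask(&ec2.CreateInstanceExportTaskInput{
//		InstanceId:        aws.String("i-0123456789abcdef0"),
//		TargetEnvironment: aws.String("vmware"),
//		ExportToS3Task: &ec2.ExportToS3TaskSpecification{
//			DiskImageFormat: aws.String("VMDK"),
//			S3Bucket:        aws.String("example-export-bucket"),
//		},
//	})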
func (s *CreateInstanceExportTaskInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "CreateInstanceExportTaskInput"} + if s.ExportToS3Task == nil { + invalidParams.Add(request.NewErrParamRequired("ExportToS3Task")) + } if s.InstanceId == nil { invalidParams.Add(request.NewErrParamRequired("InstanceId")) } + if s.TargetEnvironment == nil { + invalidParams.Add(request.NewErrParamRequired("TargetEnvironment")) + } if invalidParams.Len() > 0 { return invalidParams @@ -44765,6 +48938,12 @@ func (s *CreateInstanceExportTaskInput) SetInstanceId(v string) *CreateInstanceE return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateInstanceExportTaskInput) SetTagSpecifications(v []*TagSpecification) *CreateInstanceExportTaskInput { + s.TagSpecifications = v + return s +} + // SetTargetEnvironment sets the TargetEnvironment field's value. func (s *CreateInstanceExportTaskInput) SetTargetEnvironment(v string) *CreateInstanceExportTaskInput { s.TargetEnvironment = &v @@ -44802,6 +48981,9 @@ type CreateInternetGatewayInput struct { // the required permissions, the error response is DryRunOperation. Otherwise, // it is UnauthorizedOperation. DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The tags to assign to the internet gateway. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` } // String returns the string representation @@ -44820,6 +49002,12 @@ func (s *CreateInternetGatewayInput) SetDryRun(v bool) *CreateInternetGatewayInp return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateInternetGatewayInput) SetTagSpecifications(v []*TagSpecification) *CreateInternetGatewayInput { + s.TagSpecifications = v + return s +} + type CreateInternetGatewayOutput struct { _ struct{} `type:"structure"` @@ -44858,6 +49046,9 @@ type CreateKeyPairInput struct { // // KeyName is a required field KeyName *string `type:"string" required:"true"` + + // The tags to apply to the new key pair. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` } // String returns the string representation @@ -44895,6 +49086,12 @@ func (s *CreateKeyPairInput) SetKeyName(v string) *CreateKeyPairInput { return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateKeyPairInput) SetTagSpecifications(v []*TagSpecification) *CreateKeyPairInput { + s.TagSpecifications = v + return s +} + // Describes a key pair. type CreateKeyPairOutput struct { _ struct{} `type:"structure"` @@ -44910,6 +49107,9 @@ type CreateKeyPairOutput struct { // The ID of the key pair. KeyPairId *string `locationName:"keyPairId" type:"string"` + + // Any tags applied to the key pair. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` } // String returns the string representation @@ -44946,6 +49146,12 @@ func (s *CreateKeyPairOutput) SetKeyPairId(v string) *CreateKeyPairOutput { return s } +// SetTags sets the Tags field's value. +func (s *CreateKeyPairOutput) SetTags(v []*Tag) *CreateKeyPairOutput { + s.Tags = v + return s +} + type CreateLaunchTemplateInput struct { _ struct{} `type:"structure"` @@ -45053,6 +49259,11 @@ type CreateLaunchTemplateOutput struct { // Information about the launch template. 
LaunchTemplate *LaunchTemplate `locationName:"launchTemplate" type:"structure"` + + // If the launch template contains parameters or parameter combinations that + // are not valid, an error code and an error message are returned for each issue + // that's found. + Warning *ValidationWarning `locationName:"warning" type:"structure"` } // String returns the string representation @@ -45071,6 +49282,12 @@ func (s *CreateLaunchTemplateOutput) SetLaunchTemplate(v *LaunchTemplate) *Creat return s } +// SetWarning sets the Warning field's value. +func (s *CreateLaunchTemplateOutput) SetWarning(v *ValidationWarning) *CreateLaunchTemplateOutput { + s.Warning = v + return s +} + type CreateLaunchTemplateVersionInput struct { _ struct{} `type:"structure"` @@ -45188,6 +49405,11 @@ type CreateLaunchTemplateVersionOutput struct { // Information about the launch template version. LaunchTemplateVersion *LaunchTemplateVersion `locationName:"launchTemplateVersion" type:"structure"` + + // If the new version of the launch template contains parameters or parameter + // combinations that are not valid, an error code and an error message are returned + // for each issue that's found. + Warning *ValidationWarning `locationName:"warning" type:"structure"` } // String returns the string representation @@ -45206,6 +49428,12 @@ func (s *CreateLaunchTemplateVersionOutput) SetLaunchTemplateVersion(v *LaunchTe return s } +// SetWarning sets the Warning field's value. +func (s *CreateLaunchTemplateVersionOutput) SetWarning(v *ValidationWarning) *CreateLaunchTemplateVersionOutput { + s.Warning = v + return s +} + type CreateLocalGatewayRouteInput struct { _ struct{} `type:"structure"` @@ -45322,6 +49550,9 @@ type CreateLocalGatewayRouteTableVpcAssociationInput struct { // LocalGatewayRouteTableId is a required field LocalGatewayRouteTableId *string `type:"string" required:"true"` + // The tags to assign to the local gateway route table VPC association. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + // The ID of the VPC. // // VpcId is a required field @@ -45366,6 +49597,12 @@ func (s *CreateLocalGatewayRouteTableVpcAssociationInput) SetLocalGatewayRouteTa return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateLocalGatewayRouteTableVpcAssociationInput) SetTagSpecifications(v []*TagSpecification) *CreateLocalGatewayRouteTableVpcAssociationInput { + s.TagSpecifications = v + return s +} + // SetVpcId sets the VpcId field's value. func (s *CreateLocalGatewayRouteTableVpcAssociationInput) SetVpcId(v string) *CreateLocalGatewayRouteTableVpcAssociationInput { s.VpcId = &v @@ -45395,6 +49632,151 @@ func (s *CreateLocalGatewayRouteTableVpcAssociationOutput) SetLocalGatewayRouteT return s } +type CreateManagedPrefixListInput struct { + _ struct{} `type:"structure"` + + // The IP address type. + // + // Valid Values: IPv4 | IPv6 + // + // AddressFamily is a required field + AddressFamily *string `type:"string" required:"true"` + + // Unique, case-sensitive identifier you provide to ensure the idempotency of + // the request. For more information, see Ensuring Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // + // Constraints: Up to 255 UTF-8 characters in length. + ClientToken *string `type:"string" idempotencyToken:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. 
If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more entries for the prefix list. + Entries []*AddPrefixListEntry `locationName:"Entry" type:"list"` + + // The maximum number of entries for the prefix list. + // + // MaxEntries is a required field + MaxEntries *int64 `type:"integer" required:"true"` + + // A name for the prefix list. + // + // Constraints: Up to 255 characters in length. The name cannot start with com.amazonaws. + // + // PrefixListName is a required field + PrefixListName *string `type:"string" required:"true"` + + // The tags to apply to the prefix list during creation. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CreateManagedPrefixListInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateManagedPrefixListInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateManagedPrefixListInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateManagedPrefixListInput"} + if s.AddressFamily == nil { + invalidParams.Add(request.NewErrParamRequired("AddressFamily")) + } + if s.MaxEntries == nil { + invalidParams.Add(request.NewErrParamRequired("MaxEntries")) + } + if s.PrefixListName == nil { + invalidParams.Add(request.NewErrParamRequired("PrefixListName")) + } + if s.Entries != nil { + for i, v := range s.Entries { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Entries", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAddressFamily sets the AddressFamily field's value. +func (s *CreateManagedPrefixListInput) SetAddressFamily(v string) *CreateManagedPrefixListInput { + s.AddressFamily = &v + return s +} + +// SetClientToken sets the ClientToken field's value. +func (s *CreateManagedPrefixListInput) SetClientToken(v string) *CreateManagedPrefixListInput { + s.ClientToken = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateManagedPrefixListInput) SetDryRun(v bool) *CreateManagedPrefixListInput { + s.DryRun = &v + return s +} + +// SetEntries sets the Entries field's value. +func (s *CreateManagedPrefixListInput) SetEntries(v []*AddPrefixListEntry) *CreateManagedPrefixListInput { + s.Entries = v + return s +} + +// SetMaxEntries sets the MaxEntries field's value. +func (s *CreateManagedPrefixListInput) SetMaxEntries(v int64) *CreateManagedPrefixListInput { + s.MaxEntries = &v + return s +} + +// SetPrefixListName sets the PrefixListName field's value. +func (s *CreateManagedPrefixListInput) SetPrefixListName(v string) *CreateManagedPrefixListInput { + s.PrefixListName = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateManagedPrefixListInput) SetTagSpecifications(v []*TagSpecification) *CreateManagedPrefixListInput { + s.TagSpecifications = v + return s +} + +type CreateManagedPrefixListOutput struct { + _ struct{} `type:"structure"` + + // Information about the prefix list. 
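// A minimal usage sketch (editorial, not part of the generated file): creating
// a managed prefix list. The input fields come from the struct above; the
// Cidr/Description fields on AddPrefixListEntry and the generated
// CreateManagedPrefixList client method are assumed from the SDK:
//
//	out, err := svc.CreateManagedPrefixList(&ec2.CreateManagedPrefixListInput{
//		AddressFamily:  aws.String("IPv4"),
//		MaxEntries:     aws.Int64(10),
//		PrefixListName: aws.String("corp-cidrs"),
//		Entries: []*ec2.AddPrefixListEntry{
//			{Cidr: aws.String("10.0.0.0/16"), Description: aws.String("on-prem")},
//		},
//	})
//	// On success, out.PrefixList carries the new list's ID and version.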
+ PrefixList *ManagedPrefixList `locationName:"prefixList" type:"structure"` +} + +// String returns the string representation +func (s CreateManagedPrefixListOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateManagedPrefixListOutput) GoString() string { + return s.String() +} + +// SetPrefixList sets the PrefixList field's value. +func (s *CreateManagedPrefixListOutput) SetPrefixList(v *ManagedPrefixList) *CreateManagedPrefixListOutput { + s.PrefixList = v + return s +} + type CreateNatGatewayInput struct { _ struct{} `type:"structure"` @@ -45409,12 +49791,21 @@ type CreateNatGatewayInput struct { // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). // // Constraint: Maximum 64 ASCII characters. - ClientToken *string `type:"string"` + ClientToken *string `type:"string" idempotencyToken:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` // The subnet in which to create the NAT gateway. // // SubnetId is a required field SubnetId *string `type:"string" required:"true"` + + // The tags to assign to the NAT gateway. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` } // String returns the string representation @@ -45455,12 +49846,24 @@ func (s *CreateNatGatewayInput) SetClientToken(v string) *CreateNatGatewayInput return s } +// SetDryRun sets the DryRun field's value. +func (s *CreateNatGatewayInput) SetDryRun(v bool) *CreateNatGatewayInput { + s.DryRun = &v + return s +} + // SetSubnetId sets the SubnetId field's value. func (s *CreateNatGatewayInput) SetSubnetId(v string) *CreateNatGatewayInput { s.SubnetId = &v return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateNatGatewayInput) SetTagSpecifications(v []*TagSpecification) *CreateNatGatewayInput { + s.TagSpecifications = v + return s +} + type CreateNatGatewayOutput struct { _ struct{} `type:"structure"` @@ -45498,6 +49901,8 @@ type CreateNetworkAclEntryInput struct { _ struct{} `type:"structure"` // The IPv4 network range to allow or deny, in CIDR notation (for example 172.16.0.0/24). + // We modify the specified CIDR block to its canonical form; for example, if + // you specify 100.68.0.18/18, we modify it to 100.68.0.0/18. CidrBlock *string `locationName:"cidrBlock" type:"string"` // Checks whether you have the required permissions for the action, without @@ -45672,6 +50077,9 @@ type CreateNetworkAclInput struct { // it is UnauthorizedOperation. DryRun *bool `locationName:"dryRun" type:"boolean"` + // The tags to assign to the network ACL. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + // The ID of the VPC. // // VpcId is a required field @@ -45707,6 +50115,12 @@ func (s *CreateNetworkAclInput) SetDryRun(v bool) *CreateNetworkAclInput { return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateNetworkAclInput) SetTagSpecifications(v []*TagSpecification) *CreateNetworkAclInput { + s.TagSpecifications = v + return s +} + // SetVpcId sets the VpcId field's value. 
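// A minimal usage sketch (editorial, not part of the generated file): creating
// a tagged NAT gateway. ClientToken is now an idempotency token, so the SDK
// fills it automatically and retries with the same token are safe. AllocationId
// is assumed from the unmodified part of CreateNatGatewayInput, and
// "natgateway" as the resource type is an assumption:
//
//	out, err := svc.CreateNatGateway(&ec2.CreateNatGatewayInput{
//		SubnetId:     aws.String("subnet-0123456789abcdef0"),
//		AllocationId: aws.String("eipalloc-0123456789abcdef0"),
//		TagSpecifications: []*ec2.TagSpecification{{
//			ResourceType: aws.String("natgateway"),
//			Tags:         []*ec2.Tag{{Key: aws.String("Name"), Value: aws.String("nat-a")}},
//		}},
//	})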
func (s *CreateNetworkAclInput) SetVpcId(v string) *CreateNetworkAclInput { s.VpcId = &v @@ -45793,6 +50207,9 @@ type CreateNetworkInterfaceInput struct { // // SubnetId is a required field SubnetId *string `locationName:"subnetId" type:"string" required:"true"` + + // The tags to apply to the new network interface. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` } // String returns the string representation @@ -45878,6 +50295,12 @@ func (s *CreateNetworkInterfaceInput) SetSubnetId(v string) *CreateNetworkInterf return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateNetworkInterfaceInput) SetTagSpecifications(v []*TagSpecification) *CreateNetworkInterfaceInput { + s.TagSpecifications = v + return s +} + // Contains the output of CreateNetworkInterface. type CreateNetworkInterfaceOutput struct { _ struct{} `type:"structure"` @@ -46029,6 +50452,9 @@ type CreatePlacementGroupInput struct { // The placement strategy. Strategy *string `locationName:"strategy" type:"string" enum:"PlacementStrategy"` + + // The tags to apply to the new placement group. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` } // String returns the string representation @@ -46065,8 +50491,17 @@ func (s *CreatePlacementGroupInput) SetStrategy(v string) *CreatePlacementGroupI return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreatePlacementGroupInput) SetTagSpecifications(v []*TagSpecification) *CreatePlacementGroupInput { + s.TagSpecifications = v + return s +} + type CreatePlacementGroupOutput struct { _ struct{} `type:"structure"` + + // Describes a placement group. + PlacementGroup *PlacementGroup `locationName:"placementGroup" type:"structure"` } // String returns the string representation @@ -46079,6 +50514,12 @@ func (s CreatePlacementGroupOutput) GoString() string { return s.String() } +// SetPlacementGroup sets the PlacementGroup field's value. +func (s *CreatePlacementGroupOutput) SetPlacementGroup(v *PlacementGroup) *CreatePlacementGroupOutput { + s.PlacementGroup = v + return s +} + // Contains the parameters for CreateReservedInstancesListing. type CreateReservedInstancesListingInput struct { _ struct{} `type:"structure"` @@ -46193,14 +50634,25 @@ func (s *CreateReservedInstancesListingOutput) SetReservedInstancesListings(v [] type CreateRouteInput struct { _ struct{} `type:"structure"` + // The ID of the carrier gateway. + // + // You can only use this option when the VPC contains a subnet which is associated + // with a Wavelength Zone. + CarrierGatewayId *string `type:"string"` + // The IPv4 CIDR address block used for the destination match. Routing decisions - // are based on the most specific match. + // are based on the most specific match. We modify the specified CIDR block + // to its canonical form; for example, if you specify 100.68.0.18/18, we modify + // it to 100.68.0.0/18. DestinationCidrBlock *string `locationName:"destinationCidrBlock" type:"string"` // The IPv6 CIDR block used for the destination match. Routing decisions are // based on the most specific match. DestinationIpv6CidrBlock *string `locationName:"destinationIpv6CidrBlock" type:"string"` + // The ID of a prefix list used for the destination match. + DestinationPrefixListId *string `type:"string"` + // Checks whether you have the required permissions for the action, without // actually making the request, and provides an error response. 
If you have // the required permissions, the error response is DryRunOperation. Otherwise, @@ -46262,6 +50714,12 @@ func (s *CreateRouteInput) Validate() error { return nil } +// SetCarrierGatewayId sets the CarrierGatewayId field's value. +func (s *CreateRouteInput) SetCarrierGatewayId(v string) *CreateRouteInput { + s.CarrierGatewayId = &v + return s +} + // SetDestinationCidrBlock sets the DestinationCidrBlock field's value. func (s *CreateRouteInput) SetDestinationCidrBlock(v string) *CreateRouteInput { s.DestinationCidrBlock = &v @@ -46274,6 +50732,12 @@ func (s *CreateRouteInput) SetDestinationIpv6CidrBlock(v string) *CreateRouteInp return s } +// SetDestinationPrefixListId sets the DestinationPrefixListId field's value. +func (s *CreateRouteInput) SetDestinationPrefixListId(v string) *CreateRouteInput { + s.DestinationPrefixListId = &v + return s +} + // SetDryRun sets the DryRun field's value. func (s *CreateRouteInput) SetDryRun(v bool) *CreateRouteInput { s.DryRun = &v @@ -46366,6 +50830,9 @@ type CreateRouteTableInput struct { // it is UnauthorizedOperation. DryRun *bool `locationName:"dryRun" type:"boolean"` + // The tags to assign to the route table. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + // The ID of the VPC. // // VpcId is a required field @@ -46401,6 +50868,12 @@ func (s *CreateRouteTableInput) SetDryRun(v bool) *CreateRouteTableInput { return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateRouteTableInput) SetTagSpecifications(v []*TagSpecification) *CreateRouteTableInput { + s.TagSpecifications = v + return s +} + // SetVpcId sets the VpcId field's value. func (s *CreateRouteTableInput) SetVpcId(v string) *CreateRouteTableInput { s.VpcId = &v @@ -46461,6 +50934,9 @@ type CreateSecurityGroupInput struct { // GroupName is a required field GroupName *string `type:"string" required:"true"` + // The tags to assign to the security group. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + // [EC2-VPC] The ID of the VPC. Required for EC2-VPC. VpcId *string `type:"string"` } @@ -46509,6 +50985,12 @@ func (s *CreateSecurityGroupInput) SetGroupName(v string) *CreateSecurityGroupIn return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateSecurityGroupInput) SetTagSpecifications(v []*TagSpecification) *CreateSecurityGroupInput { + s.TagSpecifications = v + return s +} + // SetVpcId sets the VpcId field's value. func (s *CreateSecurityGroupInput) SetVpcId(v string) *CreateSecurityGroupInput { s.VpcId = &v @@ -46520,6 +51002,9 @@ type CreateSecurityGroupOutput struct { // The ID of the security group. GroupId *string `locationName:"groupId" type:"string"` + + // The tags assigned to the security group. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` } // String returns the string representation @@ -46538,6 +51023,12 @@ func (s *CreateSecurityGroupOutput) SetGroupId(v string) *CreateSecurityGroupOut return s } +// SetTags sets the Tags field's value. 
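// A minimal usage sketch (editorial, not part of the generated file): routing
// to a managed prefix list instead of a literal CIDR, via the new
// DestinationPrefixListId setter above. RouteTableId and NatGatewayId are
// assumed from the unmodified parts of CreateRouteInput:
//
//	_, err := svc.CreateRoute(&ec2.CreateRouteInput{
//		RouteTableId:            aws.String("rtb-0123456789abcdef0"),
//		DestinationPrefixListId: aws.String("pl-0123456789abcdef0"),
//		NatGatewayId:            aws.String("nat-0123456789abcdef0"),
//	})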
+func (s *CreateSecurityGroupOutput) SetTags(v []*Tag) *CreateSecurityGroupOutput { + s.Tags = v + return s +} + type CreateSnapshotInput struct { _ struct{} `type:"structure"` @@ -46710,7 +51201,10 @@ func (s *CreateSnapshotsOutput) SetSnapshots(v []*SnapshotInfo) *CreateSnapshots type CreateSpotDatafeedSubscriptionInput struct { _ struct{} `type:"structure"` - // The Amazon S3 bucket in which to store the Spot Instance data feed. + // The name of the Amazon S3 bucket in which to store the Spot Instance data + // feed. For more information about bucket names, see Rules for bucket naming + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html#bucketnamingrules) + // in the Amazon S3 Developer Guide. // // Bucket is a required field Bucket *string `locationName:"bucket" type:"string" required:"true"` @@ -46721,7 +51215,7 @@ type CreateSpotDatafeedSubscriptionInput struct { // it is UnauthorizedOperation. DryRun *bool `locationName:"dryRun" type:"boolean"` - // A prefix for the data feed file names. + // The prefix for the data feed file names. Prefix *string `locationName:"prefix" type:"string"` } @@ -46802,12 +51296,17 @@ type CreateSubnetInput struct { // for example us-west-2-lax-1a. For information about the Regions that support // Local Zones, see Available Regions (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions) // in the Amazon Elastic Compute Cloud User Guide. + // + // To create a subnet in an Outpost, set this value to the Availability Zone + // for the Outpost and specify the Outpost ARN. AvailabilityZone *string `type:"string"` // The AZ ID or the Local Zone ID of the subnet. AvailabilityZoneId *string `type:"string"` // The IPv4 network range for the subnet, in CIDR notation. For example, 10.0.0.0/24. + // We modify the specified CIDR block to its canonical form; for example, if + // you specify 100.68.0.18/18, we modify it to 100.68.0.0/18. // // CidrBlock is a required field CidrBlock *string `type:"string" required:"true"` @@ -46822,9 +51321,13 @@ type CreateSubnetInput struct { // must use a /64 prefix length. Ipv6CidrBlock *string `type:"string"` - // The Amazon Resource Name (ARN) of the Outpost. + // The Amazon Resource Name (ARN) of the Outpost. If you specify an Outpost + // ARN, you must also specify the Availability Zone of the Outpost subnet. OutpostArn *string `type:"string"` + // The tags to assign to the subnet. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + // The ID of the VPC. // // VpcId is a required field @@ -46893,6 +51396,12 @@ func (s *CreateSubnetInput) SetOutpostArn(v string) *CreateSubnetInput { return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateSubnetInput) SetTagSpecifications(v []*TagSpecification) *CreateSubnetInput { + s.TagSpecifications = v + return s +} + // SetVpcId sets the VpcId field's value. func (s *CreateSubnetInput) SetVpcId(v string) *CreateSubnetInput { s.VpcId = &v @@ -47869,6 +52378,111 @@ func (s *CreateTransitGatewayPeeringAttachmentOutput) SetTransitGatewayPeeringAt return s } +type CreateTransitGatewayPrefixListReferenceInput struct { + _ struct{} `type:"structure"` + + // Indicates whether to drop traffic that matches this route. + Blackhole *bool `type:"boolean"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. 
If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the prefix list that is used for destination matches. + // + // PrefixListId is a required field + PrefixListId *string `type:"string" required:"true"` + + // The ID of the attachment to which traffic is routed. + TransitGatewayAttachmentId *string `type:"string"` + + // The ID of the transit gateway route table. + // + // TransitGatewayRouteTableId is a required field + TransitGatewayRouteTableId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateTransitGatewayPrefixListReferenceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTransitGatewayPrefixListReferenceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateTransitGatewayPrefixListReferenceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateTransitGatewayPrefixListReferenceInput"} + if s.PrefixListId == nil { + invalidParams.Add(request.NewErrParamRequired("PrefixListId")) + } + if s.TransitGatewayRouteTableId == nil { + invalidParams.Add(request.NewErrParamRequired("TransitGatewayRouteTableId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBlackhole sets the Blackhole field's value. +func (s *CreateTransitGatewayPrefixListReferenceInput) SetBlackhole(v bool) *CreateTransitGatewayPrefixListReferenceInput { + s.Blackhole = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateTransitGatewayPrefixListReferenceInput) SetDryRun(v bool) *CreateTransitGatewayPrefixListReferenceInput { + s.DryRun = &v + return s +} + +// SetPrefixListId sets the PrefixListId field's value. +func (s *CreateTransitGatewayPrefixListReferenceInput) SetPrefixListId(v string) *CreateTransitGatewayPrefixListReferenceInput { + s.PrefixListId = &v + return s +} + +// SetTransitGatewayAttachmentId sets the TransitGatewayAttachmentId field's value. +func (s *CreateTransitGatewayPrefixListReferenceInput) SetTransitGatewayAttachmentId(v string) *CreateTransitGatewayPrefixListReferenceInput { + s.TransitGatewayAttachmentId = &v + return s +} + +// SetTransitGatewayRouteTableId sets the TransitGatewayRouteTableId field's value. +func (s *CreateTransitGatewayPrefixListReferenceInput) SetTransitGatewayRouteTableId(v string) *CreateTransitGatewayPrefixListReferenceInput { + s.TransitGatewayRouteTableId = &v + return s +} + +type CreateTransitGatewayPrefixListReferenceOutput struct { + _ struct{} `type:"structure"` + + // Information about the prefix list reference. + TransitGatewayPrefixListReference *TransitGatewayPrefixListReference `locationName:"transitGatewayPrefixListReference" type:"structure"` +} + +// String returns the string representation +func (s CreateTransitGatewayPrefixListReferenceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTransitGatewayPrefixListReferenceOutput) GoString() string { + return s.String() +} + +// SetTransitGatewayPrefixListReference sets the TransitGatewayPrefixListReference field's value. 
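// A minimal usage sketch (editorial, not part of the generated file): pointing
// a transit gateway route table at a prefix list. To drop matching traffic
// instead of forwarding it, omit TransitGatewayAttachmentId and set Blackhole
// to true:
//
//	out, err := svc.CreateTransitGatewayPrefixListReference(
//		&ec2.CreateTransitGatewayPrefixListReferenceInput{
//			TransitGatewayRouteTableId: aws.String("tgw-rtb-0123456789abcdef0"),
//			PrefixListId:               aws.String("pl-0123456789abcdef0"),
//			TransitGatewayAttachmentId: aws.String("tgw-attach-0123456789abcdef0"),
//		})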
+func (s *CreateTransitGatewayPrefixListReferenceOutput) SetTransitGatewayPrefixListReference(v *TransitGatewayPrefixListReference) *CreateTransitGatewayPrefixListReferenceOutput { + s.TransitGatewayPrefixListReference = v + return s +} + type CreateTransitGatewayRouteInput struct { _ struct{} `type:"structure"` @@ -48183,10 +52797,15 @@ func (s *CreateTransitGatewayVpcAttachmentOutput) SetTransitGatewayVpcAttachment type CreateTransitGatewayVpcAttachmentRequestOptions struct { _ struct{} `type:"structure"` + // Enable or disable support for appliance mode. If enabled, a traffic flow + // between a source and destination uses the same Availability Zone for the + // VPC attachment for the lifetime of that flow. The default is disable. + ApplianceModeSupport *string `type:"string" enum:"ApplianceModeSupportValue"` + // Enable or disable DNS support. The default is enable. DnsSupport *string `type:"string" enum:"DnsSupportValue"` - // Enable or disable IPv6 support. The default is enable. + // Enable or disable IPv6 support. Ipv6Support *string `type:"string" enum:"Ipv6SupportValue"` } @@ -48200,6 +52819,12 @@ func (s CreateTransitGatewayVpcAttachmentRequestOptions) GoString() string { return s.String() } +// SetApplianceModeSupport sets the ApplianceModeSupport field's value. +func (s *CreateTransitGatewayVpcAttachmentRequestOptions) SetApplianceModeSupport(v string) *CreateTransitGatewayVpcAttachmentRequestOptions { + s.ApplianceModeSupport = &v + return s +} + // SetDnsSupport sets the DnsSupport field's value. func (s *CreateTransitGatewayVpcAttachmentRequestOptions) SetDnsSupport(v string) *CreateTransitGatewayVpcAttachmentRequestOptions { s.DnsSupport = &v @@ -48229,22 +52854,22 @@ type CreateVolumeInput struct { // Specifies whether the volume should be encrypted. The effect of setting the // encryption state to true depends on the volume origin (new or from a snapshot), // starting encryption state, ownership, and whether encryption by default is - // enabled. For more information, see Encryption by Default (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#encryption-by-default) + // enabled. For more information, see Encryption by default (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#encryption-by-default) // in the Amazon Elastic Compute Cloud User Guide. // // Encrypted Amazon EBS volumes must be attached to instances that support Amazon - // EBS encryption. For more information, see Supported Instance Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#EBSEncryption_supported_instances). + // EBS encryption. For more information, see Supported instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#EBSEncryption_supported_instances). Encrypted *bool `locationName:"encrypted" type:"boolean"` - // The number of I/O operations per second (IOPS) to provision for the volume, - // with a maximum ratio of 50 IOPS/GiB. Range is 100 to 64,000 IOPS for volumes - // in most Regions. Maximum IOPS of 64,000 is guaranteed only on Nitro-based - // instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances). + // The number of I/O operations per second (IOPS) to provision for an io1 or + // io2 volume, with a maximum ratio of 50 IOPS/GiB for io1, and 500 IOPS/GiB + // for io2. Range is 100 to 64,000 IOPS for volumes in most Regions. 
Maximum + // IOPS of 64,000 is guaranteed only on Nitro-based instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances). // Other instance families guarantee performance up to 32,000 IOPS. For more - // information, see Amazon EBS Volume Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) + // information, see Amazon EBS volume types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) // in the Amazon Elastic Compute Cloud User Guide. // - // This parameter is valid only for Provisioned IOPS SSD (io1) volumes. + // This parameter is valid only for Provisioned IOPS SSD (io1 and io2) volumes. Iops *int64 `type:"integer"` // The identifier of the AWS Key Management Service (AWS KMS) customer master @@ -48254,11 +52879,11 @@ type CreateVolumeInput struct { // // You can specify the CMK using any of the following: // - // * Key ID. For example, key/1234abcd-12ab-34cd-56ef-1234567890ab. + // * Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab. // // * Key alias. For example, alias/ExampleAlias. // - // * Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. + // * Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab. // // * Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. // @@ -48267,32 +52892,37 @@ type CreateVolumeInput struct { // fails. KmsKeyId *string `type:"string"` + // Specifies whether to enable Amazon EBS Multi-Attach. If you enable Multi-Attach, + // you can attach the volume to up to 16 Nitro-based instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances) + // in the same Availability Zone. For more information, see Amazon EBS Multi-Attach + // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volumes-multi.html) + // in the Amazon Elastic Compute Cloud User Guide. + MultiAttachEnabled *bool `type:"boolean"` + // The Amazon Resource Name (ARN) of the Outpost. OutpostArn *string `type:"string"` - // The size of the volume, in GiBs. + // The size of the volume, in GiBs. You must specify either a snapshot ID or + // a volume size. // - // Constraints: 1-16,384 for gp2, 4-16,384 for io1, 500-16,384 for st1, 500-16,384 - // for sc1, and 1-1,024 for standard. If you specify a snapshot, the volume - // size must be equal to or larger than the snapshot size. + // Constraints: 1-16,384 for gp2, 4-16,384 for io1 and io2, 500-16,384 for st1, + // 500-16,384 for sc1, and 1-1,024 for standard. If you specify a snapshot, + // the volume size must be equal to or larger than the snapshot size. // // Default: If you're creating the volume from a snapshot and don't specify // a volume size, the default is the snapshot size. - // - // At least one of Size or SnapshotId is required. Size *int64 `type:"integer"` - // The snapshot from which to create the volume. - // - // At least one of Size or SnapshotId are required. + // The snapshot from which to create the volume. You must specify either a snapshot + // ID or a volume size. SnapshotId *string `type:"string"` // The tags to apply to the volume during creation. TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` - // The volume type. This can be gp2 for General Purpose SSD, io1 for Provisioned - // IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard - // for Magnetic volumes. + // The volume type. 
This can be gp2 for General Purpose SSD, io1 or io2 for + // Provisioned IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, + // or standard for Magnetic volumes. // // Default: gp2 VolumeType *string `type:"string" enum:"VolumeType"` @@ -48351,6 +52981,12 @@ func (s *CreateVolumeInput) SetKmsKeyId(v string) *CreateVolumeInput { return s } +// SetMultiAttachEnabled sets the MultiAttachEnabled field's value. +func (s *CreateVolumeInput) SetMultiAttachEnabled(v bool) *CreateVolumeInput { + s.MultiAttachEnabled = &v + return s +} + // SetOutpostArn sets the OutpostArn field's value. func (s *CreateVolumeInput) SetOutpostArn(v string) *CreateVolumeInput { s.OutpostArn = &v @@ -48625,6 +53261,9 @@ type CreateVpcEndpointInput struct { // endpoint network interface. SubnetIds []*string `locationName:"SubnetId" locationNameList:"item" type:"list"` + // The tags to associate with the endpoint. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + // The type of endpoint. // // Default: Gateway @@ -48710,6 +53349,12 @@ func (s *CreateVpcEndpointInput) SetSubnetIds(v []*string) *CreateVpcEndpointInp return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateVpcEndpointInput) SetTagSpecifications(v []*TagSpecification) *CreateVpcEndpointInput { + s.TagSpecifications = v + return s +} + // SetVpcEndpointType sets the VpcEndpointType field's value. func (s *CreateVpcEndpointInput) SetVpcEndpointType(v string) *CreateVpcEndpointInput { s.VpcEndpointType = &v @@ -48781,6 +53426,9 @@ type CreateVpcEndpointServiceConfigurationInput struct { // The private DNS name to assign to the VPC endpoint service. PrivateDnsName *string `type:"string"` + + // The tags to associate with the service. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` } // String returns the string representation @@ -48836,6 +53484,12 @@ func (s *CreateVpcEndpointServiceConfigurationInput) SetPrivateDnsName(v string) return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateVpcEndpointServiceConfigurationInput) SetTagSpecifications(v []*TagSpecification) *CreateVpcEndpointServiceConfigurationInput { + s.TagSpecifications = v + return s +} + type CreateVpcEndpointServiceConfigurationOutput struct { _ struct{} `type:"structure"` @@ -48878,6 +53532,8 @@ type CreateVpcInput struct { AmazonProvidedIpv6CidrBlock *bool `locationName:"amazonProvidedIpv6CidrBlock" type:"boolean"` // The IPv4 network range for the VPC, in CIDR notation. For example, 10.0.0.0/16. + // We modify the specified CIDR block to its canonical form; for example, if + // you specify 100.68.0.18/18, we modify it to 100.68.0.0/18. // // CidrBlock is a required field CidrBlock *string `type:"string" required:"true"` @@ -48900,11 +53556,23 @@ type CreateVpcInput struct { // Default: default InstanceTenancy *string `locationName:"instanceTenancy" type:"string" enum:"Tenancy"` + // The IPv6 CIDR block from the IPv6 address pool. You must also specify Ipv6Pool + // in the request. + // + // To let Amazon choose the IPv6 CIDR block for you, omit this parameter. + Ipv6CidrBlock *string `type:"string"` + // The name of the location from which we advertise the IPV6 CIDR block. Use // this parameter to limit the address to this location. // // You must set AmazonProvidedIpv6CidrBlock to true to use this parameter. 
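// A minimal usage sketch (editorial, not part of the generated file): creating
// a VPC whose IPv6 block comes from a BYOIP pool via the new Ipv6Pool and
// Ipv6CidrBlock fields above. The IDs and the /56 are placeholders; the pool
// must already be provisioned:
//
//	out, err := svc.CreateVpc(&ec2.CreateVpcInput{
//		CidrBlock:     aws.String("10.0.0.0/16"),
//		Ipv6Pool:      aws.String("ipv6pool-ec2-0123456789abcdef0"),
//		Ipv6CidrBlock: aws.String("2001:db8:1234:1a00::/56"),
//	})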
Ipv6CidrBlockNetworkBorderGroup *string `type:"string"` + + // The ID of an IPv6 address pool from which to allocate the IPv6 CIDR block. + Ipv6Pool *string `type:"string"` + + // The tags to assign to the VPC. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` } // String returns the string representation @@ -48954,12 +53622,30 @@ func (s *CreateVpcInput) SetInstanceTenancy(v string) *CreateVpcInput { return s } +// SetIpv6CidrBlock sets the Ipv6CidrBlock field's value. +func (s *CreateVpcInput) SetIpv6CidrBlock(v string) *CreateVpcInput { + s.Ipv6CidrBlock = &v + return s +} + // SetIpv6CidrBlockNetworkBorderGroup sets the Ipv6CidrBlockNetworkBorderGroup field's value. func (s *CreateVpcInput) SetIpv6CidrBlockNetworkBorderGroup(v string) *CreateVpcInput { s.Ipv6CidrBlockNetworkBorderGroup = &v return s } +// SetIpv6Pool sets the Ipv6Pool field's value. +func (s *CreateVpcInput) SetIpv6Pool(v string) *CreateVpcInput { + s.Ipv6Pool = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateVpcInput) SetTagSpecifications(v []*TagSpecification) *CreateVpcInput { + s.TagSpecifications = v + return s +} + type CreateVpcOutput struct { _ struct{} `type:"structure"` @@ -49007,6 +53693,9 @@ type CreateVpcPeeringConnectionInput struct { // You must specify this parameter in the request. PeerVpcId *string `locationName:"peerVpcId" type:"string"` + // The tags to assign to the peering connection. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + // The ID of the requester VPC. You must specify this parameter in the request. VpcId *string `locationName:"vpcId" type:"string"` } @@ -49045,6 +53734,12 @@ func (s *CreateVpcPeeringConnectionInput) SetPeerVpcId(v string) *CreateVpcPeeri return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateVpcPeeringConnectionInput) SetTagSpecifications(v []*TagSpecification) *CreateVpcPeeringConnectionInput { + s.TagSpecifications = v + return s +} + // SetVpcId sets the VpcId field's value. func (s *CreateVpcPeeringConnectionInput) SetVpcId(v string) *CreateVpcPeeringConnectionInput { s.VpcId = &v @@ -49092,6 +53787,9 @@ type CreateVpnConnectionInput struct { // The options for the VPN connection. Options *VpnConnectionOptionsSpecification `locationName:"options" type:"structure"` + // The tags to apply to the VPN connection. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + // The ID of the transit gateway. If you specify a transit gateway, you cannot // specify a virtual private gateway. TransitGatewayId *string `type:"string"` @@ -49150,6 +53848,12 @@ func (s *CreateVpnConnectionInput) SetOptions(v *VpnConnectionOptionsSpecificati return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateVpnConnectionInput) SetTagSpecifications(v []*TagSpecification) *CreateVpnConnectionInput { + s.TagSpecifications = v + return s +} + // SetTransitGatewayId sets the TransitGatewayId field's value. func (s *CreateVpnConnectionInput) SetTransitGatewayId(v string) *CreateVpnConnectionInput { s.TransitGatewayId = &v @@ -49279,6 +53983,9 @@ type CreateVpnGatewayInput struct { // it is UnauthorizedOperation. DryRun *bool `locationName:"dryRun" type:"boolean"` + // The tags to apply to the virtual private gateway. 
+ TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + // The type of VPN connection this virtual private gateway supports. // // Type is a required field @@ -49326,6 +54033,12 @@ func (s *CreateVpnGatewayInput) SetDryRun(v bool) *CreateVpnGatewayInput { return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateVpnGatewayInput) SetTagSpecifications(v []*TagSpecification) *CreateVpnGatewayInput { + s.TagSpecifications = v + return s +} + // SetType sets the Type field's value. func (s *CreateVpnGatewayInput) SetType(v string) *CreateVpnGatewayInput { s.Type = &v @@ -49356,12 +54069,12 @@ func (s *CreateVpnGatewayOutput) SetVpnGateway(v *VpnGateway) *CreateVpnGatewayO return s } -// Describes the credit option for CPU usage of a T2 or T3 instance. +// Describes the credit option for CPU usage of a T2, T3, or T3a instance. type CreditSpecification struct { _ struct{} `type:"structure"` - // The credit option for CPU usage of a T2 or T3 instance. Valid values are - // standard and unlimited. + // The credit option for CPU usage of a T2, T3, or T3a instance. Valid values + // are standard and unlimited. CpuCredits *string `locationName:"cpuCredits" type:"string"` } @@ -49381,12 +54094,12 @@ func (s *CreditSpecification) SetCpuCredits(v string) *CreditSpecification { return s } -// The credit option for CPU usage of a T2 or T3 instance. +// The credit option for CPU usage of a T2, T3, or T3a instance. type CreditSpecificationRequest struct { _ struct{} `type:"structure"` - // The credit option for CPU usage of a T2 or T3 instance. Valid values are - // standard and unlimited. + // The credit option for CPU usage of a T2, T3, or T3a instance. Valid values + // are standard and unlimited. // // CpuCredits is a required field CpuCredits *string `type:"string" required:"true"` @@ -49510,6 +54223,79 @@ func (s *CustomerGateway) SetType(v string) *CustomerGateway { return s } +type DeleteCarrierGatewayInput struct { + _ struct{} `type:"structure"` + + // The ID of the carrier gateway. + // + // CarrierGatewayId is a required field + CarrierGatewayId *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` +} + +// String returns the string representation +func (s DeleteCarrierGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCarrierGatewayInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteCarrierGatewayInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteCarrierGatewayInput"} + if s.CarrierGatewayId == nil { + invalidParams.Add(request.NewErrParamRequired("CarrierGatewayId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCarrierGatewayId sets the CarrierGatewayId field's value. +func (s *DeleteCarrierGatewayInput) SetCarrierGatewayId(v string) *DeleteCarrierGatewayInput { + s.CarrierGatewayId = &v + return s +} + +// SetDryRun sets the DryRun field's value. 
+func (s *DeleteCarrierGatewayInput) SetDryRun(v bool) *DeleteCarrierGatewayInput { + s.DryRun = &v + return s +} + +type DeleteCarrierGatewayOutput struct { + _ struct{} `type:"structure"` + + // Information about the carrier gateway. + CarrierGateway *CarrierGateway `locationName:"carrierGateway" type:"structure"` +} + +// String returns the string representation +func (s DeleteCarrierGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCarrierGatewayOutput) GoString() string { + return s.String() +} + +// SetCarrierGateway sets the CarrierGateway field's value. +func (s *DeleteCarrierGatewayOutput) SetCarrierGateway(v *CarrierGateway) *DeleteCarrierGatewayOutput { + s.CarrierGateway = v + return s +} + type DeleteClientVpnEndpointInput struct { _ struct{} `type:"structure"` @@ -50308,9 +55094,10 @@ type DeleteKeyPairInput struct { DryRun *bool `locationName:"dryRun" type:"boolean"` // The name of the key pair. - // - // KeyName is a required field - KeyName *string `type:"string" required:"true"` + KeyName *string `type:"string"` + + // The ID of the key pair. + KeyPairId *string `type:"string"` } // String returns the string representation @@ -50323,19 +55110,6 @@ func (s DeleteKeyPairInput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteKeyPairInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteKeyPairInput"} - if s.KeyName == nil { - invalidParams.Add(request.NewErrParamRequired("KeyName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - // SetDryRun sets the DryRun field's value. func (s *DeleteKeyPairInput) SetDryRun(v bool) *DeleteKeyPairInput { s.DryRun = &v @@ -50348,6 +55122,12 @@ func (s *DeleteKeyPairInput) SetKeyName(v string) *DeleteKeyPairInput { return s } +// SetKeyPairId sets the KeyPairId field's value. +func (s *DeleteKeyPairInput) SetKeyPairId(v string) *DeleteKeyPairInput { + s.KeyPairId = &v + return s +} + type DeleteKeyPairOutput struct { _ struct{} `type:"structure"` } @@ -50802,9 +55582,88 @@ func (s *DeleteLocalGatewayRouteTableVpcAssociationOutput) SetLocalGatewayRouteT return s } +type DeleteManagedPrefixListInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the prefix list. + // + // PrefixListId is a required field + PrefixListId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteManagedPrefixListInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteManagedPrefixListInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteManagedPrefixListInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteManagedPrefixListInput"} + if s.PrefixListId == nil { + invalidParams.Add(request.NewErrParamRequired("PrefixListId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. 
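// A minimal usage sketch (editorial, not part of the generated file): KeyName
// is no longer required on DeleteKeyPairInput (its client-side Validate check
// was removed above), so a key pair can now be deleted by ID alone:
//
//	_, err := svc.DeleteKeyPair(&ec2.DeleteKeyPairInput{
//		KeyPairId: aws.String("key-0123456789abcdef0"),
//	})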
+func (s *DeleteManagedPrefixListInput) SetDryRun(v bool) *DeleteManagedPrefixListInput { + s.DryRun = &v + return s +} + +// SetPrefixListId sets the PrefixListId field's value. +func (s *DeleteManagedPrefixListInput) SetPrefixListId(v string) *DeleteManagedPrefixListInput { + s.PrefixListId = &v + return s +} + +type DeleteManagedPrefixListOutput struct { + _ struct{} `type:"structure"` + + // Information about the prefix list. + PrefixList *ManagedPrefixList `locationName:"prefixList" type:"structure"` +} + +// String returns the string representation +func (s DeleteManagedPrefixListOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteManagedPrefixListOutput) GoString() string { + return s.String() +} + +// SetPrefixList sets the PrefixList field's value. +func (s *DeleteManagedPrefixListOutput) SetPrefixList(v *ManagedPrefixList) *DeleteManagedPrefixListOutput { + s.PrefixList = v + return s +} + type DeleteNatGatewayInput struct { _ struct{} `type:"structure"` + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + // The ID of the NAT gateway. // // NatGatewayId is a required field @@ -50834,6 +55693,12 @@ func (s *DeleteNatGatewayInput) Validate() error { return nil } +// SetDryRun sets the DryRun field's value. +func (s *DeleteNatGatewayInput) SetDryRun(v bool) *DeleteNatGatewayInput { + s.DryRun = &v + return s +} + // SetNatGatewayId sets the NatGatewayId field's value. func (s *DeleteNatGatewayInput) SetNatGatewayId(v string) *DeleteNatGatewayInput { s.NatGatewayId = &v @@ -51363,6 +56228,9 @@ type DeleteRouteInput struct { // for the route exactly. DestinationIpv6CidrBlock *string `locationName:"destinationIpv6CidrBlock" type:"string"` + // The ID of the prefix list for the route. + DestinationPrefixListId *string `type:"string"` + // Checks whether you have the required permissions for the action, without // actually making the request, and provides an error response. If you have // the required permissions, the error response is DryRunOperation. Otherwise, @@ -51410,6 +56278,12 @@ func (s *DeleteRouteInput) SetDestinationIpv6CidrBlock(v string) *DeleteRouteInp return s } +// SetDestinationPrefixListId sets the DestinationPrefixListId field's value. +func (s *DeleteRouteInput) SetDestinationPrefixListId(v string) *DeleteRouteInput { + s.DestinationPrefixListId = &v + return s +} + // SetDryRun sets the DryRun field's value. func (s *DeleteRouteInput) SetDryRun(v bool) *DeleteRouteInput { s.DryRun = &v @@ -52322,6 +57196,93 @@ func (s *DeleteTransitGatewayPeeringAttachmentOutput) SetTransitGatewayPeeringAt return s } +type DeleteTransitGatewayPrefixListReferenceInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the prefix list. + // + // PrefixListId is a required field + PrefixListId *string `type:"string" required:"true"` + + // The ID of the route table. 
+ // + // TransitGatewayRouteTableId is a required field + TransitGatewayRouteTableId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteTransitGatewayPrefixListReferenceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTransitGatewayPrefixListReferenceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteTransitGatewayPrefixListReferenceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteTransitGatewayPrefixListReferenceInput"} + if s.PrefixListId == nil { + invalidParams.Add(request.NewErrParamRequired("PrefixListId")) + } + if s.TransitGatewayRouteTableId == nil { + invalidParams.Add(request.NewErrParamRequired("TransitGatewayRouteTableId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteTransitGatewayPrefixListReferenceInput) SetDryRun(v bool) *DeleteTransitGatewayPrefixListReferenceInput { + s.DryRun = &v + return s +} + +// SetPrefixListId sets the PrefixListId field's value. +func (s *DeleteTransitGatewayPrefixListReferenceInput) SetPrefixListId(v string) *DeleteTransitGatewayPrefixListReferenceInput { + s.PrefixListId = &v + return s +} + +// SetTransitGatewayRouteTableId sets the TransitGatewayRouteTableId field's value. +func (s *DeleteTransitGatewayPrefixListReferenceInput) SetTransitGatewayRouteTableId(v string) *DeleteTransitGatewayPrefixListReferenceInput { + s.TransitGatewayRouteTableId = &v + return s +} + +type DeleteTransitGatewayPrefixListReferenceOutput struct { + _ struct{} `type:"structure"` + + // Information about the deleted prefix list reference. + TransitGatewayPrefixListReference *TransitGatewayPrefixListReference `locationName:"transitGatewayPrefixListReference" type:"structure"` +} + +// String returns the string representation +func (s DeleteTransitGatewayPrefixListReferenceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTransitGatewayPrefixListReferenceOutput) GoString() string { + return s.String() +} + +// SetTransitGatewayPrefixListReference sets the TransitGatewayPrefixListReference field's value. +func (s *DeleteTransitGatewayPrefixListReferenceOutput) SetTransitGatewayPrefixListReference(v *TransitGatewayPrefixListReference) *DeleteTransitGatewayPrefixListReferenceOutput { + s.TransitGatewayPrefixListReference = v + return s +} + type DeleteTransitGatewayRouteInput struct { _ struct{} `type:"structure"` @@ -53177,8 +58138,8 @@ func (s DeleteVpnGatewayOutput) GoString() string { type DeprovisionByoipCidrInput struct { _ struct{} `type:"structure"` - // The public IPv4 address range, in CIDR notation. The prefix must be the same - // prefix that you specified when you provisioned the address range. + // The address range, in CIDR notation. The prefix must be the same prefix that + // you specified when you provisioned the address range. 
	//
	// Cidr is a required field
	Cidr *string `type:"string" required:"true"`
@@ -53313,6 +58274,101 @@ func (s DeregisterImageOutput) GoString() string {
	return s.String()
}

+type DeregisterInstanceEventNotificationAttributesInput struct {
+	_ struct{} `type:"structure"`
+
+	// Checks whether you have the required permissions for the action, without
+	// actually making the request, and provides an error response. If you have
+	// the required permissions, the error response is DryRunOperation. Otherwise,
+	// it is UnauthorizedOperation.
+	DryRun *bool `type:"boolean"`
+
+	// Information about the tag keys to deregister.
+	InstanceTagAttribute *DeregisterInstanceTagAttributeRequest `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeregisterInstanceEventNotificationAttributesInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeregisterInstanceEventNotificationAttributesInput) GoString() string {
+	return s.String()
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *DeregisterInstanceEventNotificationAttributesInput) SetDryRun(v bool) *DeregisterInstanceEventNotificationAttributesInput {
+	s.DryRun = &v
+	return s
+}
+
+// SetInstanceTagAttribute sets the InstanceTagAttribute field's value.
+func (s *DeregisterInstanceEventNotificationAttributesInput) SetInstanceTagAttribute(v *DeregisterInstanceTagAttributeRequest) *DeregisterInstanceEventNotificationAttributesInput {
+	s.InstanceTagAttribute = v
+	return s
+}
+
+type DeregisterInstanceEventNotificationAttributesOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The resulting set of tag keys.
+	InstanceTagAttribute *InstanceTagNotificationAttribute `locationName:"instanceTagAttribute" type:"structure"`
+}
+
+// String returns the string representation
+func (s DeregisterInstanceEventNotificationAttributesOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeregisterInstanceEventNotificationAttributesOutput) GoString() string {
+	return s.String()
+}
+
+// SetInstanceTagAttribute sets the InstanceTagAttribute field's value.
+func (s *DeregisterInstanceEventNotificationAttributesOutput) SetInstanceTagAttribute(v *InstanceTagNotificationAttribute) *DeregisterInstanceEventNotificationAttributesOutput {
+	s.InstanceTagAttribute = v
+	return s
+}
+
+// Information about the tag keys to deregister for the current Region. You
+// can either specify individual tag keys or deregister all tag keys in the
+// current Region. You must specify either IncludeAllTagsOfInstance or InstanceTagKeys
+// in the request.
+type DeregisterInstanceTagAttributeRequest struct {
+	_ struct{} `type:"structure"`
+
+	// Indicates whether to deregister all tag keys in the current Region. Specify
+	// true to deregister all tag keys.
+	IncludeAllTagsOfInstance *bool `type:"boolean"`
+
+	// Information about the tag keys to deregister.
+	InstanceTagKeys []*string `locationName:"InstanceTagKey" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s DeregisterInstanceTagAttributeRequest) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeregisterInstanceTagAttributeRequest) GoString() string {
+	return s.String()
+}
+
+// SetIncludeAllTagsOfInstance sets the IncludeAllTagsOfInstance field's value.
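// A minimal usage sketch (editorial, not part of the generated file): removing
// two tag keys from instance event notifications. Per the request doc above,
// specify either InstanceTagKeys or IncludeAllTagsOfInstance, not both:
//
//	out, err := svc.DeregisterInstanceEventNotificationAttributes(
//		&ec2.DeregisterInstanceEventNotificationAttributesInput{
//			InstanceTagAttribute: &ec2.DeregisterInstanceTagAttributeRequest{
//				InstanceTagKeys: []*string{aws.String("env"), aws.String("team")},
//			},
//		})
//	// out.InstanceTagAttribute reflects the resulting registered set.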
+func (s *DeregisterInstanceTagAttributeRequest) SetIncludeAllTagsOfInstance(v bool) *DeregisterInstanceTagAttributeRequest {
+	s.IncludeAllTagsOfInstance = &v
+	return s
+}
+
+// SetInstanceTagKeys sets the InstanceTagKeys field's value.
+func (s *DeregisterInstanceTagAttributeRequest) SetInstanceTagKeys(v []*string) *DeregisterInstanceTagAttributeRequest {
+	s.InstanceTagKeys = v
+	return s
+}
+
type DeregisterTransitGatewayMulticastGroupMembersInput struct {
	_ struct{} `type:"structure"`

@@ -53547,7 +58603,8 @@ type DescribeAddressesInput struct {
	// * instance-id - The ID of the instance the address is associated with,
	// if any.
	//
-	// * network-border-group - The location from where the IP address is advertised.
+	// * network-border-group - A unique set of Availability Zones, Local Zones,
+	// or Wavelength Zones from where AWS advertises IP addresses.
	//
	// * network-interface-id - [EC2-VPC] The ID of the network interface that
	// the address is associated with, if any.
@@ -53557,7 +58614,7 @@ type DescribeAddressesInput struct {
	// * private-ip-address - [EC2-VPC] The private IP address associated with
	// the Elastic IP address.
	//
-	// * public-ip - The Elastic IP address.
+	// * public-ip - The Elastic IP address, or the carrier IP address.
	//
	// * tag: - The key/value combination of a tag assigned to the resource.
	// Use the tag key in the filter name and the tag value as the filter value.
@@ -53696,8 +58753,8 @@ func (s *DescribeAggregateIdFormatOutput) SetUseLongIdsAggregated(v bool) *Descr
type DescribeAvailabilityZonesInput struct {
	_ struct{} `type:"structure"`

-	// Include all Availability Zones and Local Zones regardless of your opt in
-	// status.
+	// Include all Availability Zones, Local Zones, and Wavelength Zones regardless
+	// of your opt-in status.
	//
	// If you do not use this parameter, the results include only the zones for
	// the Regions where you have chosen the option to opt in.
@@ -53713,29 +58770,41 @@ type DescribeAvailabilityZonesInput struct {
	//
	// * group-name - For Availability Zones, use the Region name. For Local
	// Zones, use the name of the group associated with the Local Zone (for example,
-	// us-west-2-lax-1).
+	// us-west-2-lax-1). For Wavelength Zones, use the name of the group associated
+	// with the Wavelength Zone (for example, us-east-1-wl1-bos-wlz-1).
+	//
+	// * message - The Zone message.
	//
-	// * message - The Availability Zone or Local Zone message.
+	// * opt-in-status - The opt-in status (opted-in | not-opted-in | opt-in-not-required).
	//
-	// * opt-in-status - The opt in status (opted-in, and not-opted-in | opt-in-not-required).
+	// * parent-zoneID - The ID of the zone that handles some of the Local Zone
+	// and Wavelength Zone control plane operations, such as API calls.
	//
-	// * region-name - The name of the Region for the Availability Zone or Local
-	// Zone (for example, us-east-1).
+	// * parent-zoneName - The name of the zone that handles some of the Local
+	// Zone and Wavelength Zone control plane operations, such as API calls.
	//
-	// * state - The state of the Availability Zone or Local Zone (available
-	// | information | impaired | unavailable).
+	// * region-name - The name of the Region for the Zone (for example, us-east-1).
	//
-	// * zone-id - The ID of the Availability Zone (for example, use1-az1) or
-	// the Local Zone (for example, use usw2-lax1-az1).
+	// * state - The state of the Availability Zone, the Local Zone, or the Wavelength
+	// Zone (available | information | impaired | unavailable).
@@ -53696,8 +58753,8 @@ func (s *DescribeAggregateIdFormatOutput) SetUseLongIdsAggregated(v bool) *Descr type DescribeAvailabilityZonesInput struct { _ struct{} `type:"structure"` - // Include all Availability Zones and Local Zones regardless of your opt in - // status. + // Include all Availability Zones, Local Zones, and Wavelength Zones regardless + // of your opt-in status. // // If you do not use this parameter, the results include only the zones for // the Regions where you have chosen the option to opt in. @@ -53713,29 +58770,41 @@ type DescribeAvailabilityZonesInput struct { // // * group-name - For Availability Zones, use the Region name. For Local // Zones, use the name of the group associated with the Local Zone (for example, - // us-west-2-lax-1). + // us-west-2-lax-1). For Wavelength Zones, use the name of the group associated + // with the Wavelength Zone (for example, us-east-1-wl1-bos-wlz-1). + // + // * message - The Zone message. // - // * message - The Availability Zone or Local Zone message. + // * opt-in-status - The opt-in status (opted-in | not-opted-in | opt-in-not-required). // - // * opt-in-status - The opt in status (opted-in, and not-opted-in | opt-in-not-required). + // * parent-zoneID - The ID of the zone that handles some of the Local Zone + // and Wavelength Zone control plane operations, such as API calls. // - // * region-name - The name of the Region for the Availability Zone or Local - // Zone (for example, us-east-1). + // * parent-zoneName - The name of the zone that handles some of the Local + // Zone and Wavelength Zone control plane operations, such as API calls. // - // * state - The state of the Availability Zone or Local Zone (available - // | information | impaired | unavailable). + // * region-name - The name of the Region for the Zone (for example, us-east-1). // - // * zone-id - The ID of the Availability Zone (for example, use1-az1) or - // the Local Zone (for example, use usw2-lax1-az1). + // * state - The state of the Availability Zone, the Local Zone, or the Wavelength + // Zone (available | information | impaired | unavailable). // - // * zone-name - The name of the Availability Zone (for example, us-east-1a) - // or the Local Zone (for example, use us-west-2-lax-1a). + // * zone-id - The ID of the Availability Zone (for example, use1-az1), the + // Local Zone (for example, usw2-lax1-az1), or the Wavelength Zone (for example, + // us-east-1-wl1-bos-wlz-1). + // + // * zone-name - The name of the Availability Zone (for example, us-east-1a), + // the Local Zone (for example, us-west-2-lax-1a), or the Wavelength Zone + // (for example, us-east-1-wl1-bos-wlz-1). + // + // * zone-type - The type of zone, for example, local-zone. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` - // The IDs of the Availability Zones and Local Zones. + // The IDs of the Availability Zones, Local Zones, and Wavelength Zones. ZoneIds []*string `locationName:"ZoneId" locationNameList:"ZoneId" type:"list"` - // The names of the Availability Zones and Local Zones. + // The names of the Availability Zones, Local Zones, and Wavelength Zones. ZoneNames []*string `locationName:"ZoneName" locationNameList:"ZoneName" type:"list"` } @@ -53782,7 +58851,7 @@ func (s *DescribeAvailabilityZonesInput) SetZoneNames(v []*string) *DescribeAvai type DescribeAvailabilityZonesOutput struct { _ struct{} `type:"structure"` - // Information about the Availability Zones and Local Zones. + // Information about the Availability Zones, Local Zones, and Wavelength Zones. AvailabilityZones []*AvailabilityZone `locationName:"availabilityZoneInfo" locationNameList:"item" type:"list"` } @@ -54002,14 +59071,66 @@ type DescribeCapacityReservationsInput struct { DryRun *bool `type:"boolean"` // One or more filters. + // + // * instance-type - The type of instance for which the Capacity Reservation + // reserves capacity. + // + // * owner-id - The ID of the AWS account that owns the Capacity Reservation. + // + // * availability-zone-id - The Availability Zone ID of the Capacity Reservation. + // + // * instance-platform - The type of operating system for which the Capacity + // Reservation reserves capacity. + // + // * availability-zone - The Availability Zone of the Capacity Reservation. + // + // * tenancy - Indicates the tenancy of the Capacity Reservation. A Capacity + // Reservation can have one of the following tenancy settings: default - + // The Capacity Reservation is created on hardware that is shared with other + // AWS accounts. dedicated - The Capacity Reservation is created on single-tenant + // hardware that is dedicated to a single AWS account. + // + // * state - The current state of the Capacity Reservation. A Capacity Reservation + // can be in one of the following states: active - The Capacity Reservation + // is active and the capacity is available for your use. expired - The Capacity + // Reservation expired automatically at the date and time specified in your + // request. The reserved capacity is no longer available for your use. cancelled + // - The Capacity Reservation was manually cancelled. The reserved capacity + // is no longer available for your use. pending - The Capacity Reservation + // request was successful but the capacity provisioning is still pending. + // failed - The Capacity Reservation request has failed. A request might + // fail due to invalid request parameters, capacity constraints, or instance + // limit constraints. Failed requests are retained for 60 minutes.
+ // + // * end-date - The date and time at which the Capacity Reservation expires. + // When a Capacity Reservation expires, the reserved capacity is released + // and you can no longer launch instances into it. The Capacity Reservation's + // state changes to expired when it reaches its end date and time. + // + // * end-date-type - Indicates the way in which the Capacity Reservation + // ends. A Capacity Reservation can have one of the following end types: + // unlimited - The Capacity Reservation remains active until you explicitly + // cancel it. limited - The Capacity Reservation expires automatically at + // a specified date and time. + // + // * instance-match-criteria - Indicates the type of instance launches that + // the Capacity Reservation accepts. The options include: open - The Capacity + // Reservation accepts all instances that have matching attributes (instance + // type, platform, and Availability Zone). Instances that have matching attributes + // launch into the Capacity Reservation automatically without specifying + // any additional parameters. targeted - The Capacity Reservation only accepts + // instances that have matching attributes (instance type, platform, and + // Availability Zone), and explicitly target the Capacity Reservation. This + // ensures that only permitted instances can use the reserved capacity. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The maximum number of results to return for the request in a single page. // The remaining results can be seen by sending another request with the returned - // nextToken value. + // nextToken value. This value can be between 5 and 500. If maxResults is given + // a larger value than 500, you receive an error. MaxResults *int64 `min:"1" type:"integer"` - // The token to retrieve the next page of results. + // The token to use to retrieve the next page of results. NextToken *string `type:"string"` } @@ -54099,6 +59220,134 @@ func (s *DescribeCapacityReservationsOutput) SetNextToken(v string) *DescribeCap return s } +type DescribeCarrierGatewaysInput struct { + _ struct{} `type:"structure"` + + // One or more carrier gateway IDs. + CarrierGatewayIds []*string `locationName:"CarrierGatewayId" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. + // + // * carrier-gateway-id - The ID of the carrier gateway. + // + // * state - The state of the carrier gateway (pending | failed | available + // | deleting | deleted). + // + // * owner-id - The AWS account ID of the owner of the carrier gateway. + // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. + // + // * vpc-id - The ID of the VPC associated with the carrier gateway. 
+ Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"5" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeCarrierGatewaysInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCarrierGatewaysInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeCarrierGatewaysInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeCarrierGatewaysInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCarrierGatewayIds sets the CarrierGatewayIds field's value. +func (s *DescribeCarrierGatewaysInput) SetCarrierGatewayIds(v []*string) *DescribeCarrierGatewaysInput { + s.CarrierGatewayIds = v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeCarrierGatewaysInput) SetDryRun(v bool) *DescribeCarrierGatewaysInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeCarrierGatewaysInput) SetFilters(v []*Filter) *DescribeCarrierGatewaysInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeCarrierGatewaysInput) SetMaxResults(v int64) *DescribeCarrierGatewaysInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeCarrierGatewaysInput) SetNextToken(v string) *DescribeCarrierGatewaysInput { + s.NextToken = &v + return s +} + +type DescribeCarrierGatewaysOutput struct { + _ struct{} `type:"structure"` + + // Information about the carrier gateway. + CarrierGateways []*CarrierGateway `locationName:"carrierGatewaySet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeCarrierGatewaysOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCarrierGatewaysOutput) GoString() string { + return s.String() +} + +// SetCarrierGateways sets the CarrierGateways field's value. +func (s *DescribeCarrierGatewaysOutput) SetCarrierGateways(v []*CarrierGateway) *DescribeCarrierGatewaysOutput { + s.CarrierGateways = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeCarrierGatewaysOutput) SetNextToken(v string) *DescribeCarrierGatewaysOutput { + s.NextToken = &v + return s +} + type DescribeClassicLinkInstancesInput struct { _ struct{} `type:"structure"` @@ -54243,6 +59492,14 @@ type DescribeClientVpnAuthorizationRulesInput struct { DryRun *bool `type:"boolean"` // One or more filters. Filter names and values are case-sensitive. + // + // * description - The description of the authorization rule. + // + // * destination-cidr - The CIDR of the network to which the authorization + // rule applies. 
+ // + // * group-id - The ID of the Active Directory group to which the authorization + // rule grants access. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The maximum number of results to return for the request in a single page. @@ -54358,6 +59615,11 @@ type DescribeClientVpnConnectionsInput struct { DryRun *bool `type:"boolean"` // One or more filters. Filter names and values are case-sensitive. + // + // * connection-id - The ID of the connection. + // + // * username - For Active Directory client authentication, the user name + // of the client who established the client connection. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The maximum number of results to return for the request in a single page. @@ -54471,6 +59733,10 @@ type DescribeClientVpnEndpointsInput struct { DryRun *bool `type:"boolean"` // One or more filters. Filter names and values are case-sensitive. + // + // * endpoint-id - The ID of the Client VPN endpoint. + // + // * transport-protocol - The transport protocol (tcp | udp). Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The maximum number of results to return for the request in a single page. @@ -54583,6 +59849,13 @@ type DescribeClientVpnRoutesInput struct { DryRun *bool `type:"boolean"` // One or more filters. Filter names and values are case-sensitive. + // + // * destination-cidr - The CIDR of the route destination. + // + // * origin - How the route was associated with the Client VPN endpoint (associate + // | add-route). + // + // * target-subnet - The ID of the subnet through which traffic is routed. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The maximum number of results to return for the request in a single page. @@ -54701,6 +59974,12 @@ type DescribeClientVpnTargetNetworksInput struct { DryRun *bool `type:"boolean"` // One or more filters. Filter names and values are case-sensitive. + // + // * association-id - The ID of the association. + // + // * target-network-id - The ID of the subnet specified as the target network. + // + // * vpc-id - The ID of the VPC in which the target network is located. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The maximum number of results to return for the request in a single page. @@ -55571,6 +60850,7 @@ type DescribeExportTasksInput struct { // The export task IDs. ExportTaskIds []*string `locationName:"exportTaskId" locationNameList:"ExportTaskId" type:"list"` + // The filters for the export tasks. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` } @@ -55641,10 +60921,11 @@ type DescribeFastSnapshotRestoreSuccessItem struct { // The time at which fast snapshot restores entered the optimizing state. OptimizingTime *time.Time `locationName:"optimizingTime" type:"timestamp"` - // The alias of the snapshot owner. + // The AWS owner alias that enabled fast snapshot restores on the snapshot. + // This is intended for future use. OwnerAlias *string `locationName:"ownerAlias" type:"string"` - // The ID of the AWS account that owns the snapshot. + // The ID of the AWS account that enabled fast snapshot restores on the snapshot. OwnerId *string `locationName:"ownerId" type:"string"` // The ID of the snapshot. @@ -55752,7 +61033,8 @@ type DescribeFastSnapshotRestoresInput struct { // // * availability-zone: The Availability Zone of the snapshot.
// - // * owner-id: The ID of the AWS account that owns the snapshot. + // * owner-id: The ID of the AWS account that enabled fast snapshot restore + // on the snapshot. // // * snapshot-id: The ID of the snapshot. // @@ -56358,7 +61640,7 @@ type DescribeFlowLogsInput struct { // // * log-destination-type - The type of destination to which the flow log // publishes data. Possible destination types include cloud-watch-logs and - // S3. + // s3. // // * flow-log-id - The ID of the flow log. // @@ -56367,6 +61649,16 @@ type DescribeFlowLogsInput struct { // * resource-id - The ID of the VPC, subnet, or network interface. // // * traffic-type - The type of traffic (ACCEPT | REJECT | ALL). + // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. Filter []*Filter `locationNameList:"Filter" type:"list"` // One or more flow log IDs. @@ -57041,8 +62333,7 @@ type DescribeIamInstanceProfileAssociationsInput struct { // // * instance-id - The ID of the instance. // - // * state - The state of the association (associating | associated | disassociating - // | disassociated). + // * state - The state of the association (associating | associated | disassociating). Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The maximum number of results to return in a single call. To retrieve the @@ -57449,7 +62740,7 @@ type DescribeImagesInput struct { // in GiB. // // * block-device-mapping.volume-type - The volume type of the EBS volume - // (gp2 | io1 | st1 | sc1 | standard). + // (gp2 | io1 | io2 | st1 | sc1 | standard). // // * block-device-mapping.encrypted - A Boolean that indicates whether the // EBS volume is encrypted. @@ -57473,11 +62764,13 @@ type DescribeImagesInput struct { // // * name - The name of the AMI (provided during image creation). // - // * owner-alias - String value from an Amazon-maintained list (amazon | - // aws-marketplace | microsoft) of snapshot owners. Not to be confused with - // the user-configured AWS account alias, which is set from the IAM console. + // * owner-alias - The owner alias, from an Amazon-maintained list (amazon + // | aws-marketplace). This is not the user-configured AWS account alias + // set using the IAM console. We recommend that you use the related parameter + // instead of this filter. // - // * owner-id - The AWS account ID of the image owner. + // * owner-id - The AWS account ID of the owner. We recommend that you use + // the related parameter instead of this filter. // // * platform - The platform. To only list Windows-based AMIs, use windows. // @@ -57519,10 +62812,10 @@ type DescribeImagesInput struct { // Default: Describes all images available to you. ImageIds []*string `locationName:"ImageId" locationNameList:"ImageId" type:"list"` - // Filters the images by the owner. Specify an AWS account ID, self (owner is - // the sender of the request), or an AWS owner alias (valid values are amazon - // | aws-marketplace | microsoft). Omitting this option returns all images for - // which you have launch permissions, regardless of ownership. 
+ // Scopes the results to images with the specified owners. You can specify a + // combination of AWS account IDs, self, amazon, and aws-marketplace. If you + // omit this parameter, the results include all images for which you have launch + // permissions, regardless of ownership. Owners []*string `locationName:"Owner" locationNameList:"Owner" type:"list"` } @@ -57866,6 +63159,10 @@ type DescribeInstanceAttributeOutput struct { // Indicates whether enhanced networking with ENA is enabled. EnaSupport *AttributeBooleanValue `locationName:"enaSupport" type:"structure"` + // Indicates whether the instance is enabled for AWS Nitro Enclaves. + EnclaveOptions *EnclaveOptions `locationName:"enclaveOptions" type:"structure"` + // The security groups associated with the instance. Groups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` @@ -57938,6 +63235,12 @@ func (s *DescribeInstanceAttributeOutput) SetEnaSupport(v *AttributeBooleanValue return s } +// SetEnclaveOptions sets the EnclaveOptions field's value. +func (s *DescribeInstanceAttributeOutput) SetEnclaveOptions(v *EnclaveOptions) *DescribeInstanceAttributeOutput { + s.EnclaveOptions = v + return s +} + // SetGroups sets the Groups field's value. func (s *DescribeInstanceAttributeOutput) SetGroups(v []*GroupIdentifier) *DescribeInstanceAttributeOutput { s.Groups = v @@ -58121,6 +63424,55 @@ func (s *DescribeInstanceCreditSpecificationsOutput) SetNextToken(v string) *Des return s } +type DescribeInstanceEventNotificationAttributesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` +} + +// String returns the string representation +func (s DescribeInstanceEventNotificationAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstanceEventNotificationAttributesInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeInstanceEventNotificationAttributesInput) SetDryRun(v bool) *DescribeInstanceEventNotificationAttributesInput { + s.DryRun = &v + return s +} + +type DescribeInstanceEventNotificationAttributesOutput struct { + _ struct{} `type:"structure"` + + // Information about the registered tag keys. + InstanceTagAttribute *InstanceTagNotificationAttribute `locationName:"instanceTagAttribute" type:"structure"` +} + +// String returns the string representation +func (s DescribeInstanceEventNotificationAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstanceEventNotificationAttributesOutput) GoString() string { + return s.String() +} + +// SetInstanceTagAttribute sets the InstanceTagAttribute field's value. +func (s *DescribeInstanceEventNotificationAttributesOutput) SetInstanceTagAttribute(v *InstanceTagNotificationAttribute) *DescribeInstanceEventNotificationAttributesOutput { + s.InstanceTagAttribute = v + return s +} + type DescribeInstanceStatusInput struct { _ struct{} `type:"structure"` @@ -58290,7 +63642,7 @@ type DescribeInstanceTypeOfferingsInput struct { // type is region (default), the location is the Region code (for example, // us-east-2.)
// - // * instance-type - The instance type. + // * instance-type - The instance type. For example, c5.2xlarge. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The location type. @@ -58402,31 +63754,52 @@ type DescribeInstanceTypesInput struct { // One or more filters. Filter names and values are case-sensitive. // - // * auto-recovery-supported - Indicates whether auto recovery is supported. - // (true | false) + // * auto-recovery-supported - Indicates whether auto recovery is supported + // (true | false). // - // * bare-metal - Indicates whether it is a bare metal instance type. (true - // | false) + // * bare-metal - Indicates whether it is a bare metal instance type (true + // | false). // // * burstable-performance-supported - Indicates whether it is a burstable - // performance instance type. (true | false) + // performance instance type (true | false). // // * current-generation - Indicates whether this instance type is the latest - // generation instance type of an instance family. (true | false) + // generation instance type of an instance family (true | false). + // + // * ebs-info.ebs-optimized-info.baseline-bandwidth-in-mbps - The baseline + // bandwidth performance for an EBS-optimized instance type, in Mbps. + // + // * ebs-info.ebs-optimized-info.baseline-iops - The baseline input/output + // storage operations per second for an EBS-optimized instance type. + // + // * ebs-info.ebs-optimized-info.baseline-throughput-in-mbps - The baseline + // throughput performance for an EBS-optimized instance type, in MB/s. + // + // * ebs-info.ebs-optimized-info.maximum-bandwidth-in-mbps - The maximum + // bandwidth performance for an EBS-optimized instance type, in Mbps. + // + // * ebs-info.ebs-optimized-info.maximum-iops - The maximum input/output + // storage operations per second for an EBS-optimized instance type. + // + // * ebs-info.ebs-optimized-info.maximum-throughput-in-mbps - The maximum + // throughput performance for an EBS-optimized instance type, in MB/s. // // * ebs-info.ebs-optimized-support - Indicates whether the instance type - // is EBS-optimized. (true | false) + // is EBS-optimized (supported | unsupported | default). // - // * ebs-info.encryption-support - Indicates whether EBS encryption is supported. - // (true | false) + // * ebs-info.encryption-support - Indicates whether EBS encryption is supported + // (supported | unsupported). + // + // * ebs-info.nvme-support - Indicates whether non-volatile memory express + // (NVMe) is supported for EBS volumes (required | supported | unsupported). // // * free-tier-eligible - Indicates whether the instance type is eligible - // to use in the free tier. (true | false) + // to use in the free tier (true | false). // - // * hibernation-supported - Indicates whether On-Demand hibernation is supported. - // (true | false) + // * hibernation-supported - Indicates whether On-Demand hibernation is supported + // (true | false). // - // * hypervisor - The hypervisor used. (nitro | xen) + // * hypervisor - The hypervisor (nitro | xen). // // * instance-storage-info.disk.count - The number of local disks. // @@ -58434,18 +63807,27 @@ type DescribeInstanceTypesInput struct { // storage disk, in GB. // // * instance-storage-info.disk.type - The storage technology for the local - // instance storage disks. (hdd | ssd) + // instance storage disks (hdd | ssd). 
+ // + // * instance-storage-info.nvme-support - Indicates whether non-volatile + // memory express (NVMe) is supported for instance store (required | supported + // | unsupported). // // * instance-storage-info.total-size-in-gb - The total amount of storage // available from all local instance storage, in GB. // // * instance-storage-supported - Indicates whether the instance type has - // local instance storage. (true | false) + // local instance storage (true | false). + // + // * instance-type - The instance type (for example, c5.2xlarge or c5*). // // * memory-info.size-in-mib - The memory size. // + // * network-info.efa-supported - Indicates whether the instance type supports + // Elastic Fabric Adapter (EFA) (true | false). + // // * network-info.ena-support - Indicates whether Elastic Network Adapter - // (ENA) is supported or required. (required | supported | unsupported) + // (ENA) is supported or required (required | supported | unsupported). // // * network-info.ipv4-addresses-per-interface - The maximum number of private // IPv4 addresses per network interface. @@ -58454,24 +63836,40 @@ type DescribeInstanceTypesInput struct { // IPv6 addresses per network interface. // // * network-info.ipv6-supported - Indicates whether the instance type supports - // IPv6. (true | false) + // IPv6 (true | false). // // * network-info.maximum-network-interfaces - The maximum number of network // interfaces per instance. // - // * network-info.network-performance - Describes the network performance. + // * network-info.network-performance - The network performance (for example, + // "25 Gigabit"). + // + // * processor-info.supported-architecture - The CPU architecture (arm64 + // | i386 | x86_64). // // * processor-info.sustained-clock-speed-in-ghz - The CPU clock speed, in // GHz. // + // * supported-root-device-type - The root device type (ebs | instance-store). + // + // * supported-usage-class - The usage class (on-demand | spot). + // + // * supported-virtualization-type - The virtualization type (hvm | paravirtual). + // // * vcpu-info.default-cores - The default number of cores for the instance // type. // // * vcpu-info.default-threads-per-core - The default number of threads per - // cores for the instance type. + // core for the instance type. // // * vcpu-info.default-vcpus - The default number of vCPUs for the instance // type. + // + // * vcpu-info.valid-cores - The number of cores that can be configured for + // the instance type. + // + // * vcpu-info.valid-threads-per-core - The number of threads per core that + // can be configured for the instance type. For example, "1" or "1,2". Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The instance types. For more information, see Instance Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) @@ -58624,7 +64022,8 @@ type DescribeInstancesInput struct { // * host-id - The ID of the Dedicated Host on which the instance is running, // if applicable. // - // * hypervisor - The hypervisor type of the instance (ovm | xen). + // * hypervisor - The hypervisor type of the instance (ovm | xen). The value + // xen is used for both Xen and Nitro hypervisors. // // * iam-instance-profile.arn - The instance profile associated with the // instance. Specified as an ARN.
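To make the filter list above concrete, here is a minimal sketch that pages through DescribeInstanceTypes with a manual nextToken loop. The filter names come from the documentation above; the chosen values and the default session are assumptions for illustration.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	// Combine two of the documented filters; the values are illustrative.
	input := &ec2.DescribeInstanceTypesInput{
		Filters: []*ec2.Filter{
			{Name: aws.String("current-generation"), Values: []*string{aws.String("true")}},
			{Name: aws.String("supported-usage-class"), Values: []*string{aws.String("spot")}},
		},
	}

	// Page through the results manually via NextToken.
	for {
		out, err := svc.DescribeInstanceTypes(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, info := range out.InstanceTypes {
			fmt.Println(aws.StringValue(info.InstanceType))
		}
		if out.NextToken == nil {
			break
		}
		input.NextToken = out.NextToken
	}
}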
@@ -59049,6 +64448,125 @@ func (s *DescribeInternetGatewaysOutput) SetNextToken(v string) *DescribeInterne return s } +type DescribeIpv6PoolsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. + // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"1" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` + + // The IDs of the IPv6 address pools. + PoolIds []*string `locationName:"PoolId" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeIpv6PoolsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeIpv6PoolsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeIpv6PoolsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeIpv6PoolsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeIpv6PoolsInput) SetDryRun(v bool) *DescribeIpv6PoolsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeIpv6PoolsInput) SetFilters(v []*Filter) *DescribeIpv6PoolsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeIpv6PoolsInput) SetMaxResults(v int64) *DescribeIpv6PoolsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeIpv6PoolsInput) SetNextToken(v string) *DescribeIpv6PoolsInput { + s.NextToken = &v + return s +} + +// SetPoolIds sets the PoolIds field's value. +func (s *DescribeIpv6PoolsInput) SetPoolIds(v []*string) *DescribeIpv6PoolsInput { + s.PoolIds = v + return s +} + +type DescribeIpv6PoolsOutput struct { + _ struct{} `type:"structure"` + + // Information about the IPv6 address pools. + Ipv6Pools []*Ipv6Pool `locationName:"ipv6PoolSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. 
+ NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeIpv6PoolsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeIpv6PoolsOutput) GoString() string { + return s.String() +} + +// SetIpv6Pools sets the Ipv6Pools field's value. +func (s *DescribeIpv6PoolsOutput) SetIpv6Pools(v []*Ipv6Pool) *DescribeIpv6PoolsOutput { + s.Ipv6Pools = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeIpv6PoolsOutput) SetNextToken(v string) *DescribeIpv6PoolsOutput { + s.NextToken = &v + return s +} + type DescribeKeyPairsInput struct { _ struct{} `type:"structure"` @@ -59060,9 +64578,21 @@ type DescribeKeyPairsInput struct { // The filters. // + // * key-pair-id - The ID of the key pair. + // // * fingerprint - The fingerprint of the key pair. // // * key-name - The name of the key pair. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. + // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The key pair names. @@ -59161,12 +64691,16 @@ type DescribeLaunchTemplateVersionsInput struct { // * ram-disk-id - The RAM disk ID. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` - // The ID of the launch template. You must specify either the launch template - // ID or launch template name in the request. + // The ID of the launch template. To describe one or more versions of a specified + // launch template, you must specify either the launch template ID or the launch + // template name in the request. To describe all the latest or default launch + // template versions in your account, you must omit this parameter. LaunchTemplateId *string `type:"string"` - // The name of the launch template. You must specify either the launch template - // ID or launch template name in the request. + // The name of the launch template. To describe one or more versions of a specified + // launch template, you must specify either the launch template ID or the launch + // template name in the request. To describe all the latest or default launch + // template versions in your account, you must omit this parameter. LaunchTemplateName *string `min:"3" type:"string"` // The maximum number of results to return in a single call. To retrieve the @@ -59183,7 +64717,18 @@ type DescribeLaunchTemplateVersionsInput struct { // The token to request the next page of results. NextToken *string `type:"string"` - // One or more versions of the launch template. + // One or more versions of the launch template. Valid values depend on whether + // you are describing a specified launch template (by ID or name) or all launch + // templates in your account. + // + // To describe one or more versions of a specified launch template, valid values + // are $Latest, $Default, and numbers. + // + // To describe all launch templates in your account that are defined as the + // latest version, the valid value is $Latest. 
To describe all launch templates + // in your account that are defined as the default version, the valid value + // is $Default. You can specify $Latest and $Default in the same call. You cannot + // specify numbers. Versions []*string `locationName:"LaunchTemplateVersion" locationNameList:"item" type:"list"` } @@ -59440,6 +64985,18 @@ type DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsInput struct DryRun *bool `type:"boolean"` // One or more filters. + // + // * local-gateway-id - The ID of a local gateway. + // + // * local-gateway-route-table-id - The ID of the local gateway route table. + // + // * local-gateway-route-table-virtual-interface-group-association-id - The + // ID of the association. + // + // * local-gateway-route-table-virtual-interface-group-id - The ID of the + // virtual interface group. + // + // * state - The state of the association. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The IDs of the associations. @@ -59549,6 +65106,16 @@ type DescribeLocalGatewayRouteTableVpcAssociationsInput struct { DryRun *bool `type:"boolean"` // One or more filters. + // + // * local-gateway-id - The ID of a local gateway. + // + // * local-gateway-route-table-id - The ID of the local gateway route table. + // + // * local-gateway-route-table-vpc-association-id - The ID of the association. + // + // * state - The state of the association. + // + // * vpc-id - The ID of the VPC. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The IDs of the associations. @@ -59658,6 +65225,14 @@ type DescribeLocalGatewayRouteTablesInput struct { DryRun *bool `type:"boolean"` // One or more filters. + // + // * local-gateway-id - The ID of a local gateway. + // + // * local-gateway-route-table-id - The ID of a local gateway route table. + // + // * outpost-arn - The Amazon Resource Name (ARN) of the Outpost. + // + // * state - The state of the local gateway route table. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The IDs of the local gateway route tables. @@ -59767,6 +65342,13 @@ type DescribeLocalGatewayVirtualInterfaceGroupsInput struct { DryRun *bool `type:"boolean"` // One or more filters. + // + // * local-gateway-id - The ID of a local gateway. + // + // * local-gateway-virtual-interface-id - The ID of the virtual interface. + // + // * local-gateway-virtual-interface-group-id - The ID of the virtual interface + // group. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The IDs of the virtual interface groups. @@ -59987,7 +65569,21 @@ type DescribeLocalGatewaysInput struct { // One or more filters. + // + // * local-gateway-id - The ID of a local gateway. + // + // * local-gateway-route-table-id - The ID of the local gateway route table. + // + // * local-gateway-route-table-virtual-interface-group-association-id - The + // ID of the association. + // + // * local-gateway-route-table-virtual-interface-group-id - The ID of the + // virtual interface group. + // + // * outpost-arn - The Amazon Resource Name (ARN) of the Outpost. + // + // * state - The state of the association. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The IDs of the local gateways. LocalGatewayIds []*string `locationName:"LocalGatewayId" locationNameList:"item" type:"list"` // The maximum number of results to return with a single call.
To retrieve the @@ -60084,6 +65680,121 @@ func (s *DescribeLocalGatewaysOutput) SetNextToken(v string) *DescribeLocalGatew return s } +type DescribeManagedPrefixListsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. + // + // * owner-id - The ID of the prefix list owner. + // + // * prefix-list-id - The ID of the prefix list. + // + // * prefix-list-name - The name of the prefix list. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"1" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` + + // One or more prefix list IDs. + PrefixListIds []*string `locationName:"PrefixListId" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeManagedPrefixListsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeManagedPrefixListsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeManagedPrefixListsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeManagedPrefixListsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeManagedPrefixListsInput) SetDryRun(v bool) *DescribeManagedPrefixListsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeManagedPrefixListsInput) SetFilters(v []*Filter) *DescribeManagedPrefixListsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeManagedPrefixListsInput) SetMaxResults(v int64) *DescribeManagedPrefixListsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeManagedPrefixListsInput) SetNextToken(v string) *DescribeManagedPrefixListsInput { + s.NextToken = &v + return s +} + +// SetPrefixListIds sets the PrefixListIds field's value. +func (s *DescribeManagedPrefixListsInput) SetPrefixListIds(v []*string) *DescribeManagedPrefixListsInput { + s.PrefixListIds = v + return s +} + +type DescribeManagedPrefixListsOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the prefix lists. 
+ PrefixLists []*ManagedPrefixList `locationName:"prefixListSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeManagedPrefixListsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeManagedPrefixListsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeManagedPrefixListsOutput) SetNextToken(v string) *DescribeManagedPrefixListsOutput { + s.NextToken = &v + return s +} + +// SetPrefixLists sets the PrefixLists field's value. +func (s *DescribeManagedPrefixListsOutput) SetPrefixLists(v []*ManagedPrefixList) *DescribeManagedPrefixListsOutput { + s.PrefixLists = v + return s +} + type DescribeMovingAddressesInput struct { _ struct{} `type:"structure"` @@ -60203,6 +65914,12 @@ func (s *DescribeMovingAddressesOutput) SetNextToken(v string) *DescribeMovingAd type DescribeNatGatewaysInput struct { _ struct{} `type:"structure"` + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + // One or more filters. // // * nat-gateway-id - The ID of the NAT gateway. @@ -60259,6 +65976,12 @@ func (s *DescribeNatGatewaysInput) Validate() error { return nil } +// SetDryRun sets the DryRun field's value. +func (s *DescribeNatGatewaysInput) SetDryRun(v bool) *DescribeNatGatewaysInput { + s.DryRun = &v + return s +} + // SetFilter sets the Filter field's value. func (s *DescribeNatGatewaysInput) SetFilter(v []*Filter) *DescribeNatGatewaysInput { s.Filter = v @@ -60762,9 +66485,6 @@ type DescribeNetworkInterfacesInput struct { // * attachment.instance-owner-id - The owner ID of the instance to which // the network interface is attached. // - // * attachment.nat-gateway-id - The ID of the NAT gateway to which the network - // interface is attached. - // // * attachment.status - The status of the attachment (attaching | attached // | detaching | detached). // @@ -60942,6 +66662,16 @@ type DescribePlacementGroupsInput struct { // | deleted). // // * strategy - The strategy of the placement group (cluster | spread | partition). + // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources that have a tag with a specific key, regardless + // of the tag value. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The IDs of the placement groups. @@ -61219,6 +66949,19 @@ func (s *DescribePrincipalIdFormatOutput) SetPrincipals(v []*PrincipalIdFormat) type DescribePublicIpv4PoolsInput struct { _ struct{} `type:"structure"` + // One or more filters. + // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. 
+ // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + // The maximum number of results to return with a single call. To retrieve the // remaining results, make another call with the returned nextToken value. MaxResults *int64 `min:"1" type:"integer"` @@ -61253,6 +66996,12 @@ func (s *DescribePublicIpv4PoolsInput) Validate() error { return nil } +// SetFilters sets the Filters field's value. +func (s *DescribePublicIpv4PoolsInput) SetFilters(v []*Filter) *DescribePublicIpv4PoolsInput { + s.Filters = v + return s +} + // SetMaxResults sets the MaxResults field's value. func (s *DescribePublicIpv4PoolsInput) SetMaxResults(v int64) *DescribePublicIpv4PoolsInput { s.MaxResults = &v @@ -62025,8 +67774,6 @@ type DescribeRouteTablesInput struct { // to find all resources assigned a tag with a specific key, regardless of // the tag value. // - // * transit-gateway-id - The ID of a transit gateway. - // // * vpc-id - The ID of the VPC for the route table. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` @@ -62515,8 +68262,8 @@ type DescribeSecurityGroupsInput struct { // * egress.ip-permission.ipv6-cidr - An IPv6 CIDR block for an outbound // security group rule. // - // * egress.ip-permission.prefix-list-id - The ID (prefix) of the AWS service - // to which a security group rule allows outbound access. + // * egress.ip-permission.prefix-list-id - The ID of a prefix list to which + // a security group rule allows outbound access. // // * egress.ip-permission.protocol - The IP protocol for an outbound security // group rule (tcp | udp | icmp or a protocol number). @@ -62546,8 +68293,8 @@ type DescribeSecurityGroupsInput struct { // * ip-permission.ipv6-cidr - An IPv6 CIDR block for an inbound security // group rule. // - // * ip-permission.prefix-list-id - The ID (prefix) of the AWS service from - // which a security group rule allows inbound access. + // * ip-permission.prefix-list-id - The ID of a prefix list from which a + // security group rule allows inbound access. // // * ip-permission.protocol - The IP protocol for an inbound security group // rule (tcp | udp | icmp or a protocol number). @@ -62810,12 +68557,12 @@ type DescribeSnapshotsInput struct { // // * encrypted - Indicates whether the snapshot is encrypted (true | false) // - // * owner-alias - Value from an Amazon-maintained list (amazon | self | - // all | aws-marketplace | microsoft) of snapshot owners. Not to be confused - // with the user-configured AWS account alias, which is set from the IAM - // console. + // * owner-alias - The owner alias, from an Amazon-maintained list (amazon). + // This is not the user-configured AWS account alias set using the IAM console. + // We recommend that you use the related parameter instead of this filter. // - // * owner-id - The ID of the AWS account that owns the snapshot. + // * owner-id - The AWS account ID of the owner. We recommend that you use + // the related parameter instead of this filter. // // * progress - The progress of the snapshot, as a percentage (for example, // 80%). @@ -62859,7 +68606,8 @@ type DescribeSnapshotsInput struct { // to return. NextToken *string `type:"string"` - // Describes the snapshots owned by these owners. + // Scopes the results to snapshots with the specified owners. 
You can specify + // a combination of AWS account IDs, self, and amazon. OwnerIds []*string `locationName:"Owner" locationNameList:"Owner" type:"list"` // The IDs of the AWS accounts that can create volumes from the snapshot. @@ -63415,8 +69163,8 @@ type DescribeSpotInstanceRequestsInput struct { // in GiB. // // * launch.block-device-mapping.volume-type - The type of EBS volume: gp2 - // for General Purpose SSD, io1 for Provisioned IOPS SSD, st1 for Throughput - // Optimized HDD, sc1for Cold HDD, or standard for Magnetic. + // for General Purpose SSD, io1 or io2 for Provisioned IOPS SSD, st1 for + // Throughput Optimized HDD, sc1 for Cold HDD, or standard for Magnetic. // // * launch.group-id - The ID of the security group for the instance. // @@ -63470,7 +69218,7 @@ type DescribeSpotInstanceRequestsInput struct { // * state - The state of the Spot Instance request (open | active | closed // | cancelled | failed). Spot request status information can help you track // your Amazon EC2 Spot Instance requests. For more information, see Spot - // Request Status (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html) + // request status (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html) // in the Amazon EC2 User Guide for Linux Instances. // // * status-code - The short code describing the most recent evaluation of @@ -63608,8 +69356,9 @@ type DescribeSpotPriceHistoryInput struct { // * instance-type - The type of instance (for example, m3.medium). // // * product-description - The product description for the Spot price (Linux/UNIX - // | SUSE Linux | Windows | Linux/UNIX (Amazon VPC) | SUSE Linux (Amazon - // VPC) | Windows (Amazon VPC)). + // | Red Hat Enterprise Linux | SUSE Linux | Windows | Linux/UNIX (Amazon + // VPC) | Red Hat Enterprise Linux (Amazon VPC) | SUSE Linux (Amazon VPC) + // | Windows (Amazon VPC)). // // * spot-price - The Spot price. The value must match exactly (or use wildcards; // greater than or less than comparison is not supported). @@ -64018,10 +69767,11 @@ type DescribeTagsInput struct { // * resource-id - The ID of the resource. // // * resource-type - The resource type (customer-gateway | dedicated-host - // | dhcp-options | elastic-ip | fleet | fpga-image | image | instance | - // host-reservation | internet-gateway | launch-template | natgateway | network-acl - // | network-interface | reserved-instances | route-table | security-group - // | snapshot | spot-instances-request | subnet | volume | vpc | vpc-peering-connection + // | dhcp-options | elastic-ip | fleet | fpga-image | host-reservation | + // image | instance | internet-gateway | key-pair | launch-template | natgateway + // | network-acl | network-interface | placement-group | reserved-instances + // | route-table | security-group | snapshot | spot-instances-request | subnet + // | volume | vpc | vpc-endpoint | vpc-endpoint-service | vpc-peering-connection // | vpn-connection | vpn-gateway). // // * tag: - The key/value combination of the tag. For example, specify
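A minimal sketch of the resource-type filter documented in the hunk above, applied through DescribeTags. The default session and the choice of instance as the resource type are assumptions for illustration.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	// List tags on instances only, using the resource-type filter.
	out, err := svc.DescribeTags(&ec2.DescribeTagsInput{
		Filters: []*ec2.Filter{{
			Name:   aws.String("resource-type"),
			Values: []*string{aws.String("instance")},
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, t := range out.Tags {
		fmt.Printf("%s: %s=%s\n",
			aws.StringValue(t.ResourceId),
			aws.StringValue(t.Key),
			aws.StringValue(t.Value))
	}
}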
@@ -64489,11 +70239,12 @@ type DescribeTransitGatewayAttachmentsInput struct { // // * resource-owner-id - The ID of the AWS account that owns the resource. // - // * resource-type - The resource type (vpc | vpn). + // * resource-type - The resource type. Valid values are vpc | vpn | direct-connect-gateway + // | peering. // - // * state - The state of the attachment (available | deleted | deleting - // | failed | modifying | pendingAcceptance | pending | rollingBack | rejected - // | rejecting). + // * state - The state of the attachment. Valid values are available | deleted + // | deleting | failed | failing | initiatingRequest | modifying | pendingAcceptance + // | pending | rollingBack | rejected | rejecting. // // * transit-gateway-attachment-id - The ID of the attachment. // @@ -64726,7 +70477,30 @@ type DescribeTransitGatewayPeeringAttachmentsInput struct { // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` - // One or more filters. + // One or more filters. The possible values are: + // + // * transit-gateway-attachment-id - The ID of the transit gateway attachment. + // + // * local-owner-id - The ID of your AWS account. + // + // * remote-owner-id - The ID of the AWS account in the remote Region that + // owns the transit gateway. + // + // * state - The state of the peering attachment. Valid values are available + // | deleted | deleting | failed | failing | initiatingRequest | modifying + // | pendingAcceptance | pending | rollingBack | rejected | rejecting. + // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources that have a tag with a specific key, regardless + // of the tag value. + // + // * transit-gateway-id - The ID of the transit gateway. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The maximum number of results to return with a single call. To retrieve the @@ -64843,9 +70617,8 @@ type DescribeTransitGatewayRouteTablesInput struct { // * default-propagation-route-table - Indicates whether this is the default // propagation route table for the transit gateway (true | false). // - // * state - The state of the attachment (available | deleted | deleting - // | failed | modifying | pendingAcceptance | pending | rollingBack | rejected - // | rejecting). + // * state - The state of the route table (available | deleting | deleted + // | pending). // // * transit-gateway-id - The ID of the transit gateway. // @@ -64961,9 +70734,9 @@ type DescribeTransitGatewayVpcAttachmentsInput struct { // One or more filters. The possible values are: // - // * state - The state of the attachment (available | deleted | deleting - // | failed | modifying | pendingAcceptance | pending | rollingBack | rejected - // | rejecting). + // * state - The state of the attachment. Valid values are available | deleted + // | deleting | failed | failing | initiatingRequest | modifying | pendingAcceptance + // | pending | rollingBack | rejected | rejecting. // // * transit-gateway-attachment-id - The ID of the attachment. // @@ -65108,9 +70881,8 @@ type DescribeTransitGatewaysInput struct { // // * owner-id - The ID of the AWS account that owns the transit gateway. // - // * state - The state of the attachment (available | deleted | deleting - // | failed | modifying | pendingAcceptance | pending | rollingBack | rejected - // | rejecting). + // * state - The state of the transit gateway (available | deleted | deleting + // | modifying | pending). // // * transit-gateway-id - The ID of the transit gateway.
Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` @@ -65484,11 +71256,17 @@ type DescribeVolumesInput struct { // // * encrypted - Indicates whether the volume is encrypted (true | false) // + // * multi-attach-enabled - Indicates whether the volume is enabled for Multi-Attach + // (true | false) + // + // * fast-restored - Indicates whether the volume was created from a snapshot + // that is enabled for fast snapshot restore (true | false). + // // * size - The size of the volume, in GiB. // // * snapshot-id - The snapshot from which the volume was created. // - // * status - The status of the volume (creating | available | in-use | deleting + // * status - The state of the volume (creating | available | in-use | deleting // | deleted | error). // // * tag: - The key/value combination of a tag assigned to the resource. @@ -65504,7 +71282,7 @@ type DescribeVolumesInput struct { // * volume-id - The volume ID. // // * volume-type - The Amazon EBS volume type. This can be gp2 for General - // Purpose SSD, io1 for Provisioned IOPS SSD, st1 for Throughput Optimized + // Purpose SSD, io1 or io2 for Provisioned IOPS SSD, st1 for Throughput Optimized // HDD, sc1 for Cold HDD, or standard for Magnetic volumes. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` @@ -65578,9 +71356,34 @@ type DescribeVolumesModificationsInput struct { // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` - // The filters. Supported filters: volume-id, modification-state, target-size, - // target-iops, target-volume-type, original-size, original-iops, original-volume-type, - // start-time. + // The filters. + // + // * modification-state - The current modification state (modifying | optimizing + // | completed | failed). + // + // * original-iops - The original IOPS rate of the volume. + // + // * original-size - The original size of the volume, in GiB. + // + // * original-volume-type - The original volume type of the volume (standard + // | io1 | io2 | gp2 | sc1 | st1). + // + // * originalMultiAttachEnabled - Indicates whether Multi-Attach support + // was enabled (true | false). + // + // * start-time - The modification start time. + // + // * target-iops - The target IOPS rate of the volume. + // + // * target-size - The target size of the volume, in GiB. + // + // * target-volume-type - The target volume type of the volume (standard + // | io1 | io2 | gp2 | sc1 | st1). + // + // * targetMultiAttachEnabled - Indicates whether Multi-Attach support is + // to be enabled (true | false). + // + // * volume-id - The ID of the volume. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The maximum number of results (up to a limit of 500) to be returned in a @@ -65590,7 +71393,7 @@ type DescribeVolumesModificationsInput struct { // The nextToken value returned by a previous paginated request. NextToken *string `type:"string"` - // The IDs of the volumes for which in-progress modifications will be described. + // The IDs of the volumes. VolumeIds []*string `locationName:"VolumeId" locationNameList:"VolumeId" type:"list"` } @@ -66844,6 +72647,9 @@ type DescribeVpcsInput struct { // * ipv6-cidr-block-association.ipv6-cidr-block - An IPv6 CIDR block associated // with the VPC. // + // * ipv6-cidr-block-association.ipv6-pool - The ID of the IPv6 address pool + // from which the IPv6 CIDR block is allocated. 
+ // // * ipv6-cidr-block-association.association-id - The association ID for // an IPv6 CIDR block associated with the VPC. // @@ -67452,7 +73258,8 @@ type DetachVolumeInput struct { // and repair procedures. Force *bool `type:"boolean"` - // The ID of the instance. + // The ID of the instance. If you are detaching a Multi-Attach enabled volume, + // you must specify an instance ID. InstanceId *string `type:"string"` // The ID of the volume. @@ -67897,10 +73704,11 @@ type DisableFastSnapshotRestoreSuccessItem struct { // The time at which fast snapshot restores entered the optimizing state. OptimizingTime *time.Time `locationName:"optimizingTime" type:"timestamp"` - // The alias of the snapshot owner. + // The AWS owner alias that enabled fast snapshot restores on the snapshot. + // This is intended for future use. OwnerAlias *string `locationName:"ownerAlias" type:"string"` - // The ID of the AWS account that owns the snapshot. + // The ID of the AWS account that enabled fast snapshot restores on the snapshot. OwnerId *string `locationName:"ownerId" type:"string"` // The ID of the snapshot. @@ -68184,6 +73992,12 @@ func (s *DisableTransitGatewayRouteTablePropagationOutput) SetPropagation(v *Tra type DisableVgwRoutePropagationInput struct { _ struct{} `type:"structure"` + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + // The ID of the virtual private gateway. // // GatewayId is a required field @@ -68221,6 +74035,12 @@ func (s *DisableVgwRoutePropagationInput) Validate() error { return nil } +// SetDryRun sets the DryRun field's value. +func (s *DisableVgwRoutePropagationInput) SetDryRun(v bool) *DisableVgwRoutePropagationInput { + s.DryRun = &v + return s +} + // SetGatewayId sets the GatewayId field's value. func (s *DisableVgwRoutePropagationInput) SetGatewayId(v string) *DisableVgwRoutePropagationInput { s.GatewayId = &v @@ -68520,6 +74340,89 @@ func (s *DisassociateClientVpnTargetNetworkOutput) SetStatus(v *AssociationStatu return s } +type DisassociateEnclaveCertificateIamRoleInput struct { + _ struct{} `type:"structure"` + + // The ARN of the ACM certificate from which to disassociate the IAM role. + CertificateArn *string `min:"1" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ARN of the IAM role to disassociate. + RoleArn *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DisassociateEnclaveCertificateIamRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateEnclaveCertificateIamRoleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DisassociateEnclaveCertificateIamRoleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisassociateEnclaveCertificateIamRoleInput"} + if s.CertificateArn != nil && len(*s.CertificateArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CertificateArn", 1)) + } + if s.RoleArn != nil && len(*s.RoleArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCertificateArn sets the CertificateArn field's value. +func (s *DisassociateEnclaveCertificateIamRoleInput) SetCertificateArn(v string) *DisassociateEnclaveCertificateIamRoleInput { + s.CertificateArn = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *DisassociateEnclaveCertificateIamRoleInput) SetDryRun(v bool) *DisassociateEnclaveCertificateIamRoleInput { + s.DryRun = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *DisassociateEnclaveCertificateIamRoleInput) SetRoleArn(v string) *DisassociateEnclaveCertificateIamRoleInput { + s.RoleArn = &v + return s +} + +type DisassociateEnclaveCertificateIamRoleOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s DisassociateEnclaveCertificateIamRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateEnclaveCertificateIamRoleOutput) GoString() string { + return s.String() +} + +// SetReturn sets the Return field's value. +func (s *DisassociateEnclaveCertificateIamRoleOutput) SetReturn(v bool) *DisassociateEnclaveCertificateIamRoleOutput { + s.Return = &v + return s +} + type DisassociateIamInstanceProfileInput struct { _ struct{} `type:"structure"` @@ -68585,7 +74488,7 @@ type DisassociateRouteTableInput struct { _ struct{} `type:"structure"` // The association ID representing the current association between the route - // table and subnet. + // table and subnet or gateway. // // AssociationId is a required field AssociationId *string `locationName:"associationId" type:"string" required:"true"` @@ -69301,7 +75204,7 @@ type EbsBlockDevice struct { _ struct{} `type:"structure"` // Indicates whether the EBS volume is deleted on instance termination. For - // more information, see Preserving Amazon EBS Volumes on Instance Termination + // more information, see Preserving Amazon EBS volumes on instance termination // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/terminating-instances.html#preserving-volumes-on-termination) // in the Amazon Elastic Compute Cloud User Guide. DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"` @@ -69316,25 +75219,28 @@ type EbsBlockDevice struct { // In no case can you remove encryption from an encrypted volume. // // Encrypted volumes can only be attached to instances that support Amazon EBS - // encryption. For more information, see Supported Instance Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#EBSEncryption_supported_instances). + // encryption. For more information, see Supported instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#EBSEncryption_supported_instances). + // + // This parameter is not returned by . 
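// A minimal sketch of calling DisassociateEnclaveCertificateIamRole, whose
// request and response shapes are defined here (both ARNs are placeholders;
// session setup and error handling elided):
//
//    svc := ec2.New(session.Must(session.NewSession()))
//    out, err := svc.DisassociateEnclaveCertificateIamRole(
//        &ec2.DisassociateEnclaveCertificateIamRoleInput{
//            CertificateArn: aws.String("arn:aws:acm:us-east-1:123456789012:certificate/example"),
//            RoleArn:        aws.String("arn:aws:iam::123456789012:role/example-role"),
//        })
//    if err == nil && aws.BoolValue(out.Return) {
//        // the role is no longer associated with the certificate
//    }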
Encrypted *bool `locationName:"encrypted" type:"boolean"` // The number of I/O operations per second (IOPS) that the volume supports. - // For io1 volumes, this represents the number of IOPS that are provisioned + // For io1 and io2 volumes, this represents the number of IOPS that are provisioned // for the volume. For gp2 volumes, this represents the baseline performance // of the volume and the rate at which the volume accumulates I/O credits for - // bursting. For more information, see Amazon EBS Volume Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) + // bursting. For more information, see Amazon EBS volume types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) // in the Amazon Elastic Compute Cloud User Guide. // - // Constraints: Range is 100-16,000 IOPS for gp2 volumes and 100 to 64,000IOPS - // for io1 volumes in most Regions. Maximum io1 IOPS of 64,000 is guaranteed - // only on Nitro-based instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances). + // Constraints: Range is 100-16,000 IOPS for gp2 volumes and 100 to 64,000 IOPS + // for io1 and io2 volumes in most Regions. Maximum io1 and io2 IOPS of 64,000 + // is guaranteed only on Nitro-based instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances). // Other instance families guarantee performance up to 32,000 IOPS. For more // information, see Amazon EBS Volume Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) // in the Amazon Elastic Compute Cloud User Guide. // - // Condition: This parameter is required for requests to create io1 volumes; - // it is not used in requests to create gp2, st1, sc1, or standard volumes. + // Condition: This parameter is required for requests to create io1 and io2 + // volumes; it is not used in requests to create gp2, st1, sc1, or standard + // volumes. Iops *int64 `locationName:"iops" type:"integer"` // Identifier (key ID, key alias, ID ARN, or alias ARN) for a customer managed @@ -69355,15 +75261,15 @@ type EbsBlockDevice struct { // a volume size, the default is the snapshot size. // // Constraints: 1-16384 for General Purpose SSD (gp2), 4-16384 for Provisioned - // IOPS SSD (io1), 500-16384 for Throughput Optimized HDD (st1), 500-16384 for - // Cold HDD (sc1), and 1-1024 for Magnetic (standard) volumes. If you specify + // IOPS SSD (io1 and io2), 500-16384 for Throughput Optimized HDD (st1), 500-16384 + // for Cold HDD (sc1), and 1-1024 for Magnetic (standard) volumes. If you specify // a snapshot, the volume size must be equal to or larger than the snapshot // size. VolumeSize *int64 `locationName:"volumeSize" type:"integer"` - // The volume type. If you set the type to io1, you must also specify the Iops - // parameter. If you set the type to gp2, st1, sc1, or standard, you must omit - // the Iops parameter. + // The volume type. If you set the type to io1 or io2, you must also specify + // the Iops parameter. If you set the type to gp2, st1, sc1, or standard, you + // must omit the Iops parameter. // // Default: gp2 VolumeType *string `locationName:"volumeType" type:"string" enum:"VolumeType"` @@ -69425,13 +75331,19 @@ func (s *EbsBlockDevice) SetVolumeType(v string) *EbsBlockDevice { type EbsInfo struct { _ struct{} `type:"structure"` - // Indicates that the instance type is Amazon EBS-optimized. For more information, + // Describes the optimized EBS performance for the instance type. 
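// A minimal sketch of a block device mapping that satisfies the io1/io2
// condition described above: Iops is set because the volume type is io2
// (the device name and sizes are illustrative):
//
//    mapping := &ec2.BlockDeviceMapping{
//        DeviceName: aws.String("/dev/sdf"),
//        Ebs: &ec2.EbsBlockDevice{
//            VolumeType:          aws.String("io2"),
//            Iops:                aws.Int64(4000), // required for io1/io2; omit for gp2, st1, sc1, standard
//            VolumeSize:          aws.Int64(100),
//            Encrypted:           aws.Bool(true),
//            DeleteOnTermination: aws.Bool(true),
//        },
//    }
//    _ = mapping // pass via the BlockDeviceMappings field of a launch request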
+ EbsOptimizedInfo *EbsOptimizedInfo `locationName:"ebsOptimizedInfo" type:"structure"` + + // Indicates whether the instance type is Amazon EBS-optimized. For more information, // see Amazon EBS-Optimized Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html) // in Amazon EC2 User Guide for Linux Instances. EbsOptimizedSupport *string `locationName:"ebsOptimizedSupport" type:"string" enum:"EbsOptimizedSupport"` // Indicates whether Amazon EBS encryption is supported. EncryptionSupport *string `locationName:"encryptionSupport" type:"string" enum:"EbsEncryptionSupport"` + + // Indicates whether non-volatile memory express (NVMe) is supported. + NvmeSupport *string `locationName:"nvmeSupport" type:"string" enum:"EbsNvmeSupport"` } // String returns the string representation @@ -69444,6 +75356,12 @@ func (s EbsInfo) GoString() string { return s.String() } +// SetEbsOptimizedInfo sets the EbsOptimizedInfo field's value. +func (s *EbsInfo) SetEbsOptimizedInfo(v *EbsOptimizedInfo) *EbsInfo { + s.EbsOptimizedInfo = v + return s +} + // SetEbsOptimizedSupport sets the EbsOptimizedSupport field's value. func (s *EbsInfo) SetEbsOptimizedSupport(v string) *EbsInfo { s.EbsOptimizedSupport = &v @@ -69456,6 +75374,12 @@ func (s *EbsInfo) SetEncryptionSupport(v string) *EbsInfo { return s } +// SetNvmeSupport sets the NvmeSupport field's value. +func (s *EbsInfo) SetNvmeSupport(v string) *EbsInfo { + s.NvmeSupport = &v + return s +} + // Describes a parameter used to set up an EBS volume in a block device mapping. type EbsInstanceBlockDevice struct { _ struct{} `type:"structure"` @@ -69541,6 +75465,81 @@ func (s *EbsInstanceBlockDeviceSpecification) SetVolumeId(v string) *EbsInstance return s } +// Describes the optimized EBS performance for supported instance types. +type EbsOptimizedInfo struct { + _ struct{} `type:"structure"` + + // The baseline bandwidth performance for an EBS-optimized instance type, in + // Mbps. + BaselineBandwidthInMbps *int64 `locationName:"baselineBandwidthInMbps" type:"integer"` + + // The baseline input/output storage operations per seconds for an EBS-optimized + // instance type. + BaselineIops *int64 `locationName:"baselineIops" type:"integer"` + + // The baseline throughput performance for an EBS-optimized instance type, in + // MB/s. + BaselineThroughputInMBps *float64 `locationName:"baselineThroughputInMBps" type:"double"` + + // The maximum bandwidth performance for an EBS-optimized instance type, in + // Mbps. + MaximumBandwidthInMbps *int64 `locationName:"maximumBandwidthInMbps" type:"integer"` + + // The maximum input/output storage operations per second for an EBS-optimized + // instance type. + MaximumIops *int64 `locationName:"maximumIops" type:"integer"` + + // The maximum throughput performance for an EBS-optimized instance type, in + // MB/s. + MaximumThroughputInMBps *float64 `locationName:"maximumThroughputInMBps" type:"double"` +} + +// String returns the string representation +func (s EbsOptimizedInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EbsOptimizedInfo) GoString() string { + return s.String() +} + +// SetBaselineBandwidthInMbps sets the BaselineBandwidthInMbps field's value. +func (s *EbsOptimizedInfo) SetBaselineBandwidthInMbps(v int64) *EbsOptimizedInfo { + s.BaselineBandwidthInMbps = &v + return s +} + +// SetBaselineIops sets the BaselineIops field's value. 
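// A minimal sketch of reading the EbsOptimizedInfo data added above through
// DescribeInstanceTypes (the instance type is an illustrative choice; the nil
// checks matter because not every instance type reports this block):
//
//    svc := ec2.New(session.Must(session.NewSession()))
//    out, err := svc.DescribeInstanceTypes(&ec2.DescribeInstanceTypesInput{
//        InstanceTypes: []*string{aws.String("m5.large")},
//    })
//    if err == nil {
//        for _, it := range out.InstanceTypes {
//            if it.EbsInfo != nil && it.EbsInfo.EbsOptimizedInfo != nil {
//                fmt.Println(aws.Int64Value(it.EbsInfo.EbsOptimizedInfo.BaselineIops),
//                    aws.Int64Value(it.EbsInfo.EbsOptimizedInfo.MaximumIops))
//            }
//        }
//    }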
+func (s *EbsOptimizedInfo) SetBaselineIops(v int64) *EbsOptimizedInfo { + s.BaselineIops = &v + return s +} + +// SetBaselineThroughputInMBps sets the BaselineThroughputInMBps field's value. +func (s *EbsOptimizedInfo) SetBaselineThroughputInMBps(v float64) *EbsOptimizedInfo { + s.BaselineThroughputInMBps = &v + return s +} + +// SetMaximumBandwidthInMbps sets the MaximumBandwidthInMbps field's value. +func (s *EbsOptimizedInfo) SetMaximumBandwidthInMbps(v int64) *EbsOptimizedInfo { + s.MaximumBandwidthInMbps = &v + return s +} + +// SetMaximumIops sets the MaximumIops field's value. +func (s *EbsOptimizedInfo) SetMaximumIops(v int64) *EbsOptimizedInfo { + s.MaximumIops = &v + return s +} + +// SetMaximumThroughputInMBps sets the MaximumThroughputInMBps field's value. +func (s *EbsOptimizedInfo) SetMaximumThroughputInMBps(v float64) *EbsOptimizedInfo { + s.MaximumThroughputInMBps = &v + return s +} + // Describes an egress-only internet gateway. type EgressOnlyInternetGateway struct { _ struct{} `type:"structure"` @@ -69813,7 +75812,7 @@ type ElasticInferenceAccelerator struct { Count *int64 `min:"1" type:"integer"` // The type of elastic inference accelerator. The possible values are eia1.medium, - // eia1.large, and eia1.xlarge. + // eia1.large, eia1.xlarge, eia2.medium, eia2.large, and eia2.xlarge. // // Type is a required field Type *string `type:"string" required:"true"` @@ -70081,10 +76080,11 @@ type EnableFastSnapshotRestoreSuccessItem struct { // The time at which fast snapshot restores entered the optimizing state. OptimizingTime *time.Time `locationName:"optimizingTime" type:"timestamp"` - // The alias of the snapshot owner. + // The AWS owner alias that enabled fast snapshot restores on the snapshot. + // This is intended for future use. OwnerAlias *string `locationName:"ownerAlias" type:"string"` - // The ID of the AWS account that owns the snapshot. + // The ID of the AWS account that enabled fast snapshot restores on the snapshot. OwnerId *string `locationName:"ownerId" type:"string"` // The ID of the snapshot. @@ -70369,6 +76369,12 @@ func (s *EnableTransitGatewayRouteTablePropagationOutput) SetPropagation(v *Tran type EnableVgwRoutePropagationInput struct { _ struct{} `type:"structure"` + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + // The ID of the virtual private gateway that is attached to a VPC. The virtual // private gateway must be attached to the same VPC that the routing tables // are associated with. @@ -70409,6 +76415,12 @@ func (s *EnableVgwRoutePropagationInput) Validate() error { return nil } +// SetDryRun sets the DryRun field's value. +func (s *EnableVgwRoutePropagationInput) SetDryRun(v bool) *EnableVgwRoutePropagationInput { + s.DryRun = &v + return s +} + // SetGatewayId sets the GatewayId field's value. func (s *EnableVgwRoutePropagationInput) SetGatewayId(v string) *EnableVgwRoutePropagationInput { s.GatewayId = &v @@ -70618,6 +76630,57 @@ func (s *EnableVpcClassicLinkOutput) SetReturn(v bool) *EnableVpcClassicLinkOutp return s } +// Indicates whether the instance is enabled for AWS Nitro Enclaves. 
+type EnclaveOptions struct { + _ struct{} `type:"structure"` + + // If this parameter is set to true, the instance is enabled for AWS Nitro Enclaves; + // otherwise, it is not enabled for AWS Nitro Enclaves. + Enabled *bool `locationName:"enabled" type:"boolean"` +} + +// String returns the string representation +func (s EnclaveOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnclaveOptions) GoString() string { + return s.String() +} + +// SetEnabled sets the Enabled field's value. +func (s *EnclaveOptions) SetEnabled(v bool) *EnclaveOptions { + s.Enabled = &v + return s +} + +// Indicates whether the instance is enabled for AWS Nitro Enclaves. For more +// information, see What is AWS Nitro Enclaves? (https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave.html) +// in the AWS Nitro Enclaves User Guide. +type EnclaveOptionsRequest struct { + _ struct{} `type:"structure"` + + // To enable the instance for AWS Nitro Enclaves, set this parameter to true. + Enabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s EnclaveOptionsRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnclaveOptionsRequest) GoString() string { + return s.String() +} + +// SetEnabled sets the Enabled field's value. +func (s *EnclaveOptionsRequest) SetEnabled(v bool) *EnclaveOptionsRequest { + s.Enabled = &v + return s +} + // Describes an EC2 Fleet or Spot Fleet event. type EventInformation struct { _ struct{} `type:"structure"` @@ -70884,7 +76947,7 @@ type ExportImageInput struct { // Token to enable idempotency for export image requests. ClientToken *string `type:"string" idempotencyToken:"true"` - // A description of the image being exported. The maximum length is 255 bytes. + // A description of the image being exported. The maximum length is 255 characters. Description *string `type:"string"` // The disk image format. @@ -70904,15 +76967,18 @@ type ExportImageInput struct { ImageId *string `type:"string" required:"true"` // The name of the role that grants VM Import/Export permission to export images - // to your S3 bucket. If this parameter is not specified, the default role is - // named 'vmimport'. + // to your Amazon S3 bucket. If this parameter is not specified, the default + // role is named 'vmimport'. RoleName *string `type:"string"` - // Information about the destination S3 bucket. The bucket must exist and grant - // WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com. + // Information about the destination Amazon S3 bucket. The bucket must exist + // and grant WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com. // // S3ExportLocation is a required field S3ExportLocation *ExportTaskS3LocationRequest `type:"structure" required:"true"` + + // The tags to apply to the image being exported. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` } // String returns the string representation @@ -70991,6 +77057,12 @@ func (s *ExportImageInput) SetS3ExportLocation(v *ExportTaskS3LocationRequest) * return s } +// SetTagSpecifications sets the TagSpecifications field's value. 
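// A minimal sketch of enabling AWS Nitro Enclaves at launch with the
// EnclaveOptionsRequest shape above (the AMI ID and instance type are
// placeholders; RunInstancesInput carries the EnclaveOptions field in this
// SDK version):
//
//    svc := ec2.New(session.Must(session.NewSession()))
//    _, err := svc.RunInstances(&ec2.RunInstancesInput{
//        ImageId:        aws.String("ami-0123456789abcdef0"),
//        InstanceType:   aws.String("m5.xlarge"),
//        MinCount:       aws.Int64(1),
//        MaxCount:       aws.Int64(1),
//        EnclaveOptions: &ec2.EnclaveOptionsRequest{Enabled: aws.Bool(true)},
//    })
//    _ = err // handle in real code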
+func (s *ExportImageInput) SetTagSpecifications(v []*TagSpecification) *ExportImageInput { + s.TagSpecifications = v + return s +} + type ExportImageOutput struct { _ struct{} `type:"structure"` @@ -71010,10 +77082,10 @@ type ExportImageOutput struct { Progress *string `locationName:"progress" type:"string"` // The name of the role that grants VM Import/Export permission to export images - // to your S3 bucket. + // to your Amazon S3 bucket. RoleName *string `locationName:"roleName" type:"string"` - // Information about the destination S3 bucket. + // Information about the destination Amazon S3 bucket. S3ExportLocation *ExportTaskS3Location `locationName:"s3ExportLocation" type:"structure"` // The status of the export image task. The possible values are active, completed, @@ -71022,6 +77094,9 @@ type ExportImageOutput struct { // The status message for the export image task. StatusMessage *string `locationName:"statusMessage" type:"string"` + + // Any tags assigned to the image being exported. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` } // String returns the string representation @@ -71088,6 +77163,12 @@ func (s *ExportImageOutput) SetStatusMessage(v string) *ExportImageOutput { return s } +// SetTags sets the Tags field's value. +func (s *ExportImageOutput) SetTags(v []*Tag) *ExportImageOutput { + s.Tags = v + return s +} + // Describes an export image task. type ExportImageTask struct { _ struct{} `type:"structure"` @@ -71104,7 +77185,7 @@ type ExportImageTask struct { // The percent complete of the export image task. Progress *string `locationName:"progress" type:"string"` - // Information about the destination S3 bucket. + // Information about the destination Amazon S3 bucket. S3ExportLocation *ExportTaskS3Location `locationName:"s3ExportLocation" type:"structure"` // The status of the export image task. The possible values are active, completed, @@ -71113,6 +77194,9 @@ type ExportImageTask struct { // The status message for the export image task. StatusMessage *string `locationName:"statusMessage" type:"string"` + + // Any tags assigned to the image being exported. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` } // String returns the string representation @@ -71167,6 +77251,12 @@ func (s *ExportImageTask) SetStatusMessage(v string) *ExportImageTask { return s } +// SetTags sets the Tags field's value. +func (s *ExportImageTask) SetTags(v []*Tag) *ExportImageTask { + s.Tags = v + return s +} + // Describes an instance export task. type ExportTask struct { _ struct{} `type:"structure"` @@ -71189,6 +77279,7 @@ type ExportTask struct { // The status message related to the export task. StatusMessage *string `locationName:"statusMessage" type:"string"` + // The tags for the export task. Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` } @@ -71248,7 +77339,7 @@ func (s *ExportTask) SetTags(v []*Tag) *ExportTask { type ExportTaskS3Location struct { _ struct{} `type:"structure"` - // The destination S3 bucket. + // The destination Amazon S3 bucket. S3Bucket *string `locationName:"s3Bucket" type:"string"` // The prefix (logical hierarchy) in the bucket. @@ -71281,7 +77372,7 @@ func (s *ExportTaskS3Location) SetS3Prefix(v string) *ExportTaskS3Location { type ExportTaskS3LocationRequest struct { _ struct{} `type:"structure"` - // The destination S3 bucket. + // The destination Amazon S3 bucket. 
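// A minimal sketch of an ExportImage request that uses the TagSpecifications
// field added above (bucket, image ID, and tag values are placeholders, and
// the export-image-task resource type is an assumption about the enum value):
//
//    svc := ec2.New(session.Must(session.NewSession()))
//    _, err := svc.ExportImage(&ec2.ExportImageInput{
//        ImageId:         aws.String("ami-0123456789abcdef0"),
//        DiskImageFormat: aws.String("VMDK"),
//        S3ExportLocation: &ec2.ExportTaskS3LocationRequest{
//            S3Bucket: aws.String("my-export-bucket"),
//        },
//        TagSpecifications: []*ec2.TagSpecification{{
//            ResourceType: aws.String("export-image-task"),
//            Tags:         []*ec2.Tag{{Key: aws.String("Owner"), Value: aws.String("TeamA")}},
//        }},
//    })
//    _ = err // handle in real code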
// // S3Bucket is a required field S3Bucket *string `type:"string" required:"true"` @@ -71336,8 +77427,8 @@ type ExportToS3Task struct { // The format for the exported image. DiskImageFormat *string `locationName:"diskImageFormat" type:"string" enum:"DiskImageFormat"` - // The S3 bucket for the destination image. The destination bucket must exist - // and grant WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com. + // The Amazon S3 bucket for the destination image. The destination bucket must + // exist and grant WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com. S3Bucket *string `locationName:"s3Bucket" type:"string"` // The encryption key for your S3 bucket. @@ -71389,12 +77480,12 @@ type ExportToS3TaskSpecification struct { // The format for the exported image. DiskImageFormat *string `locationName:"diskImageFormat" type:"string" enum:"DiskImageFormat"` - // The S3 bucket for the destination image. The destination bucket must exist - // and grant WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com. + // The Amazon S3 bucket for the destination image. The destination bucket must + // exist and grant WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com. S3Bucket *string `locationName:"s3Bucket" type:"string"` - // The image is written to a single object in the S3 bucket at the S3 key s3prefix - // + exportTaskId + '.' + diskImageFormat. + // The image is written to a single object in the Amazon S3 bucket at the S3 + // key s3prefix + exportTaskId + '.' + diskImageFormat. S3Prefix *string `locationName:"s3Prefix" type:"string"` } @@ -71461,13 +77552,11 @@ type ExportTransitGatewayRoutesInput struct { // routes in your route table and you specify supernet-of-match as 10.0.1.0/30, // then the result returns 10.0.1.0/29. // - // * state - The state of the attachment (available | deleted | deleting - // | failed | modifying | pendingAcceptance | pending | rollingBack | rejected - // | rejecting). + // * state - The state of the route (active | blackhole). // // * transit-gateway-route-destination-cidr-block - The CIDR range. // - // * type - The type of route (active | blackhole). + // * type - The type of route (propagated | static). Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The name of the S3 bucket. @@ -71587,6 +77676,74 @@ func (s *FailedQueuedPurchaseDeletion) SetReservedInstancesId(v string) *FailedQ return s } +// Describes the IAM SAML identity providers used for federated authentication. +type FederatedAuthentication struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM SAML identity provider. + SamlProviderArn *string `locationName:"samlProviderArn" type:"string"` + + // The Amazon Resource Name (ARN) of the IAM SAML identity provider for the + // self-service portal. + SelfServiceSamlProviderArn *string `locationName:"selfServiceSamlProviderArn" type:"string"` +} + +// String returns the string representation +func (s FederatedAuthentication) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FederatedAuthentication) GoString() string { + return s.String() +} + +// SetSamlProviderArn sets the SamlProviderArn field's value. +func (s *FederatedAuthentication) SetSamlProviderArn(v string) *FederatedAuthentication { + s.SamlProviderArn = &v + return s +} + +// SetSelfServiceSamlProviderArn sets the SelfServiceSamlProviderArn field's value. 
+func (s *FederatedAuthentication) SetSelfServiceSamlProviderArn(v string) *FederatedAuthentication { + s.SelfServiceSamlProviderArn = &v + return s +} + +// The IAM SAML identity provider used for federated authentication. +type FederatedAuthenticationRequest struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM SAML identity provider. + SAMLProviderArn *string `type:"string"` + + // The Amazon Resource Name (ARN) of the IAM SAML identity provider for the + // self-service portal. + SelfServiceSAMLProviderArn *string `type:"string"` +} + +// String returns the string representation +func (s FederatedAuthenticationRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FederatedAuthenticationRequest) GoString() string { + return s.String() +} + +// SetSAMLProviderArn sets the SAMLProviderArn field's value. +func (s *FederatedAuthenticationRequest) SetSAMLProviderArn(v string) *FederatedAuthenticationRequest { + s.SAMLProviderArn = &v + return s +} + +// SetSelfServiceSAMLProviderArn sets the SelfServiceSAMLProviderArn field's value. +func (s *FederatedAuthenticationRequest) SetSelfServiceSAMLProviderArn(v string) *FederatedAuthenticationRequest { + s.SelfServiceSAMLProviderArn = &v + return s +} + // A filter name and value pair that is used to return a more specific list // of results from a describe operation. Filters can be used to match a set // of resources by specific criteria, such as tags, attributes, or IDs. The @@ -72057,7 +78214,9 @@ type FleetLaunchTemplateOverridesRequest struct { // override has the lowest priority. Priority *float64 `type:"double"` - // The ID of the subnet in which to launch the instances. + // The IDs of the subnets in which to launch the instances. Separate multiple + // subnet IDs using commas (for example, subnet-1234abcdeexample1, subnet-0987cdef6example2). + // A request of type instant can have only one subnet ID. SubnetId *string `type:"string"` // The number of units provided by the specified instance type. @@ -72116,19 +78275,30 @@ func (s *FleetLaunchTemplateOverridesRequest) SetWeightedCapacity(v float64) *Fl return s } -// Describes a launch template. +// Describes the Amazon EC2 launch template and the launch template version +// that can be used by a Spot Fleet request to configure Amazon EC2 instances. +// For information about launch templates, see Launching an instance from a +// launch template (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html) +// in the Amazon EC2 User Guide for Linux Instances. type FleetLaunchTemplateSpecification struct { _ struct{} `type:"structure"` - // The ID of the launch template. You must specify either a template ID or a - // template name. + // The ID of the launch template. If you specify the template ID, you can't + // specify the template name. LaunchTemplateId *string `locationName:"launchTemplateId" type:"string"` - // The name of the launch template. You must specify either a template name - // or a template ID. + // The name of the launch template. If you specify the template name, you can't + // specify the template ID. LaunchTemplateName *string `locationName:"launchTemplateName" min:"3" type:"string"` - // The version number of the launch template. You must specify a version number. + // The launch template version number, $Latest, or $Default. You must specify + // a value, otherwise the request fails. 
+ // + // If the value is $Latest, Amazon EC2 uses the latest version of the launch + // template. + // + // If the value is $Default, Amazon EC2 uses the default version of the launch + // template. Version *string `locationName:"version" type:"string"` } @@ -72173,19 +78343,30 @@ func (s *FleetLaunchTemplateSpecification) SetVersion(v string) *FleetLaunchTemp return s } -// The launch template to use. You must specify either the launch template ID -// or launch template name in the request. +// Describes the Amazon EC2 launch template and the launch template version +// that can be used by an EC2 Fleet to configure Amazon EC2 instances. For information +// about launch templates, see Launching an instance from a launch template +// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html) +// in the Amazon Elastic Compute Cloud User Guide. type FleetLaunchTemplateSpecificationRequest struct { _ struct{} `type:"structure"` - // The ID of the launch template. + // The ID of the launch template. If you specify the template ID, you can't + // specify the template name. LaunchTemplateId *string `type:"string"` - // The name of the launch template. + // The name of the launch template. If you specify the template name, you can't + // specify the template ID. LaunchTemplateName *string `min:"3" type:"string"` - // The version number of the launch template. Note: This is a required parameter - // and will be updated soon. + // The launch template version number, $Latest, or $Default. You must specify + // a value, otherwise the request fails. + // + // If the value is $Latest, Amazon EC2 uses the latest version of the launch + // template. + // + // If the value is $Default, Amazon EC2 uses the default version of the launch + // template. Version *string `type:"string"` } @@ -72230,6 +78411,126 @@ func (s *FleetLaunchTemplateSpecificationRequest) SetVersion(v string) *FleetLau return s } +// The strategy to use when Amazon EC2 emits a signal that your Spot Instance +// is at an elevated risk of being interrupted. +type FleetSpotCapacityRebalance struct { + _ struct{} `type:"structure"` + + // To allow EC2 Fleet to launch a replacement Spot Instance when an instance + // rebalance notification is emitted for an existing Spot Instance in the fleet, + // specify launch. Only available for fleets of type maintain. + // + // When a replacement instance is launched, the instance marked for rebalance + // is not automatically terminated. You can terminate it, or you can wait until + // Amazon EC2 interrupts it. You are charged for both instances while they are + // running. + ReplacementStrategy *string `locationName:"replacementStrategy" type:"string" enum:"FleetReplacementStrategy"` +} + +// String returns the string representation +func (s FleetSpotCapacityRebalance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FleetSpotCapacityRebalance) GoString() string { + return s.String() +} + +// SetReplacementStrategy sets the ReplacementStrategy field's value. +func (s *FleetSpotCapacityRebalance) SetReplacementStrategy(v string) *FleetSpotCapacityRebalance { + s.ReplacementStrategy = &v + return s +} + +// The Spot Instance replacement strategy to use when Amazon EC2 emits a signal +// that your Spot Instance is at an elevated risk of being interrupted. 
For +// more information, see Capacity rebalancing (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-configuration-strategies.html#ec2-fleet-capacity-rebalance) +// in the Amazon Elastic Compute Cloud User Guide. +type FleetSpotCapacityRebalanceRequest struct { + _ struct{} `type:"structure"` + + // The replacement strategy to use. Only available for fleets of type maintain. + // + // To allow EC2 Fleet to launch a replacement Spot Instance when an instance + // rebalance notification is emitted for an existing Spot Instance in the fleet, + // specify launch. You must specify a value, otherwise you get an error. + // + // When a replacement instance is launched, the instance marked for rebalance + // is not automatically terminated. You can terminate it, or you can wait until + // Amazon EC2 interrupts it. You are charged for all instances while they are + // running. + ReplacementStrategy *string `type:"string" enum:"FleetReplacementStrategy"` +} + +// String returns the string representation +func (s FleetSpotCapacityRebalanceRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FleetSpotCapacityRebalanceRequest) GoString() string { + return s.String() +} + +// SetReplacementStrategy sets the ReplacementStrategy field's value. +func (s *FleetSpotCapacityRebalanceRequest) SetReplacementStrategy(v string) *FleetSpotCapacityRebalanceRequest { + s.ReplacementStrategy = &v + return s +} + +// The strategies for managing your Spot Instances that are at an elevated risk +// of being interrupted. +type FleetSpotMaintenanceStrategies struct { + _ struct{} `type:"structure"` + + // The strategy to use when Amazon EC2 emits a signal that your Spot Instance + // is at an elevated risk of being interrupted. + CapacityRebalance *FleetSpotCapacityRebalance `locationName:"capacityRebalance" type:"structure"` +} + +// String returns the string representation +func (s FleetSpotMaintenanceStrategies) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FleetSpotMaintenanceStrategies) GoString() string { + return s.String() +} + +// SetCapacityRebalance sets the CapacityRebalance field's value. +func (s *FleetSpotMaintenanceStrategies) SetCapacityRebalance(v *FleetSpotCapacityRebalance) *FleetSpotMaintenanceStrategies { + s.CapacityRebalance = v + return s +} + +// The strategies for managing your Spot Instances that are at an elevated risk +// of being interrupted. +type FleetSpotMaintenanceStrategiesRequest struct { + _ struct{} `type:"structure"` + + // The strategy to use when Amazon EC2 emits a signal that your Spot Instance + // is at an elevated risk of being interrupted. + CapacityRebalance *FleetSpotCapacityRebalanceRequest `type:"structure"` +} + +// String returns the string representation +func (s FleetSpotMaintenanceStrategiesRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FleetSpotMaintenanceStrategiesRequest) GoString() string { + return s.String() +} + +// SetCapacityRebalance sets the CapacityRebalance field's value. +func (s *FleetSpotMaintenanceStrategiesRequest) SetCapacityRebalance(v *FleetSpotCapacityRebalanceRequest) *FleetSpotMaintenanceStrategiesRequest { + s.CapacityRebalance = v + return s +} + // Describes a flow log. type FlowLog struct { _ struct{} `type:"structure"` @@ -72275,9 +78576,22 @@ type FlowLog struct { // The name of the flow log group. 
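// A minimal sketch that ties the FleetLaunchTemplateSpecificationRequest and
// FleetSpotMaintenanceStrategiesRequest shapes above together in a CreateFleet
// call (IDs and capacities are placeholders; capacity rebalance requires a
// fleet of type maintain, as noted above):
//
//    svc := ec2.New(session.Must(session.NewSession()))
//    _, err := svc.CreateFleet(&ec2.CreateFleetInput{
//        Type: aws.String("maintain"),
//        LaunchTemplateConfigs: []*ec2.FleetLaunchTemplateConfigRequest{{
//            LaunchTemplateSpecification: &ec2.FleetLaunchTemplateSpecificationRequest{
//                LaunchTemplateId: aws.String("lt-0123456789abcdef0"),
//                Version:          aws.String("$Latest"),
//            },
//        }},
//        SpotOptions: &ec2.SpotOptionsRequest{
//            MaintenanceStrategies: &ec2.FleetSpotMaintenanceStrategiesRequest{
//                CapacityRebalance: &ec2.FleetSpotCapacityRebalanceRequest{
//                    ReplacementStrategy: aws.String("launch"),
//                },
//            },
//        },
//        TargetCapacitySpecification: &ec2.TargetCapacitySpecificationRequest{
//            TotalTargetCapacity:       aws.Int64(2),
//            DefaultTargetCapacityType: aws.String("spot"),
//        },
//    })
//    _ = err // handle in real code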
LogGroupName *string `locationName:"logGroupName" type:"string"` + // The maximum interval of time, in seconds, during which a flow of packets + // is captured and aggregated into a flow log record. + // + // When a network interface is attached to a Nitro-based instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances), + // the aggregation interval is always 60 seconds (1 minute) or less, regardless + // of the specified value. + // + // Valid Values: 60 | 600 + MaxAggregationInterval *int64 `locationName:"maxAggregationInterval" type:"integer"` + // The ID of the resource on which the flow log was created. ResourceId *string `locationName:"resourceId" type:"string"` + // The tags for the flow log. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + // The type of traffic captured for the flow log. TrafficType *string `locationName:"trafficType" type:"string" enum:"TrafficType"` } @@ -72352,12 +78666,24 @@ func (s *FlowLog) SetLogGroupName(v string) *FlowLog { return s } +// SetMaxAggregationInterval sets the MaxAggregationInterval field's value. +func (s *FlowLog) SetMaxAggregationInterval(v int64) *FlowLog { + s.MaxAggregationInterval = &v + return s +} + // SetResourceId sets the ResourceId field's value. func (s *FlowLog) SetResourceId(v string) *FlowLog { s.ResourceId = &v return s } +// SetTags sets the Tags field's value. +func (s *FlowLog) SetTags(v []*Tag) *FlowLog { + s.Tags = v + return s +} + // SetTrafficType sets the TrafficType field's value. func (s *FlowLog) SetTrafficType(v string) *FlowLog { s.TrafficType = &v @@ -72419,7 +78745,7 @@ func (s *FpgaDeviceInfo) SetName(v string) *FpgaDeviceInfo { type FpgaDeviceMemoryInfo struct { _ struct{} `type:"structure"` - // The size (in MiB) for the memory available to the FPGA accelerator. + // The size of the memory available to the FPGA accelerator, in MiB. SizeInMiB *int64 `locationName:"sizeInMiB" type:"integer"` } @@ -72724,6 +79050,183 @@ func (s *FpgaInfo) SetTotalFpgaMemoryInMiB(v int64) *FpgaInfo { return s } +type GetAssociatedEnclaveCertificateIamRolesInput struct { + _ struct{} `type:"structure"` + + // The ARN of the ACM certificate for which to view the associated IAM roles, + // encryption keys, and Amazon S3 object information. + CertificateArn *string `min:"1" type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` +} + +// String returns the string representation +func (s GetAssociatedEnclaveCertificateIamRolesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAssociatedEnclaveCertificateIamRolesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetAssociatedEnclaveCertificateIamRolesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetAssociatedEnclaveCertificateIamRolesInput"} + if s.CertificateArn != nil && len(*s.CertificateArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CertificateArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCertificateArn sets the CertificateArn field's value. 
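// A minimal sketch of calling GetAssociatedEnclaveCertificateIamRoles, whose
// request shape is defined above (the certificate ARN is a placeholder; the
// result is printed whole since AssociatedRole is defined elsewhere in this
// file):
//
//    svc := ec2.New(session.Must(session.NewSession()))
//    out, err := svc.GetAssociatedEnclaveCertificateIamRoles(
//        &ec2.GetAssociatedEnclaveCertificateIamRolesInput{
//            CertificateArn: aws.String("arn:aws:acm:us-east-1:123456789012:certificate/example"),
//        })
//    if err == nil {
//        for _, role := range out.AssociatedRoles {
//            fmt.Println(role.String())
//        }
//    }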
+func (s *GetAssociatedEnclaveCertificateIamRolesInput) SetCertificateArn(v string) *GetAssociatedEnclaveCertificateIamRolesInput { + s.CertificateArn = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *GetAssociatedEnclaveCertificateIamRolesInput) SetDryRun(v bool) *GetAssociatedEnclaveCertificateIamRolesInput { + s.DryRun = &v + return s +} + +type GetAssociatedEnclaveCertificateIamRolesOutput struct { + _ struct{} `type:"structure"` + + // Information about the associated IAM roles. + AssociatedRoles []*AssociatedRole `locationName:"associatedRoleSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s GetAssociatedEnclaveCertificateIamRolesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAssociatedEnclaveCertificateIamRolesOutput) GoString() string { + return s.String() +} + +// SetAssociatedRoles sets the AssociatedRoles field's value. +func (s *GetAssociatedEnclaveCertificateIamRolesOutput) SetAssociatedRoles(v []*AssociatedRole) *GetAssociatedEnclaveCertificateIamRolesOutput { + s.AssociatedRoles = v + return s +} + +type GetAssociatedIpv6PoolCidrsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"1" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` + + // The ID of the IPv6 address pool. + // + // PoolId is a required field + PoolId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetAssociatedIpv6PoolCidrsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAssociatedIpv6PoolCidrsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetAssociatedIpv6PoolCidrsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetAssociatedIpv6PoolCidrsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.PoolId == nil { + invalidParams.Add(request.NewErrParamRequired("PoolId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *GetAssociatedIpv6PoolCidrsInput) SetDryRun(v bool) *GetAssociatedIpv6PoolCidrsInput { + s.DryRun = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *GetAssociatedIpv6PoolCidrsInput) SetMaxResults(v int64) *GetAssociatedIpv6PoolCidrsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetAssociatedIpv6PoolCidrsInput) SetNextToken(v string) *GetAssociatedIpv6PoolCidrsInput { + s.NextToken = &v + return s +} + +// SetPoolId sets the PoolId field's value. 
+func (s *GetAssociatedIpv6PoolCidrsInput) SetPoolId(v string) *GetAssociatedIpv6PoolCidrsInput { + s.PoolId = &v + return s +} + +type GetAssociatedIpv6PoolCidrsOutput struct { + _ struct{} `type:"structure"` + + // Information about the IPv6 CIDR block associations. + Ipv6CidrAssociations []*Ipv6CidrAssociation `locationName:"ipv6CidrAssociationSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s GetAssociatedIpv6PoolCidrsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAssociatedIpv6PoolCidrsOutput) GoString() string { + return s.String() +} + +// SetIpv6CidrAssociations sets the Ipv6CidrAssociations field's value. +func (s *GetAssociatedIpv6PoolCidrsOutput) SetIpv6CidrAssociations(v []*Ipv6CidrAssociation) *GetAssociatedIpv6PoolCidrsOutput { + s.Ipv6CidrAssociations = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetAssociatedIpv6PoolCidrsOutput) SetNextToken(v string) *GetAssociatedIpv6PoolCidrsOutput { + s.NextToken = &v + return s +} + type GetCapacityReservationUsageInput struct { _ struct{} `type:"structure"` @@ -72740,12 +79243,13 @@ type GetCapacityReservationUsageInput struct { // The maximum number of results to return for the request in a single page. // The remaining results can be seen by sending another request with the returned - // nextToken value. + // nextToken value. This value can be between 5 and 500. If maxResults is given + // a larger value than 500, you receive an error. // // Valid range: Minimum value of 1. Maximum value of 1000. MaxResults *int64 `min:"1" type:"integer"` - // The token to retrieve the next page of results. + // The token to use to retrieve the next page of results. NextToken *string `type:"string"` } @@ -73392,6 +79896,114 @@ func (s *GetEbsEncryptionByDefaultOutput) SetEbsEncryptionByDefault(v bool) *Get return s } +type GetGroupsForCapacityReservationInput struct { + _ struct{} `type:"structure"` + + // The ID of the Capacity Reservation. + // + // CapacityReservationId is a required field + CapacityReservationId *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The maximum number of results to return for the request in a single page. + // The remaining results can be seen by sending another request with the returned + // nextToken value. This value can be between 5 and 500. If maxResults is given + // a larger value than 500, you receive an error. + MaxResults *int64 `min:"1" type:"integer"` + + // The token to use to retrieve the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s GetGroupsForCapacityReservationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetGroupsForCapacityReservationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
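// A minimal sketch of paging through GetAssociatedIpv6PoolCidrs, defined
// above, using the NextToken convention shared by the paginated calls in this
// file (the pool ID is a placeholder):
//
//    svc := ec2.New(session.Must(session.NewSession()))
//    input := &ec2.GetAssociatedIpv6PoolCidrsInput{
//        PoolId: aws.String("ipv6pool-ec2-0123456789abcdef0"),
//    }
//    for {
//        out, err := svc.GetAssociatedIpv6PoolCidrs(input)
//        if err != nil {
//            break // handle the error in real code
//        }
//        for _, assoc := range out.Ipv6CidrAssociations {
//            fmt.Println(assoc.String())
//        }
//        if out.NextToken == nil {
//            break
//        }
//        input.NextToken = out.NextToken
//    }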
+func (s *GetGroupsForCapacityReservationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetGroupsForCapacityReservationInput"} + if s.CapacityReservationId == nil { + invalidParams.Add(request.NewErrParamRequired("CapacityReservationId")) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCapacityReservationId sets the CapacityReservationId field's value. +func (s *GetGroupsForCapacityReservationInput) SetCapacityReservationId(v string) *GetGroupsForCapacityReservationInput { + s.CapacityReservationId = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *GetGroupsForCapacityReservationInput) SetDryRun(v bool) *GetGroupsForCapacityReservationInput { + s.DryRun = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *GetGroupsForCapacityReservationInput) SetMaxResults(v int64) *GetGroupsForCapacityReservationInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetGroupsForCapacityReservationInput) SetNextToken(v string) *GetGroupsForCapacityReservationInput { + s.NextToken = &v + return s +} + +type GetGroupsForCapacityReservationOutput struct { + _ struct{} `type:"structure"` + + // Information about the resource groups to which the Capacity Reservation has + // been added. + CapacityReservationGroups []*CapacityReservationGroup `locationName:"capacityReservationGroupSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s GetGroupsForCapacityReservationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetGroupsForCapacityReservationOutput) GoString() string { + return s.String() +} + +// SetCapacityReservationGroups sets the CapacityReservationGroups field's value. +func (s *GetGroupsForCapacityReservationOutput) SetCapacityReservationGroups(v []*CapacityReservationGroup) *GetGroupsForCapacityReservationOutput { + s.CapacityReservationGroups = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetGroupsForCapacityReservationOutput) SetNextToken(v string) *GetGroupsForCapacityReservationOutput { + s.NextToken = &v + return s +} + type GetHostReservationPurchasePreviewInput struct { _ struct{} `type:"structure"` @@ -73569,6 +80181,226 @@ func (s *GetLaunchTemplateDataOutput) SetLaunchTemplateData(v *ResponseLaunchTem return s } +type GetManagedPrefixListAssociationsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"5" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` + + // The ID of the prefix list. 
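// A minimal sketch of the GetGroupsForCapacityReservation call defined above
// (the reservation ID is a placeholder):
//
//    svc := ec2.New(session.Must(session.NewSession()))
//    out, err := svc.GetGroupsForCapacityReservation(
//        &ec2.GetGroupsForCapacityReservationInput{
//            CapacityReservationId: aws.String("cr-0123456789abcdef0"),
//        })
//    if err == nil {
//        for _, g := range out.CapacityReservationGroups {
//            fmt.Println(g.String())
//        }
//    }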
+ // + // PrefixListId is a required field + PrefixListId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetManagedPrefixListAssociationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetManagedPrefixListAssociationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetManagedPrefixListAssociationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetManagedPrefixListAssociationsInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + if s.PrefixListId == nil { + invalidParams.Add(request.NewErrParamRequired("PrefixListId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *GetManagedPrefixListAssociationsInput) SetDryRun(v bool) *GetManagedPrefixListAssociationsInput { + s.DryRun = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *GetManagedPrefixListAssociationsInput) SetMaxResults(v int64) *GetManagedPrefixListAssociationsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetManagedPrefixListAssociationsInput) SetNextToken(v string) *GetManagedPrefixListAssociationsInput { + s.NextToken = &v + return s +} + +// SetPrefixListId sets the PrefixListId field's value. +func (s *GetManagedPrefixListAssociationsInput) SetPrefixListId(v string) *GetManagedPrefixListAssociationsInput { + s.PrefixListId = &v + return s +} + +type GetManagedPrefixListAssociationsOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the associations. + PrefixListAssociations []*PrefixListAssociation `locationName:"prefixListAssociationSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s GetManagedPrefixListAssociationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetManagedPrefixListAssociationsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *GetManagedPrefixListAssociationsOutput) SetNextToken(v string) *GetManagedPrefixListAssociationsOutput { + s.NextToken = &v + return s +} + +// SetPrefixListAssociations sets the PrefixListAssociations field's value. +func (s *GetManagedPrefixListAssociationsOutput) SetPrefixListAssociations(v []*PrefixListAssociation) *GetManagedPrefixListAssociationsOutput { + s.PrefixListAssociations = v + return s +} + +type GetManagedPrefixListEntriesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. 
+ MaxResults *int64 `min:"1" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` + + // The ID of the prefix list. + // + // PrefixListId is a required field + PrefixListId *string `type:"string" required:"true"` + + // The version of the prefix list for which to return the entries. The default + // is the current version. + TargetVersion *int64 `type:"long"` +} + +// String returns the string representation +func (s GetManagedPrefixListEntriesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetManagedPrefixListEntriesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetManagedPrefixListEntriesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetManagedPrefixListEntriesInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.PrefixListId == nil { + invalidParams.Add(request.NewErrParamRequired("PrefixListId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *GetManagedPrefixListEntriesInput) SetDryRun(v bool) *GetManagedPrefixListEntriesInput { + s.DryRun = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *GetManagedPrefixListEntriesInput) SetMaxResults(v int64) *GetManagedPrefixListEntriesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetManagedPrefixListEntriesInput) SetNextToken(v string) *GetManagedPrefixListEntriesInput { + s.NextToken = &v + return s +} + +// SetPrefixListId sets the PrefixListId field's value. +func (s *GetManagedPrefixListEntriesInput) SetPrefixListId(v string) *GetManagedPrefixListEntriesInput { + s.PrefixListId = &v + return s +} + +// SetTargetVersion sets the TargetVersion field's value. +func (s *GetManagedPrefixListEntriesInput) SetTargetVersion(v int64) *GetManagedPrefixListEntriesInput { + s.TargetVersion = &v + return s +} + +type GetManagedPrefixListEntriesOutput struct { + _ struct{} `type:"structure"` + + // Information about the prefix list entries. + Entries []*PrefixListEntry `locationName:"entrySet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s GetManagedPrefixListEntriesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetManagedPrefixListEntriesOutput) GoString() string { + return s.String() +} + +// SetEntries sets the Entries field's value. +func (s *GetManagedPrefixListEntriesOutput) SetEntries(v []*PrefixListEntry) *GetManagedPrefixListEntriesOutput { + s.Entries = v + return s +} + +// SetNextToken sets the NextToken field's value. 
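// A minimal sketch of listing prefix list entries with the generated
// GetManagedPrefixListEntriesPages helper (the prefix list ID is a
// placeholder, and the Pages helper name assumes the SDK's usual convention
// for paginated operations such as this one):
//
//    svc := ec2.New(session.Must(session.NewSession()))
//    _ = svc.GetManagedPrefixListEntriesPages(
//        &ec2.GetManagedPrefixListEntriesInput{
//            PrefixListId: aws.String("pl-0123456789abcdef0"),
//        },
//        func(page *ec2.GetManagedPrefixListEntriesOutput, lastPage bool) bool {
//            for _, e := range page.Entries {
//                fmt.Println(aws.StringValue(e.Cidr), aws.StringValue(e.Description))
//            }
//            return true // request the next page, if any
//        })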
+func (s *GetManagedPrefixListEntriesOutput) SetNextToken(v string) *GetManagedPrefixListEntriesOutput { + s.NextToken = &v + return s +} + type GetPasswordDataInput struct { _ struct{} `type:"structure"` @@ -74065,6 +80897,137 @@ func (s *GetTransitGatewayMulticastDomainAssociationsOutput) SetNextToken(v stri return s } +type GetTransitGatewayPrefixListReferencesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. The possible values are: + // + // * attachment.resource-id - The ID of the resource for the attachment. + // + // * attachment.resource-type - The type of resource for the attachment. + // Valid values are vpc | vpn | direct-connect-gateway | peering. + // + // * attachment.transit-gateway-attachment-id - The ID of the attachment. + // + // * is-blackhole - Whether traffic matching the route is blocked (true | + // false). + // + // * prefix-list-id - The ID of the prefix list. + // + // * prefix-list-owner-id - The ID of the owner of the prefix list. + // + // * state - The state of the prefix list reference (pending | available + // | modifying | deleting). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"5" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` + + // The ID of the transit gateway route table. + // + // TransitGatewayRouteTableId is a required field + TransitGatewayRouteTableId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetTransitGatewayPrefixListReferencesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTransitGatewayPrefixListReferencesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetTransitGatewayPrefixListReferencesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetTransitGatewayPrefixListReferencesInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + if s.TransitGatewayRouteTableId == nil { + invalidParams.Add(request.NewErrParamRequired("TransitGatewayRouteTableId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *GetTransitGatewayPrefixListReferencesInput) SetDryRun(v bool) *GetTransitGatewayPrefixListReferencesInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *GetTransitGatewayPrefixListReferencesInput) SetFilters(v []*Filter) *GetTransitGatewayPrefixListReferencesInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *GetTransitGatewayPrefixListReferencesInput) SetMaxResults(v int64) *GetTransitGatewayPrefixListReferencesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *GetTransitGatewayPrefixListReferencesInput) SetNextToken(v string) *GetTransitGatewayPrefixListReferencesInput { + s.NextToken = &v + return s +} + +// SetTransitGatewayRouteTableId sets the TransitGatewayRouteTableId field's value. +func (s *GetTransitGatewayPrefixListReferencesInput) SetTransitGatewayRouteTableId(v string) *GetTransitGatewayPrefixListReferencesInput { + s.TransitGatewayRouteTableId = &v + return s +} + +type GetTransitGatewayPrefixListReferencesOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the prefix list references. + TransitGatewayPrefixListReferences []*TransitGatewayPrefixListReference `locationName:"transitGatewayPrefixListReferenceSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s GetTransitGatewayPrefixListReferencesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTransitGatewayPrefixListReferencesOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *GetTransitGatewayPrefixListReferencesOutput) SetNextToken(v string) *GetTransitGatewayPrefixListReferencesOutput { + s.NextToken = &v + return s +} + +// SetTransitGatewayPrefixListReferences sets the TransitGatewayPrefixListReferences field's value. +func (s *GetTransitGatewayPrefixListReferencesOutput) SetTransitGatewayPrefixListReferences(v []*TransitGatewayPrefixListReference) *GetTransitGatewayPrefixListReferencesOutput { + s.TransitGatewayPrefixListReferences = v + return s +} + type GetTransitGatewayRouteTableAssociationsInput struct { _ struct{} `type:"structure"` @@ -74078,7 +81041,8 @@ type GetTransitGatewayRouteTableAssociationsInput struct { // // * resource-id - The ID of the resource. // - // * resource-type - The resource type (vpc | vpn). + // * resource-type - The resource type. Valid values are vpc | vpn | direct-connect-gateway + // | peering. // // * transit-gateway-attachment-id - The ID of the attachment. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` @@ -74198,7 +81162,8 @@ type GetTransitGatewayRouteTablePropagationsInput struct { // // * resource-id - The ID of the resource. // - // * resource-type - The resource type (vpc | vpn). + // * resource-type - The resource type. Valid values are vpc | vpn | direct-connect-gateway + // | peering. // // * transit-gateway-attachment-id - The ID of the attachment. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` @@ -74360,7 +81325,7 @@ func (s *GpuDeviceInfo) SetName(v string) *GpuDeviceInfo { type GpuDeviceMemoryInfo struct { _ struct{} `type:"structure"` - // The size (in MiB) for the memory available to the GPU accelerator. + // The size of the memory available to the GPU accelerator, in MiB. SizeInMiB *int64 `locationName:"sizeInMiB" type:"integer"` } @@ -74387,7 +81352,8 @@ type GpuInfo struct { // Describes the GPU accelerators for the instance type. Gpus []*GpuDeviceInfo `locationName:"gpus" locationNameList:"item" type:"list"` - // The total size of the memory for the GPU accelerators for the instance type. + // The total size of the memory for the GPU accelerators for the instance type, + // in MiB. 
TotalGpuMemoryInMiB *int64 `locationName:"totalGpuMemoryInMiB" type:"integer"` } @@ -74448,7 +81414,7 @@ func (s *GroupIdentifier) SetGroupName(v string) *GroupIdentifier { // Indicates whether your instance is configured for hibernation. This parameter // is valid only if the instance meets the hibernation prerequisites (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html#hibernating-prerequisites). -// For more information, see Hibernate Your Instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) +// For more information, see Hibernate your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) // in the Amazon Elastic Compute Cloud User Guide. type HibernationOptions struct { _ struct{} `type:"structure"` @@ -74476,7 +81442,7 @@ func (s *HibernationOptions) SetConfigured(v bool) *HibernationOptions { // Indicates whether your instance is configured for hibernation. This parameter // is valid only if the instance meets the hibernation prerequisites (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html#hibernating-prerequisites). -// For more information, see Hibernate Your Instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) +// For more information, see Hibernate your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) // in the Amazon Elastic Compute Cloud User Guide. type HibernationOptionsRequest struct { _ struct{} `type:"structure"` @@ -75398,6 +82364,11 @@ type Image struct { // This value is set to windows for Windows AMIs; otherwise, it is blank. Platform *string `locationName:"platform" type:"string" enum:"PlatformValues"` + // The platform details associated with the billing code of the AMI. For more + // information, see Obtaining Billing Information (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-billing-info.html) + // in the Amazon Elastic Compute Cloud User Guide. + PlatformDetails *string `locationName:"platformDetails" type:"string"` + // Any product codes associated with the AMI. ProductCodes []*ProductCode `locationName:"productCodes" locationNameList:"item" type:"list"` @@ -75431,6 +82402,14 @@ type Image struct { // Any tags assigned to the image. Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + // The operation of the Amazon EC2 instance and the billing code that is associated + // with the AMI. usageOperation corresponds to the lineitem/Operation (https://docs.aws.amazon.com/cur/latest/userguide/Lineitem-columns.html#Lineitem-details-O-Operation) + // column on your AWS Cost and Usage Report and in the AWS Price List API (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/price-changes.html). + // For the list of UsageOperation codes, see Platform Details and Usage Operation + // Billing Codes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-billing-info.html#billing-info) + // in the Amazon Elastic Compute Cloud User Guide. + UsageOperation *string `locationName:"usageOperation" type:"string"` + // The type of virtualization of the AMI. VirtualizationType *string `locationName:"virtualizationType" type:"string" enum:"VirtualizationType"` } @@ -75529,6 +82508,12 @@ func (s *Image) SetPlatform(v string) *Image { return s } +// SetPlatformDetails sets the PlatformDetails field's value. +func (s *Image) SetPlatformDetails(v string) *Image { + s.PlatformDetails = &v + return s +} + // SetProductCodes sets the ProductCodes field's value. 
 func (s *Image) SetProductCodes(v []*ProductCode) *Image {
 	s.ProductCodes = v
@@ -75583,6 +82568,12 @@ func (s *Image) SetTags(v []*Tag) *Image {
 	return s
 }
 
+// SetUsageOperation sets the UsageOperation field's value.
+func (s *Image) SetUsageOperation(v string) *Image {
+	s.UsageOperation = &v
+	return s
+}
+
 // SetVirtualizationType sets the VirtualizationType field's value.
 func (s *Image) SetVirtualizationType(v string) *Image {
 	s.VirtualizationType = &v
@@ -75601,7 +82592,7 @@ type ImageDiskContainer struct {
 
 	// The format of the disk image being imported.
 	//
-	// Valid values: VHD | VMDK | OVA
+	// Valid values: OVA | VHD | VHDX | VMDK
 	Format *string `type:"string"`
 
 	// The ID of the EBS snapshot to be used for importing the snapshot.
@@ -75843,6 +82834,9 @@ type ImportImageInput struct {
 
 	// The name of the role to use when not using the default role, 'vmimport'.
 	RoleName *string `type:"string"`
+
+	// The tags to apply to the image being imported.
+	TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"`
 }
 
 // String returns the string representation
@@ -75933,6 +82927,12 @@ func (s *ImportImageInput) SetRoleName(v string) *ImportImageInput {
 	return s
 }
 
+// SetTagSpecifications sets the TagSpecifications field's value.
+func (s *ImportImageInput) SetTagSpecifications(v []*TagSpecification) *ImportImageInput {
+	s.TagSpecifications = v
+	return s
+}
+
 // The request information of license configurations.
 type ImportImageLicenseConfigurationRequest struct {
 	_ struct{} `type:"structure"`
@@ -75990,7 +82990,7 @@ type ImportImageOutput struct {
 	// A description of the import task.
 	Description *string `locationName:"description" type:"string"`
 
-	// Indicates whether the AMI is encypted.
+	// Indicates whether the AMI is encrypted.
 	Encrypted *bool `locationName:"encrypted" type:"boolean"`
 
 	// The target hypervisor of the import task.
@@ -76026,6 +83026,9 @@ type ImportImageOutput struct {
 
 	// A detailed status message of the import task.
 	StatusMessage *string `locationName:"statusMessage" type:"string"`
+
+	// Any tags assigned to the image being imported.
+	Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
 }
 
 // String returns the string representation
@@ -76122,6 +83125,12 @@ func (s *ImportImageOutput) SetStatusMessage(v string) *ImportImageOutput {
 	return s
 }
 
+// SetTags sets the Tags field's value.
+func (s *ImportImageOutput) SetTags(v []*Tag) *ImportImageOutput {
+	s.Tags = v
+	return s
+}
+
 // Describes an import image task.
 type ImportImageTask struct {
 	_ struct{} `type:"structure"`
@@ -76174,7 +83183,7 @@ type ImportImageTask struct {
 	// A descriptive status message for the import image task.
 	StatusMessage *string `locationName:"statusMessage" type:"string"`
 
-	// Any tags applied to the import image task.
+	// The tags for the import image task.
 	Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"`
 }
 
@@ -76655,6 +83664,9 @@ type ImportKeyPairInput struct {
 	//
 	// PublicKeyMaterial is a required field
 	PublicKeyMaterial []byte `locationName:"publicKeyMaterial" type:"blob" required:"true"`
+
+	// The tags to apply to the imported key pair.
+	TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"`
 }
 
 // String returns the string representation
@@ -76701,6 +83713,12 @@ func (s *ImportKeyPairInput) SetPublicKeyMaterial(v []byte) *ImportKeyPairInput
 	return s
 }
 
+// SetTagSpecifications sets the TagSpecifications field's value.
+func (s *ImportKeyPairInput) SetTagSpecifications(v []*TagSpecification) *ImportKeyPairInput { + s.TagSpecifications = v + return s +} + type ImportKeyPairOutput struct { _ struct{} `type:"structure"` @@ -76709,6 +83727,12 @@ type ImportKeyPairOutput struct { // The key pair name you provided. KeyName *string `locationName:"keyName" type:"string"` + + // The ID of the resulting key pair. + KeyPairId *string `locationName:"keyPairId" type:"string"` + + // The tags applied to the imported key pair. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` } // String returns the string representation @@ -76733,6 +83757,18 @@ func (s *ImportKeyPairOutput) SetKeyName(v string) *ImportKeyPairOutput { return s } +// SetKeyPairId sets the KeyPairId field's value. +func (s *ImportKeyPairOutput) SetKeyPairId(v string) *ImportKeyPairOutput { + s.KeyPairId = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *ImportKeyPairOutput) SetTags(v []*Tag) *ImportKeyPairOutput { + s.Tags = v + return s +} + type ImportSnapshotInput struct { _ struct{} `type:"structure"` @@ -76795,6 +83831,9 @@ type ImportSnapshotInput struct { // The name of the role to use when not using the default role, 'vmimport'. RoleName *string `type:"string"` + + // The tags to apply to the snapshot being imported. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` } // String returns the string representation @@ -76855,6 +83894,12 @@ func (s *ImportSnapshotInput) SetRoleName(v string) *ImportSnapshotInput { return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *ImportSnapshotInput) SetTagSpecifications(v []*TagSpecification) *ImportSnapshotInput { + s.TagSpecifications = v + return s +} + type ImportSnapshotOutput struct { _ struct{} `type:"structure"` @@ -76866,6 +83911,9 @@ type ImportSnapshotOutput struct { // Information about the import snapshot task. SnapshotTaskDetail *SnapshotTaskDetail `locationName:"snapshotTaskDetail" type:"structure"` + + // Any tags assigned to the snapshot being imported. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` } // String returns the string representation @@ -76896,6 +83944,12 @@ func (s *ImportSnapshotOutput) SetSnapshotTaskDetail(v *SnapshotTaskDetail) *Imp return s } +// SetTags sets the Tags field's value. +func (s *ImportSnapshotOutput) SetTags(v []*Tag) *ImportSnapshotOutput { + s.Tags = v + return s +} + // Describes an import snapshot task. type ImportSnapshotTask struct { _ struct{} `type:"structure"` @@ -76909,7 +83963,7 @@ type ImportSnapshotTask struct { // Describes an import snapshot task. SnapshotTaskDetail *SnapshotTaskDetail `locationName:"snapshotTaskDetail" type:"structure"` - // Any tags applied to the import snapshot task. + // The tags for the import snapshot task. Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` } @@ -77235,10 +84289,14 @@ type Instance struct { // Specifies whether enhanced networking with ENA is enabled. EnaSupport *bool `locationName:"enaSupport" type:"boolean"` + // Indicates whether the instance is enabled for AWS Nitro Enclaves. + EnclaveOptions *EnclaveOptions `locationName:"enclaveOptions" type:"structure"` + // Indicates whether the instance is enabled for hibernation. HibernationOptions *HibernationOptions `locationName:"hibernationOptions" type:"structure"` - // The hypervisor type of the instance. + // The hypervisor type of the instance. 
The value xen is used for both Xen and + // Nitro hypervisors. Hypervisor *string `locationName:"hypervisor" type:"string" enum:"HypervisorType"` // The IAM instance profile associated with the instance, if applicable. @@ -77308,7 +84366,11 @@ type Instance struct { // name is only available if you've enabled DNS hostnames for your VPC. PublicDnsName *string `locationName:"dnsName" type:"string"` - // The public IPv4 address assigned to the instance, if applicable. + // The public IPv4 address, or the Carrier IP address assigned to the instance, + // if applicable. + // + // A Carrier IP address only applies to an instance launched in a subnet associated + // with a Wavelength Zone. PublicIpAddress *string `locationName:"ipAddress" type:"string"` // The RAM disk associated with this instance, if applicable. @@ -77437,6 +84499,12 @@ func (s *Instance) SetEnaSupport(v bool) *Instance { return s } +// SetEnclaveOptions sets the EnclaveOptions field's value. +func (s *Instance) SetEnclaveOptions(v *EnclaveOptions) *Instance { + s.EnclaveOptions = v + return s +} + // SetHibernationOptions sets the HibernationOptions field's value. func (s *Instance) SetHibernationOptions(v *HibernationOptions) *Instance { s.HibernationOptions = v @@ -78373,6 +85441,9 @@ func (s *InstanceNetworkInterface) SetVpcId(v string) *InstanceNetworkInterface type InstanceNetworkInterfaceAssociation struct { _ struct{} `type:"structure"` + // The carrier IP address associated with the network interface. + CarrierIp *string `locationName:"carrierIp" type:"string"` + // The ID of the owner of the Elastic IP address. IpOwnerId *string `locationName:"ipOwnerId" type:"string"` @@ -78393,6 +85464,12 @@ func (s InstanceNetworkInterfaceAssociation) GoString() string { return s.String() } +// SetCarrierIp sets the CarrierIp field's value. +func (s *InstanceNetworkInterfaceAssociation) SetCarrierIp(v string) *InstanceNetworkInterfaceAssociation { + s.CarrierIp = &v + return s +} + // SetIpOwnerId sets the IpOwnerId field's value. func (s *InstanceNetworkInterfaceAssociation) SetIpOwnerId(v string) *InstanceNetworkInterfaceAssociation { s.IpOwnerId = &v @@ -78427,6 +85504,9 @@ type InstanceNetworkInterfaceAttachment struct { // The index of the device on the instance for the network interface attachment. DeviceIndex *int64 `locationName:"deviceIndex" type:"integer"` + // The index of the network card. + NetworkCardIndex *int64 `locationName:"networkCardIndex" type:"integer"` + // The attachment state. Status *string `locationName:"status" type:"string" enum:"AttachmentStatus"` } @@ -78465,6 +85545,12 @@ func (s *InstanceNetworkInterfaceAttachment) SetDeviceIndex(v int64) *InstanceNe return s } +// SetNetworkCardIndex sets the NetworkCardIndex field's value. +func (s *InstanceNetworkInterfaceAttachment) SetNetworkCardIndex(v int64) *InstanceNetworkInterfaceAttachment { + s.NetworkCardIndex = &v + return s +} + // SetStatus sets the Status field's value. func (s *InstanceNetworkInterfaceAttachment) SetStatus(v string) *InstanceNetworkInterfaceAttachment { s.Status = &v @@ -78475,6 +85561,13 @@ func (s *InstanceNetworkInterfaceAttachment) SetStatus(v string) *InstanceNetwor type InstanceNetworkInterfaceSpecification struct { _ struct{} `type:"structure"` + // Indicates whether to assign a carrier IP address to the network interface. + // + // You can only assign a carrier IP address to a network interface that is in + // a subnet in a Wavelength Zone. 
For more information about carrier IP addresses, + // see Carrier IP addresses in the AWS Wavelength Developer Guide. + AssociateCarrierIpAddress *bool `type:"boolean"` + // Indicates whether to assign a public IPv4 address to an instance you launch // in a VPC. The public IP address can only be assigned to a network interface // for eth0, and can only be assigned to a new network interface, not an existing @@ -78502,8 +85595,10 @@ type InstanceNetworkInterfaceSpecification struct { // creating a network interface when launching an instance. Groups []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"` - // The type of network interface. To create an Elastic Fabric Adapter (EFA), - // specify efa. For more information, see Elastic Fabric Adapter (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa.html) + // The type of network interface. + // + // To create an Elastic Fabric Adapter (EFA), specify efa. For more information, + // see Elastic Fabric Adapter (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa.html) // in the Amazon Elastic Compute Cloud User Guide. // // If you are not creating an EFA, specify interface or omit this parameter. @@ -78524,7 +85619,15 @@ type InstanceNetworkInterfaceSpecification struct { // number of instances to launch. Ipv6Addresses []*InstanceIpv6Address `locationName:"ipv6AddressesSet" queryName:"Ipv6Addresses" locationNameList:"item" type:"list"` + // The index of the network card. Some instance types support multiple network + // cards. The primary network interface must be assigned to network card index + // 0. The default is network card index 0. + NetworkCardIndex *int64 `type:"integer"` + // The ID of the network interface. + // + // If you are creating a Spot Fleet, omit this parameter because you can’t + // specify a network interface ID in a launch specification. NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` // The private IPv4 address of the network interface. Applies only if creating @@ -78562,6 +85665,12 @@ func (s InstanceNetworkInterfaceSpecification) GoString() string { return s.String() } +// SetAssociateCarrierIpAddress sets the AssociateCarrierIpAddress field's value. +func (s *InstanceNetworkInterfaceSpecification) SetAssociateCarrierIpAddress(v bool) *InstanceNetworkInterfaceSpecification { + s.AssociateCarrierIpAddress = &v + return s +} + // SetAssociatePublicIpAddress sets the AssociatePublicIpAddress field's value. func (s *InstanceNetworkInterfaceSpecification) SetAssociatePublicIpAddress(v bool) *InstanceNetworkInterfaceSpecification { s.AssociatePublicIpAddress = &v @@ -78610,6 +85719,12 @@ func (s *InstanceNetworkInterfaceSpecification) SetIpv6Addresses(v []*InstanceIp return s } +// SetNetworkCardIndex sets the NetworkCardIndex field's value. +func (s *InstanceNetworkInterfaceSpecification) SetNetworkCardIndex(v int64) *InstanceNetworkInterfaceSpecification { + s.NetworkCardIndex = &v + return s +} + // SetNetworkInterfaceId sets the NetworkInterfaceId field's value. func (s *InstanceNetworkInterfaceSpecification) SetNetworkInterfaceId(v string) *InstanceNetworkInterfaceSpecification { s.NetworkInterfaceId = &v @@ -79060,9 +86175,13 @@ func (s *InstanceStatusSummary) SetStatus(v string) *InstanceStatusSummary { type InstanceStorageInfo struct { _ struct{} `type:"structure"` - // Array describing the disks that are available for the instance type. + // Describes the disks that are available for the instance type. 
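+	//
+	// Illustrative sketch only (svc is an assumed *ec2.EC2 client; the instance
+	// type queried is hypothetical): the storage details below, including the
+	// new NvmeSupport field, can be read via DescribeInstanceTypes:
+	//
+	//	out, err := svc.DescribeInstanceTypes(&ec2.DescribeInstanceTypesInput{
+	//		InstanceTypes: aws.StringSlice([]string{"i3en.large"}),
+	//	})
+	//	if err == nil && len(out.InstanceTypes) > 0 {
+	//		storage := out.InstanceTypes[0].InstanceStorageInfo
+	//		fmt.Println(aws.StringValue(storage.NvmeSupport), aws.Int64Value(storage.TotalSizeInGB))
+	//	}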
 	Disks []*DiskInfo `locationName:"disks" locationNameList:"item" type:"list"`
 
+	// Indicates whether non-volatile memory express (NVMe) is supported for instance
+	// store.
+	NvmeSupport *string `locationName:"nvmeSupport" type:"string" enum:"EphemeralNvmeSupport"`
+
 	// The total size of the disks, in GB.
 	TotalSizeInGB *int64 `locationName:"totalSizeInGB" type:"long"`
 }
@@ -79083,12 +86202,53 @@ func (s *InstanceStorageInfo) SetDisks(v []*DiskInfo) *InstanceStorageInfo {
 	return s
 }
 
+// SetNvmeSupport sets the NvmeSupport field's value.
+func (s *InstanceStorageInfo) SetNvmeSupport(v string) *InstanceStorageInfo {
+	s.NvmeSupport = &v
+	return s
+}
+
 // SetTotalSizeInGB sets the TotalSizeInGB field's value.
 func (s *InstanceStorageInfo) SetTotalSizeInGB(v int64) *InstanceStorageInfo {
 	s.TotalSizeInGB = &v
 	return s
 }
 
+// Describes the registered tag keys for the current Region.
+type InstanceTagNotificationAttribute struct {
+	_ struct{} `type:"structure"`
+
+	// Indicates whether all tag keys in the current Region are registered to appear
+	// in scheduled event notifications. true indicates that all tag keys in the
+	// current Region are registered.
+	IncludeAllTagsOfInstance *bool `locationName:"includeAllTagsOfInstance" type:"boolean"`
+
+	// The registered tag keys.
+	InstanceTagKeys []*string `locationName:"instanceTagKeySet" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s InstanceTagNotificationAttribute) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InstanceTagNotificationAttribute) GoString() string {
+	return s.String()
+}
+
+// SetIncludeAllTagsOfInstance sets the IncludeAllTagsOfInstance field's value.
+func (s *InstanceTagNotificationAttribute) SetIncludeAllTagsOfInstance(v bool) *InstanceTagNotificationAttribute {
+	s.IncludeAllTagsOfInstance = &v
+	return s
+}
+
+// SetInstanceTagKeys sets the InstanceTagKeys field's value.
+func (s *InstanceTagNotificationAttribute) SetInstanceTagKeys(v []*string) *InstanceTagNotificationAttribute {
+	s.InstanceTagKeys = v
+	return s
+}
+
 // Describes the instance type.
 type InstanceTypeInfo struct {
 	_ struct{} `type:"structure"`
@@ -79096,13 +86256,13 @@ type InstanceTypeInfo struct {
 	// Indicates whether auto recovery is supported.
 	AutoRecoverySupported *bool `locationName:"autoRecoverySupported" type:"boolean"`
 
-	// Indicates whether the instance is bare metal.
+	// Indicates whether the instance is a bare metal instance type.
 	BareMetal *bool `locationName:"bareMetal" type:"boolean"`
 
 	// Indicates whether the instance type is a burstable performance instance type.
 	BurstablePerformanceSupported *bool `locationName:"burstablePerformanceSupported" type:"boolean"`
 
-	// Indicates whether the instance type is a current generation.
+	// Indicates whether the instance type is current generation.
 	CurrentGeneration *bool `locationName:"currentGeneration" type:"boolean"`
 
 	// Indicates whether Dedicated Hosts are supported on the instance type.
@@ -79123,13 +86283,13 @@ type InstanceTypeInfo struct {
 	// Indicates whether On-Demand hibernation is supported.
 	HibernationSupported *bool `locationName:"hibernationSupported" type:"boolean"`
 
-	// Indicates the hypervisor used for the instance type.
+	// The hypervisor for the instance type.
 	Hypervisor *string `locationName:"hypervisor" type:"string" enum:"InstanceTypeHypervisor"`
 
 	// Describes the Inference accelerator settings for the instance type.
InferenceAcceleratorInfo *InferenceAcceleratorInfo `locationName:"inferenceAcceleratorInfo" type:"structure"` - // Describes the disks for the instance type. + // Describes the instance storage for the instance type. InstanceStorageInfo *InstanceStorageInfo `locationName:"instanceStorageInfo" type:"structure"` // Indicates whether instance storage is supported. @@ -79151,12 +86311,15 @@ type InstanceTypeInfo struct { // Describes the processor. ProcessorInfo *ProcessorInfo `locationName:"processorInfo" type:"structure"` - // Indicates the supported root device types. + // The supported root device types. SupportedRootDeviceTypes []*string `locationName:"supportedRootDeviceTypes" locationNameList:"item" type:"list"` // Indicates whether the instance type is offered for spot or On-Demand. SupportedUsageClasses []*string `locationName:"supportedUsageClasses" locationNameList:"item" type:"list"` + // The supported virtualization types. + SupportedVirtualizationTypes []*string `locationName:"supportedVirtualizationTypes" locationNameList:"item" type:"list"` + // Describes the vCPU configurations for the instance type. VCpuInfo *VCpuInfo `locationName:"vCpuInfo" type:"structure"` } @@ -79297,6 +86460,12 @@ func (s *InstanceTypeInfo) SetSupportedUsageClasses(v []*string) *InstanceTypeIn return s } +// SetSupportedVirtualizationTypes sets the SupportedVirtualizationTypes field's value. +func (s *InstanceTypeInfo) SetSupportedVirtualizationTypes(v []*string) *InstanceTypeInfo { + s.SupportedVirtualizationTypes = v + return s +} + // SetVCpuInfo sets the VCpuInfo field's value. func (s *InstanceTypeInfo) SetVCpuInfo(v *VCpuInfo) *InstanceTypeInfo { s.VCpuInfo = v @@ -79493,9 +86662,7 @@ type IpPermission struct { // [VPC only] The IPv6 ranges. Ipv6Ranges []*Ipv6Range `locationName:"ipv6Ranges" locationNameList:"item" type:"list"` - // [VPC only] The prefix list IDs for an AWS service. With outbound rules, this - // is the AWS service to access through a VPC endpoint from instances associated - // with the security group. + // [VPC only] The prefix list IDs. PrefixListIds []*PrefixListId `locationName:"prefixListIds" locationNameList:"item" type:"list"` // The end of port range for the TCP and UDP protocols, or an ICMP/ICMPv6 code. @@ -79597,6 +86764,39 @@ func (s *IpRange) SetDescription(v string) *IpRange { return s } +// Describes an IPv6 CIDR block association. +type Ipv6CidrAssociation struct { + _ struct{} `type:"structure"` + + // The resource that's associated with the IPv6 CIDR block. + AssociatedResource *string `locationName:"associatedResource" type:"string"` + + // The IPv6 CIDR block. + Ipv6Cidr *string `locationName:"ipv6Cidr" type:"string"` +} + +// String returns the string representation +func (s Ipv6CidrAssociation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Ipv6CidrAssociation) GoString() string { + return s.String() +} + +// SetAssociatedResource sets the AssociatedResource field's value. +func (s *Ipv6CidrAssociation) SetAssociatedResource(v string) *Ipv6CidrAssociation { + s.AssociatedResource = &v + return s +} + +// SetIpv6Cidr sets the Ipv6Cidr field's value. +func (s *Ipv6CidrAssociation) SetIpv6Cidr(v string) *Ipv6CidrAssociation { + s.Ipv6Cidr = &v + return s +} + // Describes an IPv6 CIDR block. type Ipv6CidrBlock struct { _ struct{} `type:"structure"` @@ -79621,6 +86821,57 @@ func (s *Ipv6CidrBlock) SetIpv6CidrBlock(v string) *Ipv6CidrBlock { return s } +// Describes an IPv6 address pool. 
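+//
+// Illustrative sketch only (svc is an assumed *ec2.EC2 client): pools of this
+// type and their CIDR blocks can be listed with DescribeIpv6Pools:
+//
+//	out, err := svc.DescribeIpv6Pools(&ec2.DescribeIpv6PoolsInput{})
+//	if err == nil {
+//		for _, p := range out.Ipv6Pools {
+//			for _, c := range p.PoolCidrBlocks {
+//				fmt.Println(aws.StringValue(p.PoolId), aws.StringValue(c.Cidr))
+//			}
+//		}
+//	}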
+type Ipv6Pool struct { + _ struct{} `type:"structure"` + + // The description for the address pool. + Description *string `locationName:"description" type:"string"` + + // The CIDR blocks for the address pool. + PoolCidrBlocks []*PoolCidrBlock `locationName:"poolCidrBlockSet" locationNameList:"item" type:"list"` + + // The ID of the address pool. + PoolId *string `locationName:"poolId" type:"string"` + + // Any tags for the address pool. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s Ipv6Pool) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Ipv6Pool) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *Ipv6Pool) SetDescription(v string) *Ipv6Pool { + s.Description = &v + return s +} + +// SetPoolCidrBlocks sets the PoolCidrBlocks field's value. +func (s *Ipv6Pool) SetPoolCidrBlocks(v []*PoolCidrBlock) *Ipv6Pool { + s.PoolCidrBlocks = v + return s +} + +// SetPoolId sets the PoolId field's value. +func (s *Ipv6Pool) SetPoolId(v string) *Ipv6Pool { + s.PoolId = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *Ipv6Pool) SetTags(v []*Tag) *Ipv6Pool { + s.Tags = v + return s +} + // [EC2-VPC only] Describes an IPv6 range. type Ipv6Range struct { _ struct{} `type:"structure"` @@ -80198,7 +87449,7 @@ func (s *LaunchTemplateBlockDeviceMappingRequest) SetVirtualName(v string) *Laun // to configure the instance to run in On-Demand capacity or to run in any open // Capacity Reservation that has matching attributes (instance type, platform, // Availability Zone). Use the CapacityReservationTarget parameter to explicitly -// target a specific Capacity Reservation. +// target a specific Capacity Reservation or a Capacity Reservation group. type LaunchTemplateCapacityReservationSpecificationRequest struct { _ struct{} `type:"structure"` @@ -80212,7 +87463,8 @@ type LaunchTemplateCapacityReservationSpecificationRequest struct { // one is available. The instance runs in On-Demand capacity. CapacityReservationPreference *string `type:"string" enum:"CapacityReservationPreference"` - // Information about the target Capacity Reservation. + // Information about the target Capacity Reservation or Capacity Reservation + // group. CapacityReservationTarget *CapacityReservationTarget `type:"structure"` } @@ -80252,7 +87504,8 @@ type LaunchTemplateCapacityReservationSpecificationResponse struct { // one is available. The instance runs in On-Demand capacity. CapacityReservationPreference *string `locationName:"capacityReservationPreference" type:"string" enum:"CapacityReservationPreference"` - // Information about the target Capacity Reservation. + // Information about the target Capacity Reservation or Capacity Reservation + // group. CapacityReservationTarget *CapacityReservationTargetResponse `locationName:"capacityReservationTarget" type:"structure"` } @@ -80485,16 +87738,15 @@ type LaunchTemplateEbsBlockDeviceRequest struct { // a volume from a snapshot, you can't specify an encryption value. Encrypted *bool `type:"boolean"` - // The number of I/O operations per second (IOPS) that the volume supports. - // For io1, this represents the number of IOPS that are provisioned for the - // volume. For gp2, this represents the baseline performance of the volume and - // the rate at which the volume accumulates I/O credits for bursting. 
For more - // information about General Purpose SSD baseline performance, I/O credits, - // and bursting, see Amazon EBS Volume Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) + // The number of I/O operations per second (IOPS) to provision for an io1 or + // io2 volume, with a maximum ratio of 50 IOPS/GiB for io1, and 500 IOPS/GiB + // for io2. Range is 100 to 64,000 IOPS for volumes in most Regions. Maximum + // IOPS of 64,000 is guaranteed only on Nitro-based instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances). + // Other instance families guarantee performance up to 32,000 IOPS. For more + // information, see Amazon EBS Volume Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) // in the Amazon Elastic Compute Cloud User Guide. // - // Condition: This parameter is required for requests to create io1 volumes; - // it is not used in requests to create gp2, st1, sc1, or standard volumes. + // This parameter is valid only for Provisioned IOPS SSD (io1 and io2) volumes. Iops *int64 `type:"integer"` // The ARN of the symmetric AWS Key Management Service (AWS KMS) CMK used for @@ -80656,6 +87908,57 @@ func (s *LaunchTemplateElasticInferenceAcceleratorResponse) SetType(v string) *L return s } +// Indicates whether the instance is enabled for AWS Nitro Enclaves. +type LaunchTemplateEnclaveOptions struct { + _ struct{} `type:"structure"` + + // If this parameter is set to true, the instance is enabled for AWS Nitro Enclaves; + // otherwise, it is not enabled for AWS Nitro Enclaves. + Enabled *bool `locationName:"enabled" type:"boolean"` +} + +// String returns the string representation +func (s LaunchTemplateEnclaveOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LaunchTemplateEnclaveOptions) GoString() string { + return s.String() +} + +// SetEnabled sets the Enabled field's value. +func (s *LaunchTemplateEnclaveOptions) SetEnabled(v bool) *LaunchTemplateEnclaveOptions { + s.Enabled = &v + return s +} + +// Indicates whether the instance is enabled for AWS Nitro Enclaves. For more +// information, see What is AWS Nitro Enclaves? (https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave.html) +// in the AWS Nitro Enclaves User Guide. +type LaunchTemplateEnclaveOptionsRequest struct { + _ struct{} `type:"structure"` + + // To enable the instance for AWS Nitro Enclaves, set this parameter to true. + Enabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s LaunchTemplateEnclaveOptionsRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LaunchTemplateEnclaveOptionsRequest) GoString() string { + return s.String() +} + +// SetEnabled sets the Enabled field's value. +func (s *LaunchTemplateEnclaveOptionsRequest) SetEnabled(v bool) *LaunchTemplateEnclaveOptionsRequest { + s.Enabled = &v + return s +} + // Indicates whether an instance is configured for hibernation. type LaunchTemplateHibernationOptions struct { _ struct{} `type:"structure"` @@ -80988,6 +88291,15 @@ func (s *LaunchTemplateInstanceMetadataOptionsRequest) SetHttpTokens(v string) * type LaunchTemplateInstanceNetworkInterfaceSpecification struct { _ struct{} `type:"structure"` + // Indicates whether to associate a Carrier IP address with eth0 for a new network + // interface. 
+ // + // Use this option when you launch an instance in a Wavelength Zone and want + // to associate a Carrier IP address with the network interface. For more information + // about Carrier IP addresses, see Carrier IP addresses (https://docs.aws.amazon.com/wavelength/latest/developerguide/how-wavelengths-work.html#provider-owned-ip) + // in the AWS Wavelength Developer Guide. + AssociateCarrierIpAddress *bool `locationName:"associateCarrierIpAddress" type:"boolean"` + // Indicates whether to associate a public IPv4 address with eth0 for a new // network interface. AssociatePublicIpAddress *bool `locationName:"associatePublicIpAddress" type:"boolean"` @@ -81013,6 +88325,9 @@ type LaunchTemplateInstanceNetworkInterfaceSpecification struct { // The IPv6 addresses for the network interface. Ipv6Addresses []*InstanceIpv6Address `locationName:"ipv6AddressesSet" locationNameList:"item" type:"list"` + // The index of the network card. + NetworkCardIndex *int64 `locationName:"networkCardIndex" type:"integer"` + // The ID of the network interface. NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` @@ -81039,6 +88354,12 @@ func (s LaunchTemplateInstanceNetworkInterfaceSpecification) GoString() string { return s.String() } +// SetAssociateCarrierIpAddress sets the AssociateCarrierIpAddress field's value. +func (s *LaunchTemplateInstanceNetworkInterfaceSpecification) SetAssociateCarrierIpAddress(v bool) *LaunchTemplateInstanceNetworkInterfaceSpecification { + s.AssociateCarrierIpAddress = &v + return s +} + // SetAssociatePublicIpAddress sets the AssociatePublicIpAddress field's value. func (s *LaunchTemplateInstanceNetworkInterfaceSpecification) SetAssociatePublicIpAddress(v bool) *LaunchTemplateInstanceNetworkInterfaceSpecification { s.AssociatePublicIpAddress = &v @@ -81087,6 +88408,12 @@ func (s *LaunchTemplateInstanceNetworkInterfaceSpecification) SetIpv6Addresses(v return s } +// SetNetworkCardIndex sets the NetworkCardIndex field's value. +func (s *LaunchTemplateInstanceNetworkInterfaceSpecification) SetNetworkCardIndex(v int64) *LaunchTemplateInstanceNetworkInterfaceSpecification { + s.NetworkCardIndex = &v + return s +} + // SetNetworkInterfaceId sets the NetworkInterfaceId field's value. func (s *LaunchTemplateInstanceNetworkInterfaceSpecification) SetNetworkInterfaceId(v string) *LaunchTemplateInstanceNetworkInterfaceSpecification { s.NetworkInterfaceId = &v @@ -81121,6 +88448,14 @@ func (s *LaunchTemplateInstanceNetworkInterfaceSpecification) SetSubnetId(v stri type LaunchTemplateInstanceNetworkInterfaceSpecificationRequest struct { _ struct{} `type:"structure"` + // Associates a Carrier IP address with eth0 for a new network interface. + // + // Use this option when you launch an instance in a Wavelength Zone and want + // to associate a Carrier IP address with the network interface. For more information + // about Carrier IP addresses, see Carrier IP addresses (https://docs.aws.amazon.com/wavelength/latest/developerguide/how-wavelengths-work.html#provider-owned-ip) + // in the AWS Wavelength Developer Guide. + AssociateCarrierIpAddress *bool `type:"boolean"` + // Associates a public IPv4 address with eth0 for a new network interface. AssociatePublicIpAddress *bool `type:"boolean"` @@ -81154,6 +88489,11 @@ type LaunchTemplateInstanceNetworkInterfaceSpecificationRequest struct { // subnet. You can't use this option if you're specifying a number of IPv6 addresses. 
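+	//
+	// Illustrative sketch only (all field values are hypothetical): a network
+	// interface specification for a subnet in a Wavelength Zone might combine
+	// the carrier IP and network card options added in this change as follows:
+	//
+	//	spec := &ec2.LaunchTemplateInstanceNetworkInterfaceSpecificationRequest{
+	//		AssociateCarrierIpAddress: aws.Bool(true),
+	//		DeviceIndex:               aws.Int64(0),
+	//		NetworkCardIndex:          aws.Int64(0),
+	//		SubnetId:                  aws.String("subnet-0123456789abcdef0"),
+	//	}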
Ipv6Addresses []*InstanceIpv6AddressRequest `locationNameList:"InstanceIpv6Address" type:"list"` + // The index of the network card. Some instance types support multiple network + // cards. The primary network interface must be assigned to network card index + // 0. The default is network card index 0. + NetworkCardIndex *int64 `type:"integer"` + // The ID of the network interface. NetworkInterfaceId *string `type:"string"` @@ -81180,6 +88520,12 @@ func (s LaunchTemplateInstanceNetworkInterfaceSpecificationRequest) GoString() s return s.String() } +// SetAssociateCarrierIpAddress sets the AssociateCarrierIpAddress field's value. +func (s *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest) SetAssociateCarrierIpAddress(v bool) *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest { + s.AssociateCarrierIpAddress = &v + return s +} + // SetAssociatePublicIpAddress sets the AssociatePublicIpAddress field's value. func (s *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest) SetAssociatePublicIpAddress(v bool) *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest { s.AssociatePublicIpAddress = &v @@ -81228,6 +88574,12 @@ func (s *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest) SetIpv6Addr return s } +// SetNetworkCardIndex sets the NetworkCardIndex field's value. +func (s *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest) SetNetworkCardIndex(v int64) *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest { + s.NetworkCardIndex = &v + return s +} + // SetNetworkInterfaceId sets the NetworkInterfaceId field's value. func (s *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest) SetNetworkInterfaceId(v string) *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest { s.NetworkInterfaceId = &v @@ -82152,7 +89504,7 @@ type LocalGateway struct { // The Amazon Resource Name (ARN) of the Outpost. OutpostArn *string `locationName:"outpostArn" type:"string"` - // The ID of the AWS account ID that owns the local gateway. + // The AWS account ID that owns the local gateway. OwnerId *string `locationName:"ownerId" type:"string"` // The state of the local gateway. @@ -82209,12 +89561,18 @@ type LocalGatewayRoute struct { // The CIDR block used for destination matches. DestinationCidrBlock *string `locationName:"destinationCidrBlock" type:"string"` + // The Amazon Resource Name (ARN) of the local gateway route table. + LocalGatewayRouteTableArn *string `locationName:"localGatewayRouteTableArn" min:"1" type:"string"` + // The ID of the local gateway route table. LocalGatewayRouteTableId *string `locationName:"localGatewayRouteTableId" type:"string"` // The ID of the virtual interface group. LocalGatewayVirtualInterfaceGroupId *string `locationName:"localGatewayVirtualInterfaceGroupId" type:"string"` + // The AWS account ID that owns the local gateway route. + OwnerId *string `locationName:"ownerId" type:"string"` + // The state of the route. State *string `locationName:"state" type:"string" enum:"LocalGatewayRouteState"` @@ -82238,6 +89596,12 @@ func (s *LocalGatewayRoute) SetDestinationCidrBlock(v string) *LocalGatewayRoute return s } +// SetLocalGatewayRouteTableArn sets the LocalGatewayRouteTableArn field's value. +func (s *LocalGatewayRoute) SetLocalGatewayRouteTableArn(v string) *LocalGatewayRoute { + s.LocalGatewayRouteTableArn = &v + return s +} + // SetLocalGatewayRouteTableId sets the LocalGatewayRouteTableId field's value. 
func (s *LocalGatewayRoute) SetLocalGatewayRouteTableId(v string) *LocalGatewayRoute { s.LocalGatewayRouteTableId = &v @@ -82250,6 +89614,12 @@ func (s *LocalGatewayRoute) SetLocalGatewayVirtualInterfaceGroupId(v string) *Lo return s } +// SetOwnerId sets the OwnerId field's value. +func (s *LocalGatewayRoute) SetOwnerId(v string) *LocalGatewayRoute { + s.OwnerId = &v + return s +} + // SetState sets the State field's value. func (s *LocalGatewayRoute) SetState(v string) *LocalGatewayRoute { s.State = &v @@ -82269,12 +89639,18 @@ type LocalGatewayRouteTable struct { // The ID of the local gateway. LocalGatewayId *string `locationName:"localGatewayId" type:"string"` + // The Amazon Resource Name (ARN) of the local gateway route table. + LocalGatewayRouteTableArn *string `locationName:"localGatewayRouteTableArn" min:"1" type:"string"` + // The ID of the local gateway route table. LocalGatewayRouteTableId *string `locationName:"localGatewayRouteTableId" type:"string"` // The Amazon Resource Name (ARN) of the Outpost. OutpostArn *string `locationName:"outpostArn" type:"string"` + // The AWS account ID that owns the local gateway route table. + OwnerId *string `locationName:"ownerId" type:"string"` + // The state of the local gateway route table. State *string `locationName:"state" type:"string"` @@ -82298,6 +89674,12 @@ func (s *LocalGatewayRouteTable) SetLocalGatewayId(v string) *LocalGatewayRouteT return s } +// SetLocalGatewayRouteTableArn sets the LocalGatewayRouteTableArn field's value. +func (s *LocalGatewayRouteTable) SetLocalGatewayRouteTableArn(v string) *LocalGatewayRouteTable { + s.LocalGatewayRouteTableArn = &v + return s +} + // SetLocalGatewayRouteTableId sets the LocalGatewayRouteTableId field's value. func (s *LocalGatewayRouteTable) SetLocalGatewayRouteTableId(v string) *LocalGatewayRouteTable { s.LocalGatewayRouteTableId = &v @@ -82310,6 +89692,12 @@ func (s *LocalGatewayRouteTable) SetOutpostArn(v string) *LocalGatewayRouteTable return s } +// SetOwnerId sets the OwnerId field's value. +func (s *LocalGatewayRouteTable) SetOwnerId(v string) *LocalGatewayRouteTable { + s.OwnerId = &v + return s +} + // SetState sets the State field's value. func (s *LocalGatewayRouteTable) SetState(v string) *LocalGatewayRouteTable { s.State = &v @@ -82330,6 +89718,10 @@ type LocalGatewayRouteTableVirtualInterfaceGroupAssociation struct { // The ID of the local gateway. LocalGatewayId *string `locationName:"localGatewayId" type:"string"` + // The Amazon Resource Name (ARN) of the local gateway route table for the virtual + // interface group. + LocalGatewayRouteTableArn *string `locationName:"localGatewayRouteTableArn" min:"1" type:"string"` + // The ID of the local gateway route table. LocalGatewayRouteTableId *string `locationName:"localGatewayRouteTableId" type:"string"` @@ -82339,6 +89731,9 @@ type LocalGatewayRouteTableVirtualInterfaceGroupAssociation struct { // The ID of the virtual interface group. LocalGatewayVirtualInterfaceGroupId *string `locationName:"localGatewayVirtualInterfaceGroupId" type:"string"` + // The AWS account ID that owns the local gateway virtual interface group association. + OwnerId *string `locationName:"ownerId" type:"string"` + // The state of the association. State *string `locationName:"state" type:"string"` @@ -82362,6 +89757,12 @@ func (s *LocalGatewayRouteTableVirtualInterfaceGroupAssociation) SetLocalGateway return s } +// SetLocalGatewayRouteTableArn sets the LocalGatewayRouteTableArn field's value. 
+func (s *LocalGatewayRouteTableVirtualInterfaceGroupAssociation) SetLocalGatewayRouteTableArn(v string) *LocalGatewayRouteTableVirtualInterfaceGroupAssociation { + s.LocalGatewayRouteTableArn = &v + return s +} + // SetLocalGatewayRouteTableId sets the LocalGatewayRouteTableId field's value. func (s *LocalGatewayRouteTableVirtualInterfaceGroupAssociation) SetLocalGatewayRouteTableId(v string) *LocalGatewayRouteTableVirtualInterfaceGroupAssociation { s.LocalGatewayRouteTableId = &v @@ -82380,6 +89781,12 @@ func (s *LocalGatewayRouteTableVirtualInterfaceGroupAssociation) SetLocalGateway return s } +// SetOwnerId sets the OwnerId field's value. +func (s *LocalGatewayRouteTableVirtualInterfaceGroupAssociation) SetOwnerId(v string) *LocalGatewayRouteTableVirtualInterfaceGroupAssociation { + s.OwnerId = &v + return s +} + // SetState sets the State field's value. func (s *LocalGatewayRouteTableVirtualInterfaceGroupAssociation) SetState(v string) *LocalGatewayRouteTableVirtualInterfaceGroupAssociation { s.State = &v @@ -82399,12 +89806,18 @@ type LocalGatewayRouteTableVpcAssociation struct { // The ID of the local gateway. LocalGatewayId *string `locationName:"localGatewayId" type:"string"` + // The Amazon Resource Name (ARN) of the local gateway route table for the association. + LocalGatewayRouteTableArn *string `locationName:"localGatewayRouteTableArn" min:"1" type:"string"` + // The ID of the local gateway route table. LocalGatewayRouteTableId *string `locationName:"localGatewayRouteTableId" type:"string"` // The ID of the association. LocalGatewayRouteTableVpcAssociationId *string `locationName:"localGatewayRouteTableVpcAssociationId" type:"string"` + // The AWS account ID that owns the local gateway route table for the association. + OwnerId *string `locationName:"ownerId" type:"string"` + // The state of the association. State *string `locationName:"state" type:"string"` @@ -82431,6 +89844,12 @@ func (s *LocalGatewayRouteTableVpcAssociation) SetLocalGatewayId(v string) *Loca return s } +// SetLocalGatewayRouteTableArn sets the LocalGatewayRouteTableArn field's value. +func (s *LocalGatewayRouteTableVpcAssociation) SetLocalGatewayRouteTableArn(v string) *LocalGatewayRouteTableVpcAssociation { + s.LocalGatewayRouteTableArn = &v + return s +} + // SetLocalGatewayRouteTableId sets the LocalGatewayRouteTableId field's value. func (s *LocalGatewayRouteTableVpcAssociation) SetLocalGatewayRouteTableId(v string) *LocalGatewayRouteTableVpcAssociation { s.LocalGatewayRouteTableId = &v @@ -82443,6 +89862,12 @@ func (s *LocalGatewayRouteTableVpcAssociation) SetLocalGatewayRouteTableVpcAssoc return s } +// SetOwnerId sets the OwnerId field's value. +func (s *LocalGatewayRouteTableVpcAssociation) SetOwnerId(v string) *LocalGatewayRouteTableVpcAssociation { + s.OwnerId = &v + return s +} + // SetState sets the State field's value. func (s *LocalGatewayRouteTableVpcAssociation) SetState(v string) *LocalGatewayRouteTableVpcAssociation { s.State = &v @@ -82478,6 +89903,9 @@ type LocalGatewayVirtualInterface struct { // The ID of the virtual interface. LocalGatewayVirtualInterfaceId *string `locationName:"localGatewayVirtualInterfaceId" type:"string"` + // The AWS account ID that owns the local gateway virtual interface. + OwnerId *string `locationName:"ownerId" type:"string"` + // The peer address. 
PeerAddress *string `locationName:"peerAddress" type:"string"` @@ -82525,6 +89953,12 @@ func (s *LocalGatewayVirtualInterface) SetLocalGatewayVirtualInterfaceId(v strin return s } +// SetOwnerId sets the OwnerId field's value. +func (s *LocalGatewayVirtualInterface) SetOwnerId(v string) *LocalGatewayVirtualInterface { + s.OwnerId = &v + return s +} + // SetPeerAddress sets the PeerAddress field's value. func (s *LocalGatewayVirtualInterface) SetPeerAddress(v string) *LocalGatewayVirtualInterface { s.PeerAddress = &v @@ -82562,6 +89996,9 @@ type LocalGatewayVirtualInterfaceGroup struct { // The IDs of the virtual interfaces. LocalGatewayVirtualInterfaceIds []*string `locationName:"localGatewayVirtualInterfaceIdSet" locationNameList:"item" type:"list"` + // The AWS account ID that owns the local gateway virtual interface group. + OwnerId *string `locationName:"ownerId" type:"string"` + // The tags assigned to the virtual interface group. Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` } @@ -82594,17 +90031,128 @@ func (s *LocalGatewayVirtualInterfaceGroup) SetLocalGatewayVirtualInterfaceIds(v return s } +// SetOwnerId sets the OwnerId field's value. +func (s *LocalGatewayVirtualInterfaceGroup) SetOwnerId(v string) *LocalGatewayVirtualInterfaceGroup { + s.OwnerId = &v + return s +} + // SetTags sets the Tags field's value. func (s *LocalGatewayVirtualInterfaceGroup) SetTags(v []*Tag) *LocalGatewayVirtualInterfaceGroup { s.Tags = v return s } +// Describes a managed prefix list. +type ManagedPrefixList struct { + _ struct{} `type:"structure"` + + // The IP address version. + AddressFamily *string `locationName:"addressFamily" type:"string"` + + // The maximum number of entries for the prefix list. + MaxEntries *int64 `locationName:"maxEntries" type:"integer"` + + // The ID of the owner of the prefix list. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The Amazon Resource Name (ARN) for the prefix list. + PrefixListArn *string `locationName:"prefixListArn" min:"1" type:"string"` + + // The ID of the prefix list. + PrefixListId *string `locationName:"prefixListId" type:"string"` + + // The name of the prefix list. + PrefixListName *string `locationName:"prefixListName" type:"string"` + + // The state of the prefix list. + State *string `locationName:"state" type:"string" enum:"PrefixListState"` + + // The state message. + StateMessage *string `locationName:"stateMessage" type:"string"` + + // The tags for the prefix list. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The version of the prefix list. + Version *int64 `locationName:"version" type:"long"` +} + +// String returns the string representation +func (s ManagedPrefixList) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ManagedPrefixList) GoString() string { + return s.String() +} + +// SetAddressFamily sets the AddressFamily field's value. +func (s *ManagedPrefixList) SetAddressFamily(v string) *ManagedPrefixList { + s.AddressFamily = &v + return s +} + +// SetMaxEntries sets the MaxEntries field's value. +func (s *ManagedPrefixList) SetMaxEntries(v int64) *ManagedPrefixList { + s.MaxEntries = &v + return s +} + +// SetOwnerId sets the OwnerId field's value. +func (s *ManagedPrefixList) SetOwnerId(v string) *ManagedPrefixList { + s.OwnerId = &v + return s +} + +// SetPrefixListArn sets the PrefixListArn field's value. 
+func (s *ManagedPrefixList) SetPrefixListArn(v string) *ManagedPrefixList {
+	s.PrefixListArn = &v
+	return s
+}
+
+// SetPrefixListId sets the PrefixListId field's value.
+func (s *ManagedPrefixList) SetPrefixListId(v string) *ManagedPrefixList {
+	s.PrefixListId = &v
+	return s
+}
+
+// SetPrefixListName sets the PrefixListName field's value.
+func (s *ManagedPrefixList) SetPrefixListName(v string) *ManagedPrefixList {
+	s.PrefixListName = &v
+	return s
+}
+
+// SetState sets the State field's value.
+func (s *ManagedPrefixList) SetState(v string) *ManagedPrefixList {
+	s.State = &v
+	return s
+}
+
+// SetStateMessage sets the StateMessage field's value.
+func (s *ManagedPrefixList) SetStateMessage(v string) *ManagedPrefixList {
+	s.StateMessage = &v
+	return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *ManagedPrefixList) SetTags(v []*Tag) *ManagedPrefixList {
+	s.Tags = v
+	return s
+}
+
+// SetVersion sets the Version field's value.
+func (s *ManagedPrefixList) SetVersion(v int64) *ManagedPrefixList {
+	s.Version = &v
+	return s
+}
+
 // Describes the memory for the instance type.
 type MemoryInfo struct {
 	_ struct{} `type:"structure"`
 
-	// Size of the memory, in MiB.
+	// The size of the memory, in MiB.
 	SizeInMiB *int64 `locationName:"sizeInMiB" type:"long"`
 }
 
@@ -82624,6 +90172,97 @@ func (s *MemoryInfo) SetSizeInMiB(v int64) *MemoryInfo {
 	return s
 }
 
+type ModifyAvailabilityZoneGroupInput struct {
+	_ struct{} `type:"structure"`
+
+	// Checks whether you have the required permissions for the action, without
+	// actually making the request, and provides an error response. If you have
+	// the required permissions, the error response is DryRunOperation. Otherwise,
+	// it is UnauthorizedOperation.
+	DryRun *bool `type:"boolean"`
+
+	// The name of the Availability Zone group, Local Zone group, or Wavelength
+	// Zone group.
+	//
+	// GroupName is a required field
+	GroupName *string `type:"string" required:"true"`
+
+	// Indicates whether you are opted in to the Local Zone group or Wavelength
+	// Zone group. The only valid value is opted-in. You must contact AWS Support
+	// (https://console.aws.amazon.com/support/home#/case/create%3FissueType=customer-service%26serviceCode=general-info%26getting-started%26categoryCode=using-aws%26services)
+	// to opt out of a Local Zone group or Wavelength Zone group.
+	//
+	// OptInStatus is a required field
+	OptInStatus *string `type:"string" required:"true" enum:"ModifyAvailabilityZoneOptInStatus"`
+}
+
+// String returns the string representation
+func (s ModifyAvailabilityZoneGroupInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModifyAvailabilityZoneGroupInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ModifyAvailabilityZoneGroupInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ModifyAvailabilityZoneGroupInput"}
+	if s.GroupName == nil {
+		invalidParams.Add(request.NewErrParamRequired("GroupName"))
+	}
+	if s.OptInStatus == nil {
+		invalidParams.Add(request.NewErrParamRequired("OptInStatus"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetDryRun sets the DryRun field's value.
+func (s *ModifyAvailabilityZoneGroupInput) SetDryRun(v bool) *ModifyAvailabilityZoneGroupInput {
+	s.DryRun = &v
+	return s
+}
+
+// SetGroupName sets the GroupName field's value.
+func (s *ModifyAvailabilityZoneGroupInput) SetGroupName(v string) *ModifyAvailabilityZoneGroupInput { + s.GroupName = &v + return s +} + +// SetOptInStatus sets the OptInStatus field's value. +func (s *ModifyAvailabilityZoneGroupInput) SetOptInStatus(v string) *ModifyAvailabilityZoneGroupInput { + s.OptInStatus = &v + return s +} + +type ModifyAvailabilityZoneGroupOutput struct { + _ struct{} `type:"structure"` + + // Is true if the request succeeds, and an error otherwise. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s ModifyAvailabilityZoneGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyAvailabilityZoneGroupOutput) GoString() string { + return s.String() +} + +// SetReturn sets the Return field's value. +func (s *ModifyAvailabilityZoneGroupOutput) SetReturn(v bool) *ModifyAvailabilityZoneGroupOutput { + s.Return = &v + return s +} + type ModifyCapacityReservationInput struct { _ struct{} `type:"structure"` @@ -82744,6 +90383,9 @@ func (s *ModifyCapacityReservationOutput) SetReturn(v bool) *ModifyCapacityReser type ModifyClientVpnEndpointInput struct { _ struct{} `type:"structure"` + // The options for managing connection authorization for new client connections. + ClientConnectOptions *ClientConnectOptions `type:"structure"` + // The ID of the Client VPN endpoint to modify. // // ClientVpnEndpointId is a required field @@ -82776,6 +90418,12 @@ type ModifyClientVpnEndpointInput struct { // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` + // The IDs of one or more security groups to apply to the target network. + SecurityGroupIds []*string `locationName:"SecurityGroupId" locationNameList:"item" type:"list"` + + // Specify whether to enable the self-service portal for the Client VPN endpoint. + SelfServicePortal *string `type:"string" enum:"SelfServicePortal"` + // The ARN of the server certificate to be used. The server certificate must // be provisioned in AWS Certificate Manager (ACM). ServerCertificateArn *string `type:"string"` @@ -82786,6 +90434,16 @@ type ModifyClientVpnEndpointInput struct { // VPN Endpoint (https://docs.aws.amazon.com/vpn/latest/clientvpn-admin/split-tunnel-vpn.html) // in the AWS Client VPN Administrator Guide. SplitTunnel *bool `type:"boolean"` + + // The ID of the VPC to associate with the Client VPN endpoint. + VpcId *string `type:"string"` + + // The port number to assign to the Client VPN endpoint for TCP and UDP traffic. + // + // Valid Values: 443 | 1194 + // + // Default Value: 443 + VpnPort *int64 `type:"integer"` } // String returns the string representation @@ -82811,6 +90469,12 @@ func (s *ModifyClientVpnEndpointInput) Validate() error { return nil } +// SetClientConnectOptions sets the ClientConnectOptions field's value. +func (s *ModifyClientVpnEndpointInput) SetClientConnectOptions(v *ClientConnectOptions) *ModifyClientVpnEndpointInput { + s.ClientConnectOptions = v + return s +} + // SetClientVpnEndpointId sets the ClientVpnEndpointId field's value. func (s *ModifyClientVpnEndpointInput) SetClientVpnEndpointId(v string) *ModifyClientVpnEndpointInput { s.ClientVpnEndpointId = &v @@ -82841,6 +90505,18 @@ func (s *ModifyClientVpnEndpointInput) SetDryRun(v bool) *ModifyClientVpnEndpoin return s } +// SetSecurityGroupIds sets the SecurityGroupIds field's value. 
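A short sketch of the new Client VPN endpoint fields in use, with placeholder IDs and the svc client from the sketch above:

	input := &ec2.ModifyClientVpnEndpointInput{}
	input.SetClientVpnEndpointId("cvpn-endpoint-0123456789abcdef0")          // placeholder
	input.SetSecurityGroupIds([]*string{aws.String("sg-0123456789abcdef0")}) // placeholder
	input.SetVpcId("vpc-0123456789abcdef0") // placeholder; the VPC containing those groups
	input.SetVpnPort(1194)                  // valid values per the field doc: 443 | 1194
	if _, err := svc.ModifyClientVpnEndpoint(input); err != nil {
		// handle error
	}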
+func (s *ModifyClientVpnEndpointInput) SetSecurityGroupIds(v []*string) *ModifyClientVpnEndpointInput { + s.SecurityGroupIds = v + return s +} + +// SetSelfServicePortal sets the SelfServicePortal field's value. +func (s *ModifyClientVpnEndpointInput) SetSelfServicePortal(v string) *ModifyClientVpnEndpointInput { + s.SelfServicePortal = &v + return s +} + // SetServerCertificateArn sets the ServerCertificateArn field's value. func (s *ModifyClientVpnEndpointInput) SetServerCertificateArn(v string) *ModifyClientVpnEndpointInput { s.ServerCertificateArn = &v @@ -82853,6 +90529,18 @@ func (s *ModifyClientVpnEndpointInput) SetSplitTunnel(v bool) *ModifyClientVpnEn return s } +// SetVpcId sets the VpcId field's value. +func (s *ModifyClientVpnEndpointInput) SetVpcId(v string) *ModifyClientVpnEndpointInput { + s.VpcId = &v + return s +} + +// SetVpnPort sets the VpnPort field's value. +func (s *ModifyClientVpnEndpointInput) SetVpnPort(v int64) *ModifyClientVpnEndpointInput { + s.VpnPort = &v + return s +} + type ModifyClientVpnEndpointOutput struct { _ struct{} `type:"structure"` @@ -82981,11 +90669,11 @@ type ModifyEbsDefaultKmsKeyIdInput struct { // // You can specify the CMK using any of the following: // - // * Key ID. For example, key/1234abcd-12ab-34cd-56ef-1234567890ab. + // * Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab. // // * Key alias. For example, alias/ExampleAlias. // - // * Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. + // * Key ARN. For example, arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab. // // * Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias. // @@ -83076,10 +90764,11 @@ type ModifyFleetInput struct { // FleetId is a required field FleetId *string `type:"string" required:"true"` + // The launch template and overrides. + LaunchTemplateConfigs []*FleetLaunchTemplateConfigRequest `locationName:"LaunchTemplateConfig" locationNameList:"item" type:"list"` + // The size of the EC2 Fleet. - // - // TargetCapacitySpecification is a required field - TargetCapacitySpecification *TargetCapacitySpecificationRequest `type:"structure" required:"true"` + TargetCapacitySpecification *TargetCapacitySpecificationRequest `type:"structure"` } // String returns the string representation @@ -83098,8 +90787,15 @@ func (s *ModifyFleetInput) Validate() error { if s.FleetId == nil { invalidParams.Add(request.NewErrParamRequired("FleetId")) } - if s.TargetCapacitySpecification == nil { - invalidParams.Add(request.NewErrParamRequired("TargetCapacitySpecification")) + if s.LaunchTemplateConfigs != nil { + for i, v := range s.LaunchTemplateConfigs { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LaunchTemplateConfigs", i), err.(request.ErrInvalidParams)) + } + } } if s.TargetCapacitySpecification != nil { if err := s.TargetCapacitySpecification.Validate(); err != nil { @@ -83131,6 +90827,12 @@ func (s *ModifyFleetInput) SetFleetId(v string) *ModifyFleetInput { return s } +// SetLaunchTemplateConfigs sets the LaunchTemplateConfigs field's value. +func (s *ModifyFleetInput) SetLaunchTemplateConfigs(v []*FleetLaunchTemplateConfigRequest) *ModifyFleetInput { + s.LaunchTemplateConfigs = v + return s +} + // SetTargetCapacitySpecification sets the TargetCapacitySpecification field's value. 
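Since TargetCapacitySpecification is no longer required, a fleet can now be modified through its launch template alone; a sketch with placeholder IDs, assuming the field names of the SDK's fleet launch template request types:

	input := &ec2.ModifyFleetInput{}
	input.SetFleetId("fleet-11111111-2222-3333-4444-555555555555") // placeholder
	input.SetLaunchTemplateConfigs([]*ec2.FleetLaunchTemplateConfigRequest{{
		LaunchTemplateSpecification: &ec2.FleetLaunchTemplateSpecificationRequest{
			LaunchTemplateId: aws.String("lt-0123456789abcdef0"), // placeholder
			Version:          aws.String("2"),
		},
	}})
	// Validate now walks each LaunchTemplateConfigs entry, per the validation added above.
	if err := input.Validate(); err == nil {
		_, _ = svc.ModifyFleet(input)
	}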
func (s *ModifyFleetInput) SetTargetCapacitySpecification(v *TargetCapacitySpecificationRequest) *ModifyFleetInput { s.TargetCapacitySpecification = v @@ -83751,7 +91453,7 @@ type ModifyInstanceAttributeInput struct { // // To add instance store volumes to an Amazon EBS-backed instance, you must // add them when you launch the instance. For more information, see Updating - // the Block Device Mapping when Launching an Instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html#Using_OverridingAMIBDM) + // the block device mapping when launching an instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html#Using_OverridingAMIBDM) // in the Amazon Elastic Compute Cloud User Guide. BlockDeviceMappings []*InstanceBlockDeviceMappingSpecification `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` @@ -83794,7 +91496,7 @@ type ModifyInstanceAttributeInput struct { InstanceInitiatedShutdownBehavior *AttributeValue `locationName:"instanceInitiatedShutdownBehavior" type:"structure"` // Changes the instance type to the specified value. For more information, see - // Instance Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html). + // Instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html). // If the instance type is not valid, the error returned is InvalidInstanceAttributeValue. InstanceType *AttributeValue `locationName:"instanceType" type:"structure"` @@ -84602,6 +92304,135 @@ func (s *ModifyLaunchTemplateOutput) SetLaunchTemplate(v *LaunchTemplate) *Modif return s } +type ModifyManagedPrefixListInput struct { + _ struct{} `type:"structure"` + + // One or more entries to add to the prefix list. + AddEntries []*AddPrefixListEntry `locationName:"AddEntry" type:"list"` + + // The current version of the prefix list. + CurrentVersion *int64 `type:"long"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the prefix list. + // + // PrefixListId is a required field + PrefixListId *string `type:"string" required:"true"` + + // A name for the prefix list. + PrefixListName *string `type:"string"` + + // One or more entries to remove from the prefix list. + RemoveEntries []*RemovePrefixListEntry `locationName:"RemoveEntry" type:"list"` +} + +// String returns the string representation +func (s ModifyManagedPrefixListInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyManagedPrefixListInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
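A sketch of an atomic add-and-remove against a prefix list, using the input type above and the setters that follow; the ID, version, and CIDRs are placeholders:

	input := &ec2.ModifyManagedPrefixListInput{}
	input.SetPrefixListId("pl-0123456789abcdef0") // placeholder
	input.SetCurrentVersion(1)                    // must match the list's current version
	input.SetAddEntries([]*ec2.AddPrefixListEntry{
		{Cidr: aws.String("10.0.0.0/16"), Description: aws.String("example entry")},
	})
	input.SetRemoveEntries([]*ec2.RemovePrefixListEntry{
		{Cidr: aws.String("10.1.0.0/16")},
	})
	if out, err := svc.ModifyManagedPrefixList(input); err == nil {
		_ = out.PrefixList // the updated ManagedPrefixList described earlier
	}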
+func (s *ModifyManagedPrefixListInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyManagedPrefixListInput"} + if s.PrefixListId == nil { + invalidParams.Add(request.NewErrParamRequired("PrefixListId")) + } + if s.AddEntries != nil { + for i, v := range s.AddEntries { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AddEntries", i), err.(request.ErrInvalidParams)) + } + } + } + if s.RemoveEntries != nil { + for i, v := range s.RemoveEntries { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RemoveEntries", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAddEntries sets the AddEntries field's value. +func (s *ModifyManagedPrefixListInput) SetAddEntries(v []*AddPrefixListEntry) *ModifyManagedPrefixListInput { + s.AddEntries = v + return s +} + +// SetCurrentVersion sets the CurrentVersion field's value. +func (s *ModifyManagedPrefixListInput) SetCurrentVersion(v int64) *ModifyManagedPrefixListInput { + s.CurrentVersion = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyManagedPrefixListInput) SetDryRun(v bool) *ModifyManagedPrefixListInput { + s.DryRun = &v + return s +} + +// SetPrefixListId sets the PrefixListId field's value. +func (s *ModifyManagedPrefixListInput) SetPrefixListId(v string) *ModifyManagedPrefixListInput { + s.PrefixListId = &v + return s +} + +// SetPrefixListName sets the PrefixListName field's value. +func (s *ModifyManagedPrefixListInput) SetPrefixListName(v string) *ModifyManagedPrefixListInput { + s.PrefixListName = &v + return s +} + +// SetRemoveEntries sets the RemoveEntries field's value. +func (s *ModifyManagedPrefixListInput) SetRemoveEntries(v []*RemovePrefixListEntry) *ModifyManagedPrefixListInput { + s.RemoveEntries = v + return s +} + +type ModifyManagedPrefixListOutput struct { + _ struct{} `type:"structure"` + + // Information about the prefix list. + PrefixList *ManagedPrefixList `locationName:"prefixList" type:"structure"` +} + +// String returns the string representation +func (s ModifyManagedPrefixListOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyManagedPrefixListOutput) GoString() string { + return s.String() +} + +// SetPrefixList sets the PrefixList field's value. +func (s *ModifyManagedPrefixListOutput) SetPrefixList(v *ManagedPrefixList) *ModifyManagedPrefixListOutput { + s.PrefixList = v + return s +} + // Contains the parameters for ModifyNetworkInterfaceAttribute. type ModifyNetworkInterfaceAttributeInput struct { _ struct{} `type:"structure"` @@ -84917,6 +92748,12 @@ type ModifySpotFleetRequestInput struct { // the Spot Fleet. ExcessCapacityTerminationPolicy *string `locationName:"excessCapacityTerminationPolicy" type:"string" enum:"ExcessCapacityTerminationPolicy"` + // The launch template and overrides. You can only use this parameter if you + // specified a launch template (LaunchTemplateConfigs) in your Spot Fleet request. + // If you specified LaunchSpecifications in your Spot Fleet request, then omit + // this parameter. + LaunchTemplateConfigs []*LaunchTemplateConfig `locationName:"LaunchTemplateConfig" locationNameList:"item" type:"list"` + // The number of On-Demand Instances in the fleet. 
OnDemandTargetCapacity *int64 `type:"integer"` @@ -84945,6 +92782,16 @@ func (s *ModifySpotFleetRequestInput) Validate() error { if s.SpotFleetRequestId == nil { invalidParams.Add(request.NewErrParamRequired("SpotFleetRequestId")) } + if s.LaunchTemplateConfigs != nil { + for i, v := range s.LaunchTemplateConfigs { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LaunchTemplateConfigs", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -84958,6 +92805,12 @@ func (s *ModifySpotFleetRequestInput) SetExcessCapacityTerminationPolicy(v strin return s } +// SetLaunchTemplateConfigs sets the LaunchTemplateConfigs field's value. +func (s *ModifySpotFleetRequestInput) SetLaunchTemplateConfigs(v []*LaunchTemplateConfig) *ModifySpotFleetRequestInput { + s.LaunchTemplateConfigs = v + return s +} + // SetOnDemandTargetCapacity sets the OnDemandTargetCapacity field's value. func (s *ModifySpotFleetRequestInput) SetOnDemandTargetCapacity(v int64) *ModifySpotFleetRequestInput { s.OnDemandTargetCapacity = &v @@ -85013,8 +92866,20 @@ type ModifySubnetAttributeInput struct { // or later of the Amazon EC2 API. AssignIpv6AddressOnCreation *AttributeBooleanValue `type:"structure"` - // Specify true to indicate that ENIs attached to instances created in the specified - // subnet should be assigned a public IPv4 address. + // The customer-owned IPv4 address pool associated with the subnet. + // + // You must set this value when you specify true for MapCustomerOwnedIpOnLaunch. + CustomerOwnedIpv4Pool *string `type:"string"` + + // Specify true to indicate that network interfaces attached to instances created + // in the specified subnet should be assigned a customer-owned IPv4 address. + // + // When this value is true, you must specify the customer-owned IP pool using + // CustomerOwnedIpv4Pool. + MapCustomerOwnedIpOnLaunch *AttributeBooleanValue `type:"structure"` + + // Specify true to indicate that network interfaces attached to instances created + // in the specified subnet should be assigned a public IPv4 address. MapPublicIpOnLaunch *AttributeBooleanValue `type:"structure"` // The ID of the subnet. @@ -85052,6 +92917,18 @@ func (s *ModifySubnetAttributeInput) SetAssignIpv6AddressOnCreation(v *Attribute return s } +// SetCustomerOwnedIpv4Pool sets the CustomerOwnedIpv4Pool field's value. +func (s *ModifySubnetAttributeInput) SetCustomerOwnedIpv4Pool(v string) *ModifySubnetAttributeInput { + s.CustomerOwnedIpv4Pool = &v + return s +} + +// SetMapCustomerOwnedIpOnLaunch sets the MapCustomerOwnedIpOnLaunch field's value. +func (s *ModifySubnetAttributeInput) SetMapCustomerOwnedIpOnLaunch(v *AttributeBooleanValue) *ModifySubnetAttributeInput { + s.MapCustomerOwnedIpOnLaunch = v + return s +} + // SetMapPublicIpOnLaunch sets the MapPublicIpOnLaunch field's value. func (s *ModifySubnetAttributeInput) SetMapPublicIpOnLaunch(v *AttributeBooleanValue) *ModifySubnetAttributeInput { s.MapPublicIpOnLaunch = v @@ -85485,6 +93362,282 @@ func (s *ModifyTrafficMirrorSessionOutput) SetTrafficMirrorSession(v *TrafficMir return s } +type ModifyTransitGatewayInput struct { + _ struct{} `type:"structure"` + + // The description for the transit gateway. + Description *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. 
If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The options to modify. + Options *ModifyTransitGatewayOptions `type:"structure"` + + // The ID of the transit gateway. + // + // TransitGatewayId is a required field + TransitGatewayId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyTransitGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyTransitGatewayInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyTransitGatewayInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyTransitGatewayInput"} + if s.TransitGatewayId == nil { + invalidParams.Add(request.NewErrParamRequired("TransitGatewayId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDescription sets the Description field's value. +func (s *ModifyTransitGatewayInput) SetDescription(v string) *ModifyTransitGatewayInput { + s.Description = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyTransitGatewayInput) SetDryRun(v bool) *ModifyTransitGatewayInput { + s.DryRun = &v + return s +} + +// SetOptions sets the Options field's value. +func (s *ModifyTransitGatewayInput) SetOptions(v *ModifyTransitGatewayOptions) *ModifyTransitGatewayInput { + s.Options = v + return s +} + +// SetTransitGatewayId sets the TransitGatewayId field's value. +func (s *ModifyTransitGatewayInput) SetTransitGatewayId(v string) *ModifyTransitGatewayInput { + s.TransitGatewayId = &v + return s +} + +// The transit gateway options. +type ModifyTransitGatewayOptions struct { + _ struct{} `type:"structure"` + + // The ID of the default association route table. + AssociationDefaultRouteTableId *string `type:"string"` + + // Enable or disable automatic acceptance of attachment requests. + AutoAcceptSharedAttachments *string `type:"string" enum:"AutoAcceptSharedAttachmentsValue"` + + // Enable or disable automatic association with the default association route + // table. + DefaultRouteTableAssociation *string `type:"string" enum:"DefaultRouteTableAssociationValue"` + + // Enable or disable automatic propagation of routes to the default propagation + // route table. + DefaultRouteTablePropagation *string `type:"string" enum:"DefaultRouteTablePropagationValue"` + + // Enable or disable DNS support. + DnsSupport *string `type:"string" enum:"DnsSupportValue"` + + // The ID of the default propagation route table. + PropagationDefaultRouteTableId *string `type:"string"` + + // Enable or disable Equal Cost Multipath Protocol support. + VpnEcmpSupport *string `type:"string" enum:"VpnEcmpSupportValue"` +} + +// String returns the string representation +func (s ModifyTransitGatewayOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyTransitGatewayOptions) GoString() string { + return s.String() +} + +// SetAssociationDefaultRouteTableId sets the AssociationDefaultRouteTableId field's value. +func (s *ModifyTransitGatewayOptions) SetAssociationDefaultRouteTableId(v string) *ModifyTransitGatewayOptions { + s.AssociationDefaultRouteTableId = &v + return s +} + +// SetAutoAcceptSharedAttachments sets the AutoAcceptSharedAttachments field's value. 
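A sketch of passing the new options struct to ModifyTransitGateway; the enum strings follow the Value types named in the field tags, and the ID is a placeholder:

	input := &ec2.ModifyTransitGatewayInput{}
	input.SetTransitGatewayId("tgw-0123456789abcdef0") // placeholder
	input.SetOptions(&ec2.ModifyTransitGatewayOptions{
		AutoAcceptSharedAttachments: aws.String("enable"), // AutoAcceptSharedAttachmentsValue
		DnsSupport:                  aws.String("enable"), // DnsSupportValue
	})
	_, err := svc.ModifyTransitGateway(input)
	_ = err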
+func (s *ModifyTransitGatewayOptions) SetAutoAcceptSharedAttachments(v string) *ModifyTransitGatewayOptions { + s.AutoAcceptSharedAttachments = &v + return s +} + +// SetDefaultRouteTableAssociation sets the DefaultRouteTableAssociation field's value. +func (s *ModifyTransitGatewayOptions) SetDefaultRouteTableAssociation(v string) *ModifyTransitGatewayOptions { + s.DefaultRouteTableAssociation = &v + return s +} + +// SetDefaultRouteTablePropagation sets the DefaultRouteTablePropagation field's value. +func (s *ModifyTransitGatewayOptions) SetDefaultRouteTablePropagation(v string) *ModifyTransitGatewayOptions { + s.DefaultRouteTablePropagation = &v + return s +} + +// SetDnsSupport sets the DnsSupport field's value. +func (s *ModifyTransitGatewayOptions) SetDnsSupport(v string) *ModifyTransitGatewayOptions { + s.DnsSupport = &v + return s +} + +// SetPropagationDefaultRouteTableId sets the PropagationDefaultRouteTableId field's value. +func (s *ModifyTransitGatewayOptions) SetPropagationDefaultRouteTableId(v string) *ModifyTransitGatewayOptions { + s.PropagationDefaultRouteTableId = &v + return s +} + +// SetVpnEcmpSupport sets the VpnEcmpSupport field's value. +func (s *ModifyTransitGatewayOptions) SetVpnEcmpSupport(v string) *ModifyTransitGatewayOptions { + s.VpnEcmpSupport = &v + return s +} + +type ModifyTransitGatewayOutput struct { + _ struct{} `type:"structure"` + + // Describes a transit gateway. + TransitGateway *TransitGateway `locationName:"transitGateway" type:"structure"` +} + +// String returns the string representation +func (s ModifyTransitGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyTransitGatewayOutput) GoString() string { + return s.String() +} + +// SetTransitGateway sets the TransitGateway field's value. +func (s *ModifyTransitGatewayOutput) SetTransitGateway(v *TransitGateway) *ModifyTransitGatewayOutput { + s.TransitGateway = v + return s +} + +type ModifyTransitGatewayPrefixListReferenceInput struct { + _ struct{} `type:"structure"` + + // Indicates whether to drop traffic that matches this route. + Blackhole *bool `type:"boolean"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the prefix list. + // + // PrefixListId is a required field + PrefixListId *string `type:"string" required:"true"` + + // The ID of the attachment to which traffic is routed. + TransitGatewayAttachmentId *string `type:"string"` + + // The ID of the transit gateway route table. + // + // TransitGatewayRouteTableId is a required field + TransitGatewayRouteTableId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyTransitGatewayPrefixListReferenceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyTransitGatewayPrefixListReferenceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ModifyTransitGatewayPrefixListReferenceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyTransitGatewayPrefixListReferenceInput"} + if s.PrefixListId == nil { + invalidParams.Add(request.NewErrParamRequired("PrefixListId")) + } + if s.TransitGatewayRouteTableId == nil { + invalidParams.Add(request.NewErrParamRequired("TransitGatewayRouteTableId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBlackhole sets the Blackhole field's value. +func (s *ModifyTransitGatewayPrefixListReferenceInput) SetBlackhole(v bool) *ModifyTransitGatewayPrefixListReferenceInput { + s.Blackhole = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyTransitGatewayPrefixListReferenceInput) SetDryRun(v bool) *ModifyTransitGatewayPrefixListReferenceInput { + s.DryRun = &v + return s +} + +// SetPrefixListId sets the PrefixListId field's value. +func (s *ModifyTransitGatewayPrefixListReferenceInput) SetPrefixListId(v string) *ModifyTransitGatewayPrefixListReferenceInput { + s.PrefixListId = &v + return s +} + +// SetTransitGatewayAttachmentId sets the TransitGatewayAttachmentId field's value. +func (s *ModifyTransitGatewayPrefixListReferenceInput) SetTransitGatewayAttachmentId(v string) *ModifyTransitGatewayPrefixListReferenceInput { + s.TransitGatewayAttachmentId = &v + return s +} + +// SetTransitGatewayRouteTableId sets the TransitGatewayRouteTableId field's value. +func (s *ModifyTransitGatewayPrefixListReferenceInput) SetTransitGatewayRouteTableId(v string) *ModifyTransitGatewayPrefixListReferenceInput { + s.TransitGatewayRouteTableId = &v + return s +} + +type ModifyTransitGatewayPrefixListReferenceOutput struct { + _ struct{} `type:"structure"` + + // Information about the prefix list reference. + TransitGatewayPrefixListReference *TransitGatewayPrefixListReference `locationName:"transitGatewayPrefixListReference" type:"structure"` +} + +// String returns the string representation +func (s ModifyTransitGatewayPrefixListReferenceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyTransitGatewayPrefixListReferenceOutput) GoString() string { + return s.String() +} + +// SetTransitGatewayPrefixListReference sets the TransitGatewayPrefixListReference field's value. +func (s *ModifyTransitGatewayPrefixListReferenceOutput) SetTransitGatewayPrefixListReference(v *TransitGatewayPrefixListReference) *ModifyTransitGatewayPrefixListReferenceOutput { + s.TransitGatewayPrefixListReference = v + return s +} + type ModifyTransitGatewayVpcAttachmentInput struct { _ struct{} `type:"structure"` @@ -85592,6 +93745,11 @@ func (s *ModifyTransitGatewayVpcAttachmentOutput) SetTransitGatewayVpcAttachment type ModifyTransitGatewayVpcAttachmentRequestOptions struct { _ struct{} `type:"structure"` + // Enable or disable support for appliance mode. If enabled, a traffic flow + // between a source and destination uses the same Availability Zone for the + // VPC attachment for the lifetime of that flow. The default is disable. + ApplianceModeSupport *string `type:"string" enum:"ApplianceModeSupportValue"` + // Enable or disable DNS support. The default is enable. DnsSupport *string `type:"string" enum:"DnsSupportValue"` @@ -85609,6 +93767,12 @@ func (s ModifyTransitGatewayVpcAttachmentRequestOptions) GoString() string { return s.String() } +// SetApplianceModeSupport sets the ApplianceModeSupport field's value. 
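A sketch of turning a prefix list reference into a blackhole route with the input type above; both IDs are placeholders:

	input := &ec2.ModifyTransitGatewayPrefixListReferenceInput{}
	input.SetPrefixListId("pl-0123456789abcdef0")                    // placeholder
	input.SetTransitGatewayRouteTableId("tgw-rtb-0123456789abcdef0") // placeholder
	input.SetBlackhole(true) // drop matching traffic instead of routing it
	if out, err := svc.ModifyTransitGatewayPrefixListReference(input); err == nil {
		_ = out.TransitGatewayPrefixListReference // the updated reference
	}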
+func (s *ModifyTransitGatewayVpcAttachmentRequestOptions) SetApplianceModeSupport(v string) *ModifyTransitGatewayVpcAttachmentRequestOptions { + s.ApplianceModeSupport = &v + return s +} + // SetDnsSupport sets the DnsSupport field's value. func (s *ModifyTransitGatewayVpcAttachmentRequestOptions) SetDnsSupport(v string) *ModifyTransitGatewayVpcAttachmentRequestOptions { s.DnsSupport = &v @@ -85705,8 +93869,8 @@ type ModifyVolumeInput struct { // The target IOPS rate of the volume. // - // This is only valid for Provisioned IOPS SSD (io1) volumes. For more information, - // see Provisioned IOPS SSD (io1) Volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html#EBSVolumeTypes_piops). + // This is only valid for Provisioned IOPS SSD (io1 and io2) volumes. For more information, + // see Provisioned IOPS SSD (io1 and io2) volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html#EBSVolumeTypes_piops). // // Default: If no IOPS value is specified, the existing value is retained. Iops *int64 `type:"integer"` @@ -86618,6 +94782,123 @@ func (s *ModifyVpnConnectionInput) SetVpnGatewayId(v string) *ModifyVpnConnectio return s } +type ModifyVpnConnectionOptionsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The IPv4 CIDR on the customer gateway (on-premises) side of the VPN connection. + // + // Default: 0.0.0.0/0 + LocalIpv4NetworkCidr *string `type:"string"` + + // The IPv6 CIDR on the customer gateway (on-premises) side of the VPN connection. + // + // Default: ::/0 + LocalIpv6NetworkCidr *string `type:"string"` + + // The IPv4 CIDR on the AWS side of the VPN connection. + // + // Default: 0.0.0.0/0 + RemoteIpv4NetworkCidr *string `type:"string"` + + // The IPv6 CIDR on the AWS side of the VPN connection. + // + // Default: ::/0 + RemoteIpv6NetworkCidr *string `type:"string"` + + // The ID of the Site-to-Site VPN connection. + // + // VpnConnectionId is a required field + VpnConnectionId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyVpnConnectionOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpnConnectionOptionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyVpnConnectionOptionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyVpnConnectionOptionsInput"} + if s.VpnConnectionId == nil { + invalidParams.Add(request.NewErrParamRequired("VpnConnectionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyVpnConnectionOptionsInput) SetDryRun(v bool) *ModifyVpnConnectionOptionsInput { + s.DryRun = &v + return s +} + +// SetLocalIpv4NetworkCidr sets the LocalIpv4NetworkCidr field's value. +func (s *ModifyVpnConnectionOptionsInput) SetLocalIpv4NetworkCidr(v string) *ModifyVpnConnectionOptionsInput { + s.LocalIpv4NetworkCidr = &v + return s +} + +// SetLocalIpv6NetworkCidr sets the LocalIpv6NetworkCidr field's value.
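A sketch of narrowing the on-premises and AWS CIDRs on a Site-to-Site VPN connection with the new input type; the connection ID and CIDRs are placeholders:

	input := &ec2.ModifyVpnConnectionOptionsInput{}
	input.SetVpnConnectionId("vpn-0123456789abcdef0") // placeholder
	input.SetLocalIpv4NetworkCidr("10.0.0.0/16")      // on-premises side; default 0.0.0.0/0
	input.SetRemoteIpv4NetworkCidr("172.31.0.0/16")   // AWS side; default 0.0.0.0/0
	_, err := svc.ModifyVpnConnectionOptions(input)
	_ = err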
+func (s *ModifyVpnConnectionOptionsInput) SetLocalIpv6NetworkCidr(v string) *ModifyVpnConnectionOptionsInput { + s.LocalIpv6NetworkCidr = &v + return s +} + +// SetRemoteIpv4NetworkCidr sets the RemoteIpv4NetworkCidr field's value. +func (s *ModifyVpnConnectionOptionsInput) SetRemoteIpv4NetworkCidr(v string) *ModifyVpnConnectionOptionsInput { + s.RemoteIpv4NetworkCidr = &v + return s +} + +// SetRemoteIpv6NetworkCidr sets the RemoteIpv6NetworkCidr field's value. +func (s *ModifyVpnConnectionOptionsInput) SetRemoteIpv6NetworkCidr(v string) *ModifyVpnConnectionOptionsInput { + s.RemoteIpv6NetworkCidr = &v + return s +} + +// SetVpnConnectionId sets the VpnConnectionId field's value. +func (s *ModifyVpnConnectionOptionsInput) SetVpnConnectionId(v string) *ModifyVpnConnectionOptionsInput { + s.VpnConnectionId = &v + return s +} + +type ModifyVpnConnectionOptionsOutput struct { + _ struct{} `type:"structure"` + + // Describes a VPN connection. + VpnConnection *VpnConnection `locationName:"vpnConnection" type:"structure"` +} + +// String returns the string representation +func (s ModifyVpnConnectionOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpnConnectionOptionsOutput) GoString() string { + return s.String() +} + +// SetVpnConnection sets the VpnConnection field's value. +func (s *ModifyVpnConnectionOptionsOutput) SetVpnConnection(v *VpnConnection) *ModifyVpnConnectionOptionsOutput { + s.VpnConnection = v + return s +} + type ModifyVpnConnectionOutput struct { _ struct{} `type:"structure"` @@ -86833,6 +95114,14 @@ func (s *ModifyVpnTunnelOptionsOutput) SetVpnConnection(v *VpnConnection) *Modif type ModifyVpnTunnelOptionsSpecification struct { _ struct{} `type:"structure"` + // The action to take after DPD timeout occurs. Specify restart to restart the + // IKE initiation. Specify clear to end the IKE session. + // + // Valid Values: clear | none | restart + // + // Default: clear + DPDTimeoutAction *string `type:"string"` + // The number of seconds after which a DPD timeout occurs. // // Constraints: A value between 0 and 30. @@ -86848,19 +95137,19 @@ type ModifyVpnTunnelOptionsSpecification struct { // One or more Diffie-Hellman group numbers that are permitted for the VPN tunnel // for phase 1 IKE negotiations. // - // Valid values: 2 | 14 | 15 | 16 | 17 | 18 | 22 | 23 | 24 + // Valid values: 2 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 Phase1DHGroupNumbers []*Phase1DHGroupNumbersRequestListValue `locationName:"Phase1DHGroupNumber" locationNameList:"item" type:"list"` // One or more encryption algorithms that are permitted for the VPN tunnel for // phase 1 IKE negotiations. // - // Valid values: AES128 | AES256 + // Valid values: AES128 | AES256 | AES128-GCM-16 | AES256-GCM-16 Phase1EncryptionAlgorithms []*Phase1EncryptionAlgorithmsRequestListValue `locationName:"Phase1EncryptionAlgorithm" locationNameList:"item" type:"list"` // One or more integrity algorithms that are permitted for the VPN tunnel for // phase 1 IKE negotiations. // - // Valid values: SHA1 | SHA2-256 + // Valid values: SHA1 | SHA2-256 | SHA2-384 | SHA2-512 Phase1IntegrityAlgorithms []*Phase1IntegrityAlgorithmsRequestListValue `locationName:"Phase1IntegrityAlgorithm" locationNameList:"item" type:"list"` // The lifetime for phase 1 of the IKE negotiation, in seconds. 
@@ -86873,19 +95162,19 @@ type ModifyVpnTunnelOptionsSpecification struct { // One or more Diffie-Hellman group numbers that are permitted for the VPN tunnel // for phase 2 IKE negotiations. // - // Valid values: 2 | 5 | 14 | 15 | 16 | 17 | 18 | 22 | 23 | 24 + // Valid values: 2 | 5 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 Phase2DHGroupNumbers []*Phase2DHGroupNumbersRequestListValue `locationName:"Phase2DHGroupNumber" locationNameList:"item" type:"list"` // One or more encryption algorithms that are permitted for the VPN tunnel for // phase 2 IKE negotiations. // - // Valid values: AES128 | AES256 + // Valid values: AES128 | AES256 | AES128-GCM-16 | AES256-GCM-16 Phase2EncryptionAlgorithms []*Phase2EncryptionAlgorithmsRequestListValue `locationName:"Phase2EncryptionAlgorithm" locationNameList:"item" type:"list"` // One or more integrity algorithms that are permitted for the VPN tunnel for // phase 2 IKE negotiations. // - // Valid values: SHA1 | SHA2-256 + // Valid values: SHA1 | SHA2-256 | SHA2-384 | SHA2-512 Phase2IntegrityAlgorithms []*Phase2IntegrityAlgorithmsRequestListValue `locationName:"Phase2IntegrityAlgorithm" locationNameList:"item" type:"list"` // The lifetime for phase 2 of the IKE negotiation, in seconds. @@ -86928,7 +95217,16 @@ type ModifyVpnTunnelOptionsSpecification struct { // Default: 1024 ReplayWindowSize *int64 `type:"integer"` - // The range of inside IP addresses for the tunnel. Any specified CIDR blocks + // The action to take when establishing the tunnel for the VPN connection. + // By default, your customer gateway device must initiate the IKE negotiation + // and bring up the tunnel. Specify start for AWS to initiate the IKE negotiation. + // + // Valid Values: add | start + // + // Default: add + StartupAction *string `type:"string"` + + // The range of inside IPv4 addresses for the tunnel. Any specified CIDR blocks // must be unique across all VPN connections that use the same virtual private // gateway. // @@ -86949,6 +95247,12 @@ type ModifyVpnTunnelOptionsSpecification struct { // // * 169.254.169.252/30 TunnelInsideCidr *string `type:"string"` + + // The range of inside IPv6 addresses for the tunnel. Any specified CIDR blocks + // must be unique across all VPN connections that use the same transit gateway. + // + // Constraints: A size /126 CIDR block from the local fd00::/8 range. + TunnelInsideIpv6Cidr *string `type:"string"` } // String returns the string representation func (s ModifyVpnTunnelOptionsSpecification) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ModifyVpnTunnelOptionsSpecification) GoString() string { return s.String() } +// SetDPDTimeoutAction sets the DPDTimeoutAction field's value. +func (s *ModifyVpnTunnelOptionsSpecification) SetDPDTimeoutAction(v string) *ModifyVpnTunnelOptionsSpecification { + s.DPDTimeoutAction = &v + return s +} + // SetDPDTimeoutSeconds sets the DPDTimeoutSeconds field's value. func (s *ModifyVpnTunnelOptionsSpecification) SetDPDTimeoutSeconds(v int64) *ModifyVpnTunnelOptionsSpecification { s.DPDTimeoutSeconds = &v @@ -87045,12 +95355,24 @@ func (s *ModifyVpnTunnelOptionsSpecification) SetReplayWindowSize(v int64) *Modi return s } +// SetStartupAction sets the StartupAction field's value. +func (s *ModifyVpnTunnelOptionsSpecification) SetStartupAction(v string) *ModifyVpnTunnelOptionsSpecification { + s.StartupAction = &v + return s +} + // SetTunnelInsideCidr sets the TunnelInsideCidr field's value.
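A sketch combining the new tunnel fields, assuming ModifyVpnTunnelOptionsInput carries the specification in its TunnelOptions field as elsewhere in this SDK; all identifiers are placeholders:

	spec := &ec2.ModifyVpnTunnelOptionsSpecification{}
	spec.SetDPDTimeoutAction("restart")              // valid values: clear | none | restart
	spec.SetStartupAction("start")                   // start lets AWS initiate IKE; default add
	spec.SetTunnelInsideIpv6Cidr("fd00:1:2:3::/126") // placeholder /126 from fd00::/8

	input := &ec2.ModifyVpnTunnelOptionsInput{}
	input.SetVpnConnectionId("vpn-0123456789abcdef0")  // placeholder
	input.SetVpnTunnelOutsideIpAddress("203.0.113.17") // placeholder outside IP
	input.SetTunnelOptions(spec)
	_, err := svc.ModifyVpnTunnelOptions(input)
	_ = err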
func (s *ModifyVpnTunnelOptionsSpecification) SetTunnelInsideCidr(v string) *ModifyVpnTunnelOptionsSpecification { s.TunnelInsideCidr = &v return s } +// SetTunnelInsideIpv6Cidr sets the TunnelInsideIpv6Cidr field's value. +func (s *ModifyVpnTunnelOptionsSpecification) SetTunnelInsideIpv6Cidr(v string) *ModifyVpnTunnelOptionsSpecification { + s.TunnelInsideIpv6Cidr = &v + return s +} + type MonitorInstancesInput struct { _ struct{} `type:"structure"` @@ -87680,10 +96002,58 @@ func (s *NetworkAclEntry) SetRuleNumber(v int64) *NetworkAclEntry { return s } +// Describes the network card support of the instance type. +type NetworkCardInfo struct { + _ struct{} `type:"structure"` + + // The maximum number of network interfaces for the network card. + MaximumNetworkInterfaces *int64 `locationName:"maximumNetworkInterfaces" type:"integer"` + + // The index of the network card. + NetworkCardIndex *int64 `locationName:"networkCardIndex" type:"integer"` + + // The network performance of the network card. + NetworkPerformance *string `locationName:"networkPerformance" type:"string"` +} + +// String returns the string representation +func (s NetworkCardInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkCardInfo) GoString() string { + return s.String() +} + +// SetMaximumNetworkInterfaces sets the MaximumNetworkInterfaces field's value. +func (s *NetworkCardInfo) SetMaximumNetworkInterfaces(v int64) *NetworkCardInfo { + s.MaximumNetworkInterfaces = &v + return s +} + +// SetNetworkCardIndex sets the NetworkCardIndex field's value. +func (s *NetworkCardInfo) SetNetworkCardIndex(v int64) *NetworkCardInfo { + s.NetworkCardIndex = &v + return s +} + +// SetNetworkPerformance sets the NetworkPerformance field's value. +func (s *NetworkCardInfo) SetNetworkPerformance(v string) *NetworkCardInfo { + s.NetworkPerformance = &v + return s +} + // Describes the networking features of the instance type. type NetworkInfo struct { _ struct{} `type:"structure"` + // The index of the default network card, starting at 0. + DefaultNetworkCardIndex *int64 `locationName:"defaultNetworkCardIndex" type:"integer"` + + // Indicates whether Elastic Fabric Adapter (EFA) is supported. + EfaSupported *bool `locationName:"efaSupported" type:"boolean"` + // Indicates whether Elastic Network Adapter (ENA) is supported. EnaSupport *string `locationName:"enaSupport" type:"string" enum:"EnaSupport"` @@ -87696,10 +96066,17 @@ type NetworkInfo struct { // Indicates whether IPv6 is supported. Ipv6Supported *bool `locationName:"ipv6Supported" type:"boolean"` + // The maximum number of physical network cards that can be allocated to the + // instance. + MaximumNetworkCards *int64 `locationName:"maximumNetworkCards" type:"integer"` + // The maximum number of network interfaces for the instance type. MaximumNetworkInterfaces *int64 `locationName:"maximumNetworkInterfaces" type:"integer"` - // Describes the network performance. + // Describes the network cards for the instance type. + NetworkCards []*NetworkCardInfo `locationName:"networkCards" locationNameList:"item" type:"list"` + + // The network performance. NetworkPerformance *string `locationName:"networkPerformance" type:"string"` } @@ -87713,6 +96090,18 @@ func (s NetworkInfo) GoString() string { return s.String() } +// SetDefaultNetworkCardIndex sets the DefaultNetworkCardIndex field's value. 
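The new card fields surface through DescribeInstanceTypes; a read-only sketch, with a placeholder instance type and fmt assumed imported:

	out, err := svc.DescribeInstanceTypes(&ec2.DescribeInstanceTypesInput{
		InstanceTypes: []*string{aws.String("p4d.24xlarge")}, // placeholder multi-card type
	})
	if err == nil {
		for _, it := range out.InstanceTypes {
			ni := it.NetworkInfo
			if ni == nil {
				continue
			}
			fmt.Println("max cards:", aws.Int64Value(ni.MaximumNetworkCards))
			for _, card := range ni.NetworkCards {
				fmt.Println(aws.Int64Value(card.NetworkCardIndex), aws.StringValue(card.NetworkPerformance))
			}
		}
	}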
+func (s *NetworkInfo) SetDefaultNetworkCardIndex(v int64) *NetworkInfo { + s.DefaultNetworkCardIndex = &v + return s +} + +// SetEfaSupported sets the EfaSupported field's value. +func (s *NetworkInfo) SetEfaSupported(v bool) *NetworkInfo { + s.EfaSupported = &v + return s +} + // SetEnaSupport sets the EnaSupport field's value. func (s *NetworkInfo) SetEnaSupport(v string) *NetworkInfo { s.EnaSupport = &v @@ -87737,12 +96126,24 @@ func (s *NetworkInfo) SetIpv6Supported(v bool) *NetworkInfo { return s } +// SetMaximumNetworkCards sets the MaximumNetworkCards field's value. +func (s *NetworkInfo) SetMaximumNetworkCards(v int64) *NetworkInfo { + s.MaximumNetworkCards = &v + return s +} + // SetMaximumNetworkInterfaces sets the MaximumNetworkInterfaces field's value. func (s *NetworkInfo) SetMaximumNetworkInterfaces(v int64) *NetworkInfo { s.MaximumNetworkInterfaces = &v return s } +// SetNetworkCards sets the NetworkCards field's value. +func (s *NetworkInfo) SetNetworkCards(v []*NetworkCardInfo) *NetworkInfo { + s.NetworkCards = v + return s +} + // SetNetworkPerformance sets the NetworkPerformance field's value. func (s *NetworkInfo) SetNetworkPerformance(v string) *NetworkInfo { s.NetworkPerformance = &v @@ -87955,7 +96356,9 @@ func (s *NetworkInterface) SetVpcId(v string) *NetworkInterface { return s } -// Describes association information for an Elastic IP address (IPv4 only). +// Describes association information for an Elastic IP address (IPv4 only), +// or a Carrier IP address (for a network interface which resides in a subnet +// in a Wavelength Zone). type NetworkInterfaceAssociation struct { _ struct{} `type:"structure"` @@ -87965,6 +96368,15 @@ type NetworkInterfaceAssociation struct { // The association ID. AssociationId *string `locationName:"associationId" type:"string"` + // The carrier IP address associated with the network interface. + // + // This option is only available when the network interface is in a subnet which + // is associated with a Wavelength Zone. + CarrierIp *string `locationName:"carrierIp" type:"string"` + + // The customer-owned IP address associated with the network interface. + CustomerOwnedIp *string `locationName:"customerOwnedIp" type:"string"` + // The ID of the Elastic IP address owner. IpOwnerId *string `locationName:"ipOwnerId" type:"string"` @@ -87997,6 +96409,18 @@ func (s *NetworkInterfaceAssociation) SetAssociationId(v string) *NetworkInterfa return s } +// SetCarrierIp sets the CarrierIp field's value. +func (s *NetworkInterfaceAssociation) SetCarrierIp(v string) *NetworkInterfaceAssociation { + s.CarrierIp = &v + return s +} + +// SetCustomerOwnedIp sets the CustomerOwnedIp field's value. +func (s *NetworkInterfaceAssociation) SetCustomerOwnedIp(v string) *NetworkInterfaceAssociation { + s.CustomerOwnedIp = &v + return s +} + // SetIpOwnerId sets the IpOwnerId field's value. func (s *NetworkInterfaceAssociation) SetIpOwnerId(v string) *NetworkInterfaceAssociation { s.IpOwnerId = &v @@ -88037,6 +96461,9 @@ type NetworkInterfaceAttachment struct { // The AWS account ID of the owner of the instance. InstanceOwnerId *string `locationName:"instanceOwnerId" type:"string"` + // The index of the network card. + NetworkCardIndex *int64 `locationName:"networkCardIndex" type:"integer"` + // The attachment state. 
Status *string `locationName:"status" type:"string" enum:"AttachmentStatus"` } @@ -88087,6 +96514,12 @@ func (s *NetworkInterfaceAttachment) SetInstanceOwnerId(v string) *NetworkInterf return s } +// SetNetworkCardIndex sets the NetworkCardIndex field's value. +func (s *NetworkInterfaceAttachment) SetNetworkCardIndex(v int64) *NetworkInterfaceAttachment { + s.NetworkCardIndex = &v + return s +} + // SetStatus sets the Status field's value. func (s *NetworkInterfaceAttachment) SetStatus(v string) *NetworkInterfaceAttachment { s.Status = &v @@ -89005,37 +97438,54 @@ type Placement struct { _ struct{} `type:"structure"` // The affinity setting for the instance on the Dedicated Host. This parameter - // is not supported for the ImportInstance command. + // is not supported for the ImportInstance (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ImportInstance.html) + // command. + // + // This parameter is not supported by CreateFleet (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet). Affinity *string `locationName:"affinity" type:"string"` // The Availability Zone of the instance. // // If not specified, an Availability Zone will be automatically chosen for you // based on the load balancing criteria for the Region. + // + // This parameter is not supported by CreateFleet (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet). AvailabilityZone *string `locationName:"availabilityZone" type:"string"` // The name of the placement group the instance is in. GroupName *string `locationName:"groupName" type:"string"` // The ID of the Dedicated Host on which the instance resides. This parameter - // is not supported for the ImportInstance command. + // is not supported for the ImportInstance (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ImportInstance.html) + // command. + // + // This parameter is not supported by CreateFleet (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet). HostId *string `locationName:"hostId" type:"string"` // The ARN of the host resource group in which to launch the instances. If you // specify a host resource group ARN, omit the Tenancy parameter or set it to // host. + // + // This parameter is not supported by CreateFleet (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet). HostResourceGroupArn *string `locationName:"hostResourceGroupArn" type:"string"` // The number of the partition the instance is in. Valid only if the placement // group strategy is set to partition. + // + // This parameter is not supported by CreateFleet (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet). PartitionNumber *int64 `locationName:"partitionNumber" type:"integer"` // Reserved for future use. + // + // This parameter is not supported by CreateFleet (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet). SpreadDomain *string `locationName:"spreadDomain" type:"string"` // The tenancy of the instance (if the instance is running in a VPC). An instance // with a tenancy of dedicated runs on single-tenant hardware. The host tenancy - // is not supported for the ImportInstance command. + // is not supported for the ImportInstance (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ImportInstance.html) + // command. + // + // This parameter is not supported by CreateFleet (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet). 
Tenancy *string `locationName:"tenancy" type:"string" enum:"Tenancy"` } @@ -89170,7 +97620,7 @@ func (s *PlacementGroup) SetTags(v []*Tag) *PlacementGroup { type PlacementGroupInfo struct { _ struct{} `type:"structure"` - // A list of supported placement groups types. + // The supported placement group types. SupportedStrategies []*string `locationName:"supportedStrategies" locationNameList:"item" type:"list"` } @@ -89214,6 +97664,30 @@ func (s *PlacementResponse) SetGroupName(v string) *PlacementResponse { return s } +// Describes a CIDR block for an address pool. +type PoolCidrBlock struct { + _ struct{} `type:"structure"` + + // The CIDR block. + Cidr *string `locationName:"poolCidrBlock" type:"string"` +} + +// String returns the string representation +func (s PoolCidrBlock) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PoolCidrBlock) GoString() string { + return s.String() +} + +// SetCidr sets the Cidr field's value. +func (s *PoolCidrBlock) SetCidr(v string) *PoolCidrBlock { + s.Cidr = &v + return s +} + // Describes a range of ports. type PortRange struct { _ struct{} `type:"structure"` @@ -89289,6 +97763,72 @@ func (s *PrefixList) SetPrefixListName(v string) *PrefixList { return s } +// Describes the resource with which a prefix list is associated. +type PrefixListAssociation struct { + _ struct{} `type:"structure"` + + // The ID of the resource. + ResourceId *string `locationName:"resourceId" type:"string"` + + // The owner of the resource. + ResourceOwner *string `locationName:"resourceOwner" type:"string"` +} + +// String returns the string representation +func (s PrefixListAssociation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PrefixListAssociation) GoString() string { + return s.String() +} + +// SetResourceId sets the ResourceId field's value. +func (s *PrefixListAssociation) SetResourceId(v string) *PrefixListAssociation { + s.ResourceId = &v + return s +} + +// SetResourceOwner sets the ResourceOwner field's value. +func (s *PrefixListAssociation) SetResourceOwner(v string) *PrefixListAssociation { + s.ResourceOwner = &v + return s +} + +// Describes a prefix list entry. +type PrefixListEntry struct { + _ struct{} `type:"structure"` + + // The CIDR block. + Cidr *string `locationName:"cidr" type:"string"` + + // The description. + Description *string `locationName:"description" type:"string"` +} + +// String returns the string representation +func (s PrefixListEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PrefixListEntry) GoString() string { + return s.String() +} + +// SetCidr sets the Cidr field's value. +func (s *PrefixListEntry) SetCidr(v string) *PrefixListEntry { + s.Cidr = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *PrefixListEntry) SetDescription(v string) *PrefixListEntry { + s.Description = &v + return s +} + // Describes a prefix list ID. type PrefixListId struct { _ struct{} `type:"structure"` @@ -89595,7 +98135,7 @@ func (s *PrivateIpAddressSpecification) SetPrivateIpAddress(v string) *PrivateIp type ProcessorInfo struct { _ struct{} `type:"structure"` - // A list of architectures supported by the instance type. + // The architectures supported by the instance type. SupportedArchitectures []*string `locationName:"supportedArchitectures" locationNameList:"item" type:"list"` // The speed of the processor, in GHz. 
@@ -89684,9 +98224,10 @@ func (s *PropagatingVgw) SetGatewayId(v string) *PropagatingVgw { type ProvisionByoipCidrInput struct { _ struct{} `type:"structure"` - // The public IPv4 address range, in CIDR notation. The most specific prefix - // that you can specify is /24. The address range cannot overlap with another - // address range that you've brought to this or another Region. + // The public IPv4 or IPv6 address range, in CIDR notation. The most specific + // IPv4 prefix that you can specify is /24. The most specific IPv6 prefix you + // can specify is /56. The address range cannot overlap with another address + // range that you've brought to this or another Region. // // Cidr is a required field Cidr *string `type:"string" required:"true"` @@ -89703,6 +98244,15 @@ type ProvisionByoipCidrInput struct { // the required permissions, the error response is DryRunOperation. Otherwise, // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` + + // The tags to apply to the address pool. + PoolTagSpecifications []*TagSpecification `locationName:"PoolTagSpecification" locationNameList:"item" type:"list"` + + // (IPv6 only) Indicate whether the address range will be publicly advertised + // to the internet. + // + // Default: true + PubliclyAdvertisable *bool `type:"boolean"` } // String returns the string representation @@ -89757,10 +98307,22 @@ func (s *ProvisionByoipCidrInput) SetDryRun(v bool) *ProvisionByoipCidrInput { return s } +// SetPoolTagSpecifications sets the PoolTagSpecifications field's value. +func (s *ProvisionByoipCidrInput) SetPoolTagSpecifications(v []*TagSpecification) *ProvisionByoipCidrInput { + s.PoolTagSpecifications = v + return s +} + +// SetPubliclyAdvertisable sets the PubliclyAdvertisable field's value. +func (s *ProvisionByoipCidrInput) SetPubliclyAdvertisable(v bool) *ProvisionByoipCidrInput { + s.PubliclyAdvertisable = &v + return s +} + type ProvisionByoipCidrOutput struct { _ struct{} `type:"structure"` - // Information about the address pool. + // Information about the address range. ByoipCidr *ByoipCidr `locationName:"byoipCidr" type:"structure"` } @@ -89852,19 +98414,27 @@ func (s *ProvisionedBandwidth) SetStatus(v string) *ProvisionedBandwidth { return s } -// Describes an address pool. +// Describes an IPv4 address pool. type PublicIpv4Pool struct { _ struct{} `type:"structure"` // A description of the address pool. Description *string `locationName:"description" type:"string"` + // The name of the location from which the address pool is advertised. A network + // border group is a unique set of Availability Zones or Local Zones from where + // AWS advertises public IP addresses. + NetworkBorderGroup *string `locationName:"networkBorderGroup" type:"string"` + // The address ranges. PoolAddressRanges []*PublicIpv4PoolRange `locationName:"poolAddressRangeSet" locationNameList:"item" type:"list"` - // The ID of the IPv4 address pool. + // The ID of the address pool. PoolId *string `locationName:"poolId" type:"string"` + // Any tags for the address pool. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + // The total number of addresses. TotalAddressCount *int64 `locationName:"totalAddressCount" type:"integer"` @@ -89888,6 +98458,12 @@ func (s *PublicIpv4Pool) SetDescription(v string) *PublicIpv4Pool { return s } +// SetNetworkBorderGroup sets the NetworkBorderGroup field's value. 
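A sketch of provisioning a tagged, non-advertised IPv6 range with the new ProvisionByoipCidr fields; the CIDR is a placeholder (BYOIP requires a range you own) and the resource-type string is an assumption to verify against the ResourceType enum constants:

	input := &ec2.ProvisionByoipCidrInput{}
	input.SetCidr("2001:db8::/56")       // placeholder; /56 is the most specific IPv6 prefix
	input.SetPubliclyAdvertisable(false) // IPv6 only; defaults to true
	input.SetPoolTagSpecifications([]*ec2.TagSpecification{{
		ResourceType: aws.String("ipv6pool-ec2"), // assumed enum value; check ec2's ResourceType constants
		Tags:         []*ec2.Tag{{Key: aws.String("team"), Value: aws.String("network")}},
	}})
	_, err := svc.ProvisionByoipCidr(input)
	_ = err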
+func (s *PublicIpv4Pool) SetNetworkBorderGroup(v string) *PublicIpv4Pool { + s.NetworkBorderGroup = &v + return s +} + // SetPoolAddressRanges sets the PoolAddressRanges field's value. func (s *PublicIpv4Pool) SetPoolAddressRanges(v []*PublicIpv4PoolRange) *PublicIpv4Pool { s.PoolAddressRanges = v @@ -89900,6 +98476,12 @@ func (s *PublicIpv4Pool) SetPoolId(v string) *PublicIpv4Pool { return s } +// SetTags sets the Tags field's value. +func (s *PublicIpv4Pool) SetTags(v []*Tag) *PublicIpv4Pool { + s.Tags = v + return s +} + // SetTotalAddressCount sets the TotalAddressCount field's value. func (s *PublicIpv4Pool) SetTotalAddressCount(v int64) *PublicIpv4Pool { s.TotalAddressCount = &v @@ -90080,6 +98662,9 @@ type PurchaseHostReservationInput struct { // // OfferingId is a required field OfferingId *string `type:"string" required:"true"` + + // The tags to apply to the Dedicated Host Reservation during purchase. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` } // String returns the string representation @@ -90138,6 +98723,12 @@ func (s *PurchaseHostReservationInput) SetOfferingId(v string) *PurchaseHostRese return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *PurchaseHostReservationInput) SetTagSpecifications(v []*TagSpecification) *PurchaseHostReservationInput { + s.TagSpecifications = v + return s +} + type PurchaseHostReservationOutput struct { _ struct{} `type:"structure"` @@ -90799,6 +99390,101 @@ func (s *RegisterImageOutput) SetImageId(v string) *RegisterImageOutput { return s } +type RegisterInstanceEventNotificationAttributesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // Information about the tag keys to register. + InstanceTagAttribute *RegisterInstanceTagAttributeRequest `type:"structure"` +} + +// String returns the string representation +func (s RegisterInstanceEventNotificationAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterInstanceEventNotificationAttributesInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *RegisterInstanceEventNotificationAttributesInput) SetDryRun(v bool) *RegisterInstanceEventNotificationAttributesInput { + s.DryRun = &v + return s +} + +// SetInstanceTagAttribute sets the InstanceTagAttribute field's value. +func (s *RegisterInstanceEventNotificationAttributesInput) SetInstanceTagAttribute(v *RegisterInstanceTagAttributeRequest) *RegisterInstanceEventNotificationAttributesInput { + s.InstanceTagAttribute = v + return s +} + +type RegisterInstanceEventNotificationAttributesOutput struct { + _ struct{} `type:"structure"` + + // The resulting set of tag keys. 
+	InstanceTagAttribute *InstanceTagNotificationAttribute `locationName:"instanceTagAttribute" type:"structure"`
+}
+
+// String returns the string representation
+func (s RegisterInstanceEventNotificationAttributesOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RegisterInstanceEventNotificationAttributesOutput) GoString() string {
+	return s.String()
+}
+
+// SetInstanceTagAttribute sets the InstanceTagAttribute field's value.
+func (s *RegisterInstanceEventNotificationAttributesOutput) SetInstanceTagAttribute(v *InstanceTagNotificationAttribute) *RegisterInstanceEventNotificationAttributesOutput {
+	s.InstanceTagAttribute = v
+	return s
+}
+
+// Information about the tag keys to register for the current Region. You can
+// either specify individual tag keys or register all tag keys in the current
+// Region. You must specify either IncludeAllTagsOfInstance or InstanceTagKeys
+// in the request.
+type RegisterInstanceTagAttributeRequest struct {
+	_ struct{} `type:"structure"`
+
+	// Indicates whether to register all tag keys in the current Region. Specify
+	// true to register all tag keys.
+	IncludeAllTagsOfInstance *bool `type:"boolean"`
+
+	// The tag keys to register.
+	InstanceTagKeys []*string `locationName:"InstanceTagKey" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s RegisterInstanceTagAttributeRequest) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RegisterInstanceTagAttributeRequest) GoString() string {
+	return s.String()
+}
+
+// SetIncludeAllTagsOfInstance sets the IncludeAllTagsOfInstance field's value.
+func (s *RegisterInstanceTagAttributeRequest) SetIncludeAllTagsOfInstance(v bool) *RegisterInstanceTagAttributeRequest {
+	s.IncludeAllTagsOfInstance = &v
+	return s
+}
+
+// SetInstanceTagKeys sets the InstanceTagKeys field's value.
+func (s *RegisterInstanceTagAttributeRequest) SetInstanceTagKeys(v []*string) *RegisterInstanceTagAttributeRequest {
+	s.InstanceTagKeys = v
+	return s
+}
+
 type RegisterTransitGatewayMulticastGroupMembersInput struct {
 	_ struct{} `type:"structure"`

@@ -91271,7 +99957,8 @@ type ReleaseAddressInput struct {
 	// it is UnauthorizedOperation.
 	DryRun *bool `locationName:"dryRun" type:"boolean"`

-	// The location that the IP address is released from.
+	// The set of Availability Zones, Local Zones, or Wavelength Zones from which
+	// AWS advertises IP addresses.
 	//
 	// If you provide an incorrect network border group, you will receive an InvalidAddress.NotFound
 	// error. For more information, see Error Codes (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html).
@@ -91404,6 +100091,45 @@ func (s *ReleaseHostsOutput) SetUnsuccessful(v []*UnsuccessfulItem) *ReleaseHost
 	return s
 }

+// An entry for a prefix list.
+type RemovePrefixListEntry struct {
+	_ struct{} `type:"structure"`
+
+	// The CIDR block.
+	//
+	// Cidr is a required field
+	Cidr *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s RemovePrefixListEntry) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RemovePrefixListEntry) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
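// Example (sketch): registering tag keys for instance event notifications with
// the RegisterInstanceEventNotificationAttributes types above. Assumptions: an
// *ec2.EC2 client named svc and an illustrative tag key; set exactly one of
// IncludeAllTagsOfInstance or InstanceTagKeys.
//
//	out, err := svc.RegisterInstanceEventNotificationAttributes(
//		&ec2.RegisterInstanceEventNotificationAttributesInput{
//			InstanceTagAttribute: &ec2.RegisterInstanceTagAttributeRequest{
//				InstanceTagKeys: []*string{aws.String("environment")},
//				// or: IncludeAllTagsOfInstance: aws.Bool(true)
//			},
//		})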
+func (s *RemovePrefixListEntry) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RemovePrefixListEntry"} + if s.Cidr == nil { + invalidParams.Add(request.NewErrParamRequired("Cidr")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCidr sets the Cidr field's value. +func (s *RemovePrefixListEntry) SetCidr(v string) *RemovePrefixListEntry { + s.Cidr = &v + return s +} + type ReplaceIamInstanceProfileAssociationInput struct { _ struct{} `type:"structure"` @@ -91736,6 +100462,9 @@ func (s ReplaceNetworkAclEntryOutput) GoString() string { type ReplaceRouteInput struct { _ struct{} `type:"structure"` + // [IPv4 traffic only] The ID of a carrier gateway. + CarrierGatewayId *string `type:"string"` + // The IPv4 CIDR address block used for the destination match. The value that // you provide must match the CIDR of an existing route in the table. DestinationCidrBlock *string `locationName:"destinationCidrBlock" type:"string"` @@ -91744,6 +100473,9 @@ type ReplaceRouteInput struct { // you provide must match the CIDR of an existing route in the table. DestinationIpv6CidrBlock *string `locationName:"destinationIpv6CidrBlock" type:"string"` + // The ID of the prefix list for the route. + DestinationPrefixListId *string `type:"string"` + // Checks whether you have the required permissions for the action, without // actually making the request, and provides an error response. If you have // the required permissions, the error response is DryRunOperation. Otherwise, @@ -91806,6 +100538,12 @@ func (s *ReplaceRouteInput) Validate() error { return nil } +// SetCarrierGatewayId sets the CarrierGatewayId field's value. +func (s *ReplaceRouteInput) SetCarrierGatewayId(v string) *ReplaceRouteInput { + s.CarrierGatewayId = &v + return s +} + // SetDestinationCidrBlock sets the DestinationCidrBlock field's value. func (s *ReplaceRouteInput) SetDestinationCidrBlock(v string) *ReplaceRouteInput { s.DestinationCidrBlock = &v @@ -91818,6 +100556,12 @@ func (s *ReplaceRouteInput) SetDestinationIpv6CidrBlock(v string) *ReplaceRouteI return s } +// SetDestinationPrefixListId sets the DestinationPrefixListId field's value. +func (s *ReplaceRouteInput) SetDestinationPrefixListId(v string) *ReplaceRouteInput { + s.DestinationPrefixListId = &v + return s +} + // SetDryRun sets the DryRun field's value. func (s *ReplaceRouteInput) SetDryRun(v bool) *ReplaceRouteInput { s.DryRun = &v @@ -92245,12 +100989,6 @@ type RequestLaunchTemplateData struct { _ struct{} `type:"structure"` // The block device mapping. - // - // Supplying both a snapshot ID and an encryption value as arguments for block-device - // mapping results in an error. This is because only blank volumes can be encrypted - // on start, and these are not created from a snapshot. If a snapshot is the - // basis for the volume, it contains data by definition and its encryption status - // cannot be changed using this action. BlockDeviceMappings []*LaunchTemplateBlockDeviceMappingRequest `locationName:"BlockDeviceMapping" locationNameList:"BlockDeviceMapping" type:"list"` // The Capacity Reservation targeting option. If you do not specify this parameter, @@ -92264,8 +101002,8 @@ type RequestLaunchTemplateData struct { // in the Amazon Elastic Compute Cloud User Guide. CpuOptions *LaunchTemplateCpuOptionsRequest `type:"structure"` - // The credit option for CPU usage of the instance. Valid for T2 or T3 instances - // only. + // The credit option for CPU usage of the instance. 
Valid for T2, T3, or T3a + // instances only. CreditSpecification *CreditSpecificationRequest `type:"structure"` // If you set this parameter to true, you can't terminate the instance using @@ -92288,6 +101026,13 @@ type RequestLaunchTemplateData struct { // The elastic inference accelerator for the instance. ElasticInferenceAccelerators []*LaunchTemplateElasticInferenceAccelerator `locationName:"ElasticInferenceAccelerator" locationNameList:"item" type:"list"` + // Indicates whether the instance is enabled for AWS Nitro Enclaves. For more + // information, see What is AWS Nitro Enclaves? (https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave.html) + // in the AWS Nitro Enclaves User Guide. + // + // You can't enable AWS Nitro Enclaves and hibernation on the same instance. + EnclaveOptions *LaunchTemplateEnclaveOptionsRequest `type:"structure"` + // Indicates whether an instance is enabled for hibernation. This parameter // is valid only if the instance meets the hibernation prerequisites (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html#hibernating-prerequisites). // For more information, see Hibernate Your Instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) @@ -92469,6 +101214,12 @@ func (s *RequestLaunchTemplateData) SetElasticInferenceAccelerators(v []*LaunchT return s } +// SetEnclaveOptions sets the EnclaveOptions field's value. +func (s *RequestLaunchTemplateData) SetEnclaveOptions(v *LaunchTemplateEnclaveOptionsRequest) *RequestLaunchTemplateData { + s.EnclaveOptions = v + return s +} + // SetHibernationOptions sets the HibernationOptions field's value. func (s *RequestLaunchTemplateData) SetHibernationOptions(v *LaunchTemplateHibernationOptionsRequest) *RequestLaunchTemplateData { s.HibernationOptions = v @@ -92693,6 +101444,9 @@ type RequestSpotInstancesInput struct { // // You can't specify an Availability Zone group or a launch group if you specify // a duration. + // + // New accounts or accounts with no previous billing history with AWS are not + // eligible for Spot Instances with a defined duration (also known as Spot blocks). BlockDurationMinutes *int64 `locationName:"blockDurationMinutes" type:"integer"` // Unique, case-sensitive identifier that you provide to ensure the idempotency @@ -92727,6 +101481,12 @@ type RequestSpotInstancesInput struct { // The default is the On-Demand price. SpotPrice *string `locationName:"spotPrice" type:"string"` + // The key-value pair for tagging the Spot Instance request on creation. The + // value for ResourceType must be spot-instances-request, otherwise the Spot + // Instance request fails. To tag the Spot Instance request after it has been + // created, see CreateTags (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateTags.html). + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + // The Spot Instance request type. // // Default: one-time @@ -92743,11 +101503,16 @@ type RequestSpotInstancesInput struct { // date and time. ValidFrom *time.Time `locationName:"validFrom" type:"timestamp"` - // The end date of the request. If this is a one-time request, the request remains - // active until all instances launch, the request is canceled, or this date - // is reached. If the request is persistent, it remains active until it is canceled - // or this date is reached. The default end date is 7 days from the current - // date. + // The end date of the request, in UTC format (YYYY-MM-DDTHH:MM:SSZ). 
+ // + // * For a persistent request, the request remains active until the ValidUntil + // date and time is reached. Otherwise, the request remains active until + // you cancel it. + // + // * For a one-time request, the request remains active until all instances + // launch, the request is canceled, or the ValidUntil date and time is reached. + // By default, the request is valid for 7 days from the date the request + // was created. ValidUntil *time.Time `locationName:"validUntil" type:"timestamp"` } @@ -92830,6 +101595,12 @@ func (s *RequestSpotInstancesInput) SetSpotPrice(v string) *RequestSpotInstances return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *RequestSpotInstancesInput) SetTagSpecifications(v []*TagSpecification) *RequestSpotInstancesInput { + s.TagSpecifications = v + return s +} + // SetType sets the Type field's value. func (s *RequestSpotInstancesInput) SetType(v string) *RequestSpotInstancesInput { s.Type = &v @@ -93063,7 +101834,9 @@ func (s *RequestSpotLaunchSpecification) SetUserData(v string) *RequestSpotLaunc return s } -// Describes a reservation. +// Describes a launch request for one or more instances, and includes owner, +// requester, and security group information that applies to all instances in +// the launch request. type Reservation struct { _ struct{} `type:"structure"` @@ -94409,6 +103182,9 @@ type ResponseLaunchTemplateData struct { // The elastic inference accelerator for the instance. ElasticInferenceAccelerators []*LaunchTemplateElasticInferenceAcceleratorResponse `locationName:"elasticInferenceAcceleratorSet" locationNameList:"item" type:"list"` + // Indicates whether the instance is enabled for AWS Nitro Enclaves. + EnclaveOptions *LaunchTemplateEnclaveOptions `locationName:"enclaveOptions" type:"structure"` + // Indicates whether an instance is configured for hibernation. For more information, // see Hibernate Your Instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) // in the Amazon Elastic Compute Cloud User Guide. @@ -94527,6 +103303,12 @@ func (s *ResponseLaunchTemplateData) SetElasticInferenceAccelerators(v []*Launch return s } +// SetEnclaveOptions sets the EnclaveOptions field's value. +func (s *ResponseLaunchTemplateData) SetEnclaveOptions(v *LaunchTemplateEnclaveOptions) *ResponseLaunchTemplateData { + s.EnclaveOptions = v + return s +} + // SetHibernationOptions sets the HibernationOptions field's value. func (s *ResponseLaunchTemplateData) SetHibernationOptions(v *LaunchTemplateHibernationOptions) *ResponseLaunchTemplateData { s.HibernationOptions = v @@ -94717,6 +103499,107 @@ func (s *RestoreAddressToClassicOutput) SetStatus(v string) *RestoreAddressToCla return s } +type RestoreManagedPrefixListVersionInput struct { + _ struct{} `type:"structure"` + + // The current version number for the prefix list. + // + // CurrentVersion is a required field + CurrentVersion *int64 `type:"long" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the prefix list. + // + // PrefixListId is a required field + PrefixListId *string `type:"string" required:"true"` + + // The version to restore. 
+ // + // PreviousVersion is a required field + PreviousVersion *int64 `type:"long" required:"true"` +} + +// String returns the string representation +func (s RestoreManagedPrefixListVersionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreManagedPrefixListVersionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RestoreManagedPrefixListVersionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RestoreManagedPrefixListVersionInput"} + if s.CurrentVersion == nil { + invalidParams.Add(request.NewErrParamRequired("CurrentVersion")) + } + if s.PrefixListId == nil { + invalidParams.Add(request.NewErrParamRequired("PrefixListId")) + } + if s.PreviousVersion == nil { + invalidParams.Add(request.NewErrParamRequired("PreviousVersion")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCurrentVersion sets the CurrentVersion field's value. +func (s *RestoreManagedPrefixListVersionInput) SetCurrentVersion(v int64) *RestoreManagedPrefixListVersionInput { + s.CurrentVersion = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *RestoreManagedPrefixListVersionInput) SetDryRun(v bool) *RestoreManagedPrefixListVersionInput { + s.DryRun = &v + return s +} + +// SetPrefixListId sets the PrefixListId field's value. +func (s *RestoreManagedPrefixListVersionInput) SetPrefixListId(v string) *RestoreManagedPrefixListVersionInput { + s.PrefixListId = &v + return s +} + +// SetPreviousVersion sets the PreviousVersion field's value. +func (s *RestoreManagedPrefixListVersionInput) SetPreviousVersion(v int64) *RestoreManagedPrefixListVersionInput { + s.PreviousVersion = &v + return s +} + +type RestoreManagedPrefixListVersionOutput struct { + _ struct{} `type:"structure"` + + // Information about the prefix list. + PrefixList *ManagedPrefixList `locationName:"prefixList" type:"structure"` +} + +// String returns the string representation +func (s RestoreManagedPrefixListVersionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreManagedPrefixListVersionOutput) GoString() string { + return s.String() +} + +// SetPrefixList sets the PrefixList field's value. +func (s *RestoreManagedPrefixListVersionOutput) SetPrefixList(v *ManagedPrefixList) *RestoreManagedPrefixListVersionOutput { + s.PrefixList = v + return s +} + type RevokeClientVpnIngressInput struct { _ struct{} `type:"structure"` @@ -94942,6 +103825,13 @@ func (s *RevokeSecurityGroupEgressInput) SetToPort(v int64) *RevokeSecurityGroup type RevokeSecurityGroupEgressOutput struct { _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, returns an error. + Return *bool `locationName:"return" type:"boolean"` + + // The outbound rules that were unknown to the service. In some cases, unknownIpPermissionSet + // might be in a different format from the request parameter. + UnknownIpPermissions []*IpPermission `locationName:"unknownIpPermissionSet" locationNameList:"item" type:"list"` } // String returns the string representation @@ -94954,6 +103844,18 @@ func (s RevokeSecurityGroupEgressOutput) GoString() string { return s.String() } +// SetReturn sets the Return field's value. 
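// Example (sketch): rolling a managed prefix list back to an earlier version
// using the RestoreManagedPrefixListVersion input type above. Assumptions: an
// *ec2.EC2 client named svc and illustrative IDs and version numbers;
// CurrentVersion must match the list's actual current version.
//
//	out, err := svc.RestoreManagedPrefixListVersion(&ec2.RestoreManagedPrefixListVersionInput{
//		PrefixListId:    aws.String("pl-0123456789abcdef0"),
//		CurrentVersion:  aws.Int64(3),
//		PreviousVersion: aws.Int64(2),
//	})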
+func (s *RevokeSecurityGroupEgressOutput) SetReturn(v bool) *RevokeSecurityGroupEgressOutput { + s.Return = &v + return s +} + +// SetUnknownIpPermissions sets the UnknownIpPermissions field's value. +func (s *RevokeSecurityGroupEgressOutput) SetUnknownIpPermissions(v []*IpPermission) *RevokeSecurityGroupEgressOutput { + s.UnknownIpPermissions = v + return s +} + type RevokeSecurityGroupIngressInput struct { _ struct{} `type:"structure"` @@ -95081,6 +103983,13 @@ func (s *RevokeSecurityGroupIngressInput) SetToPort(v int64) *RevokeSecurityGrou type RevokeSecurityGroupIngressOutput struct { _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, returns an error. + Return *bool `locationName:"return" type:"boolean"` + + // The inbound rules that were unknown to the service. In some cases, unknownIpPermissionSet + // might be in a different format from the request parameter. + UnknownIpPermissions []*IpPermission `locationName:"unknownIpPermissionSet" locationNameList:"item" type:"list"` } // String returns the string representation @@ -95093,10 +104002,25 @@ func (s RevokeSecurityGroupIngressOutput) GoString() string { return s.String() } +// SetReturn sets the Return field's value. +func (s *RevokeSecurityGroupIngressOutput) SetReturn(v bool) *RevokeSecurityGroupIngressOutput { + s.Return = &v + return s +} + +// SetUnknownIpPermissions sets the UnknownIpPermissions field's value. +func (s *RevokeSecurityGroupIngressOutput) SetUnknownIpPermissions(v []*IpPermission) *RevokeSecurityGroupIngressOutput { + s.UnknownIpPermissions = v + return s +} + // Describes a route in a route table. type Route struct { _ struct{} `type:"structure"` + // The ID of the carrier gateway. + CarrierGatewayId *string `locationName:"carrierGatewayId" type:"string"` + // The IPv4 CIDR block used for the destination match. DestinationCidrBlock *string `locationName:"destinationCidrBlock" type:"string"` @@ -95159,6 +104083,12 @@ func (s Route) GoString() string { return s.String() } +// SetCarrierGatewayId sets the CarrierGatewayId field's value. +func (s *Route) SetCarrierGatewayId(v string) *Route { + s.CarrierGatewayId = &v + return s +} + // SetDestinationCidrBlock sets the DestinationCidrBlock field's value. func (s *Route) SetDestinationCidrBlock(v string) *Route { s.DestinationCidrBlock = &v @@ -95440,20 +104370,23 @@ type RunInstancesInput struct { CapacityReservationSpecification *CapacityReservationSpecification `type:"structure"` // Unique, case-sensitive identifier you provide to ensure the idempotency of - // the request. For more information, see Ensuring Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // the request. If you do not specify a client token, a randomly generated token + // is used for the request to ensure idempotency. + // + // For more information, see Ensuring Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). // // Constraints: Maximum 64 ASCII characters - ClientToken *string `locationName:"clientToken" type:"string"` + ClientToken *string `locationName:"clientToken" type:"string" idempotencyToken:"true"` // The CPU options for the instance. For more information, see Optimizing CPU - // Options (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html) + // options (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html) // in the Amazon Elastic Compute Cloud User Guide. 
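// Example (sketch): inspecting the new Return and UnknownIpPermissions output
// fields above after revoking an ingress rule. Assumptions: an *ec2.EC2 client
// named svc and an illustrative security group ID and rule.
//
//	out, err := svc.RevokeSecurityGroupIngress(&ec2.RevokeSecurityGroupIngressInput{
//		GroupId:    aws.String("sg-0123456789abcdef0"),
//		IpProtocol: aws.String("tcp"),
//		FromPort:   aws.Int64(22),
//		ToPort:     aws.Int64(22),
//		CidrIp:     aws.String("203.0.113.0/24"),
//	})
//	if err == nil && len(out.UnknownIpPermissions) > 0 {
//		// one or more rules did not match any existing rule on the group
//	}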
CpuOptions *CpuOptionsRequest `type:"structure"` // The credit option for CPU usage of the burstable performance instance. Valid // values are standard and unlimited. To change this attribute after launch, // use ModifyInstanceCreditSpecification (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifyInstanceCreditSpecification.html). - // For more information, see Burstable Performance Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html) + // For more information, see Burstable performance instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html) // in the Amazon Elastic Compute Cloud User Guide. // // Default: standard (T2 instances) or unlimited (T3/T3a instances) @@ -95492,11 +104425,22 @@ type RunInstancesInput struct { // An elastic inference accelerator to associate with the instance. Elastic // inference accelerators are a resource you can attach to your Amazon EC2 instances // to accelerate your Deep Learning (DL) inference workloads. + // + // You cannot specify accelerators from different generations in the same request. ElasticInferenceAccelerators []*ElasticInferenceAccelerator `locationName:"ElasticInferenceAccelerator" locationNameList:"item" type:"list"` + // Indicates whether the instance is enabled for AWS Nitro Enclaves. For more + // information, see What is AWS Nitro Enclaves? (https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave.html) + // in the AWS Nitro Enclaves User Guide. + // + // You can't enable AWS Nitro Enclaves and hibernation on the same instance. + EnclaveOptions *EnclaveOptionsRequest `type:"structure"` + // Indicates whether an instance is enabled for hibernation. For more information, - // see Hibernate Your Instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) + // see Hibernate your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) // in the Amazon Elastic Compute Cloud User Guide. + // + // You can't enable hibernation and AWS Nitro Enclaves on the same instance. HibernationOptions *HibernationOptionsRequest `type:"structure"` // The IAM instance profile. @@ -95518,7 +104462,7 @@ type RunInstancesInput struct { // InstanceInterruptionBehavior is set to either hibernate or stop. InstanceMarketOptions *InstanceMarketOptionsRequest `type:"structure"` - // The instance type. For more information, see Instance Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) + // The instance type. For more information, see Instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) // in the Amazon Elastic Compute Cloud User Guide. // // Default: m1.small @@ -95578,7 +104522,7 @@ type RunInstancesInput struct { MaxCount *int64 `type:"integer" required:"true"` // The metadata options for the instance. For more information, see Instance - // Metadata and User Data (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html). + // metadata and user data (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html). MetadataOptions *InstanceMetadataOptionsRequest `type:"structure"` // The minimum number of instances to launch. If you specify a minimum that @@ -95655,7 +104599,7 @@ type RunInstancesInput struct { TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` // The user data to make available to the instance. 
For more information, see - // Running Commands on Your Linux Instance at Launch (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html) + // Running commands on your Linux instance at launch (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html) // (Linux) and Adding User Data (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ec2-instance-metadata.html#instancedata-add-user-data) // (Windows). If you are using a command line tool, base64-encoding is performed // for you, and you can load the text from a file. Otherwise, you must provide @@ -95785,6 +104729,12 @@ func (s *RunInstancesInput) SetElasticInferenceAccelerators(v []*ElasticInferenc return s } +// SetEnclaveOptions sets the EnclaveOptions field's value. +func (s *RunInstancesInput) SetEnclaveOptions(v *EnclaveOptionsRequest) *RunInstancesInput { + s.EnclaveOptions = v + return s +} + // SetHibernationOptions sets the HibernationOptions field's value. func (s *RunInstancesInput) SetHibernationOptions(v *HibernationOptionsRequest) *RunInstancesInput { s.HibernationOptions = v @@ -96646,19 +105596,15 @@ type ScheduledInstancesEbs struct { // only to instances that support them. Encrypted *bool `type:"boolean"` - // The number of I/O operations per second (IOPS) that the volume supports. - // For io1 volumes, this represents the number of IOPS that are provisioned - // for the volume. For gp2 volumes, this represents the baseline performance - // of the volume and the rate at which the volume accumulates I/O credits for - // bursting. For more information about gp2 baseline performance, I/O credits, - // and bursting, see Amazon EBS Volume Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) + // The number of I/O operations per second (IOPS) to provision for an io1 or + // io2 volume, with a maximum ratio of 50 IOPS/GiB for io1, and 500 IOPS/GiB + // for io2. Range is 100 to 64,000 IOPS for volumes in most Regions. Maximum + // IOPS of 64,000 is guaranteed only on Nitro-based instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances). + // Other instance families guarantee performance up to 32,000 IOPS. For more + // information, see Amazon EBS Volume Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) // in the Amazon Elastic Compute Cloud User Guide. // - // Constraint: Range is 100-20000 IOPS for io1 volumes and 100-10000 IOPS for - // gp2 volumes. - // - // Condition: This parameter is required for requests to create io1volumes; - // it is not used in requests to create gp2, st1, sc1, or standard volumes. + // This parameter is valid only for Provisioned IOPS SSD (io1 and io2) volumes. Iops *int64 `type:"integer"` // The ID of the snapshot. @@ -96670,8 +105616,9 @@ type ScheduledInstancesEbs struct { // a volume size, the default is the snapshot size. VolumeSize *int64 `type:"integer"` - // The volume type. gp2 for General Purpose SSD, io1 for Provisioned IOPS SSD, - // Throughput Optimized HDD for st1, Cold HDD for sc1, or standard for Magnetic. + // The volume type. gp2 for General Purpose SSD, io1 or io2 for Provisioned + // IOPS SSD, Throughput Optimized HDD for st1, Cold HDD for sc1, or standard + // for Magnetic. // // Default: gp2 VolumeType *string `type:"string"` @@ -97430,7 +106377,10 @@ type SearchTransitGatewayRoutesInput struct { // // * attachment.resource-id - The resource id of the transit gateway attachment. 
// - // * attachment.resource-type - The attachment resource type (vpc | vpn). + // * attachment.resource-type - The attachment resource type. Valid values + // are vpc | vpn | direct-connect-gateway | peering. + // + // * prefix-list-id - The ID of the prefix list. // // * route-search.exact-match - The exact match of the specified filter. // @@ -98161,9 +107111,8 @@ type Snapshot struct { // key for the parent volume. KmsKeyId *string `locationName:"kmsKeyId" type:"string"` - // Value from an Amazon-maintained list (amazon | self | all | aws-marketplace - // | microsoft) of snapshot owners. Not to be confused with the user-configured - // AWS account alias, which is set from the IAM console. + // The AWS owner alias, from an Amazon-maintained list (amazon). This is not + // the user-configured AWS account alias set using the IAM console. OwnerAlias *string `locationName:"ownerAlias" type:"string"` // The AWS account ID of the EBS snapshot owner. @@ -98326,7 +107275,7 @@ type SnapshotDetail struct { // The URL used to access the disk image. Url *string `locationName:"url" type:"string"` - // The S3 bucket for the disk image. + // The Amazon S3 bucket for the disk image. UserBucket *UserBucketDetails `locationName:"userBucket" type:"structure"` } @@ -98416,7 +107365,7 @@ type SnapshotDiskContainer struct { // a https URL (https://..) or an Amazon S3 URL (s3://..). Url *string `type:"string"` - // The S3 bucket for the disk image. + // The Amazon S3 bucket for the disk image. UserBucket *UserBucket `type:"structure"` } @@ -98596,7 +107545,7 @@ type SnapshotTaskDetail struct { // The URL of the disk image from which the snapshot is created. Url *string `locationName:"url" type:"string"` - // The S3 bucket for the disk image. + // The Amazon S3 bucket for the disk image. UserBucket *UserBucketDetails `locationName:"userBucket" type:"structure"` } @@ -98676,11 +107625,48 @@ func (s *SnapshotTaskDetail) SetUserBucket(v *UserBucketDetails) *SnapshotTaskDe return s } +// The Spot Instance replacement strategy to use when Amazon EC2 emits a signal +// that your Spot Instance is at an elevated risk of being interrupted. For +// more information, see Capacity rebalancing (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-configuration-strategies.html#spot-fleet-capacity-rebalance) +// in the Amazon EC2 User Guide for Linux Instances. +type SpotCapacityRebalance struct { + _ struct{} `type:"structure"` + + // The replacement strategy to use. Only available for fleets of type maintain. + // You must specify a value, otherwise you get an error. + // + // To allow Spot Fleet to launch a replacement Spot Instance when an instance + // rebalance notification is emitted for a Spot Instance in the fleet, specify + // launch. + // + // When a replacement instance is launched, the instance marked for rebalance + // is not automatically terminated. You can terminate it, or you can wait until + // Amazon EC2 interrupts it. You are charged for all instances while they are + // running. + ReplacementStrategy *string `locationName:"replacementStrategy" type:"string" enum:"ReplacementStrategy"` +} + +// String returns the string representation +func (s SpotCapacityRebalance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotCapacityRebalance) GoString() string { + return s.String() +} + +// SetReplacementStrategy sets the ReplacementStrategy field's value. 
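// Example (sketch): opting a Spot Fleet into capacity rebalancing with the
// SpotCapacityRebalance type above. Assumption: an existing
// *ec2.SpotFleetRequestConfigData named cfg; launch is the only replacement
// strategy documented here.
//
//	cfg.SetSpotMaintenanceStrategies(&ec2.SpotMaintenanceStrategies{
//		CapacityRebalance: &ec2.SpotCapacityRebalance{
//			ReplacementStrategy: aws.String("launch"),
//		},
//	})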
+func (s *SpotCapacityRebalance) SetReplacementStrategy(v string) *SpotCapacityRebalance { + s.ReplacementStrategy = &v + return s +} + // Describes the data feed for a Spot Instance. type SpotDatafeedSubscription struct { _ struct{} `type:"structure"` - // The Amazon S3 bucket where the Spot Instance data feed is located. + // The name of the Amazon S3 bucket where the Spot Instance data feed is located. Bucket *string `locationName:"bucket" type:"string"` // The fault codes for the Spot Instance request, if any. @@ -98689,7 +107675,7 @@ type SpotDatafeedSubscription struct { // The AWS account ID of the account. OwnerId *string `locationName:"ownerId" type:"string"` - // The prefix that is prepended to data feed files. + // The prefix for the data feed files. Prefix *string `locationName:"prefix" type:"string"` // The state of the Spot Instance data feed subscription. @@ -98737,8 +107723,9 @@ func (s *SpotDatafeedSubscription) SetState(v string) *SpotDatafeedSubscription } // Describes the launch specification for one or more Spot Instances. If you -// include On-Demand capacity in your fleet request, you can't use SpotFleetLaunchSpecification; -// you must use LaunchTemplateConfig (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_LaunchTemplateConfig.html). +// include On-Demand capacity in your fleet request or want to specify an EFA +// network device, you can't use SpotFleetLaunchSpecification; you must use +// LaunchTemplateConfig (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_LaunchTemplateConfig.html). type SpotFleetLaunchSpecification struct { _ struct{} `type:"structure"` @@ -98781,6 +107768,9 @@ type SpotFleetLaunchSpecification struct { // One or more network interfaces. If you specify a network interface, you must // specify subnet IDs and security group IDs using the network interface. + // + // SpotFleetLaunchSpecification currently does not support Elastic Fabric Adapter + // (EFA). To specify an EFA, you must use LaunchTemplateConfig (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_LaunchTemplateConfig.html). NetworkInterfaces []*InstanceNetworkInterfaceSpecification `locationName:"networkInterfaceSet" locationNameList:"item" type:"list"` // The placement information. @@ -98990,6 +107980,9 @@ type SpotFleetRequestConfig struct { // The state of the Spot Fleet request. SpotFleetRequestState *string `locationName:"spotFleetRequestState" type:"string" enum:"BatchState"` + + // The tags for a Spot Fleet resource. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` } // String returns the string representation @@ -99032,6 +108025,12 @@ func (s *SpotFleetRequestConfig) SetSpotFleetRequestState(v string) *SpotFleetRe return s } +// SetTags sets the Tags field's value. +func (s *SpotFleetRequestConfig) SetTags(v []*Tag) *SpotFleetRequestConfig { + s.Tags = v + return s +} + // Describes the configuration of a Spot Fleet request. type SpotFleetRequestConfigData struct { _ struct{} `type:"structure"` @@ -99067,12 +108066,12 @@ type SpotFleetRequestConfigData struct { // The Amazon Resource Name (ARN) of an AWS Identity and Access Management (IAM) // role that grants the Spot Fleet the permission to request, launch, terminate, - // and tag instances on your behalf. For more information, see Spot Fleet Prerequisites + // and tag instances on your behalf. 
For more information, see Spot Fleet prerequisites // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet-requests.html#spot-fleet-prerequisites) // in the Amazon EC2 User Guide for Linux Instances. Spot Fleet can terminate // Spot Instances on your behalf when you cancel its Spot Fleet request using - // CancelSpotFleetRequests or when the Spot Fleet request expires, if you set - // TerminateInstancesWithExpiration. + // CancelSpotFleetRequests (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CancelSpotFleetRequests) + // or when the Spot Fleet request expires, if you set TerminateInstancesWithExpiration. // // IamFleetRole is a required field IamFleetRole *string `locationName:"iamFleetRole" type:"string" required:"true"` @@ -99137,6 +108136,10 @@ type SpotFleetRequestConfigData struct { // Indicates whether Spot Fleet should replace unhealthy instances. ReplaceUnhealthyInstances *bool `locationName:"replaceUnhealthyInstances" type:"boolean"` + // The strategies for managing your Spot Instances that are at an elevated risk + // of being interrupted. + SpotMaintenanceStrategies *SpotMaintenanceStrategies `locationName:"spotMaintenanceStrategies" type:"structure"` + // The maximum amount per hour for Spot Instances that you're willing to pay. // You can use the spotdMaxTotalPrice parameter, the onDemandMaxTotalPrice parameter, // or both parameters to ensure that your fleet cost does not exceed your budget. @@ -99151,6 +108154,16 @@ type SpotFleetRequestConfigData struct { // The default is the On-Demand price. SpotPrice *string `locationName:"spotPrice" type:"string"` + // The key-value pair for tagging the Spot Fleet request on creation. The value + // for ResourceType must be spot-fleet-request, otherwise the Spot Fleet request + // fails. To tag instances at launch, specify the tags in the launch template + // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html#create-launch-template) + // (valid only if you use LaunchTemplateConfigs) or in the SpotFleetTagSpecification + // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_SpotFleetTagSpecification.html) + // (valid only if you use LaunchSpecifications). For information about tagging + // after launch, see Tagging Your Resources (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-resources). + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + // The number of units to request for the Spot Fleet. You can choose to set // the target capacity in terms of instances or a performance characteristic // that is important to your application workload, such as vCPUs, memory, or @@ -99316,6 +108329,12 @@ func (s *SpotFleetRequestConfigData) SetReplaceUnhealthyInstances(v bool) *SpotF return s } +// SetSpotMaintenanceStrategies sets the SpotMaintenanceStrategies field's value. +func (s *SpotFleetRequestConfigData) SetSpotMaintenanceStrategies(v *SpotMaintenanceStrategies) *SpotFleetRequestConfigData { + s.SpotMaintenanceStrategies = v + return s +} + // SetSpotMaxTotalPrice sets the SpotMaxTotalPrice field's value. func (s *SpotFleetRequestConfigData) SetSpotMaxTotalPrice(v string) *SpotFleetRequestConfigData { s.SpotMaxTotalPrice = &v @@ -99328,6 +108347,12 @@ func (s *SpotFleetRequestConfigData) SetSpotPrice(v string) *SpotFleetRequestCon return s } +// SetTagSpecifications sets the TagSpecifications field's value. 
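// Example (sketch): tagging a Spot Fleet request on creation via the
// TagSpecifications field above. Assumptions: an *ec2.EC2 client named svc and
// a prepared *ec2.SpotFleetRequestConfigData named cfg; ResourceType must be
// spot-fleet-request or the request fails.
//
//	cfg.SetTagSpecifications([]*ec2.TagSpecification{{
//		ResourceType: aws.String("spot-fleet-request"),
//		Tags:         []*ec2.Tag{{Key: aws.String("owner"), Value: aws.String("batch")}},
//	}})
//	out, err := svc.RequestSpotFleet(&ec2.RequestSpotFleetInput{SpotFleetRequestConfig: cfg})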
+func (s *SpotFleetRequestConfigData) SetTagSpecifications(v []*TagSpecification) *SpotFleetRequestConfigData { + s.TagSpecifications = v + return s +} + // SetTargetCapacity sets the TargetCapacity field's value. func (s *SpotFleetRequestConfigData) SetTargetCapacity(v int64) *SpotFleetRequestConfigData { s.TargetCapacity = &v @@ -99363,7 +108388,8 @@ type SpotFleetTagSpecification struct { _ struct{} `type:"structure"` // The type of resource. Currently, the only resource type that is supported - // is instance. + // is instance. To tag the Spot Fleet request on creation, use the TagSpecifications + // parameter in SpotFleetRequestConfigData (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_SpotFleetRequestConfigData.html). ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"` // The tags. @@ -99442,7 +108468,7 @@ type SpotInstanceRequest struct { SpotPrice *string `locationName:"spotPrice" type:"string"` // The state of the Spot Instance request. Spot status information helps track - // your Spot Instance requests. For more information, see Spot Status (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html) + // your Spot Instance requests. For more information, see Spot status (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html) // in the Amazon EC2 User Guide for Linux Instances. State *string `locationName:"state" type:"string" enum:"SpotInstanceState"` @@ -99459,11 +108485,16 @@ type SpotInstanceRequest struct { // The request becomes active at this date and time. ValidFrom *time.Time `locationName:"validFrom" type:"timestamp"` - // The end date of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). - // If this is a one-time request, it remains active until all instances launch, - // the request is canceled, or this date is reached. If the request is persistent, - // it remains active until it is canceled or this date is reached. The default - // end date is 7 days from the current date. + // The end date of the request, in UTC format (YYYY-MM-DDTHH:MM:SSZ). + // + // * For a persistent request, the request remains active until the validUntil + // date and time is reached. Otherwise, the request remains active until + // you cancel it. + // + // * For a one-time request, the request remains active until all instances + // launch, the request is canceled, or the validUntil date and time is reached. + // By default, the request is valid for 7 days from the date the request + // was created. ValidUntil *time.Time `locationName:"validUntil" type:"timestamp"` } @@ -99628,7 +108659,7 @@ func (s *SpotInstanceStateFault) SetMessage(v string) *SpotInstanceStateFault { type SpotInstanceStatus struct { _ struct{} `type:"structure"` - // The status code. For a list of status codes, see Spot Status Codes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html#spot-instance-bid-status-understand) + // The status code. For a list of status codes, see Spot status codes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html#spot-instance-bid-status-understand) // in the Amazon EC2 User Guide for Linux Instances. Code *string `locationName:"code" type:"string"` @@ -99668,6 +108699,32 @@ func (s *SpotInstanceStatus) SetUpdateTime(v time.Time) *SpotInstanceStatus { return s } +// The strategies for managing your Spot Instances that are at an elevated risk +// of being interrupted. 
+type SpotMaintenanceStrategies struct { + _ struct{} `type:"structure"` + + // The strategy to use when Amazon EC2 emits a signal that your Spot Instance + // is at an elevated risk of being interrupted. + CapacityRebalance *SpotCapacityRebalance `locationName:"capacityRebalance" type:"structure"` +} + +// String returns the string representation +func (s SpotMaintenanceStrategies) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotMaintenanceStrategies) GoString() string { + return s.String() +} + +// SetCapacityRebalance sets the CapacityRebalance field's value. +func (s *SpotMaintenanceStrategies) SetCapacityRebalance(v *SpotCapacityRebalance) *SpotMaintenanceStrategies { + s.CapacityRebalance = v + return s +} + // The options for Spot Instances. type SpotMarketOptions struct { _ struct{} `type:"structure"` @@ -99675,6 +108732,17 @@ type SpotMarketOptions struct { // The required duration for the Spot Instances (also known as Spot blocks), // in minutes. This value must be a multiple of 60 (60, 120, 180, 240, 300, // or 360). + // + // The duration period starts as soon as your Spot Instance receives its instance + // ID. At the end of the duration period, Amazon EC2 marks the Spot Instance + // for termination and provides a Spot Instance termination notice, which gives + // the instance a two-minute warning before it terminates. + // + // You can't specify an Availability Zone group or a launch group if you specify + // a duration. + // + // New accounts or accounts with no previous billing history with AWS are not + // eligible for Spot Instances with a defined duration (also known as Spot blocks). BlockDurationMinutes *int64 `type:"integer"` // The behavior when a Spot Instance is interrupted. The default is terminate. @@ -99684,16 +108752,20 @@ type SpotMarketOptions struct { // default is the On-Demand price. MaxPrice *string `type:"string"` - // The Spot Instance request type. For RunInstances, persistent Spot Instance - // requests are only supported when InstanceInterruptionBehavior is set to either - // hibernate or stop. + // The Spot Instance request type. For RunInstances (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances), + // persistent Spot Instance requests are only supported when InstanceInterruptionBehavior + // is set to either hibernate or stop. SpotInstanceType *string `type:"string" enum:"SpotInstanceType"` - // The end date of the request. For a one-time request, the request remains - // active until all instances launch, the request is canceled, or this date - // is reached. If the request is persistent, it remains active until it is canceled - // or this date and time is reached. The default end date is 7 days from the - // current date. + // The end date of the request, in UTC format (YYYY-MM-DDTHH:MM:SSZ). Supported + // only for persistent requests. + // + // * For a persistent request, the request remains active until the ValidUntil + // date and time is reached. Otherwise, the request remains active until + // you cancel it. + // + // * For a one-time request, ValidUntil is not supported. The request remains + // active until all instances launch or you cancel the request. ValidUntil *time.Time `type:"timestamp"` } @@ -99765,6 +108837,10 @@ type SpotOptions struct { // the number of Spot pools that you specify. 
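// Example (sketch): a persistent Spot request via the SpotMarketOptions type
// described above. Assumptions: illustrative values and the standard time
// package; a persistent request requires InstanceInterruptionBehavior of
// hibernate or stop, and ValidUntil applies only to persistent requests.
//
//	market := &ec2.InstanceMarketOptionsRequest{
//		MarketType: aws.String("spot"),
//		SpotOptions: &ec2.SpotMarketOptions{
//			SpotInstanceType:             aws.String("persistent"),
//			InstanceInterruptionBehavior: aws.String("stop"),
//			ValidUntil:                   aws.Time(time.Now().Add(48 * time.Hour)),
//		},
//	}
//	// market can then be set on a RunInstancesInput via SetInstanceMarketOptions.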
	InstancePoolsToUseCount *int64 `locationName:"instancePoolsToUseCount" type:"integer"`

+	// The strategies for managing your Spot Instances that are at an elevated
+	// risk of being interrupted. Currently only the capacity rebalance strategy
+	// is available.
+	MaintenanceStrategies *FleetSpotMaintenanceStrategies `locationName:"maintenanceStrategies" type:"structure"`
+
	// The maximum amount per hour for Spot Instances that you're willing to pay.
	MaxTotalPrice *string `locationName:"maxTotalPrice" type:"string"`

@@ -99809,6 +108885,12 @@ func (s *SpotOptions) SetInstancePoolsToUseCount(v int64) *SpotOptions {
	return s
}

+// SetMaintenanceStrategies sets the MaintenanceStrategies field's value.
+func (s *SpotOptions) SetMaintenanceStrategies(v *FleetSpotMaintenanceStrategies) *SpotOptions {
+	s.MaintenanceStrategies = v
+	return s
+}
+
// SetMaxTotalPrice sets the MaxTotalPrice field's value.
func (s *SpotOptions) SetMaxTotalPrice(v string) *SpotOptions {
	s.MaxTotalPrice = &v
@@ -99861,6 +108943,10 @@ type SpotOptionsRequest struct {
	// across the number of Spot pools that you specify.
	InstancePoolsToUseCount *int64 `type:"integer"`

+	// The strategies for managing your Spot Instances that are at an elevated risk
+	// of being interrupted.
+	MaintenanceStrategies *FleetSpotMaintenanceStrategiesRequest `type:"structure"`
+
	// The maximum amount per hour for Spot Instances that you're willing to pay.
	MaxTotalPrice *string `type:"string"`

@@ -99905,6 +108991,12 @@ func (s *SpotOptionsRequest) SetInstancePoolsToUseCount(v int64) *SpotOptionsReq
	return s
}

+// SetMaintenanceStrategies sets the MaintenanceStrategies field's value.
+func (s *SpotOptionsRequest) SetMaintenanceStrategies(v *FleetSpotMaintenanceStrategiesRequest) *SpotOptionsRequest {
+	s.MaintenanceStrategies = v
+	return s
+}
+
// SetMaxTotalPrice sets the MaxTotalPrice field's value.
func (s *SpotOptionsRequest) SetMaxTotalPrice(v string) *SpotOptionsRequest {
	s.MaxTotalPrice = &v
@@ -100052,8 +109144,7 @@ type StaleIpPermission struct {
	// The IP ranges. Not applicable for stale security group rules.
	IpRanges []*string `locationName:"ipRanges" locationNameList:"item" type:"list"`

-	// The prefix list IDs for an AWS service. Not applicable for stale security
-	// group rules.
+	// The prefix list IDs. Not applicable for stale security group rules.
	PrefixListIds []*string `locationName:"prefixListIds" locationNameList:"item" type:"list"`

	// The end of the port range for the TCP and UDP protocols, or an ICMP type
@@ -100425,7 +109516,7 @@ type StopInstancesInput struct {
	// Hibernates the instance if the instance was enabled for hibernation at launch.
	// If the instance cannot hibernate successfully, a normal shutdown occurs.
-	// For more information, see Hibernate Your Instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html)
+	// For more information, see Hibernate your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html)
	// in the Amazon Elastic Compute Cloud User Guide.
	//
	// Default: false
@@ -100585,12 +109676,20 @@ type Subnet struct {
	// The IPv4 CIDR block assigned to the subnet.
	CidrBlock *string `locationName:"cidrBlock" type:"string"`

+	// The customer-owned IPv4 address pool associated with the subnet.
+	CustomerOwnedIpv4Pool *string `locationName:"customerOwnedIpv4Pool" type:"string"`
+
	// Indicates whether this is the default subnet for the Availability Zone.
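// Example (sketch): reading the customer-owned IP fields added to Subnet in
// this change. Assumptions: an *ec2.EC2 client named svc and an illustrative
// subnet ID.
//
//	out, err := svc.DescribeSubnets(&ec2.DescribeSubnetsInput{
//		SubnetIds: []*string{aws.String("subnet-0123456789abcdef0")},
//	})
//	if err == nil && len(out.Subnets) == 1 {
//		s := out.Subnets[0]
//		_ = aws.BoolValue(s.MapCustomerOwnedIpOnLaunch) // new field on Subnet
//		_ = aws.StringValue(s.CustomerOwnedIpv4Pool)    // associated CoIP pool, if any
//	}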
	DefaultForAz *bool `locationName:"defaultForAz" type:"boolean"`

	// Information about the IPv6 CIDR blocks associated with the subnet.
	Ipv6CidrBlockAssociationSet []*SubnetIpv6CidrBlockAssociation `locationName:"ipv6CidrBlockAssociationSet" locationNameList:"item" type:"list"`

+	// Indicates whether a network interface created in this subnet (including a
+	// network interface created by RunInstances) receives a customer-owned IPv4
+	// address.
+	MapCustomerOwnedIpOnLaunch *bool `locationName:"mapCustomerOwnedIpOnLaunch" type:"boolean"`
+
	// Indicates whether instances launched in this subnet receive a public IPv4
	// address.
	MapPublicIpOnLaunch *bool `locationName:"mapPublicIpOnLaunch" type:"boolean"`

@@ -100657,6 +109756,12 @@ func (s *Subnet) SetCidrBlock(v string) *Subnet {
	return s
}

+// SetCustomerOwnedIpv4Pool sets the CustomerOwnedIpv4Pool field's value.
+func (s *Subnet) SetCustomerOwnedIpv4Pool(v string) *Subnet {
+	s.CustomerOwnedIpv4Pool = &v
+	return s
+}
+
// SetDefaultForAz sets the DefaultForAz field's value.
func (s *Subnet) SetDefaultForAz(v bool) *Subnet {
	s.DefaultForAz = &v
@@ -100669,6 +109774,12 @@ func (s *Subnet) SetIpv6CidrBlockAssociationSet(v []*SubnetIpv6CidrBlockAssociat
	return s
}

+// SetMapCustomerOwnedIpOnLaunch sets the MapCustomerOwnedIpOnLaunch field's value.
+func (s *Subnet) SetMapCustomerOwnedIpOnLaunch(v bool) *Subnet {
+	s.MapCustomerOwnedIpOnLaunch = &v
+	return s
+}
+
// SetMapPublicIpOnLaunch sets the MapPublicIpOnLaunch field's value.
func (s *Subnet) SetMapPublicIpOnLaunch(v bool) *Subnet {
	s.MapPublicIpOnLaunch = &v
@@ -100969,11 +110080,17 @@ type TagSpecification struct {
	_ struct{} `type:"structure"`

	// The type of resource to tag. Currently, the resource types that support tagging
-	// on creation are: capacity-reservation | client-vpn-endpoint | dedicated-host
-	// | fleet | fpga-image | instance | key-pair | launch-template | placement-group
-	// | snapshot | traffic-mirror-filter | traffic-mirror-session | traffic-mirror-target
+	// on creation are: capacity-reservation | carrier-gateway | client-vpn-endpoint
+	// | customer-gateway | dedicated-host | dhcp-options | export-image-task |
+	// export-instance-task | fleet | fpga-image | host-reservation | import-image-task
+	// | import-snapshot-task | instance | internet-gateway | ipv4pool-ec2 | ipv6pool-ec2
+	// | key-pair | launch-template | placement-group | prefix-list | natgateway
+	// | network-acl | route-table | security-group | spot-fleet-request | spot-instances-request
+	// | snapshot | subnet | traffic-mirror-filter | traffic-mirror-session | traffic-mirror-target
	// | transit-gateway | transit-gateway-attachment | transit-gateway-route-table
-	// | volume.
+	// | volume | vpc | vpc-peering-connection | vpc-endpoint (for interface and
+	// gateway endpoints) | vpc-endpoint-service (for AWS PrivateLink) | vpc-flow-log
+	// | vpn-connection | vpn-gateway.
	//
	// To tag a resource after it has been created, see CreateTags (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateTags.html).
	ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"`
@@ -101017,7 +110134,8 @@ func (s *TagSpecification) SetTags(v []*Tag) *TagSpecification {
// reaches the maximum amount that you're willing to pay. When the maximum amount
// you're willing to pay is reached, the fleet stops launching instances even
// if it hasn’t met the target capacity.
The MaxTotalPrice parameters are
-// located in and
+// located in OnDemandOptions (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_OnDemandOptions.html)
+// and SpotOptions (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_SpotOptions.html).
type TargetCapacitySpecification struct {
	_ struct{} `type:"structure"`

@@ -101083,7 +110201,8 @@ func (s *TargetCapacitySpecification) SetTotalTargetCapacity(v int64) *TargetCap
// instances until it reaches the maximum amount that you're willing to pay.
// When the maximum amount you're willing to pay is reached, the fleet stops
// launching instances even if it hasn’t met the target capacity. The MaxTotalPrice
-// parameters are located in and .
+// parameters are located in OnDemandOptionsRequest (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_OnDemandOptionsRequest.html)
+// and SpotOptionsRequest (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_SpotOptionsRequest.html).
type TargetCapacitySpecificationRequest struct {
	_ struct{} `type:"structure"`

@@ -102169,7 +111288,7 @@ type TransitGatewayAssociation struct {
	// The ID of the resource.
	ResourceId *string `locationName:"resourceId" type:"string"`

-	// The resource type.
+	// The resource type. Note that the tgw-peering resource type has been deprecated.
	ResourceType *string `locationName:"resourceType" type:"string" enum:"TransitGatewayAttachmentResourceType"`

	// The state of the association.
@@ -102238,10 +111357,10 @@ type TransitGatewayAttachment struct {
	// The ID of the AWS account that owns the resource.
	ResourceOwnerId *string `locationName:"resourceOwnerId" type:"string"`

-	// The resource type.
+	// The resource type. Note that the tgw-peering resource type has been deprecated.
	ResourceType *string `locationName:"resourceType" type:"string" enum:"TransitGatewayAttachmentResourceType"`

-	// The attachment state.
+	// The attachment state. Note that the initiating state has been deprecated.
	State *string `locationName:"state" type:"string" enum:"TransitGatewayAttachmentState"`

	// The tags for the attachment.
@@ -102951,7 +112070,8 @@ type TransitGatewayPeeringAttachment struct {
	// Information about the requester transit gateway.
	RequesterTgwInfo *PeeringTgwInfo `locationName:"requesterTgwInfo" type:"structure"`

-	// The state of the transit gateway peering attachment.
+	// The state of the transit gateway peering attachment. Note that the initiating
+	// state has been deprecated.
	State *string `locationName:"state" type:"string" enum:"TransitGatewayAttachmentState"`

	// The status of the transit gateway peering attachment.
@@ -103016,6 +112136,117 @@ func (s *TransitGatewayPeeringAttachment) SetTransitGatewayAttachmentId(v string
	return s
}

+// Describes a transit gateway prefix list attachment.
+type TransitGatewayPrefixListAttachment struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the resource.
+	ResourceId *string `locationName:"resourceId" type:"string"`
+
+	// The resource type. Note that the tgw-peering resource type has been deprecated.
+	ResourceType *string `locationName:"resourceType" type:"string" enum:"TransitGatewayAttachmentResourceType"`
+
+	// The ID of the attachment.
+ TransitGatewayAttachmentId *string `locationName:"transitGatewayAttachmentId" type:"string"` +} + +// String returns the string representation +func (s TransitGatewayPrefixListAttachment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TransitGatewayPrefixListAttachment) GoString() string { + return s.String() +} + +// SetResourceId sets the ResourceId field's value. +func (s *TransitGatewayPrefixListAttachment) SetResourceId(v string) *TransitGatewayPrefixListAttachment { + s.ResourceId = &v + return s +} + +// SetResourceType sets the ResourceType field's value. +func (s *TransitGatewayPrefixListAttachment) SetResourceType(v string) *TransitGatewayPrefixListAttachment { + s.ResourceType = &v + return s +} + +// SetTransitGatewayAttachmentId sets the TransitGatewayAttachmentId field's value. +func (s *TransitGatewayPrefixListAttachment) SetTransitGatewayAttachmentId(v string) *TransitGatewayPrefixListAttachment { + s.TransitGatewayAttachmentId = &v + return s +} + +// Describes a prefix list reference. +type TransitGatewayPrefixListReference struct { + _ struct{} `type:"structure"` + + // Indicates whether traffic that matches this route is dropped. + Blackhole *bool `locationName:"blackhole" type:"boolean"` + + // The ID of the prefix list. + PrefixListId *string `locationName:"prefixListId" type:"string"` + + // The ID of the prefix list owner. + PrefixListOwnerId *string `locationName:"prefixListOwnerId" type:"string"` + + // The state of the prefix list reference. + State *string `locationName:"state" type:"string" enum:"TransitGatewayPrefixListReferenceState"` + + // Information about the transit gateway attachment. + TransitGatewayAttachment *TransitGatewayPrefixListAttachment `locationName:"transitGatewayAttachment" type:"structure"` + + // The ID of the transit gateway route table. + TransitGatewayRouteTableId *string `locationName:"transitGatewayRouteTableId" type:"string"` +} + +// String returns the string representation +func (s TransitGatewayPrefixListReference) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TransitGatewayPrefixListReference) GoString() string { + return s.String() +} + +// SetBlackhole sets the Blackhole field's value. +func (s *TransitGatewayPrefixListReference) SetBlackhole(v bool) *TransitGatewayPrefixListReference { + s.Blackhole = &v + return s +} + +// SetPrefixListId sets the PrefixListId field's value. +func (s *TransitGatewayPrefixListReference) SetPrefixListId(v string) *TransitGatewayPrefixListReference { + s.PrefixListId = &v + return s +} + +// SetPrefixListOwnerId sets the PrefixListOwnerId field's value. +func (s *TransitGatewayPrefixListReference) SetPrefixListOwnerId(v string) *TransitGatewayPrefixListReference { + s.PrefixListOwnerId = &v + return s +} + +// SetState sets the State field's value. +func (s *TransitGatewayPrefixListReference) SetState(v string) *TransitGatewayPrefixListReference { + s.State = &v + return s +} + +// SetTransitGatewayAttachment sets the TransitGatewayAttachment field's value. +func (s *TransitGatewayPrefixListReference) SetTransitGatewayAttachment(v *TransitGatewayPrefixListAttachment) *TransitGatewayPrefixListReference { + s.TransitGatewayAttachment = v + return s +} + +// SetTransitGatewayRouteTableId sets the TransitGatewayRouteTableId field's value. 
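// Example (sketch): creating a prefix list reference like the one described by
// TransitGatewayPrefixListReference above. Assumption: the
// CreateTransitGatewayPrefixListReference operation and its input fields, which
// this SDK revision is expected to ship alongside these types; IDs are
// illustrative.
//
//	out, err := svc.CreateTransitGatewayPrefixListReference(
//		&ec2.CreateTransitGatewayPrefixListReferenceInput{
//			TransitGatewayRouteTableId: aws.String("tgw-rtb-0123456789abcdef0"),
//			PrefixListId:               aws.String("pl-0123456789abcdef0"),
//			TransitGatewayAttachmentId: aws.String("tgw-attach-0123456789abcdef0"),
//			Blackhole:                  aws.Bool(false),
//		})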
+func (s *TransitGatewayPrefixListReference) SetTransitGatewayRouteTableId(v string) *TransitGatewayPrefixListReference { + s.TransitGatewayRouteTableId = &v + return s +} + // Describes route propagation. type TransitGatewayPropagation struct { _ struct{} `type:"structure"` @@ -103023,7 +112254,7 @@ type TransitGatewayPropagation struct { // The ID of the resource. ResourceId *string `locationName:"resourceId" type:"string"` - // The resource type. + // The resource type. Note that the tgw-peering resource type has been deprecated. ResourceType *string `locationName:"resourceType" type:"string" enum:"TransitGatewayAttachmentResourceType"` // The state. @@ -103082,28 +112313,28 @@ type TransitGatewayRequestOptions struct { // A private Autonomous System Number (ASN) for the Amazon side of a BGP session. // The range is 64512 to 65534 for 16-bit ASNs and 4200000000 to 4294967294 - // for 32-bit ASNs. + // for 32-bit ASNs. The default is 64512. AmazonSideAsn *int64 `type:"long"` - // Enable or disable automatic acceptance of attachment requests. The default - // is disable. + // Enable or disable automatic acceptance of attachment requests. Disabled by + // default. AutoAcceptSharedAttachments *string `type:"string" enum:"AutoAcceptSharedAttachmentsValue"` // Enable or disable automatic association with the default association route - // table. The default is enable. + // table. Enabled by default. DefaultRouteTableAssociation *string `type:"string" enum:"DefaultRouteTableAssociationValue"` // Enable or disable automatic propagation of routes to the default propagation - // route table. The default is enable. + // route table. Enabled by default. DefaultRouteTablePropagation *string `type:"string" enum:"DefaultRouteTablePropagationValue"` - // Enable or disable DNS support. + // Enable or disable DNS support. Enabled by default. DnsSupport *string `type:"string" enum:"DnsSupportValue"` // Indicates whether multicast is enabled on the transit gateway MulticastSupport *string `type:"string" enum:"MulticastSupportValue"` - // Enable or disable Equal Cost Multipath Protocol support. + // Enable or disable Equal Cost Multipath Protocol support. Enabled by default. VpnEcmpSupport *string `type:"string" enum:"VpnEcmpSupportValue"` } @@ -103166,6 +112397,9 @@ type TransitGatewayRoute struct { // The CIDR block used for destination matches. DestinationCidrBlock *string `locationName:"destinationCidrBlock" type:"string"` + // The ID of the prefix list used for destination matches. + PrefixListId *string `locationName:"prefixListId" type:"string"` + // The state of the route. State *string `locationName:"state" type:"string" enum:"TransitGatewayRouteState"` @@ -103192,6 +112426,12 @@ func (s *TransitGatewayRoute) SetDestinationCidrBlock(v string) *TransitGatewayR return s } +// SetPrefixListId sets the PrefixListId field's value. +func (s *TransitGatewayRoute) SetPrefixListId(v string) *TransitGatewayRoute { + s.PrefixListId = &v + return s +} + // SetState sets the State field's value. func (s *TransitGatewayRoute) SetState(v string) *TransitGatewayRoute { s.State = &v @@ -103217,7 +112457,7 @@ type TransitGatewayRouteAttachment struct { // The ID of the resource. ResourceId *string `locationName:"resourceId" type:"string"` - // The resource type. + // The resource type. Note that the tgw-peering resource type has been deprecated. ResourceType *string `locationName:"resourceType" type:"string" enum:"TransitGatewayAttachmentResourceType"` // The ID of the attachment. 
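A minimal usage sketch for the new prefix list reference type introduced above (illustrative only, not part of the vendored diff): it assumes the DescribeTransitGatewayPrefixListReferences call that ships in the same SDK release, and the route table ID is a placeholder.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	// tgw-rtb-0123456789abcdef0 is a hypothetical placeholder ID.
	out, err := svc.DescribeTransitGatewayPrefixListReferences(
		&ec2.DescribeTransitGatewayPrefixListReferencesInput{
			TransitGatewayRouteTableId: aws.String("tgw-rtb-0123456789abcdef0"),
		})
	if err != nil {
		log.Fatal(err)
	}

	// Each reference pairs a prefix list with a route table; a blackhole
	// reference drops matching traffic instead of forwarding it to the
	// attachment described by TransitGatewayPrefixListAttachment.
	for _, ref := range out.TransitGatewayPrefixListReferences {
		fmt.Printf("%s state=%s blackhole=%t\n",
			aws.StringValue(ref.PrefixListId),
			aws.StringValue(ref.State),
			aws.BoolValue(ref.Blackhole))
	}
}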
@@ -103339,7 +112579,7 @@ type TransitGatewayRouteTableAssociation struct {
	// The ID of the resource.
	ResourceId *string `locationName:"resourceId" type:"string"`

-	// The resource type.
+	// The resource type. Note that the tgw-peering resource type has been deprecated.
	ResourceType *string `locationName:"resourceType" type:"string" enum:"TransitGatewayAttachmentResourceType"`

	// The state of the association.
@@ -103390,7 +112630,7 @@ type TransitGatewayRouteTablePropagation struct {
	// The ID of the resource.
	ResourceId *string `locationName:"resourceId" type:"string"`

-	// The type of resource.
+	// The type of resource. Note that the tgw-peering resource type has been deprecated.
	ResourceType *string `locationName:"resourceType" type:"string" enum:"TransitGatewayAttachmentResourceType"`

	// The state of the resource.
@@ -103444,7 +112684,8 @@ type TransitGatewayVpcAttachment struct {
	// The VPC attachment options.
	Options *TransitGatewayVpcAttachmentOptions `locationName:"options" type:"structure"`

-	// The state of the VPC attachment.
+	// The state of the VPC attachment. Note that the initiating state has been
+	// deprecated.
	State *string `locationName:"state" type:"string" enum:"TransitGatewayAttachmentState"`

	// The IDs of the subnets.
@@ -103534,10 +112775,13 @@ func (s *TransitGatewayVpcAttachment) SetVpcOwnerId(v string) *TransitGatewayVpc
 type TransitGatewayVpcAttachmentOptions struct {
	_ struct{} `type:"structure"`

+	// Indicates whether appliance mode support is enabled.
+	ApplianceModeSupport *string `locationName:"applianceModeSupport" type:"string" enum:"ApplianceModeSupportValue"`
+
	// Indicates whether DNS support is enabled.
	DnsSupport *string `locationName:"dnsSupport" type:"string" enum:"DnsSupportValue"`

-	// Indicates whether IPv6 support is enabled.
+	// Indicates whether IPv6 support is disabled.
	Ipv6Support *string `locationName:"ipv6Support" type:"string" enum:"Ipv6SupportValue"`
}

@@ -103551,6 +112795,12 @@ func (s TransitGatewayVpcAttachmentOptions) GoString() string {
	return s.String()
}

+// SetApplianceModeSupport sets the ApplianceModeSupport field's value.
+func (s *TransitGatewayVpcAttachmentOptions) SetApplianceModeSupport(v string) *TransitGatewayVpcAttachmentOptions {
+	s.ApplianceModeSupport = &v
+	return s
+}
+
// SetDnsSupport sets the DnsSupport field's value.
func (s *TransitGatewayVpcAttachmentOptions) SetDnsSupport(v string) *TransitGatewayVpcAttachmentOptions {
	s.DnsSupport = &v
@@ -103567,6 +112817,9 @@ func (s *TransitGatewayVpcAttachmentOptions) SetIpv6Support(v string) *TransitGa
 type TunnelOption struct {
	_ struct{} `type:"structure"`

+	// The action to take after a DPD timeout occurs.
+	DpdTimeoutAction *string `locationName:"dpdTimeoutAction" type:"string"`
+
	// The number of seconds after which a DPD timeout occurs.
	DpdTimeoutSeconds *int64 `locationName:"dpdTimeoutSeconds" type:"integer"`

@@ -103617,8 +112870,14 @@ type TunnelOption struct {
	// The number of packets in an IKE replay window.
	ReplayWindowSize *int64 `locationName:"replayWindowSize" type:"integer"`

-	// The range of inside IP addresses for the tunnel.
+	// The action to take when establishing the VPN tunnels for a VPN connection.
+	StartupAction *string `locationName:"startupAction" type:"string"`
+
+	// The range of inside IPv4 addresses for the tunnel.
	TunnelInsideCidr *string `locationName:"tunnelInsideCidr" type:"string"`
+
+	// The range of inside IPv6 addresses for the tunnel.
+	TunnelInsideIpv6Cidr *string `locationName:"tunnelInsideIpv6Cidr" type:"string"`
}

// String returns the string representation
func (s TunnelOption) String() string {
	return awsutil.Prettify(s)
}

@@ -103631,6 +112890,12 @@ func (s TunnelOption) GoString() string {
	return s.String()
}

+// SetDpdTimeoutAction sets the DpdTimeoutAction field's value.
+func (s *TunnelOption) SetDpdTimeoutAction(v string) *TunnelOption {
+	s.DpdTimeoutAction = &v
+	return s
+}
+
// SetDpdTimeoutSeconds sets the DpdTimeoutSeconds field's value.
func (s *TunnelOption) SetDpdTimeoutSeconds(v int64) *TunnelOption {
	s.DpdTimeoutSeconds = &v
@@ -103721,12 +112986,24 @@ func (s *TunnelOption) SetReplayWindowSize(v int64) *TunnelOption {
	return s
}

+// SetStartupAction sets the StartupAction field's value.
+func (s *TunnelOption) SetStartupAction(v string) *TunnelOption {
+	s.StartupAction = &v
+	return s
+}
+
// SetTunnelInsideCidr sets the TunnelInsideCidr field's value.
func (s *TunnelOption) SetTunnelInsideCidr(v string) *TunnelOption {
	s.TunnelInsideCidr = &v
	return s
}

+// SetTunnelInsideIpv6Cidr sets the TunnelInsideIpv6Cidr field's value.
+func (s *TunnelOption) SetTunnelInsideIpv6Cidr(v string) *TunnelOption {
+	s.TunnelInsideIpv6Cidr = &v
+	return s
+}
+
 type UnassignIpv6AddressesInput struct {
	_ struct{} `type:"structure"`

@@ -104276,11 +113553,11 @@ func (s *UpdateSecurityGroupRuleDescriptionsIngressOutput) SetReturn(v bool) *Up
	return s
}

-// Describes the S3 bucket for the disk image.
+// Describes the Amazon S3 bucket for the disk image.
 type UserBucket struct {
	_ struct{} `type:"structure"`

-	// The name of the S3 bucket where the disk image is located.
+	// The name of the Amazon S3 bucket where the disk image is located.
	S3Bucket *string `type:"string"`

	// The file name of the disk image.
@@ -104309,11 +113586,11 @@ func (s *UserBucket) SetS3Key(v string) *UserBucket {
	return s
}

-// Describes the S3 bucket for the disk image.
+// Describes the Amazon S3 bucket for the disk image.
 type UserBucketDetails struct {
	_ struct{} `type:"structure"`

-	// The S3 bucket from which the disk image was created.
+	// The Amazon S3 bucket from which the disk image was created.
	S3Bucket *string `locationName:"s3Bucket" type:"string"`

	// The file name of the disk image.
@@ -104475,12 +113752,11 @@ type VCpuInfo struct {
	// The default number of vCPUs for the instance type.
	DefaultVCpus *int64 `locationName:"defaultVCpus" type:"integer"`

-	// List of the valid number of cores that can be configured for the instance
-	// type.
+	// The valid number of cores that can be configured for the instance type.
	ValidCores []*int64 `locationName:"validCores" locationNameList:"item" type:"list"`

-	// List of the valid number of threads per core that can be configured for the
-	// instance type.
+	// The valid number of threads per core that can be configured for the instance
+	// type.
	ValidThreadsPerCore []*int64 `locationName:"validThreadsPerCore" locationNameList:"item" type:"list"`
}

@@ -104524,6 +113800,70 @@ func (s *VCpuInfo) SetValidThreadsPerCore(v []*int64) *VCpuInfo {
	return s
}

+// The error code and error message that are returned for a parameter or parameter
+// combination that is not valid when a new launch template or new version of
+// a launch template is created.
+type ValidationError struct {
+	_ struct{} `type:"structure"`
+
+	// The error code that indicates why the parameter or parameter combination
+	// is not valid. For more information about error codes, see Error Codes (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html).
+	Code *string `locationName:"code" type:"string"`
+
+	// The error message that describes why the parameter or parameter combination
+	// is not valid. For more information about error messages, see Error Codes
+	// (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html).
+	Message *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation
+func (s ValidationError) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ValidationError) GoString() string {
+	return s.String()
+}
+
+// SetCode sets the Code field's value.
+func (s *ValidationError) SetCode(v string) *ValidationError {
+	s.Code = &v
+	return s
+}
+
+// SetMessage sets the Message field's value.
+func (s *ValidationError) SetMessage(v string) *ValidationError {
+	s.Message = &v
+	return s
+}
+
+// The error codes and error messages that are returned for the parameters or
+// parameter combinations that are not valid when a new launch template or new
+// version of a launch template is created.
+type ValidationWarning struct {
+	_ struct{} `type:"structure"`
+
+	// The error codes and error messages.
+	Errors []*ValidationError `locationName:"errorSet" locationNameList:"item" type:"list"`
+}
+
+// String returns the string representation
+func (s ValidationWarning) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ValidationWarning) GoString() string {
+	return s.String()
+}
+
+// SetErrors sets the Errors field's value.
+func (s *ValidationWarning) SetErrors(v []*ValidationError) *ValidationWarning {
+	s.Errors = v
+	return s
+}
+
 // Describes telemetry for a VPN tunnel.
 type VgwTelemetry struct {
	_ struct{} `type:"structure"`

@@ -104617,17 +113957,18 @@ type Volume struct {
	// For Provisioned IOPS SSD volumes, this represents the number of IOPS that
	// are provisioned for the volume. For General Purpose SSD volumes, this represents
	// the baseline performance of the volume and the rate at which the volume accumulates
-	// I/O credits for bursting. For more information, see Amazon EBS Volume Types
+	// I/O credits for bursting. For more information, see Amazon EBS volume types
	// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html)
	// in the Amazon Elastic Compute Cloud User Guide.
	//
-	// Constraints: Range is 100-16,000 IOPS for gp2 volumes and 100 to 64,000 IOPS
-	// for io1 volumes, in most Regions. The maximum IOPS for io1 of 64,000 is guaranteed
-	// only on Nitro-based instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances).
+	// Constraints: Range is 100-16,000 IOPS for gp2 volumes and 100 to 64,000 IOPS
+	// for io1 and io2 volumes, in most Regions. The maximum IOPS for io1 and io2
+	// of 64,000 is guaranteed only on Nitro-based instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances).
	// Other instance families guarantee performance up to 32,000 IOPS.
	//
-	// Condition: This parameter is required for requests to create io1 volumes;
-	// it is not used in requests to create gp2, st1, sc1, or standard volumes.
+	// Condition: This parameter is required for requests to create io1 and io2
+	// volumes; it is not used in requests to create gp2, st1, sc1, or standard
+	// volumes.
Iops *int64 `locationName:"iops" type:"integer"` // The Amazon Resource Name (ARN) of the AWS Key Management Service (AWS KMS) @@ -104635,6 +113976,9 @@ type Volume struct { // key for the volume. KmsKeyId *string `locationName:"kmsKeyId" type:"string"` + // Indicates whether Amazon EBS Multi-Attach is enabled. + MultiAttachEnabled *bool `locationName:"multiAttachEnabled" type:"boolean"` + // The Amazon Resource Name (ARN) of the Outpost. OutpostArn *string `locationName:"outpostArn" type:"string"` @@ -104653,9 +113997,9 @@ type Volume struct { // The ID of the volume. VolumeId *string `locationName:"volumeId" type:"string"` - // The volume type. This can be gp2 for General Purpose SSD, io1 for Provisioned - // IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard - // for Magnetic volumes. + // The volume type. This can be gp2 for General Purpose SSD, io1 or io2 for + // Provisioned IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, + // or standard for Magnetic volumes. VolumeType *string `locationName:"volumeType" type:"string" enum:"VolumeType"` } @@ -104711,6 +114055,12 @@ func (s *Volume) SetKmsKeyId(v string) *Volume { return s } +// SetMultiAttachEnabled sets the MultiAttachEnabled field's value. +func (s *Volume) SetMultiAttachEnabled(v bool) *Volume { + s.MultiAttachEnabled = &v + return s +} + // SetOutpostArn sets the OutpostArn field's value. func (s *Volume) SetOutpostArn(v string) *Volume { s.OutpostArn = &v @@ -104877,7 +114227,7 @@ type VolumeModification struct { // The original IOPS rate of the volume. OriginalIops *int64 `locationName:"originalIops" type:"integer"` - // The original size of the volume. + // The original size of the volume, in GiB. OriginalSize *int64 `locationName:"originalSize" type:"integer"` // The original EBS volume type of the volume. @@ -105038,6 +114388,39 @@ func (s *VolumeStatusAction) SetEventType(v string) *VolumeStatusAction { return s } +// Information about the instances to which the volume is attached. +type VolumeStatusAttachmentStatus struct { + _ struct{} `type:"structure"` + + // The ID of the attached instance. + InstanceId *string `locationName:"instanceId" type:"string"` + + // The maximum IOPS supported by the attached instance. + IoPerformance *string `locationName:"ioPerformance" type:"string"` +} + +// String returns the string representation +func (s VolumeStatusAttachmentStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VolumeStatusAttachmentStatus) GoString() string { + return s.String() +} + +// SetInstanceId sets the InstanceId field's value. +func (s *VolumeStatusAttachmentStatus) SetInstanceId(v string) *VolumeStatusAttachmentStatus { + s.InstanceId = &v + return s +} + +// SetIoPerformance sets the IoPerformance field's value. +func (s *VolumeStatusAttachmentStatus) SetIoPerformance(v string) *VolumeStatusAttachmentStatus { + s.IoPerformance = &v + return s +} + // Describes a volume status. type VolumeStatusDetails struct { _ struct{} `type:"structure"` @@ -105084,6 +114467,9 @@ type VolumeStatusEvent struct { // The type of this event. EventType *string `locationName:"eventType" type:"string"` + // The ID of the instance associated with the event. + InstanceId *string `locationName:"instanceId" type:"string"` + // The latest end time of the event. 
NotAfter *time.Time `locationName:"notAfter" type:"timestamp"` @@ -105119,6 +114505,12 @@ func (s *VolumeStatusEvent) SetEventType(v string) *VolumeStatusEvent { return s } +// SetInstanceId sets the InstanceId field's value. +func (s *VolumeStatusEvent) SetInstanceId(v string) *VolumeStatusEvent { + s.InstanceId = &v + return s +} + // SetNotAfter sets the NotAfter field's value. func (s *VolumeStatusEvent) SetNotAfter(v time.Time) *VolumeStatusEvent { s.NotAfter = &v @@ -105171,6 +114563,9 @@ type VolumeStatusItem struct { // The details of the operation. Actions []*VolumeStatusAction `locationName:"actionsSet" locationNameList:"item" type:"list"` + // Information about the instances to which the volume is attached. + AttachmentStatuses []*VolumeStatusAttachmentStatus `locationName:"attachmentStatuses" locationNameList:"item" type:"list"` + // The Availability Zone of the volume. AvailabilityZone *string `locationName:"availabilityZone" type:"string"` @@ -105203,6 +114598,12 @@ func (s *VolumeStatusItem) SetActions(v []*VolumeStatusAction) *VolumeStatusItem return s } +// SetAttachmentStatuses sets the AttachmentStatuses field's value. +func (s *VolumeStatusItem) SetAttachmentStatuses(v []*VolumeStatusAttachmentStatus) *VolumeStatusItem { + s.AttachmentStatuses = v + return s +} + // SetAvailabilityZone sets the AvailabilityZone field's value. func (s *VolumeStatusItem) SetAvailabilityZone(v string) *VolumeStatusItem { s.AvailabilityZone = &v @@ -105243,8 +114644,7 @@ type Vpc struct { // Information about the IPv4 CIDR blocks associated with the VPC. CidrBlockAssociationSet []*VpcCidrBlockAssociation `locationName:"cidrBlockAssociationSet" locationNameList:"item" type:"list"` - // The ID of the set of DHCP options you've associated with the VPC (or default - // if the default options are associated with the VPC). + // The ID of the set of DHCP options you've associated with the VPC. DhcpOptionsId *string `locationName:"dhcpOptionsId" type:"string"` // The allowed tenancy of instances launched into the VPC. @@ -105750,7 +115150,11 @@ type VpcIpv6CidrBlockAssociation struct { // Information about the state of the CIDR block. Ipv6CidrBlockState *VpcCidrBlockState `locationName:"ipv6CidrBlockState" type:"structure"` - // The name of the location from which we advertise the IPV6 CIDR block. + // The ID of the IPv6 address pool from which the IPv6 CIDR block is allocated. + Ipv6Pool *string `locationName:"ipv6Pool" type:"string"` + + // The name of the unique set of Availability Zones, Local Zones, or Wavelength + // Zones from which AWS advertises IP addresses, for example, us-east-1-wl1-bos-wlz-1. NetworkBorderGroup *string `locationName:"networkBorderGroup" type:"string"` } @@ -105782,6 +115186,12 @@ func (s *VpcIpv6CidrBlockAssociation) SetIpv6CidrBlockState(v *VpcCidrBlockState return s } +// SetIpv6Pool sets the Ipv6Pool field's value. +func (s *VpcIpv6CidrBlockAssociation) SetIpv6Pool(v string) *VpcIpv6CidrBlockAssociation { + s.Ipv6Pool = &v + return s +} + // SetNetworkBorderGroup sets the NetworkBorderGroup field's value. func (s *VpcIpv6CidrBlockAssociation) SetNetworkBorderGroup(v string) *VpcIpv6CidrBlockAssociation { s.NetworkBorderGroup = &v @@ -106150,10 +115560,25 @@ type VpnConnectionOptions struct { // Indicates whether acceleration is enabled for the VPN connection. EnableAcceleration *bool `locationName:"enableAcceleration" type:"boolean"` + // The IPv4 CIDR on the customer gateway (on-premises) side of the VPN connection. 
+ LocalIpv4NetworkCidr *string `locationName:"localIpv4NetworkCidr" type:"string"` + + // The IPv6 CIDR on the customer gateway (on-premises) side of the VPN connection. + LocalIpv6NetworkCidr *string `locationName:"localIpv6NetworkCidr" type:"string"` + + // The IPv4 CIDR on the AWS side of the VPN connection. + RemoteIpv4NetworkCidr *string `locationName:"remoteIpv4NetworkCidr" type:"string"` + + // The IPv6 CIDR on the AWS side of the VPN connection. + RemoteIpv6NetworkCidr *string `locationName:"remoteIpv6NetworkCidr" type:"string"` + // Indicates whether the VPN connection uses static routes only. Static routes // must be used for devices that don't support BGP. StaticRoutesOnly *bool `locationName:"staticRoutesOnly" type:"boolean"` + // Indicates whether the VPN tunnels process IPv4 or IPv6 traffic. + TunnelInsideIpVersion *string `locationName:"tunnelInsideIpVersion" type:"string" enum:"TunnelInsideIpVersion"` + // Indicates the VPN tunnel options. TunnelOptions []*TunnelOption `locationName:"tunnelOptionSet" locationNameList:"item" type:"list"` } @@ -106174,12 +115599,42 @@ func (s *VpnConnectionOptions) SetEnableAcceleration(v bool) *VpnConnectionOptio return s } +// SetLocalIpv4NetworkCidr sets the LocalIpv4NetworkCidr field's value. +func (s *VpnConnectionOptions) SetLocalIpv4NetworkCidr(v string) *VpnConnectionOptions { + s.LocalIpv4NetworkCidr = &v + return s +} + +// SetLocalIpv6NetworkCidr sets the LocalIpv6NetworkCidr field's value. +func (s *VpnConnectionOptions) SetLocalIpv6NetworkCidr(v string) *VpnConnectionOptions { + s.LocalIpv6NetworkCidr = &v + return s +} + +// SetRemoteIpv4NetworkCidr sets the RemoteIpv4NetworkCidr field's value. +func (s *VpnConnectionOptions) SetRemoteIpv4NetworkCidr(v string) *VpnConnectionOptions { + s.RemoteIpv4NetworkCidr = &v + return s +} + +// SetRemoteIpv6NetworkCidr sets the RemoteIpv6NetworkCidr field's value. +func (s *VpnConnectionOptions) SetRemoteIpv6NetworkCidr(v string) *VpnConnectionOptions { + s.RemoteIpv6NetworkCidr = &v + return s +} + // SetStaticRoutesOnly sets the StaticRoutesOnly field's value. func (s *VpnConnectionOptions) SetStaticRoutesOnly(v bool) *VpnConnectionOptions { s.StaticRoutesOnly = &v return s } +// SetTunnelInsideIpVersion sets the TunnelInsideIpVersion field's value. +func (s *VpnConnectionOptions) SetTunnelInsideIpVersion(v string) *VpnConnectionOptions { + s.TunnelInsideIpVersion = &v + return s +} + // SetTunnelOptions sets the TunnelOptions field's value. func (s *VpnConnectionOptions) SetTunnelOptions(v []*TunnelOption) *VpnConnectionOptions { s.TunnelOptions = v @@ -106195,6 +115650,26 @@ type VpnConnectionOptionsSpecification struct { // Default: false EnableAcceleration *bool `type:"boolean"` + // The IPv4 CIDR on the customer gateway (on-premises) side of the VPN connection. + // + // Default: 0.0.0.0/0 + LocalIpv4NetworkCidr *string `type:"string"` + + // The IPv6 CIDR on the customer gateway (on-premises) side of the VPN connection. + // + // Default: ::/0 + LocalIpv6NetworkCidr *string `type:"string"` + + // The IPv4 CIDR on the AWS side of the VPN connection. + // + // Default: 0.0.0.0/0 + RemoteIpv4NetworkCidr *string `type:"string"` + + // The IPv6 CIDR on the AWS side of the VPN connection. + // + // Default: ::/0 + RemoteIpv6NetworkCidr *string `type:"string"` + // Indicate whether the VPN connection uses static routes only. If you are creating // a VPN connection for a device that does not support BGP, you must specify // true. Use CreateVpnConnectionRoute to create a static route. 
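A minimal sketch of how the new connection-level options above compose in a CreateVpnConnection request (illustrative only, not part of the vendored diff): the gateway IDs are placeholders, and the values shown simply restate the documented defaults. The TunnelInsideIpVersion field added in the next hunk defaults to ipv4 when omitted.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	// The gateway IDs below are hypothetical placeholders.
	input := &ec2.CreateVpnConnectionInput{
		CustomerGatewayId: aws.String("cgw-0123456789abcdef0"),
		TransitGatewayId:  aws.String("tgw-0123456789abcdef0"),
		Type:              aws.String("ipsec.1"),
		Options: &ec2.VpnConnectionOptionsSpecification{
			// Restating the documented defaults explicitly.
			LocalIpv4NetworkCidr:  aws.String("0.0.0.0/0"),
			RemoteIpv4NetworkCidr: aws.String("0.0.0.0/0"),
			// Must be true for customer gateway devices that do not
			// support BGP.
			StaticRoutesOnly: aws.Bool(true),
		},
	}
	fmt.Println(input)
}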
@@ -106202,6 +115677,11 @@ type VpnConnectionOptionsSpecification struct {
	// Default: false
	StaticRoutesOnly *bool `locationName:"staticRoutesOnly" type:"boolean"`

+	// Indicate whether the VPN tunnels process IPv4 or IPv6 traffic.
+	//
+	// Default: ipv4
+	TunnelInsideIpVersion *string `type:"string" enum:"TunnelInsideIpVersion"`
+
	// The tunnel options for the VPN connection.
	TunnelOptions []*VpnTunnelOptionsSpecification `type:"list"`
}

@@ -106222,12 +115702,42 @@ func (s *VpnConnectionOptionsSpecification) SetEnableAcceleration(v bool) *VpnCo
	return s
}

+// SetLocalIpv4NetworkCidr sets the LocalIpv4NetworkCidr field's value.
+func (s *VpnConnectionOptionsSpecification) SetLocalIpv4NetworkCidr(v string) *VpnConnectionOptionsSpecification {
+	s.LocalIpv4NetworkCidr = &v
+	return s
+}
+
+// SetLocalIpv6NetworkCidr sets the LocalIpv6NetworkCidr field's value.
+func (s *VpnConnectionOptionsSpecification) SetLocalIpv6NetworkCidr(v string) *VpnConnectionOptionsSpecification {
+	s.LocalIpv6NetworkCidr = &v
+	return s
+}
+
+// SetRemoteIpv4NetworkCidr sets the RemoteIpv4NetworkCidr field's value.
+func (s *VpnConnectionOptionsSpecification) SetRemoteIpv4NetworkCidr(v string) *VpnConnectionOptionsSpecification {
+	s.RemoteIpv4NetworkCidr = &v
+	return s
+}
+
+// SetRemoteIpv6NetworkCidr sets the RemoteIpv6NetworkCidr field's value.
+func (s *VpnConnectionOptionsSpecification) SetRemoteIpv6NetworkCidr(v string) *VpnConnectionOptionsSpecification {
+	s.RemoteIpv6NetworkCidr = &v
+	return s
+}
+
// SetStaticRoutesOnly sets the StaticRoutesOnly field's value.
func (s *VpnConnectionOptionsSpecification) SetStaticRoutesOnly(v bool) *VpnConnectionOptionsSpecification {
	s.StaticRoutesOnly = &v
	return s
}

+// SetTunnelInsideIpVersion sets the TunnelInsideIpVersion field's value.
+func (s *VpnConnectionOptionsSpecification) SetTunnelInsideIpVersion(v string) *VpnConnectionOptionsSpecification {
+	s.TunnelInsideIpVersion = &v
+	return s
+}
+
// SetTunnelOptions sets the TunnelOptions field's value.
func (s *VpnConnectionOptionsSpecification) SetTunnelOptions(v []*VpnTunnelOptionsSpecification) *VpnConnectionOptionsSpecification {
	s.TunnelOptions = v
@@ -106355,10 +115865,18 @@ func (s *VpnStaticRoute) SetState(v string) *VpnStaticRoute {
	return s
}

-// The tunnel options for a VPN connection.
+// The tunnel options for a single VPN tunnel.
 type VpnTunnelOptionsSpecification struct {
	_ struct{} `type:"structure"`

+	// The action to take after a DPD timeout occurs. Specify restart to restart
+	// the IKE initiation. Specify clear to end the IKE session.
+	//
+	// Valid Values: clear | none | restart
+	//
+	// Default: clear
+	DPDTimeoutAction *string `type:"string"`
+
	// The number of seconds after which a DPD timeout occurs.
	//
	// Constraints: A value between 0 and 30.
@@ -106374,19 +115892,19 @@ type VpnTunnelOptionsSpecification struct {
	// One or more Diffie-Hellman group numbers that are permitted for the VPN tunnel
	// for phase 1 IKE negotiations.
	//
-	// Valid values: 2 | 14 | 15 | 16 | 17 | 18 | 22 | 23 | 24
+	// Valid values: 2 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24
	Phase1DHGroupNumbers []*Phase1DHGroupNumbersRequestListValue `locationName:"Phase1DHGroupNumber" locationNameList:"item" type:"list"`

	// One or more encryption algorithms that are permitted for the VPN tunnel for
	// phase 1 IKE negotiations.
	//
-	// Valid values: AES128 | AES256
+	// Valid values: AES128 | AES256 | AES128-GCM-16 | AES256-GCM-16
	Phase1EncryptionAlgorithms []*Phase1EncryptionAlgorithmsRequestListValue `locationName:"Phase1EncryptionAlgorithm" locationNameList:"item" type:"list"`

	// One or more integrity algorithms that are permitted for the VPN tunnel for
	// phase 1 IKE negotiations.
	//
-	// Valid values: SHA1 | SHA2-256
+	// Valid values: SHA1 | SHA2-256 | SHA2-384 | SHA2-512
	Phase1IntegrityAlgorithms []*Phase1IntegrityAlgorithmsRequestListValue `locationName:"Phase1IntegrityAlgorithm" locationNameList:"item" type:"list"`

	// The lifetime for phase 1 of the IKE negotiation, in seconds.
@@ -106399,19 +115917,19 @@ type VpnTunnelOptionsSpecification struct {
	// One or more Diffie-Hellman group numbers that are permitted for the VPN tunnel
	// for phase 2 IKE negotiations.
	//
-	// Valid values: 2 | 5 | 14 | 15 | 16 | 17 | 18 | 22 | 23 | 24
+	// Valid values: 2 | 5 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24
	Phase2DHGroupNumbers []*Phase2DHGroupNumbersRequestListValue `locationName:"Phase2DHGroupNumber" locationNameList:"item" type:"list"`

	// One or more encryption algorithms that are permitted for the VPN tunnel for
	// phase 2 IKE negotiations.
	//
-	// Valid values: AES128 | AES256
+	// Valid values: AES128 | AES256 | AES128-GCM-16 | AES256-GCM-16
	Phase2EncryptionAlgorithms []*Phase2EncryptionAlgorithmsRequestListValue `locationName:"Phase2EncryptionAlgorithm" locationNameList:"item" type:"list"`

	// One or more integrity algorithms that are permitted for the VPN tunnel for
	// phase 2 IKE negotiations.
	//
-	// Valid values: SHA1 | SHA2-256
+	// Valid values: SHA1 | SHA2-256 | SHA2-384 | SHA2-512
	Phase2IntegrityAlgorithms []*Phase2IntegrityAlgorithmsRequestListValue `locationName:"Phase2IntegrityAlgorithm" locationNameList:"item" type:"list"`

	// The lifetime for phase 2 of the IKE negotiation, in seconds.
@@ -106454,7 +115972,16 @@ type VpnTunnelOptionsSpecification struct {
	// Default: 1024
	ReplayWindowSize *int64 `type:"integer"`

-	// The range of inside IP addresses for the tunnel. Any specified CIDR blocks
+	// The action to take when establishing the tunnel for the VPN connection.
+	// By default, your customer gateway device must initiate the IKE negotiation
+	// and bring up the tunnel. Specify start for AWS to initiate the IKE negotiation.
+	//
+	// Valid Values: add | start
+	//
+	// Default: add
+	StartupAction *string `type:"string"`
+
+	// The range of inside IPv4 addresses for the tunnel. Any specified CIDR blocks
	// must be unique across all VPN connections that use the same virtual private
	// gateway.
	//
@@ -106475,6 +116002,12 @@ type VpnTunnelOptionsSpecification struct {
	//
	//    * 169.254.169.252/30
	TunnelInsideCidr *string `type:"string"`
+
+	// The range of inside IPv6 addresses for the tunnel. Any specified CIDR blocks
+	// must be unique across all VPN connections that use the same transit gateway.
+	//
+	// Constraints: A size /126 CIDR block from the local fd00::/8 range.
+	TunnelInsideIpv6Cidr *string `type:"string"`
}

// String returns the string representation
func (s VpnTunnelOptionsSpecification) String() string {
	return awsutil.Prettify(s)
}

@@ -106487,6 +116020,12 @@ func (s VpnTunnelOptionsSpecification) GoString() string {
	return s.String()
}

+// SetDPDTimeoutAction sets the DPDTimeoutAction field's value.
+func (s *VpnTunnelOptionsSpecification) SetDPDTimeoutAction(v string) *VpnTunnelOptionsSpecification {
+	s.DPDTimeoutAction = &v
+	return s
+}
+
// SetDPDTimeoutSeconds sets the DPDTimeoutSeconds field's value.
func (s *VpnTunnelOptionsSpecification) SetDPDTimeoutSeconds(v int64) *VpnTunnelOptionsSpecification { s.DPDTimeoutSeconds = &v @@ -106571,16 +116110,28 @@ func (s *VpnTunnelOptionsSpecification) SetReplayWindowSize(v int64) *VpnTunnelO return s } +// SetStartupAction sets the StartupAction field's value. +func (s *VpnTunnelOptionsSpecification) SetStartupAction(v string) *VpnTunnelOptionsSpecification { + s.StartupAction = &v + return s +} + // SetTunnelInsideCidr sets the TunnelInsideCidr field's value. func (s *VpnTunnelOptionsSpecification) SetTunnelInsideCidr(v string) *VpnTunnelOptionsSpecification { s.TunnelInsideCidr = &v return s } +// SetTunnelInsideIpv6Cidr sets the TunnelInsideIpv6Cidr field's value. +func (s *VpnTunnelOptionsSpecification) SetTunnelInsideIpv6Cidr(v string) *VpnTunnelOptionsSpecification { + s.TunnelInsideIpv6Cidr = &v + return s +} + type WithdrawByoipCidrInput struct { _ struct{} `type:"structure"` - // The public IPv4 address range, in CIDR notation. + // The address range, in CIDR notation. // // Cidr is a required field Cidr *string `type:"string" required:"true"` @@ -106658,6 +116209,14 @@ const ( AccountAttributeNameDefaultVpc = "default-vpc" ) +// AccountAttributeName_Values returns all elements of the AccountAttributeName enum +func AccountAttributeName_Values() []string { + return []string{ + AccountAttributeNameSupportedPlatforms, + AccountAttributeNameDefaultVpc, + } +} + const ( // ActivityStatusError is a ActivityStatus enum value ActivityStatusError = "error" @@ -106672,6 +116231,16 @@ const ( ActivityStatusFulfilled = "fulfilled" ) +// ActivityStatus_Values returns all elements of the ActivityStatus enum +func ActivityStatus_Values() []string { + return []string{ + ActivityStatusError, + ActivityStatusPendingFulfillment, + ActivityStatusPendingTermination, + ActivityStatusFulfilled, + } +} + const ( // AffinityDefault is a Affinity enum value AffinityDefault = "default" @@ -106680,6 +116249,14 @@ const ( AffinityHost = "host" ) +// Affinity_Values returns all elements of the Affinity enum +func Affinity_Values() []string { + return []string{ + AffinityDefault, + AffinityHost, + } +} + const ( // AllocationStateAvailable is a AllocationState enum value AllocationStateAvailable = "available" @@ -106700,6 +116277,18 @@ const ( AllocationStatePending = "pending" ) +// AllocationState_Values returns all elements of the AllocationState enum +func AllocationState_Values() []string { + return []string{ + AllocationStateAvailable, + AllocationStateUnderAssessment, + AllocationStatePermanentFailure, + AllocationStateReleased, + AllocationStateReleasedPermanentFailure, + AllocationStatePending, + } +} + const ( // AllocationStrategyLowestPrice is a AllocationStrategy enum value AllocationStrategyLowestPrice = "lowestPrice" @@ -106711,6 +116300,15 @@ const ( AllocationStrategyCapacityOptimized = "capacityOptimized" ) +// AllocationStrategy_Values returns all elements of the AllocationStrategy enum +func AllocationStrategy_Values() []string { + return []string{ + AllocationStrategyLowestPrice, + AllocationStrategyDiversified, + AllocationStrategyCapacityOptimized, + } +} + const ( // AllowsMultipleInstanceTypesOn is a AllowsMultipleInstanceTypes enum value AllowsMultipleInstanceTypesOn = "on" @@ -106719,6 +116317,30 @@ const ( AllowsMultipleInstanceTypesOff = "off" ) +// AllowsMultipleInstanceTypes_Values returns all elements of the AllowsMultipleInstanceTypes enum +func AllowsMultipleInstanceTypes_Values() []string { + return []string{ + 
AllowsMultipleInstanceTypesOn, + AllowsMultipleInstanceTypesOff, + } +} + +const ( + // ApplianceModeSupportValueEnable is a ApplianceModeSupportValue enum value + ApplianceModeSupportValueEnable = "enable" + + // ApplianceModeSupportValueDisable is a ApplianceModeSupportValue enum value + ApplianceModeSupportValueDisable = "disable" +) + +// ApplianceModeSupportValue_Values returns all elements of the ApplianceModeSupportValue enum +func ApplianceModeSupportValue_Values() []string { + return []string{ + ApplianceModeSupportValueEnable, + ApplianceModeSupportValueDisable, + } +} + const ( // ArchitectureTypeI386 is a ArchitectureType enum value ArchitectureTypeI386 = "i386" @@ -106730,6 +116352,15 @@ const ( ArchitectureTypeArm64 = "arm64" ) +// ArchitectureType_Values returns all elements of the ArchitectureType enum +func ArchitectureType_Values() []string { + return []string{ + ArchitectureTypeI386, + ArchitectureTypeX8664, + ArchitectureTypeArm64, + } +} + const ( // ArchitectureValuesI386 is a ArchitectureValues enum value ArchitectureValuesI386 = "i386" @@ -106741,11 +116372,27 @@ const ( ArchitectureValuesArm64 = "arm64" ) +// ArchitectureValues_Values returns all elements of the ArchitectureValues enum +func ArchitectureValues_Values() []string { + return []string{ + ArchitectureValuesI386, + ArchitectureValuesX8664, + ArchitectureValuesArm64, + } +} + const ( // AssociatedNetworkTypeVpc is a AssociatedNetworkType enum value AssociatedNetworkTypeVpc = "vpc" ) +// AssociatedNetworkType_Values returns all elements of the AssociatedNetworkType enum +func AssociatedNetworkType_Values() []string { + return []string{ + AssociatedNetworkTypeVpc, + } +} + const ( // AssociationStatusCodeAssociating is a AssociationStatusCode enum value AssociationStatusCodeAssociating = "associating" @@ -106763,6 +116410,17 @@ const ( AssociationStatusCodeDisassociated = "disassociated" ) +// AssociationStatusCode_Values returns all elements of the AssociationStatusCode enum +func AssociationStatusCode_Values() []string { + return []string{ + AssociationStatusCodeAssociating, + AssociationStatusCodeAssociated, + AssociationStatusCodeAssociationFailed, + AssociationStatusCodeDisassociating, + AssociationStatusCodeDisassociated, + } +} + const ( // AttachmentStatusAttaching is a AttachmentStatus enum value AttachmentStatusAttaching = "attaching" @@ -106777,6 +116435,16 @@ const ( AttachmentStatusDetached = "detached" ) +// AttachmentStatus_Values returns all elements of the AttachmentStatus enum +func AttachmentStatus_Values() []string { + return []string{ + AttachmentStatusAttaching, + AttachmentStatusAttached, + AttachmentStatusDetaching, + AttachmentStatusDetached, + } +} + const ( // AutoAcceptSharedAttachmentsValueEnable is a AutoAcceptSharedAttachmentsValue enum value AutoAcceptSharedAttachmentsValueEnable = "enable" @@ -106785,6 +116453,14 @@ const ( AutoAcceptSharedAttachmentsValueDisable = "disable" ) +// AutoAcceptSharedAttachmentsValue_Values returns all elements of the AutoAcceptSharedAttachmentsValue enum +func AutoAcceptSharedAttachmentsValue_Values() []string { + return []string{ + AutoAcceptSharedAttachmentsValueEnable, + AutoAcceptSharedAttachmentsValueDisable, + } +} + const ( // AutoPlacementOn is a AutoPlacement enum value AutoPlacementOn = "on" @@ -106793,6 +116469,14 @@ const ( AutoPlacementOff = "off" ) +// AutoPlacement_Values returns all elements of the AutoPlacement enum +func AutoPlacement_Values() []string { + return []string{ + AutoPlacementOn, + AutoPlacementOff, + } +} + const 
( // AvailabilityZoneOptInStatusOptInNotRequired is a AvailabilityZoneOptInStatus enum value AvailabilityZoneOptInStatusOptInNotRequired = "opt-in-not-required" @@ -106804,6 +116488,15 @@ const ( AvailabilityZoneOptInStatusNotOptedIn = "not-opted-in" ) +// AvailabilityZoneOptInStatus_Values returns all elements of the AvailabilityZoneOptInStatus enum +func AvailabilityZoneOptInStatus_Values() []string { + return []string{ + AvailabilityZoneOptInStatusOptInNotRequired, + AvailabilityZoneOptInStatusOptedIn, + AvailabilityZoneOptInStatusNotOptedIn, + } +} + const ( // AvailabilityZoneStateAvailable is a AvailabilityZoneState enum value AvailabilityZoneStateAvailable = "available" @@ -106818,6 +116511,16 @@ const ( AvailabilityZoneStateUnavailable = "unavailable" ) +// AvailabilityZoneState_Values returns all elements of the AvailabilityZoneState enum +func AvailabilityZoneState_Values() []string { + return []string{ + AvailabilityZoneStateAvailable, + AvailabilityZoneStateInformation, + AvailabilityZoneStateImpaired, + AvailabilityZoneStateUnavailable, + } +} + const ( // BatchStateSubmitted is a BatchState enum value BatchStateSubmitted = "submitted" @@ -106841,6 +116544,19 @@ const ( BatchStateModifying = "modifying" ) +// BatchState_Values returns all elements of the BatchState enum +func BatchState_Values() []string { + return []string{ + BatchStateSubmitted, + BatchStateActive, + BatchStateCancelled, + BatchStateFailed, + BatchStateCancelledRunning, + BatchStateCancelledTerminating, + BatchStateModifying, + } +} + const ( // BundleTaskStatePending is a BundleTaskState enum value BundleTaskStatePending = "pending" @@ -106864,6 +116580,19 @@ const ( BundleTaskStateFailed = "failed" ) +// BundleTaskState_Values returns all elements of the BundleTaskState enum +func BundleTaskState_Values() []string { + return []string{ + BundleTaskStatePending, + BundleTaskStateWaitingForShutdown, + BundleTaskStateBundling, + BundleTaskStateStoring, + BundleTaskStateCancelling, + BundleTaskStateComplete, + BundleTaskStateFailed, + } +} + const ( // ByoipCidrStateAdvertised is a ByoipCidrState enum value ByoipCidrStateAdvertised = "advertised" @@ -106885,8 +116614,25 @@ const ( // ByoipCidrStateProvisioned is a ByoipCidrState enum value ByoipCidrStateProvisioned = "provisioned" + + // ByoipCidrStateProvisionedNotPubliclyAdvertisable is a ByoipCidrState enum value + ByoipCidrStateProvisionedNotPubliclyAdvertisable = "provisioned-not-publicly-advertisable" ) +// ByoipCidrState_Values returns all elements of the ByoipCidrState enum +func ByoipCidrState_Values() []string { + return []string{ + ByoipCidrStateAdvertised, + ByoipCidrStateDeprovisioned, + ByoipCidrStateFailedDeprovision, + ByoipCidrStateFailedProvision, + ByoipCidrStatePendingDeprovision, + ByoipCidrStatePendingProvision, + ByoipCidrStateProvisioned, + ByoipCidrStateProvisionedNotPubliclyAdvertisable, + } +} + const ( // CancelBatchErrorCodeFleetRequestIdDoesNotExist is a CancelBatchErrorCode enum value CancelBatchErrorCodeFleetRequestIdDoesNotExist = "fleetRequestIdDoesNotExist" @@ -106901,6 +116647,16 @@ const ( CancelBatchErrorCodeUnexpectedError = "unexpectedError" ) +// CancelBatchErrorCode_Values returns all elements of the CancelBatchErrorCode enum +func CancelBatchErrorCode_Values() []string { + return []string{ + CancelBatchErrorCodeFleetRequestIdDoesNotExist, + CancelBatchErrorCodeFleetRequestIdMalformed, + CancelBatchErrorCodeFleetRequestNotInCancellableState, + CancelBatchErrorCodeUnexpectedError, + } +} + const ( // 
CancelSpotInstanceRequestStateActive is a CancelSpotInstanceRequestState enum value CancelSpotInstanceRequestStateActive = "active" @@ -106918,6 +116674,17 @@ const ( CancelSpotInstanceRequestStateCompleted = "completed" ) +// CancelSpotInstanceRequestState_Values returns all elements of the CancelSpotInstanceRequestState enum +func CancelSpotInstanceRequestState_Values() []string { + return []string{ + CancelSpotInstanceRequestStateActive, + CancelSpotInstanceRequestStateOpen, + CancelSpotInstanceRequestStateClosed, + CancelSpotInstanceRequestStateCancelled, + CancelSpotInstanceRequestStateCompleted, + } +} + const ( // CapacityReservationInstancePlatformLinuxUnix is a CapacityReservationInstancePlatform enum value CapacityReservationInstancePlatformLinuxUnix = "Linux/UNIX" @@ -106953,6 +116720,23 @@ const ( CapacityReservationInstancePlatformLinuxwithSqlserverEnterprise = "Linux with SQL Server Enterprise" ) +// CapacityReservationInstancePlatform_Values returns all elements of the CapacityReservationInstancePlatform enum +func CapacityReservationInstancePlatform_Values() []string { + return []string{ + CapacityReservationInstancePlatformLinuxUnix, + CapacityReservationInstancePlatformRedHatEnterpriseLinux, + CapacityReservationInstancePlatformSuselinux, + CapacityReservationInstancePlatformWindows, + CapacityReservationInstancePlatformWindowswithSqlserver, + CapacityReservationInstancePlatformWindowswithSqlserverEnterprise, + CapacityReservationInstancePlatformWindowswithSqlserverStandard, + CapacityReservationInstancePlatformWindowswithSqlserverWeb, + CapacityReservationInstancePlatformLinuxwithSqlserverStandard, + CapacityReservationInstancePlatformLinuxwithSqlserverWeb, + CapacityReservationInstancePlatformLinuxwithSqlserverEnterprise, + } +} + const ( // CapacityReservationPreferenceOpen is a CapacityReservationPreference enum value CapacityReservationPreferenceOpen = "open" @@ -106961,6 +116745,14 @@ const ( CapacityReservationPreferenceNone = "none" ) +// CapacityReservationPreference_Values returns all elements of the CapacityReservationPreference enum +func CapacityReservationPreference_Values() []string { + return []string{ + CapacityReservationPreferenceOpen, + CapacityReservationPreferenceNone, + } +} + const ( // CapacityReservationStateActive is a CapacityReservationState enum value CapacityReservationStateActive = "active" @@ -106978,6 +116770,17 @@ const ( CapacityReservationStateFailed = "failed" ) +// CapacityReservationState_Values returns all elements of the CapacityReservationState enum +func CapacityReservationState_Values() []string { + return []string{ + CapacityReservationStateActive, + CapacityReservationStateExpired, + CapacityReservationStateCancelled, + CapacityReservationStatePending, + CapacityReservationStateFailed, + } +} + const ( // CapacityReservationTenancyDefault is a CapacityReservationTenancy enum value CapacityReservationTenancyDefault = "default" @@ -106986,6 +116789,38 @@ const ( CapacityReservationTenancyDedicated = "dedicated" ) +// CapacityReservationTenancy_Values returns all elements of the CapacityReservationTenancy enum +func CapacityReservationTenancy_Values() []string { + return []string{ + CapacityReservationTenancyDefault, + CapacityReservationTenancyDedicated, + } +} + +const ( + // CarrierGatewayStatePending is a CarrierGatewayState enum value + CarrierGatewayStatePending = "pending" + + // CarrierGatewayStateAvailable is a CarrierGatewayState enum value + CarrierGatewayStateAvailable = "available" + + // CarrierGatewayStateDeleting 
is a CarrierGatewayState enum value + CarrierGatewayStateDeleting = "deleting" + + // CarrierGatewayStateDeleted is a CarrierGatewayState enum value + CarrierGatewayStateDeleted = "deleted" +) + +// CarrierGatewayState_Values returns all elements of the CarrierGatewayState enum +func CarrierGatewayState_Values() []string { + return []string{ + CarrierGatewayStatePending, + CarrierGatewayStateAvailable, + CarrierGatewayStateDeleting, + CarrierGatewayStateDeleted, + } +} + const ( // ClientCertificateRevocationListStatusCodePending is a ClientCertificateRevocationListStatusCode enum value ClientCertificateRevocationListStatusCodePending = "pending" @@ -106994,14 +116829,34 @@ const ( ClientCertificateRevocationListStatusCodeActive = "active" ) +// ClientCertificateRevocationListStatusCode_Values returns all elements of the ClientCertificateRevocationListStatusCode enum +func ClientCertificateRevocationListStatusCode_Values() []string { + return []string{ + ClientCertificateRevocationListStatusCodePending, + ClientCertificateRevocationListStatusCodeActive, + } +} + const ( // ClientVpnAuthenticationTypeCertificateAuthentication is a ClientVpnAuthenticationType enum value ClientVpnAuthenticationTypeCertificateAuthentication = "certificate-authentication" // ClientVpnAuthenticationTypeDirectoryServiceAuthentication is a ClientVpnAuthenticationType enum value ClientVpnAuthenticationTypeDirectoryServiceAuthentication = "directory-service-authentication" + + // ClientVpnAuthenticationTypeFederatedAuthentication is a ClientVpnAuthenticationType enum value + ClientVpnAuthenticationTypeFederatedAuthentication = "federated-authentication" ) +// ClientVpnAuthenticationType_Values returns all elements of the ClientVpnAuthenticationType enum +func ClientVpnAuthenticationType_Values() []string { + return []string{ + ClientVpnAuthenticationTypeCertificateAuthentication, + ClientVpnAuthenticationTypeDirectoryServiceAuthentication, + ClientVpnAuthenticationTypeFederatedAuthentication, + } +} + const ( // ClientVpnAuthorizationRuleStatusCodeAuthorizing is a ClientVpnAuthorizationRuleStatusCode enum value ClientVpnAuthorizationRuleStatusCodeAuthorizing = "authorizing" @@ -107016,6 +116871,16 @@ const ( ClientVpnAuthorizationRuleStatusCodeRevoking = "revoking" ) +// ClientVpnAuthorizationRuleStatusCode_Values returns all elements of the ClientVpnAuthorizationRuleStatusCode enum +func ClientVpnAuthorizationRuleStatusCode_Values() []string { + return []string{ + ClientVpnAuthorizationRuleStatusCodeAuthorizing, + ClientVpnAuthorizationRuleStatusCodeActive, + ClientVpnAuthorizationRuleStatusCodeFailed, + ClientVpnAuthorizationRuleStatusCodeRevoking, + } +} + const ( // ClientVpnConnectionStatusCodeActive is a ClientVpnConnectionStatusCode enum value ClientVpnConnectionStatusCodeActive = "active" @@ -107030,6 +116895,32 @@ const ( ClientVpnConnectionStatusCodeTerminated = "terminated" ) +// ClientVpnConnectionStatusCode_Values returns all elements of the ClientVpnConnectionStatusCode enum +func ClientVpnConnectionStatusCode_Values() []string { + return []string{ + ClientVpnConnectionStatusCodeActive, + ClientVpnConnectionStatusCodeFailedToTerminate, + ClientVpnConnectionStatusCodeTerminating, + ClientVpnConnectionStatusCodeTerminated, + } +} + +const ( + // ClientVpnEndpointAttributeStatusCodeApplying is a ClientVpnEndpointAttributeStatusCode enum value + ClientVpnEndpointAttributeStatusCodeApplying = "applying" + + // ClientVpnEndpointAttributeStatusCodeApplied is a ClientVpnEndpointAttributeStatusCode enum value + 
ClientVpnEndpointAttributeStatusCodeApplied = "applied" +) + +// ClientVpnEndpointAttributeStatusCode_Values returns all elements of the ClientVpnEndpointAttributeStatusCode enum +func ClientVpnEndpointAttributeStatusCode_Values() []string { + return []string{ + ClientVpnEndpointAttributeStatusCodeApplying, + ClientVpnEndpointAttributeStatusCodeApplied, + } +} + const ( // ClientVpnEndpointStatusCodePendingAssociate is a ClientVpnEndpointStatusCode enum value ClientVpnEndpointStatusCodePendingAssociate = "pending-associate" @@ -107044,6 +116935,16 @@ const ( ClientVpnEndpointStatusCodeDeleted = "deleted" ) +// ClientVpnEndpointStatusCode_Values returns all elements of the ClientVpnEndpointStatusCode enum +func ClientVpnEndpointStatusCode_Values() []string { + return []string{ + ClientVpnEndpointStatusCodePendingAssociate, + ClientVpnEndpointStatusCodeAvailable, + ClientVpnEndpointStatusCodeDeleting, + ClientVpnEndpointStatusCodeDeleted, + } +} + const ( // ClientVpnRouteStatusCodeCreating is a ClientVpnRouteStatusCode enum value ClientVpnRouteStatusCodeCreating = "creating" @@ -107058,6 +116959,16 @@ const ( ClientVpnRouteStatusCodeDeleting = "deleting" ) +// ClientVpnRouteStatusCode_Values returns all elements of the ClientVpnRouteStatusCode enum +func ClientVpnRouteStatusCode_Values() []string { + return []string{ + ClientVpnRouteStatusCodeCreating, + ClientVpnRouteStatusCodeActive, + ClientVpnRouteStatusCodeFailed, + ClientVpnRouteStatusCodeDeleting, + } +} + const ( // ConnectionNotificationStateEnabled is a ConnectionNotificationState enum value ConnectionNotificationStateEnabled = "Enabled" @@ -107066,16 +116977,38 @@ const ( ConnectionNotificationStateDisabled = "Disabled" ) +// ConnectionNotificationState_Values returns all elements of the ConnectionNotificationState enum +func ConnectionNotificationState_Values() []string { + return []string{ + ConnectionNotificationStateEnabled, + ConnectionNotificationStateDisabled, + } +} + const ( // ConnectionNotificationTypeTopic is a ConnectionNotificationType enum value ConnectionNotificationTypeTopic = "Topic" ) +// ConnectionNotificationType_Values returns all elements of the ConnectionNotificationType enum +func ConnectionNotificationType_Values() []string { + return []string{ + ConnectionNotificationTypeTopic, + } +} + const ( // ContainerFormatOva is a ContainerFormat enum value ContainerFormatOva = "ova" ) +// ContainerFormat_Values returns all elements of the ContainerFormat enum +func ContainerFormat_Values() []string { + return []string{ + ContainerFormatOva, + } +} + const ( // ConversionTaskStateActive is a ConversionTaskState enum value ConversionTaskStateActive = "active" @@ -107090,16 +117023,40 @@ const ( ConversionTaskStateCompleted = "completed" ) +// ConversionTaskState_Values returns all elements of the ConversionTaskState enum +func ConversionTaskState_Values() []string { + return []string{ + ConversionTaskStateActive, + ConversionTaskStateCancelling, + ConversionTaskStateCancelled, + ConversionTaskStateCompleted, + } +} + const ( // CopyTagsFromSourceVolume is a CopyTagsFromSource enum value CopyTagsFromSourceVolume = "volume" ) +// CopyTagsFromSource_Values returns all elements of the CopyTagsFromSource enum +func CopyTagsFromSource_Values() []string { + return []string{ + CopyTagsFromSourceVolume, + } +} + const ( // CurrencyCodeValuesUsd is a CurrencyCodeValues enum value CurrencyCodeValuesUsd = "USD" ) +// CurrencyCodeValues_Values returns all elements of the CurrencyCodeValues enum +func 
CurrencyCodeValues_Values() []string { + return []string{ + CurrencyCodeValuesUsd, + } +} + const ( // DatafeedSubscriptionStateActive is a DatafeedSubscriptionState enum value DatafeedSubscriptionStateActive = "Active" @@ -107108,6 +117065,14 @@ const ( DatafeedSubscriptionStateInactive = "Inactive" ) +// DatafeedSubscriptionState_Values returns all elements of the DatafeedSubscriptionState enum +func DatafeedSubscriptionState_Values() []string { + return []string{ + DatafeedSubscriptionStateActive, + DatafeedSubscriptionStateInactive, + } +} + const ( // DefaultRouteTableAssociationValueEnable is a DefaultRouteTableAssociationValue enum value DefaultRouteTableAssociationValueEnable = "enable" @@ -107116,6 +117081,14 @@ const ( DefaultRouteTableAssociationValueDisable = "disable" ) +// DefaultRouteTableAssociationValue_Values returns all elements of the DefaultRouteTableAssociationValue enum +func DefaultRouteTableAssociationValue_Values() []string { + return []string{ + DefaultRouteTableAssociationValueEnable, + DefaultRouteTableAssociationValueDisable, + } +} + const ( // DefaultRouteTablePropagationValueEnable is a DefaultRouteTablePropagationValue enum value DefaultRouteTablePropagationValueEnable = "enable" @@ -107124,6 +117097,14 @@ const ( DefaultRouteTablePropagationValueDisable = "disable" ) +// DefaultRouteTablePropagationValue_Values returns all elements of the DefaultRouteTablePropagationValue enum +func DefaultRouteTablePropagationValue_Values() []string { + return []string{ + DefaultRouteTablePropagationValueEnable, + DefaultRouteTablePropagationValueDisable, + } +} + const ( // DefaultTargetCapacityTypeSpot is a DefaultTargetCapacityType enum value DefaultTargetCapacityTypeSpot = "spot" @@ -107132,6 +117113,14 @@ const ( DefaultTargetCapacityTypeOnDemand = "on-demand" ) +// DefaultTargetCapacityType_Values returns all elements of the DefaultTargetCapacityType enum +func DefaultTargetCapacityType_Values() []string { + return []string{ + DefaultTargetCapacityTypeSpot, + DefaultTargetCapacityTypeOnDemand, + } +} + const ( // DeleteFleetErrorCodeFleetIdDoesNotExist is a DeleteFleetErrorCode enum value DeleteFleetErrorCodeFleetIdDoesNotExist = "fleetIdDoesNotExist" @@ -107146,6 +117135,16 @@ const ( DeleteFleetErrorCodeUnexpectedError = "unexpectedError" ) +// DeleteFleetErrorCode_Values returns all elements of the DeleteFleetErrorCode enum +func DeleteFleetErrorCode_Values() []string { + return []string{ + DeleteFleetErrorCodeFleetIdDoesNotExist, + DeleteFleetErrorCodeFleetIdMalformed, + DeleteFleetErrorCodeFleetNotInDeletableState, + DeleteFleetErrorCodeUnexpectedError, + } +} + const ( // DeleteQueuedReservedInstancesErrorCodeReservedInstancesIdInvalid is a DeleteQueuedReservedInstancesErrorCode enum value DeleteQueuedReservedInstancesErrorCodeReservedInstancesIdInvalid = "reserved-instances-id-invalid" @@ -107157,6 +117156,15 @@ const ( DeleteQueuedReservedInstancesErrorCodeUnexpectedError = "unexpected-error" ) +// DeleteQueuedReservedInstancesErrorCode_Values returns all elements of the DeleteQueuedReservedInstancesErrorCode enum +func DeleteQueuedReservedInstancesErrorCode_Values() []string { + return []string{ + DeleteQueuedReservedInstancesErrorCodeReservedInstancesIdInvalid, + DeleteQueuedReservedInstancesErrorCodeReservedInstancesNotInQueuedState, + DeleteQueuedReservedInstancesErrorCodeUnexpectedError, + } +} + const ( // DeviceTypeEbs is a DeviceType enum value DeviceTypeEbs = "ebs" @@ -107165,6 +117173,14 @@ const ( DeviceTypeInstanceStore = "instance-store" ) +// 
DeviceType_Values returns all elements of the DeviceType enum +func DeviceType_Values() []string { + return []string{ + DeviceTypeEbs, + DeviceTypeInstanceStore, + } +} + const ( // DiskImageFormatVmdk is a DiskImageFormat enum value DiskImageFormatVmdk = "VMDK" @@ -107176,6 +117192,15 @@ const ( DiskImageFormatVhd = "VHD" ) +// DiskImageFormat_Values returns all elements of the DiskImageFormat enum +func DiskImageFormat_Values() []string { + return []string{ + DiskImageFormatVmdk, + DiskImageFormatRaw, + DiskImageFormatVhd, + } +} + const ( // DiskTypeHdd is a DiskType enum value DiskTypeHdd = "hdd" @@ -107184,6 +117209,14 @@ const ( DiskTypeSsd = "ssd" ) +// DiskType_Values returns all elements of the DiskType enum +func DiskType_Values() []string { + return []string{ + DiskTypeHdd, + DiskTypeSsd, + } +} + const ( // DnsNameStatePendingVerification is a DnsNameState enum value DnsNameStatePendingVerification = "pendingVerification" @@ -107195,6 +117228,15 @@ const ( DnsNameStateFailed = "failed" ) +// DnsNameState_Values returns all elements of the DnsNameState enum +func DnsNameState_Values() []string { + return []string{ + DnsNameStatePendingVerification, + DnsNameStateVerified, + DnsNameStateFailed, + } +} + const ( // DnsSupportValueEnable is a DnsSupportValue enum value DnsSupportValueEnable = "enable" @@ -107203,6 +117245,14 @@ const ( DnsSupportValueDisable = "disable" ) +// DnsSupportValue_Values returns all elements of the DnsSupportValue enum +func DnsSupportValue_Values() []string { + return []string{ + DnsSupportValueEnable, + DnsSupportValueDisable, + } +} + const ( // DomainTypeVpc is a DomainType enum value DomainTypeVpc = "vpc" @@ -107211,6 +117261,14 @@ const ( DomainTypeStandard = "standard" ) +// DomainType_Values returns all elements of the DomainType enum +func DomainType_Values() []string { + return []string{ + DomainTypeVpc, + DomainTypeStandard, + } +} + const ( // EbsEncryptionSupportUnsupported is a EbsEncryptionSupport enum value EbsEncryptionSupportUnsupported = "unsupported" @@ -107219,6 +117277,34 @@ const ( EbsEncryptionSupportSupported = "supported" ) +// EbsEncryptionSupport_Values returns all elements of the EbsEncryptionSupport enum +func EbsEncryptionSupport_Values() []string { + return []string{ + EbsEncryptionSupportUnsupported, + EbsEncryptionSupportSupported, + } +} + +const ( + // EbsNvmeSupportUnsupported is a EbsNvmeSupport enum value + EbsNvmeSupportUnsupported = "unsupported" + + // EbsNvmeSupportSupported is a EbsNvmeSupport enum value + EbsNvmeSupportSupported = "supported" + + // EbsNvmeSupportRequired is a EbsNvmeSupport enum value + EbsNvmeSupportRequired = "required" +) + +// EbsNvmeSupport_Values returns all elements of the EbsNvmeSupport enum +func EbsNvmeSupport_Values() []string { + return []string{ + EbsNvmeSupportUnsupported, + EbsNvmeSupportSupported, + EbsNvmeSupportRequired, + } +} + const ( // EbsOptimizedSupportUnsupported is a EbsOptimizedSupport enum value EbsOptimizedSupportUnsupported = "unsupported" @@ -107230,11 +117316,27 @@ const ( EbsOptimizedSupportDefault = "default" ) +// EbsOptimizedSupport_Values returns all elements of the EbsOptimizedSupport enum +func EbsOptimizedSupport_Values() []string { + return []string{ + EbsOptimizedSupportUnsupported, + EbsOptimizedSupportSupported, + EbsOptimizedSupportDefault, + } +} + const ( // ElasticGpuStateAttached is a ElasticGpuState enum value ElasticGpuStateAttached = "ATTACHED" ) +// ElasticGpuState_Values returns all elements of the ElasticGpuState enum +func 
ElasticGpuState_Values() []string { + return []string{ + ElasticGpuStateAttached, + } +} + const ( // ElasticGpuStatusOk is a ElasticGpuStatus enum value ElasticGpuStatusOk = "OK" @@ -107243,6 +117345,14 @@ const ( ElasticGpuStatusImpaired = "IMPAIRED" ) +// ElasticGpuStatus_Values returns all elements of the ElasticGpuStatus enum +func ElasticGpuStatus_Values() []string { + return []string{ + ElasticGpuStatusOk, + ElasticGpuStatusImpaired, + } +} + const ( // EnaSupportUnsupported is a EnaSupport enum value EnaSupportUnsupported = "unsupported" @@ -107254,6 +117364,15 @@ const ( EnaSupportRequired = "required" ) +// EnaSupport_Values returns all elements of the EnaSupport enum +func EnaSupport_Values() []string { + return []string{ + EnaSupportUnsupported, + EnaSupportSupported, + EnaSupportRequired, + } +} + const ( // EndDateTypeUnlimited is a EndDateType enum value EndDateTypeUnlimited = "unlimited" @@ -107262,6 +117381,34 @@ const ( EndDateTypeLimited = "limited" ) +// EndDateType_Values returns all elements of the EndDateType enum +func EndDateType_Values() []string { + return []string{ + EndDateTypeUnlimited, + EndDateTypeLimited, + } +} + +const ( + // EphemeralNvmeSupportUnsupported is a EphemeralNvmeSupport enum value + EphemeralNvmeSupportUnsupported = "unsupported" + + // EphemeralNvmeSupportSupported is a EphemeralNvmeSupport enum value + EphemeralNvmeSupportSupported = "supported" + + // EphemeralNvmeSupportRequired is a EphemeralNvmeSupport enum value + EphemeralNvmeSupportRequired = "required" +) + +// EphemeralNvmeSupport_Values returns all elements of the EphemeralNvmeSupport enum +func EphemeralNvmeSupport_Values() []string { + return []string{ + EphemeralNvmeSupportUnsupported, + EphemeralNvmeSupportSupported, + EphemeralNvmeSupportRequired, + } +} + const ( // EventCodeInstanceReboot is a EventCode enum value EventCodeInstanceReboot = "instance-reboot" @@ -107279,6 +117426,17 @@ const ( EventCodeInstanceStop = "instance-stop" ) +// EventCode_Values returns all elements of the EventCode enum +func EventCode_Values() []string { + return []string{ + EventCodeInstanceReboot, + EventCodeSystemReboot, + EventCodeSystemMaintenance, + EventCodeInstanceRetirement, + EventCodeInstanceStop, + } +} + const ( // EventTypeInstanceChange is a EventType enum value EventTypeInstanceChange = "instanceChange" @@ -107293,6 +117451,16 @@ const ( EventTypeInformation = "information" ) +// EventType_Values returns all elements of the EventType enum +func EventType_Values() []string { + return []string{ + EventTypeInstanceChange, + EventTypeFleetRequestChange, + EventTypeError, + EventTypeInformation, + } +} + const ( // ExcessCapacityTerminationPolicyNoTermination is a ExcessCapacityTerminationPolicy enum value ExcessCapacityTerminationPolicyNoTermination = "noTermination" @@ -107301,6 +117469,14 @@ const ( ExcessCapacityTerminationPolicyDefault = "default" ) +// ExcessCapacityTerminationPolicy_Values returns all elements of the ExcessCapacityTerminationPolicy enum +func ExcessCapacityTerminationPolicy_Values() []string { + return []string{ + ExcessCapacityTerminationPolicyNoTermination, + ExcessCapacityTerminationPolicyDefault, + } +} + const ( // ExportEnvironmentCitrix is a ExportEnvironment enum value ExportEnvironmentCitrix = "citrix" @@ -107312,6 +117488,15 @@ const ( ExportEnvironmentMicrosoft = "microsoft" ) +// ExportEnvironment_Values returns all elements of the ExportEnvironment enum +func ExportEnvironment_Values() []string { + return []string{ + ExportEnvironmentCitrix, + 
ExportEnvironmentVmware, + ExportEnvironmentMicrosoft, + } +} + const ( // ExportTaskStateActive is a ExportTaskState enum value ExportTaskStateActive = "active" @@ -107326,6 +117511,16 @@ const ( ExportTaskStateCompleted = "completed" ) +// ExportTaskState_Values returns all elements of the ExportTaskState enum +func ExportTaskState_Values() []string { + return []string{ + ExportTaskStateActive, + ExportTaskStateCancelling, + ExportTaskStateCancelled, + ExportTaskStateCompleted, + } +} + const ( // FastSnapshotRestoreStateCodeEnabling is a FastSnapshotRestoreStateCode enum value FastSnapshotRestoreStateCodeEnabling = "enabling" @@ -107343,6 +117538,17 @@ const ( FastSnapshotRestoreStateCodeDisabled = "disabled" ) +// FastSnapshotRestoreStateCode_Values returns all elements of the FastSnapshotRestoreStateCode enum +func FastSnapshotRestoreStateCode_Values() []string { + return []string{ + FastSnapshotRestoreStateCodeEnabling, + FastSnapshotRestoreStateCodeOptimizing, + FastSnapshotRestoreStateCodeEnabled, + FastSnapshotRestoreStateCodeDisabling, + FastSnapshotRestoreStateCodeDisabled, + } +} + const ( // FleetActivityStatusError is a FleetActivityStatus enum value FleetActivityStatusError = "error" @@ -107357,11 +117563,28 @@ const ( FleetActivityStatusFulfilled = "fulfilled" ) +// FleetActivityStatus_Values returns all elements of the FleetActivityStatus enum +func FleetActivityStatus_Values() []string { + return []string{ + FleetActivityStatusError, + FleetActivityStatusPendingFulfillment, + FleetActivityStatusPendingTermination, + FleetActivityStatusFulfilled, + } +} + const ( // FleetCapacityReservationUsageStrategyUseCapacityReservationsFirst is a FleetCapacityReservationUsageStrategy enum value FleetCapacityReservationUsageStrategyUseCapacityReservationsFirst = "use-capacity-reservations-first" ) +// FleetCapacityReservationUsageStrategy_Values returns all elements of the FleetCapacityReservationUsageStrategy enum +func FleetCapacityReservationUsageStrategy_Values() []string { + return []string{ + FleetCapacityReservationUsageStrategyUseCapacityReservationsFirst, + } +} + const ( // FleetEventTypeInstanceChange is a FleetEventType enum value FleetEventTypeInstanceChange = "instance-change" @@ -107373,6 +117596,15 @@ const ( FleetEventTypeServiceError = "service-error" ) +// FleetEventType_Values returns all elements of the FleetEventType enum +func FleetEventType_Values() []string { + return []string{ + FleetEventTypeInstanceChange, + FleetEventTypeFleetChange, + FleetEventTypeServiceError, + } +} + const ( // FleetExcessCapacityTerminationPolicyNoTermination is a FleetExcessCapacityTerminationPolicy enum value FleetExcessCapacityTerminationPolicyNoTermination = "no-termination" @@ -107381,6 +117613,14 @@ const ( FleetExcessCapacityTerminationPolicyTermination = "termination" ) +// FleetExcessCapacityTerminationPolicy_Values returns all elements of the FleetExcessCapacityTerminationPolicy enum +func FleetExcessCapacityTerminationPolicy_Values() []string { + return []string{ + FleetExcessCapacityTerminationPolicyNoTermination, + FleetExcessCapacityTerminationPolicyTermination, + } +} + const ( // FleetOnDemandAllocationStrategyLowestPrice is a FleetOnDemandAllocationStrategy enum value FleetOnDemandAllocationStrategyLowestPrice = "lowest-price" @@ -107389,6 +117629,26 @@ const ( FleetOnDemandAllocationStrategyPrioritized = "prioritized" ) +// FleetOnDemandAllocationStrategy_Values returns all elements of the FleetOnDemandAllocationStrategy enum +func 
FleetOnDemandAllocationStrategy_Values() []string { + return []string{ + FleetOnDemandAllocationStrategyLowestPrice, + FleetOnDemandAllocationStrategyPrioritized, + } +} + +const ( + // FleetReplacementStrategyLaunch is a FleetReplacementStrategy enum value + FleetReplacementStrategyLaunch = "launch" +) + +// FleetReplacementStrategy_Values returns all elements of the FleetReplacementStrategy enum +func FleetReplacementStrategy_Values() []string { + return []string{ + FleetReplacementStrategyLaunch, + } +} + const ( // FleetStateCodeSubmitted is a FleetStateCode enum value FleetStateCodeSubmitted = "submitted" @@ -107412,6 +117672,19 @@ const ( FleetStateCodeModifying = "modifying" ) +// FleetStateCode_Values returns all elements of the FleetStateCode enum +func FleetStateCode_Values() []string { + return []string{ + FleetStateCodeSubmitted, + FleetStateCodeActive, + FleetStateCodeDeleted, + FleetStateCodeFailed, + FleetStateCodeDeletedRunning, + FleetStateCodeDeletedTerminating, + FleetStateCodeModifying, + } +} + const ( // FleetTypeRequest is a FleetType enum value FleetTypeRequest = "request" @@ -107423,6 +117696,15 @@ const ( FleetTypeInstant = "instant" ) +// FleetType_Values returns all elements of the FleetType enum +func FleetType_Values() []string { + return []string{ + FleetTypeRequest, + FleetTypeMaintain, + FleetTypeInstant, + } +} + const ( // FlowLogsResourceTypeVpc is a FlowLogsResourceType enum value FlowLogsResourceTypeVpc = "VPC" @@ -107434,6 +117716,15 @@ const ( FlowLogsResourceTypeNetworkInterface = "NetworkInterface" ) +// FlowLogsResourceType_Values returns all elements of the FlowLogsResourceType enum +func FlowLogsResourceType_Values() []string { + return []string{ + FlowLogsResourceTypeVpc, + FlowLogsResourceTypeSubnet, + FlowLogsResourceTypeNetworkInterface, + } +} + const ( // FpgaImageAttributeNameDescription is a FpgaImageAttributeName enum value FpgaImageAttributeNameDescription = "description" @@ -107448,6 +117739,16 @@ const ( FpgaImageAttributeNameProductCodes = "productCodes" ) +// FpgaImageAttributeName_Values returns all elements of the FpgaImageAttributeName enum +func FpgaImageAttributeName_Values() []string { + return []string{ + FpgaImageAttributeNameDescription, + FpgaImageAttributeNameName, + FpgaImageAttributeNameLoadPermission, + FpgaImageAttributeNameProductCodes, + } +} + const ( // FpgaImageStateCodePending is a FpgaImageStateCode enum value FpgaImageStateCodePending = "pending" @@ -107462,11 +117763,28 @@ const ( FpgaImageStateCodeUnavailable = "unavailable" ) +// FpgaImageStateCode_Values returns all elements of the FpgaImageStateCode enum +func FpgaImageStateCode_Values() []string { + return []string{ + FpgaImageStateCodePending, + FpgaImageStateCodeFailed, + FpgaImageStateCodeAvailable, + FpgaImageStateCodeUnavailable, + } +} + const ( // GatewayTypeIpsec1 is a GatewayType enum value GatewayTypeIpsec1 = "ipsec.1" ) +// GatewayType_Values returns all elements of the GatewayType enum +func GatewayType_Values() []string { + return []string{ + GatewayTypeIpsec1, + } +} + const ( // HostRecoveryOn is a HostRecovery enum value HostRecoveryOn = "on" @@ -107475,6 +117793,14 @@ const ( HostRecoveryOff = "off" ) +// HostRecovery_Values returns all elements of the HostRecovery enum +func HostRecovery_Values() []string { + return []string{ + HostRecoveryOn, + HostRecoveryOff, + } +} + const ( // HostTenancyDedicated is a HostTenancy enum value HostTenancyDedicated = "dedicated" @@ -107483,6 +117809,14 @@ const ( HostTenancyHost = "host" ) +// 
HostTenancy_Values returns all elements of the HostTenancy enum +func HostTenancy_Values() []string { + return []string{ + HostTenancyDedicated, + HostTenancyHost, + } +} + const ( // HttpTokensStateOptional is a HttpTokensState enum value HttpTokensStateOptional = "optional" @@ -107491,6 +117825,14 @@ const ( HttpTokensStateRequired = "required" ) +// HttpTokensState_Values returns all elements of the HttpTokensState enum +func HttpTokensState_Values() []string { + return []string{ + HttpTokensStateOptional, + HttpTokensStateRequired, + } +} + const ( // HypervisorTypeOvm is a HypervisorType enum value HypervisorTypeOvm = "ovm" @@ -107499,6 +117841,14 @@ const ( HypervisorTypeXen = "xen" ) +// HypervisorType_Values returns all elements of the HypervisorType enum +func HypervisorType_Values() []string { + return []string{ + HypervisorTypeOvm, + HypervisorTypeXen, + } +} + const ( // IamInstanceProfileAssociationStateAssociating is a IamInstanceProfileAssociationState enum value IamInstanceProfileAssociationStateAssociating = "associating" @@ -107513,6 +117863,16 @@ const ( IamInstanceProfileAssociationStateDisassociated = "disassociated" ) +// IamInstanceProfileAssociationState_Values returns all elements of the IamInstanceProfileAssociationState enum +func IamInstanceProfileAssociationState_Values() []string { + return []string{ + IamInstanceProfileAssociationStateAssociating, + IamInstanceProfileAssociationStateAssociated, + IamInstanceProfileAssociationStateDisassociating, + IamInstanceProfileAssociationStateDisassociated, + } +} + const ( // ImageAttributeNameDescription is a ImageAttributeName enum value ImageAttributeNameDescription = "description" @@ -107536,6 +117896,19 @@ const ( ImageAttributeNameSriovNetSupport = "sriovNetSupport" ) +// ImageAttributeName_Values returns all elements of the ImageAttributeName enum +func ImageAttributeName_Values() []string { + return []string{ + ImageAttributeNameDescription, + ImageAttributeNameKernel, + ImageAttributeNameRamdisk, + ImageAttributeNameLaunchPermission, + ImageAttributeNameProductCodes, + ImageAttributeNameBlockDeviceMapping, + ImageAttributeNameSriovNetSupport, + } +} + const ( // ImageStatePending is a ImageState enum value ImageStatePending = "pending" @@ -107559,6 +117932,19 @@ const ( ImageStateError = "error" ) +// ImageState_Values returns all elements of the ImageState enum +func ImageState_Values() []string { + return []string{ + ImageStatePending, + ImageStateAvailable, + ImageStateInvalid, + ImageStateDeregistered, + ImageStateTransient, + ImageStateFailed, + ImageStateError, + } +} + const ( // ImageTypeValuesMachine is a ImageTypeValues enum value ImageTypeValuesMachine = "machine" @@ -107570,6 +117956,15 @@ const ( ImageTypeValuesRamdisk = "ramdisk" ) +// ImageTypeValues_Values returns all elements of the ImageTypeValues enum +func ImageTypeValues_Values() []string { + return []string{ + ImageTypeValuesMachine, + ImageTypeValuesKernel, + ImageTypeValuesRamdisk, + } +} + const ( // InstanceAttributeNameInstanceType is a InstanceAttributeName enum value InstanceAttributeNameInstanceType = "instanceType" @@ -107612,8 +118007,32 @@ const ( // InstanceAttributeNameEnaSupport is a InstanceAttributeName enum value InstanceAttributeNameEnaSupport = "enaSupport" + + // InstanceAttributeNameEnclaveOptions is a InstanceAttributeName enum value + InstanceAttributeNameEnclaveOptions = "enclaveOptions" ) +// InstanceAttributeName_Values returns all elements of the InstanceAttributeName enum +func InstanceAttributeName_Values() 
[]string { + return []string{ + InstanceAttributeNameInstanceType, + InstanceAttributeNameKernel, + InstanceAttributeNameRamdisk, + InstanceAttributeNameUserData, + InstanceAttributeNameDisableApiTermination, + InstanceAttributeNameInstanceInitiatedShutdownBehavior, + InstanceAttributeNameRootDeviceName, + InstanceAttributeNameBlockDeviceMapping, + InstanceAttributeNameProductCodes, + InstanceAttributeNameSourceDestCheck, + InstanceAttributeNameGroupSet, + InstanceAttributeNameEbsOptimized, + InstanceAttributeNameSriovNetSupport, + InstanceAttributeNameEnaSupport, + InstanceAttributeNameEnclaveOptions, + } +} + const ( // InstanceHealthStatusHealthy is a InstanceHealthStatus enum value InstanceHealthStatusHealthy = "healthy" @@ -107622,6 +118041,14 @@ const ( InstanceHealthStatusUnhealthy = "unhealthy" ) +// InstanceHealthStatus_Values returns all elements of the InstanceHealthStatus enum +func InstanceHealthStatus_Values() []string { + return []string{ + InstanceHealthStatusHealthy, + InstanceHealthStatusUnhealthy, + } +} + const ( // InstanceInterruptionBehaviorHibernate is a InstanceInterruptionBehavior enum value InstanceInterruptionBehaviorHibernate = "hibernate" @@ -107633,6 +118060,15 @@ const ( InstanceInterruptionBehaviorTerminate = "terminate" ) +// InstanceInterruptionBehavior_Values returns all elements of the InstanceInterruptionBehavior enum +func InstanceInterruptionBehavior_Values() []string { + return []string{ + InstanceInterruptionBehaviorHibernate, + InstanceInterruptionBehaviorStop, + InstanceInterruptionBehaviorTerminate, + } +} + const ( // InstanceLifecycleSpot is a InstanceLifecycle enum value InstanceLifecycleSpot = "spot" @@ -107641,6 +118077,14 @@ const ( InstanceLifecycleOnDemand = "on-demand" ) +// InstanceLifecycle_Values returns all elements of the InstanceLifecycle enum +func InstanceLifecycle_Values() []string { + return []string{ + InstanceLifecycleSpot, + InstanceLifecycleOnDemand, + } +} + const ( // InstanceLifecycleTypeSpot is a InstanceLifecycleType enum value InstanceLifecycleTypeSpot = "spot" @@ -107649,6 +118093,14 @@ const ( InstanceLifecycleTypeScheduled = "scheduled" ) +// InstanceLifecycleType_Values returns all elements of the InstanceLifecycleType enum +func InstanceLifecycleType_Values() []string { + return []string{ + InstanceLifecycleTypeSpot, + InstanceLifecycleTypeScheduled, + } +} + const ( // InstanceMatchCriteriaOpen is a InstanceMatchCriteria enum value InstanceMatchCriteriaOpen = "open" @@ -107657,6 +118109,14 @@ const ( InstanceMatchCriteriaTargeted = "targeted" ) +// InstanceMatchCriteria_Values returns all elements of the InstanceMatchCriteria enum +func InstanceMatchCriteria_Values() []string { + return []string{ + InstanceMatchCriteriaOpen, + InstanceMatchCriteriaTargeted, + } +} + const ( // InstanceMetadataEndpointStateDisabled is a InstanceMetadataEndpointState enum value InstanceMetadataEndpointStateDisabled = "disabled" @@ -107665,6 +118125,14 @@ const ( InstanceMetadataEndpointStateEnabled = "enabled" ) +// InstanceMetadataEndpointState_Values returns all elements of the InstanceMetadataEndpointState enum +func InstanceMetadataEndpointState_Values() []string { + return []string{ + InstanceMetadataEndpointStateDisabled, + InstanceMetadataEndpointStateEnabled, + } +} + const ( // InstanceMetadataOptionsStatePending is a InstanceMetadataOptionsState enum value InstanceMetadataOptionsStatePending = "pending" @@ -107673,6 +118141,14 @@ const ( InstanceMetadataOptionsStateApplied = "applied" ) +// 
InstanceMetadataOptionsState_Values returns all elements of the InstanceMetadataOptionsState enum +func InstanceMetadataOptionsState_Values() []string { + return []string{ + InstanceMetadataOptionsStatePending, + InstanceMetadataOptionsStateApplied, + } +} + const ( // InstanceStateNamePending is a InstanceStateName enum value InstanceStateNamePending = "pending" @@ -107693,6 +118169,18 @@ const ( InstanceStateNameStopped = "stopped" ) +// InstanceStateName_Values returns all elements of the InstanceStateName enum +func InstanceStateName_Values() []string { + return []string{ + InstanceStateNamePending, + InstanceStateNameRunning, + InstanceStateNameShuttingDown, + InstanceStateNameTerminated, + InstanceStateNameStopping, + InstanceStateNameStopped, + } +} + const ( // InstanceTypeT1Micro is a InstanceType enum value InstanceTypeT1Micro = "t1.micro" @@ -107760,6 +118248,27 @@ const ( // InstanceTypeT3a2xlarge is a InstanceType enum value InstanceTypeT3a2xlarge = "t3a.2xlarge" + // InstanceTypeT4gNano is a InstanceType enum value + InstanceTypeT4gNano = "t4g.nano" + + // InstanceTypeT4gMicro is a InstanceType enum value + InstanceTypeT4gMicro = "t4g.micro" + + // InstanceTypeT4gSmall is a InstanceType enum value + InstanceTypeT4gSmall = "t4g.small" + + // InstanceTypeT4gMedium is a InstanceType enum value + InstanceTypeT4gMedium = "t4g.medium" + + // InstanceTypeT4gLarge is a InstanceType enum value + InstanceTypeT4gLarge = "t4g.large" + + // InstanceTypeT4gXlarge is a InstanceType enum value + InstanceTypeT4gXlarge = "t4g.xlarge" + + // InstanceTypeT4g2xlarge is a InstanceType enum value + InstanceTypeT4g2xlarge = "t4g.2xlarge" + // InstanceTypeM1Small is a InstanceType enum value InstanceTypeM1Small = "m1.small" @@ -107949,6 +118458,60 @@ const ( // InstanceTypeR5ad24xlarge is a InstanceType enum value InstanceTypeR5ad24xlarge = "r5ad.24xlarge" + // InstanceTypeR6gMetal is a InstanceType enum value + InstanceTypeR6gMetal = "r6g.metal" + + // InstanceTypeR6gMedium is a InstanceType enum value + InstanceTypeR6gMedium = "r6g.medium" + + // InstanceTypeR6gLarge is a InstanceType enum value + InstanceTypeR6gLarge = "r6g.large" + + // InstanceTypeR6gXlarge is a InstanceType enum value + InstanceTypeR6gXlarge = "r6g.xlarge" + + // InstanceTypeR6g2xlarge is a InstanceType enum value + InstanceTypeR6g2xlarge = "r6g.2xlarge" + + // InstanceTypeR6g4xlarge is a InstanceType enum value + InstanceTypeR6g4xlarge = "r6g.4xlarge" + + // InstanceTypeR6g8xlarge is a InstanceType enum value + InstanceTypeR6g8xlarge = "r6g.8xlarge" + + // InstanceTypeR6g12xlarge is a InstanceType enum value + InstanceTypeR6g12xlarge = "r6g.12xlarge" + + // InstanceTypeR6g16xlarge is a InstanceType enum value + InstanceTypeR6g16xlarge = "r6g.16xlarge" + + // InstanceTypeR6gdMetal is a InstanceType enum value + InstanceTypeR6gdMetal = "r6gd.metal" + + // InstanceTypeR6gdMedium is a InstanceType enum value + InstanceTypeR6gdMedium = "r6gd.medium" + + // InstanceTypeR6gdLarge is a InstanceType enum value + InstanceTypeR6gdLarge = "r6gd.large" + + // InstanceTypeR6gdXlarge is a InstanceType enum value + InstanceTypeR6gdXlarge = "r6gd.xlarge" + + // InstanceTypeR6gd2xlarge is a InstanceType enum value + InstanceTypeR6gd2xlarge = "r6gd.2xlarge" + + // InstanceTypeR6gd4xlarge is a InstanceType enum value + InstanceTypeR6gd4xlarge = "r6gd.4xlarge" + + // InstanceTypeR6gd8xlarge is a InstanceType enum value + InstanceTypeR6gd8xlarge = "r6gd.8xlarge" + + // InstanceTypeR6gd12xlarge is a InstanceType enum value + InstanceTypeR6gd12xlarge 
= "r6gd.12xlarge" + + // InstanceTypeR6gd16xlarge is a InstanceType enum value + InstanceTypeR6gd16xlarge = "r6gd.16xlarge" + // InstanceTypeX116xlarge is a InstanceType enum value InstanceTypeX116xlarge = "x1.16xlarge" @@ -108099,6 +118662,54 @@ const ( // InstanceTypeC5Metal is a InstanceType enum value InstanceTypeC5Metal = "c5.metal" + // InstanceTypeC5aLarge is a InstanceType enum value + InstanceTypeC5aLarge = "c5a.large" + + // InstanceTypeC5aXlarge is a InstanceType enum value + InstanceTypeC5aXlarge = "c5a.xlarge" + + // InstanceTypeC5a2xlarge is a InstanceType enum value + InstanceTypeC5a2xlarge = "c5a.2xlarge" + + // InstanceTypeC5a4xlarge is a InstanceType enum value + InstanceTypeC5a4xlarge = "c5a.4xlarge" + + // InstanceTypeC5a8xlarge is a InstanceType enum value + InstanceTypeC5a8xlarge = "c5a.8xlarge" + + // InstanceTypeC5a12xlarge is a InstanceType enum value + InstanceTypeC5a12xlarge = "c5a.12xlarge" + + // InstanceTypeC5a16xlarge is a InstanceType enum value + InstanceTypeC5a16xlarge = "c5a.16xlarge" + + // InstanceTypeC5a24xlarge is a InstanceType enum value + InstanceTypeC5a24xlarge = "c5a.24xlarge" + + // InstanceTypeC5adLarge is a InstanceType enum value + InstanceTypeC5adLarge = "c5ad.large" + + // InstanceTypeC5adXlarge is a InstanceType enum value + InstanceTypeC5adXlarge = "c5ad.xlarge" + + // InstanceTypeC5ad2xlarge is a InstanceType enum value + InstanceTypeC5ad2xlarge = "c5ad.2xlarge" + + // InstanceTypeC5ad4xlarge is a InstanceType enum value + InstanceTypeC5ad4xlarge = "c5ad.4xlarge" + + // InstanceTypeC5ad8xlarge is a InstanceType enum value + InstanceTypeC5ad8xlarge = "c5ad.8xlarge" + + // InstanceTypeC5ad12xlarge is a InstanceType enum value + InstanceTypeC5ad12xlarge = "c5ad.12xlarge" + + // InstanceTypeC5ad16xlarge is a InstanceType enum value + InstanceTypeC5ad16xlarge = "c5ad.16xlarge" + + // InstanceTypeC5ad24xlarge is a InstanceType enum value + InstanceTypeC5ad24xlarge = "c5ad.24xlarge" + // InstanceTypeC5dLarge is a InstanceType enum value InstanceTypeC5dLarge = "c5d.large" @@ -108144,6 +118755,60 @@ const ( // InstanceTypeC5n18xlarge is a InstanceType enum value InstanceTypeC5n18xlarge = "c5n.18xlarge" + // InstanceTypeC6gMetal is a InstanceType enum value + InstanceTypeC6gMetal = "c6g.metal" + + // InstanceTypeC6gMedium is a InstanceType enum value + InstanceTypeC6gMedium = "c6g.medium" + + // InstanceTypeC6gLarge is a InstanceType enum value + InstanceTypeC6gLarge = "c6g.large" + + // InstanceTypeC6gXlarge is a InstanceType enum value + InstanceTypeC6gXlarge = "c6g.xlarge" + + // InstanceTypeC6g2xlarge is a InstanceType enum value + InstanceTypeC6g2xlarge = "c6g.2xlarge" + + // InstanceTypeC6g4xlarge is a InstanceType enum value + InstanceTypeC6g4xlarge = "c6g.4xlarge" + + // InstanceTypeC6g8xlarge is a InstanceType enum value + InstanceTypeC6g8xlarge = "c6g.8xlarge" + + // InstanceTypeC6g12xlarge is a InstanceType enum value + InstanceTypeC6g12xlarge = "c6g.12xlarge" + + // InstanceTypeC6g16xlarge is a InstanceType enum value + InstanceTypeC6g16xlarge = "c6g.16xlarge" + + // InstanceTypeC6gdMetal is a InstanceType enum value + InstanceTypeC6gdMetal = "c6gd.metal" + + // InstanceTypeC6gdMedium is a InstanceType enum value + InstanceTypeC6gdMedium = "c6gd.medium" + + // InstanceTypeC6gdLarge is a InstanceType enum value + InstanceTypeC6gdLarge = "c6gd.large" + + // InstanceTypeC6gdXlarge is a InstanceType enum value + InstanceTypeC6gdXlarge = "c6gd.xlarge" + + // InstanceTypeC6gd2xlarge is a InstanceType enum value + InstanceTypeC6gd2xlarge = 
"c6gd.2xlarge" + + // InstanceTypeC6gd4xlarge is a InstanceType enum value + InstanceTypeC6gd4xlarge = "c6gd.4xlarge" + + // InstanceTypeC6gd8xlarge is a InstanceType enum value + InstanceTypeC6gd8xlarge = "c6gd.8xlarge" + + // InstanceTypeC6gd12xlarge is a InstanceType enum value + InstanceTypeC6gd12xlarge = "c6gd.12xlarge" + + // InstanceTypeC6gd16xlarge is a InstanceType enum value + InstanceTypeC6gd16xlarge = "c6gd.16xlarge" + // InstanceTypeCc14xlarge is a InstanceType enum value InstanceTypeCc14xlarge = "cc1.4xlarge" @@ -108186,6 +118851,9 @@ const ( // InstanceTypeG4dn16xlarge is a InstanceType enum value InstanceTypeG4dn16xlarge = "g4dn.16xlarge" + // InstanceTypeG4dnMetal is a InstanceType enum value + InstanceTypeG4dnMetal = "g4dn.metal" + // InstanceTypeCg14xlarge is a InstanceType enum value InstanceTypeCg14xlarge = "cg1.4xlarge" @@ -108210,6 +118878,9 @@ const ( // InstanceTypeP3dn24xlarge is a InstanceType enum value InstanceTypeP3dn24xlarge = "p3dn.24xlarge" + // InstanceTypeP4d24xlarge is a InstanceType enum value + InstanceTypeP4d24xlarge = "p4d.24xlarge" + // InstanceTypeD2Xlarge is a InstanceType enum value InstanceTypeD2Xlarge = "d2.xlarge" @@ -108506,8 +119177,418 @@ const ( // InstanceTypeInf124xlarge is a InstanceType enum value InstanceTypeInf124xlarge = "inf1.24xlarge" + + // InstanceTypeM6gMetal is a InstanceType enum value + InstanceTypeM6gMetal = "m6g.metal" + + // InstanceTypeM6gMedium is a InstanceType enum value + InstanceTypeM6gMedium = "m6g.medium" + + // InstanceTypeM6gLarge is a InstanceType enum value + InstanceTypeM6gLarge = "m6g.large" + + // InstanceTypeM6gXlarge is a InstanceType enum value + InstanceTypeM6gXlarge = "m6g.xlarge" + + // InstanceTypeM6g2xlarge is a InstanceType enum value + InstanceTypeM6g2xlarge = "m6g.2xlarge" + + // InstanceTypeM6g4xlarge is a InstanceType enum value + InstanceTypeM6g4xlarge = "m6g.4xlarge" + + // InstanceTypeM6g8xlarge is a InstanceType enum value + InstanceTypeM6g8xlarge = "m6g.8xlarge" + + // InstanceTypeM6g12xlarge is a InstanceType enum value + InstanceTypeM6g12xlarge = "m6g.12xlarge" + + // InstanceTypeM6g16xlarge is a InstanceType enum value + InstanceTypeM6g16xlarge = "m6g.16xlarge" + + // InstanceTypeM6gdMetal is a InstanceType enum value + InstanceTypeM6gdMetal = "m6gd.metal" + + // InstanceTypeM6gdMedium is a InstanceType enum value + InstanceTypeM6gdMedium = "m6gd.medium" + + // InstanceTypeM6gdLarge is a InstanceType enum value + InstanceTypeM6gdLarge = "m6gd.large" + + // InstanceTypeM6gdXlarge is a InstanceType enum value + InstanceTypeM6gdXlarge = "m6gd.xlarge" + + // InstanceTypeM6gd2xlarge is a InstanceType enum value + InstanceTypeM6gd2xlarge = "m6gd.2xlarge" + + // InstanceTypeM6gd4xlarge is a InstanceType enum value + InstanceTypeM6gd4xlarge = "m6gd.4xlarge" + + // InstanceTypeM6gd8xlarge is a InstanceType enum value + InstanceTypeM6gd8xlarge = "m6gd.8xlarge" + + // InstanceTypeM6gd12xlarge is a InstanceType enum value + InstanceTypeM6gd12xlarge = "m6gd.12xlarge" + + // InstanceTypeM6gd16xlarge is a InstanceType enum value + InstanceTypeM6gd16xlarge = "m6gd.16xlarge" ) +// InstanceType_Values returns all elements of the InstanceType enum +func InstanceType_Values() []string { + return []string{ + InstanceTypeT1Micro, + InstanceTypeT2Nano, + InstanceTypeT2Micro, + InstanceTypeT2Small, + InstanceTypeT2Medium, + InstanceTypeT2Large, + InstanceTypeT2Xlarge, + InstanceTypeT22xlarge, + InstanceTypeT3Nano, + InstanceTypeT3Micro, + InstanceTypeT3Small, + InstanceTypeT3Medium, + InstanceTypeT3Large, + 
InstanceTypeT3Xlarge, + InstanceTypeT32xlarge, + InstanceTypeT3aNano, + InstanceTypeT3aMicro, + InstanceTypeT3aSmall, + InstanceTypeT3aMedium, + InstanceTypeT3aLarge, + InstanceTypeT3aXlarge, + InstanceTypeT3a2xlarge, + InstanceTypeT4gNano, + InstanceTypeT4gMicro, + InstanceTypeT4gSmall, + InstanceTypeT4gMedium, + InstanceTypeT4gLarge, + InstanceTypeT4gXlarge, + InstanceTypeT4g2xlarge, + InstanceTypeM1Small, + InstanceTypeM1Medium, + InstanceTypeM1Large, + InstanceTypeM1Xlarge, + InstanceTypeM3Medium, + InstanceTypeM3Large, + InstanceTypeM3Xlarge, + InstanceTypeM32xlarge, + InstanceTypeM4Large, + InstanceTypeM4Xlarge, + InstanceTypeM42xlarge, + InstanceTypeM44xlarge, + InstanceTypeM410xlarge, + InstanceTypeM416xlarge, + InstanceTypeM2Xlarge, + InstanceTypeM22xlarge, + InstanceTypeM24xlarge, + InstanceTypeCr18xlarge, + InstanceTypeR3Large, + InstanceTypeR3Xlarge, + InstanceTypeR32xlarge, + InstanceTypeR34xlarge, + InstanceTypeR38xlarge, + InstanceTypeR4Large, + InstanceTypeR4Xlarge, + InstanceTypeR42xlarge, + InstanceTypeR44xlarge, + InstanceTypeR48xlarge, + InstanceTypeR416xlarge, + InstanceTypeR5Large, + InstanceTypeR5Xlarge, + InstanceTypeR52xlarge, + InstanceTypeR54xlarge, + InstanceTypeR58xlarge, + InstanceTypeR512xlarge, + InstanceTypeR516xlarge, + InstanceTypeR524xlarge, + InstanceTypeR5Metal, + InstanceTypeR5aLarge, + InstanceTypeR5aXlarge, + InstanceTypeR5a2xlarge, + InstanceTypeR5a4xlarge, + InstanceTypeR5a8xlarge, + InstanceTypeR5a12xlarge, + InstanceTypeR5a16xlarge, + InstanceTypeR5a24xlarge, + InstanceTypeR5dLarge, + InstanceTypeR5dXlarge, + InstanceTypeR5d2xlarge, + InstanceTypeR5d4xlarge, + InstanceTypeR5d8xlarge, + InstanceTypeR5d12xlarge, + InstanceTypeR5d16xlarge, + InstanceTypeR5d24xlarge, + InstanceTypeR5dMetal, + InstanceTypeR5adLarge, + InstanceTypeR5adXlarge, + InstanceTypeR5ad2xlarge, + InstanceTypeR5ad4xlarge, + InstanceTypeR5ad8xlarge, + InstanceTypeR5ad12xlarge, + InstanceTypeR5ad16xlarge, + InstanceTypeR5ad24xlarge, + InstanceTypeR6gMetal, + InstanceTypeR6gMedium, + InstanceTypeR6gLarge, + InstanceTypeR6gXlarge, + InstanceTypeR6g2xlarge, + InstanceTypeR6g4xlarge, + InstanceTypeR6g8xlarge, + InstanceTypeR6g12xlarge, + InstanceTypeR6g16xlarge, + InstanceTypeR6gdMetal, + InstanceTypeR6gdMedium, + InstanceTypeR6gdLarge, + InstanceTypeR6gdXlarge, + InstanceTypeR6gd2xlarge, + InstanceTypeR6gd4xlarge, + InstanceTypeR6gd8xlarge, + InstanceTypeR6gd12xlarge, + InstanceTypeR6gd16xlarge, + InstanceTypeX116xlarge, + InstanceTypeX132xlarge, + InstanceTypeX1eXlarge, + InstanceTypeX1e2xlarge, + InstanceTypeX1e4xlarge, + InstanceTypeX1e8xlarge, + InstanceTypeX1e16xlarge, + InstanceTypeX1e32xlarge, + InstanceTypeI2Xlarge, + InstanceTypeI22xlarge, + InstanceTypeI24xlarge, + InstanceTypeI28xlarge, + InstanceTypeI3Large, + InstanceTypeI3Xlarge, + InstanceTypeI32xlarge, + InstanceTypeI34xlarge, + InstanceTypeI38xlarge, + InstanceTypeI316xlarge, + InstanceTypeI3Metal, + InstanceTypeI3enLarge, + InstanceTypeI3enXlarge, + InstanceTypeI3en2xlarge, + InstanceTypeI3en3xlarge, + InstanceTypeI3en6xlarge, + InstanceTypeI3en12xlarge, + InstanceTypeI3en24xlarge, + InstanceTypeI3enMetal, + InstanceTypeHi14xlarge, + InstanceTypeHs18xlarge, + InstanceTypeC1Medium, + InstanceTypeC1Xlarge, + InstanceTypeC3Large, + InstanceTypeC3Xlarge, + InstanceTypeC32xlarge, + InstanceTypeC34xlarge, + InstanceTypeC38xlarge, + InstanceTypeC4Large, + InstanceTypeC4Xlarge, + InstanceTypeC42xlarge, + InstanceTypeC44xlarge, + InstanceTypeC48xlarge, + InstanceTypeC5Large, + InstanceTypeC5Xlarge, + InstanceTypeC52xlarge, 
+ InstanceTypeC54xlarge, + InstanceTypeC59xlarge, + InstanceTypeC512xlarge, + InstanceTypeC518xlarge, + InstanceTypeC524xlarge, + InstanceTypeC5Metal, + InstanceTypeC5aLarge, + InstanceTypeC5aXlarge, + InstanceTypeC5a2xlarge, + InstanceTypeC5a4xlarge, + InstanceTypeC5a8xlarge, + InstanceTypeC5a12xlarge, + InstanceTypeC5a16xlarge, + InstanceTypeC5a24xlarge, + InstanceTypeC5adLarge, + InstanceTypeC5adXlarge, + InstanceTypeC5ad2xlarge, + InstanceTypeC5ad4xlarge, + InstanceTypeC5ad8xlarge, + InstanceTypeC5ad12xlarge, + InstanceTypeC5ad16xlarge, + InstanceTypeC5ad24xlarge, + InstanceTypeC5dLarge, + InstanceTypeC5dXlarge, + InstanceTypeC5d2xlarge, + InstanceTypeC5d4xlarge, + InstanceTypeC5d9xlarge, + InstanceTypeC5d12xlarge, + InstanceTypeC5d18xlarge, + InstanceTypeC5d24xlarge, + InstanceTypeC5dMetal, + InstanceTypeC5nLarge, + InstanceTypeC5nXlarge, + InstanceTypeC5n2xlarge, + InstanceTypeC5n4xlarge, + InstanceTypeC5n9xlarge, + InstanceTypeC5n18xlarge, + InstanceTypeC6gMetal, + InstanceTypeC6gMedium, + InstanceTypeC6gLarge, + InstanceTypeC6gXlarge, + InstanceTypeC6g2xlarge, + InstanceTypeC6g4xlarge, + InstanceTypeC6g8xlarge, + InstanceTypeC6g12xlarge, + InstanceTypeC6g16xlarge, + InstanceTypeC6gdMetal, + InstanceTypeC6gdMedium, + InstanceTypeC6gdLarge, + InstanceTypeC6gdXlarge, + InstanceTypeC6gd2xlarge, + InstanceTypeC6gd4xlarge, + InstanceTypeC6gd8xlarge, + InstanceTypeC6gd12xlarge, + InstanceTypeC6gd16xlarge, + InstanceTypeCc14xlarge, + InstanceTypeCc28xlarge, + InstanceTypeG22xlarge, + InstanceTypeG28xlarge, + InstanceTypeG34xlarge, + InstanceTypeG38xlarge, + InstanceTypeG316xlarge, + InstanceTypeG3sXlarge, + InstanceTypeG4dnXlarge, + InstanceTypeG4dn2xlarge, + InstanceTypeG4dn4xlarge, + InstanceTypeG4dn8xlarge, + InstanceTypeG4dn12xlarge, + InstanceTypeG4dn16xlarge, + InstanceTypeG4dnMetal, + InstanceTypeCg14xlarge, + InstanceTypeP2Xlarge, + InstanceTypeP28xlarge, + InstanceTypeP216xlarge, + InstanceTypeP32xlarge, + InstanceTypeP38xlarge, + InstanceTypeP316xlarge, + InstanceTypeP3dn24xlarge, + InstanceTypeP4d24xlarge, + InstanceTypeD2Xlarge, + InstanceTypeD22xlarge, + InstanceTypeD24xlarge, + InstanceTypeD28xlarge, + InstanceTypeF12xlarge, + InstanceTypeF14xlarge, + InstanceTypeF116xlarge, + InstanceTypeM5Large, + InstanceTypeM5Xlarge, + InstanceTypeM52xlarge, + InstanceTypeM54xlarge, + InstanceTypeM58xlarge, + InstanceTypeM512xlarge, + InstanceTypeM516xlarge, + InstanceTypeM524xlarge, + InstanceTypeM5Metal, + InstanceTypeM5aLarge, + InstanceTypeM5aXlarge, + InstanceTypeM5a2xlarge, + InstanceTypeM5a4xlarge, + InstanceTypeM5a8xlarge, + InstanceTypeM5a12xlarge, + InstanceTypeM5a16xlarge, + InstanceTypeM5a24xlarge, + InstanceTypeM5dLarge, + InstanceTypeM5dXlarge, + InstanceTypeM5d2xlarge, + InstanceTypeM5d4xlarge, + InstanceTypeM5d8xlarge, + InstanceTypeM5d12xlarge, + InstanceTypeM5d16xlarge, + InstanceTypeM5d24xlarge, + InstanceTypeM5dMetal, + InstanceTypeM5adLarge, + InstanceTypeM5adXlarge, + InstanceTypeM5ad2xlarge, + InstanceTypeM5ad4xlarge, + InstanceTypeM5ad8xlarge, + InstanceTypeM5ad12xlarge, + InstanceTypeM5ad16xlarge, + InstanceTypeM5ad24xlarge, + InstanceTypeH12xlarge, + InstanceTypeH14xlarge, + InstanceTypeH18xlarge, + InstanceTypeH116xlarge, + InstanceTypeZ1dLarge, + InstanceTypeZ1dXlarge, + InstanceTypeZ1d2xlarge, + InstanceTypeZ1d3xlarge, + InstanceTypeZ1d6xlarge, + InstanceTypeZ1d12xlarge, + InstanceTypeZ1dMetal, + InstanceTypeU6tb1Metal, + InstanceTypeU9tb1Metal, + InstanceTypeU12tb1Metal, + InstanceTypeU18tb1Metal, + InstanceTypeU24tb1Metal, + InstanceTypeA1Medium, + 
InstanceTypeA1Large, + InstanceTypeA1Xlarge, + InstanceTypeA12xlarge, + InstanceTypeA14xlarge, + InstanceTypeA1Metal, + InstanceTypeM5dnLarge, + InstanceTypeM5dnXlarge, + InstanceTypeM5dn2xlarge, + InstanceTypeM5dn4xlarge, + InstanceTypeM5dn8xlarge, + InstanceTypeM5dn12xlarge, + InstanceTypeM5dn16xlarge, + InstanceTypeM5dn24xlarge, + InstanceTypeM5nLarge, + InstanceTypeM5nXlarge, + InstanceTypeM5n2xlarge, + InstanceTypeM5n4xlarge, + InstanceTypeM5n8xlarge, + InstanceTypeM5n12xlarge, + InstanceTypeM5n16xlarge, + InstanceTypeM5n24xlarge, + InstanceTypeR5dnLarge, + InstanceTypeR5dnXlarge, + InstanceTypeR5dn2xlarge, + InstanceTypeR5dn4xlarge, + InstanceTypeR5dn8xlarge, + InstanceTypeR5dn12xlarge, + InstanceTypeR5dn16xlarge, + InstanceTypeR5dn24xlarge, + InstanceTypeR5nLarge, + InstanceTypeR5nXlarge, + InstanceTypeR5n2xlarge, + InstanceTypeR5n4xlarge, + InstanceTypeR5n8xlarge, + InstanceTypeR5n12xlarge, + InstanceTypeR5n16xlarge, + InstanceTypeR5n24xlarge, + InstanceTypeInf1Xlarge, + InstanceTypeInf12xlarge, + InstanceTypeInf16xlarge, + InstanceTypeInf124xlarge, + InstanceTypeM6gMetal, + InstanceTypeM6gMedium, + InstanceTypeM6gLarge, + InstanceTypeM6gXlarge, + InstanceTypeM6g2xlarge, + InstanceTypeM6g4xlarge, + InstanceTypeM6g8xlarge, + InstanceTypeM6g12xlarge, + InstanceTypeM6g16xlarge, + InstanceTypeM6gdMetal, + InstanceTypeM6gdMedium, + InstanceTypeM6gdLarge, + InstanceTypeM6gdXlarge, + InstanceTypeM6gd2xlarge, + InstanceTypeM6gd4xlarge, + InstanceTypeM6gd8xlarge, + InstanceTypeM6gd12xlarge, + InstanceTypeM6gd16xlarge, + } +} + const ( // InstanceTypeHypervisorNitro is a InstanceTypeHypervisor enum value InstanceTypeHypervisorNitro = "nitro" @@ -108516,6 +119597,14 @@ const ( InstanceTypeHypervisorXen = "xen" ) +// InstanceTypeHypervisor_Values returns all elements of the InstanceTypeHypervisor enum +func InstanceTypeHypervisor_Values() []string { + return []string{ + InstanceTypeHypervisorNitro, + InstanceTypeHypervisorXen, + } +} + const ( // InterfacePermissionTypeInstanceAttach is a InterfacePermissionType enum value InterfacePermissionTypeInstanceAttach = "INSTANCE-ATTACH" @@ -108524,6 +119613,14 @@ const ( InterfacePermissionTypeEipAssociate = "EIP-ASSOCIATE" ) +// InterfacePermissionType_Values returns all elements of the InterfacePermissionType enum +func InterfacePermissionType_Values() []string { + return []string{ + InterfacePermissionTypeInstanceAttach, + InterfacePermissionTypeEipAssociate, + } +} + const ( // Ipv6SupportValueEnable is a Ipv6SupportValue enum value Ipv6SupportValueEnable = "enable" @@ -108532,6 +119629,14 @@ const ( Ipv6SupportValueDisable = "disable" ) +// Ipv6SupportValue_Values returns all elements of the Ipv6SupportValue enum +func Ipv6SupportValue_Values() []string { + return []string{ + Ipv6SupportValueEnable, + Ipv6SupportValueDisable, + } +} + const ( // LaunchTemplateErrorCodeLaunchTemplateIdDoesNotExist is a LaunchTemplateErrorCode enum value LaunchTemplateErrorCodeLaunchTemplateIdDoesNotExist = "launchTemplateIdDoesNotExist" @@ -108552,6 +119657,18 @@ const ( LaunchTemplateErrorCodeUnexpectedError = "unexpectedError" ) +// LaunchTemplateErrorCode_Values returns all elements of the LaunchTemplateErrorCode enum +func LaunchTemplateErrorCode_Values() []string { + return []string{ + LaunchTemplateErrorCodeLaunchTemplateIdDoesNotExist, + LaunchTemplateErrorCodeLaunchTemplateIdMalformed, + LaunchTemplateErrorCodeLaunchTemplateNameDoesNotExist, + LaunchTemplateErrorCodeLaunchTemplateNameMalformed, + LaunchTemplateErrorCodeLaunchTemplateVersionDoesNotExist, + 
LaunchTemplateErrorCodeUnexpectedError, + } +} + const ( // LaunchTemplateHttpTokensStateOptional is a LaunchTemplateHttpTokensState enum value LaunchTemplateHttpTokensStateOptional = "optional" @@ -108560,6 +119677,14 @@ const ( LaunchTemplateHttpTokensStateRequired = "required" ) +// LaunchTemplateHttpTokensState_Values returns all elements of the LaunchTemplateHttpTokensState enum +func LaunchTemplateHttpTokensState_Values() []string { + return []string{ + LaunchTemplateHttpTokensStateOptional, + LaunchTemplateHttpTokensStateRequired, + } +} + const ( // LaunchTemplateInstanceMetadataEndpointStateDisabled is a LaunchTemplateInstanceMetadataEndpointState enum value LaunchTemplateInstanceMetadataEndpointStateDisabled = "disabled" @@ -108568,6 +119693,14 @@ const ( LaunchTemplateInstanceMetadataEndpointStateEnabled = "enabled" ) +// LaunchTemplateInstanceMetadataEndpointState_Values returns all elements of the LaunchTemplateInstanceMetadataEndpointState enum +func LaunchTemplateInstanceMetadataEndpointState_Values() []string { + return []string{ + LaunchTemplateInstanceMetadataEndpointStateDisabled, + LaunchTemplateInstanceMetadataEndpointStateEnabled, + } +} + const ( // LaunchTemplateInstanceMetadataOptionsStatePending is a LaunchTemplateInstanceMetadataOptionsState enum value LaunchTemplateInstanceMetadataOptionsStatePending = "pending" @@ -108576,6 +119709,14 @@ const ( LaunchTemplateInstanceMetadataOptionsStateApplied = "applied" ) +// LaunchTemplateInstanceMetadataOptionsState_Values returns all elements of the LaunchTemplateInstanceMetadataOptionsState enum +func LaunchTemplateInstanceMetadataOptionsState_Values() []string { + return []string{ + LaunchTemplateInstanceMetadataOptionsStatePending, + LaunchTemplateInstanceMetadataOptionsStateApplied, + } +} + const ( // ListingStateAvailable is a ListingState enum value ListingStateAvailable = "available" @@ -108590,6 +119731,16 @@ const ( ListingStatePending = "pending" ) +// ListingState_Values returns all elements of the ListingState enum +func ListingState_Values() []string { + return []string{ + ListingStateAvailable, + ListingStateSold, + ListingStateCancelled, + ListingStatePending, + } +} + const ( // ListingStatusActive is a ListingStatus enum value ListingStatusActive = "active" @@ -108604,6 +119755,16 @@ const ( ListingStatusClosed = "closed" ) +// ListingStatus_Values returns all elements of the ListingStatus enum +func ListingStatus_Values() []string { + return []string{ + ListingStatusActive, + ListingStatusPending, + ListingStatusCancelled, + ListingStatusClosed, + } +} + const ( // LocalGatewayRouteStatePending is a LocalGatewayRouteState enum value LocalGatewayRouteStatePending = "pending" @@ -108621,6 +119782,17 @@ const ( LocalGatewayRouteStateDeleted = "deleted" ) +// LocalGatewayRouteState_Values returns all elements of the LocalGatewayRouteState enum +func LocalGatewayRouteState_Values() []string { + return []string{ + LocalGatewayRouteStatePending, + LocalGatewayRouteStateActive, + LocalGatewayRouteStateBlackhole, + LocalGatewayRouteStateDeleting, + LocalGatewayRouteStateDeleted, + } +} + const ( // LocalGatewayRouteTypeStatic is a LocalGatewayRouteType enum value LocalGatewayRouteTypeStatic = "static" @@ -108629,6 +119801,14 @@ const ( LocalGatewayRouteTypePropagated = "propagated" ) +// LocalGatewayRouteType_Values returns all elements of the LocalGatewayRouteType enum +func LocalGatewayRouteType_Values() []string { + return []string{ + LocalGatewayRouteTypeStatic, + LocalGatewayRouteTypePropagated, + } +} + 
const ( // LocationTypeRegion is a LocationType enum value LocationTypeRegion = "region" @@ -108640,6 +119820,15 @@ const ( LocationTypeAvailabilityZoneId = "availability-zone-id" ) +// LocationType_Values returns all elements of the LocationType enum +func LocationType_Values() []string { + return []string{ + LocationTypeRegion, + LocationTypeAvailabilityZone, + LocationTypeAvailabilityZoneId, + } +} + const ( // LogDestinationTypeCloudWatchLogs is a LogDestinationType enum value LogDestinationTypeCloudWatchLogs = "cloud-watch-logs" @@ -108648,11 +119837,26 @@ const ( LogDestinationTypeS3 = "s3" ) +// LogDestinationType_Values returns all elements of the LogDestinationType enum +func LogDestinationType_Values() []string { + return []string{ + LogDestinationTypeCloudWatchLogs, + LogDestinationTypeS3, + } +} + const ( // MarketTypeSpot is a MarketType enum value MarketTypeSpot = "spot" ) +// MarketType_Values returns all elements of the MarketType enum +func MarketType_Values() []string { + return []string{ + MarketTypeSpot, + } +} + const ( // MembershipTypeStatic is a MembershipType enum value MembershipTypeStatic = "static" @@ -108661,6 +119865,30 @@ const ( MembershipTypeIgmp = "igmp" ) +// MembershipType_Values returns all elements of the MembershipType enum +func MembershipType_Values() []string { + return []string{ + MembershipTypeStatic, + MembershipTypeIgmp, + } +} + +const ( + // ModifyAvailabilityZoneOptInStatusOptedIn is a ModifyAvailabilityZoneOptInStatus enum value + ModifyAvailabilityZoneOptInStatusOptedIn = "opted-in" + + // ModifyAvailabilityZoneOptInStatusNotOptedIn is a ModifyAvailabilityZoneOptInStatus enum value + ModifyAvailabilityZoneOptInStatusNotOptedIn = "not-opted-in" +) + +// ModifyAvailabilityZoneOptInStatus_Values returns all elements of the ModifyAvailabilityZoneOptInStatus enum +func ModifyAvailabilityZoneOptInStatus_Values() []string { + return []string{ + ModifyAvailabilityZoneOptInStatusOptedIn, + ModifyAvailabilityZoneOptInStatusNotOptedIn, + } +} + const ( // MonitoringStateDisabled is a MonitoringState enum value MonitoringStateDisabled = "disabled" @@ -108675,6 +119903,16 @@ const ( MonitoringStatePending = "pending" ) +// MonitoringState_Values returns all elements of the MonitoringState enum +func MonitoringState_Values() []string { + return []string{ + MonitoringStateDisabled, + MonitoringStateDisabling, + MonitoringStateEnabled, + MonitoringStatePending, + } +} + const ( // MoveStatusMovingToVpc is a MoveStatus enum value MoveStatusMovingToVpc = "movingToVpc" @@ -108683,6 +119921,14 @@ const ( MoveStatusRestoringToClassic = "restoringToClassic" ) +// MoveStatus_Values returns all elements of the MoveStatus enum +func MoveStatus_Values() []string { + return []string{ + MoveStatusMovingToVpc, + MoveStatusRestoringToClassic, + } +} + const ( // MulticastSupportValueEnable is a MulticastSupportValue enum value MulticastSupportValueEnable = "enable" @@ -108691,6 +119937,14 @@ const ( MulticastSupportValueDisable = "disable" ) +// MulticastSupportValue_Values returns all elements of the MulticastSupportValue enum +func MulticastSupportValue_Values() []string { + return []string{ + MulticastSupportValueEnable, + MulticastSupportValueDisable, + } +} + const ( // NatGatewayStatePending is a NatGatewayState enum value NatGatewayStatePending = "pending" @@ -108708,6 +119962,17 @@ const ( NatGatewayStateDeleted = "deleted" ) +// NatGatewayState_Values returns all elements of the NatGatewayState enum +func NatGatewayState_Values() []string { + return []string{ 
+ NatGatewayStatePending, + NatGatewayStateFailed, + NatGatewayStateAvailable, + NatGatewayStateDeleting, + NatGatewayStateDeleted, + } +} + const ( // NetworkInterfaceAttributeDescription is a NetworkInterfaceAttribute enum value NetworkInterfaceAttributeDescription = "description" @@ -108722,11 +119987,28 @@ const ( NetworkInterfaceAttributeAttachment = "attachment" ) +// NetworkInterfaceAttribute_Values returns all elements of the NetworkInterfaceAttribute enum +func NetworkInterfaceAttribute_Values() []string { + return []string{ + NetworkInterfaceAttributeDescription, + NetworkInterfaceAttributeGroupSet, + NetworkInterfaceAttributeSourceDestCheck, + NetworkInterfaceAttributeAttachment, + } +} + const ( // NetworkInterfaceCreationTypeEfa is a NetworkInterfaceCreationType enum value NetworkInterfaceCreationTypeEfa = "efa" ) +// NetworkInterfaceCreationType_Values returns all elements of the NetworkInterfaceCreationType enum +func NetworkInterfaceCreationType_Values() []string { + return []string{ + NetworkInterfaceCreationTypeEfa, + } +} + const ( // NetworkInterfacePermissionStateCodePending is a NetworkInterfacePermissionStateCode enum value NetworkInterfacePermissionStateCodePending = "pending" @@ -108741,6 +120023,16 @@ const ( NetworkInterfacePermissionStateCodeRevoked = "revoked" ) +// NetworkInterfacePermissionStateCode_Values returns all elements of the NetworkInterfacePermissionStateCode enum +func NetworkInterfacePermissionStateCode_Values() []string { + return []string{ + NetworkInterfacePermissionStateCodePending, + NetworkInterfacePermissionStateCodeGranted, + NetworkInterfacePermissionStateCodeRevoking, + NetworkInterfacePermissionStateCodeRevoked, + } +} + const ( // NetworkInterfaceStatusAvailable is a NetworkInterfaceStatus enum value NetworkInterfaceStatusAvailable = "available" @@ -108758,6 +120050,17 @@ const ( NetworkInterfaceStatusDetaching = "detaching" ) +// NetworkInterfaceStatus_Values returns all elements of the NetworkInterfaceStatus enum +func NetworkInterfaceStatus_Values() []string { + return []string{ + NetworkInterfaceStatusAvailable, + NetworkInterfaceStatusAssociated, + NetworkInterfaceStatusAttaching, + NetworkInterfaceStatusInUse, + NetworkInterfaceStatusDetaching, + } +} + const ( // NetworkInterfaceTypeInterface is a NetworkInterfaceType enum value NetworkInterfaceTypeInterface = "interface" @@ -108769,6 +120072,15 @@ const ( NetworkInterfaceTypeEfa = "efa" ) +// NetworkInterfaceType_Values returns all elements of the NetworkInterfaceType enum +func NetworkInterfaceType_Values() []string { + return []string{ + NetworkInterfaceTypeInterface, + NetworkInterfaceTypeNatGateway, + NetworkInterfaceTypeEfa, + } +} + const ( // OfferingClassTypeStandard is a OfferingClassType enum value OfferingClassTypeStandard = "standard" @@ -108777,6 +120089,14 @@ const ( OfferingClassTypeConvertible = "convertible" ) +// OfferingClassType_Values returns all elements of the OfferingClassType enum +func OfferingClassType_Values() []string { + return []string{ + OfferingClassTypeStandard, + OfferingClassTypeConvertible, + } +} + const ( // OfferingTypeValuesHeavyUtilization is a OfferingTypeValues enum value OfferingTypeValuesHeavyUtilization = "Heavy Utilization" @@ -108797,6 +120117,18 @@ const ( OfferingTypeValuesAllUpfront = "All Upfront" ) +// OfferingTypeValues_Values returns all elements of the OfferingTypeValues enum +func OfferingTypeValues_Values() []string { + return []string{ + OfferingTypeValuesHeavyUtilization, + OfferingTypeValuesMediumUtilization, + 
OfferingTypeValuesLightUtilization, + OfferingTypeValuesNoUpfront, + OfferingTypeValuesPartialUpfront, + OfferingTypeValuesAllUpfront, + } +} + const ( // OnDemandAllocationStrategyLowestPrice is a OnDemandAllocationStrategy enum value OnDemandAllocationStrategyLowestPrice = "lowestPrice" @@ -108805,6 +120137,14 @@ const ( OnDemandAllocationStrategyPrioritized = "prioritized" ) +// OnDemandAllocationStrategy_Values returns all elements of the OnDemandAllocationStrategy enum +func OnDemandAllocationStrategy_Values() []string { + return []string{ + OnDemandAllocationStrategyLowestPrice, + OnDemandAllocationStrategyPrioritized, + } +} + const ( // OperationTypeAdd is a OperationType enum value OperationTypeAdd = "add" @@ -108813,6 +120153,14 @@ const ( OperationTypeRemove = "remove" ) +// OperationType_Values returns all elements of the OperationType enum +func OperationType_Values() []string { + return []string{ + OperationTypeAdd, + OperationTypeRemove, + } +} + const ( // PaymentOptionAllUpfront is a PaymentOption enum value PaymentOptionAllUpfront = "AllUpfront" @@ -108824,11 +120172,27 @@ const ( PaymentOptionNoUpfront = "NoUpfront" ) +// PaymentOption_Values returns all elements of the PaymentOption enum +func PaymentOption_Values() []string { + return []string{ + PaymentOptionAllUpfront, + PaymentOptionPartialUpfront, + PaymentOptionNoUpfront, + } +} + const ( // PermissionGroupAll is a PermissionGroup enum value PermissionGroupAll = "all" ) +// PermissionGroup_Values returns all elements of the PermissionGroup enum +func PermissionGroup_Values() []string { + return []string{ + PermissionGroupAll, + } +} + const ( // PlacementGroupStatePending is a PlacementGroupState enum value PlacementGroupStatePending = "pending" @@ -108843,6 +120207,16 @@ const ( PlacementGroupStateDeleted = "deleted" ) +// PlacementGroupState_Values returns all elements of the PlacementGroupState enum +func PlacementGroupState_Values() []string { + return []string{ + PlacementGroupStatePending, + PlacementGroupStateAvailable, + PlacementGroupStateDeleting, + PlacementGroupStateDeleted, + } +} + const ( // PlacementGroupStrategyCluster is a PlacementGroupStrategy enum value PlacementGroupStrategyCluster = "cluster" @@ -108854,6 +120228,15 @@ const ( PlacementGroupStrategySpread = "spread" ) +// PlacementGroupStrategy_Values returns all elements of the PlacementGroupStrategy enum +func PlacementGroupStrategy_Values() []string { + return []string{ + PlacementGroupStrategyCluster, + PlacementGroupStrategyPartition, + PlacementGroupStrategySpread, + } +} + const ( // PlacementStrategyCluster is a PlacementStrategy enum value PlacementStrategyCluster = "cluster" @@ -108865,11 +120248,83 @@ const ( PlacementStrategyPartition = "partition" ) +// PlacementStrategy_Values returns all elements of the PlacementStrategy enum +func PlacementStrategy_Values() []string { + return []string{ + PlacementStrategyCluster, + PlacementStrategySpread, + PlacementStrategyPartition, + } +} + const ( // PlatformValuesWindows is a PlatformValues enum value PlatformValuesWindows = "Windows" ) +// PlatformValues_Values returns all elements of the PlatformValues enum +func PlatformValues_Values() []string { + return []string{ + PlatformValuesWindows, + } +} + +const ( + // PrefixListStateCreateInProgress is a PrefixListState enum value + PrefixListStateCreateInProgress = "create-in-progress" + + // PrefixListStateCreateComplete is a PrefixListState enum value + PrefixListStateCreateComplete = "create-complete" + + // 
PrefixListStateCreateFailed is a PrefixListState enum value + PrefixListStateCreateFailed = "create-failed" + + // PrefixListStateModifyInProgress is a PrefixListState enum value + PrefixListStateModifyInProgress = "modify-in-progress" + + // PrefixListStateModifyComplete is a PrefixListState enum value + PrefixListStateModifyComplete = "modify-complete" + + // PrefixListStateModifyFailed is a PrefixListState enum value + PrefixListStateModifyFailed = "modify-failed" + + // PrefixListStateRestoreInProgress is a PrefixListState enum value + PrefixListStateRestoreInProgress = "restore-in-progress" + + // PrefixListStateRestoreComplete is a PrefixListState enum value + PrefixListStateRestoreComplete = "restore-complete" + + // PrefixListStateRestoreFailed is a PrefixListState enum value + PrefixListStateRestoreFailed = "restore-failed" + + // PrefixListStateDeleteInProgress is a PrefixListState enum value + PrefixListStateDeleteInProgress = "delete-in-progress" + + // PrefixListStateDeleteComplete is a PrefixListState enum value + PrefixListStateDeleteComplete = "delete-complete" + + // PrefixListStateDeleteFailed is a PrefixListState enum value + PrefixListStateDeleteFailed = "delete-failed" +) + +// PrefixListState_Values returns all elements of the PrefixListState enum +func PrefixListState_Values() []string { + return []string{ + PrefixListStateCreateInProgress, + PrefixListStateCreateComplete, + PrefixListStateCreateFailed, + PrefixListStateModifyInProgress, + PrefixListStateModifyComplete, + PrefixListStateModifyFailed, + PrefixListStateRestoreInProgress, + PrefixListStateRestoreComplete, + PrefixListStateRestoreFailed, + PrefixListStateDeleteInProgress, + PrefixListStateDeleteComplete, + PrefixListStateDeleteFailed, + } +} + const ( // PrincipalTypeAll is a PrincipalType enum value PrincipalTypeAll = "All" @@ -108890,6 +120345,18 @@ const ( PrincipalTypeRole = "Role" ) +// PrincipalType_Values returns all elements of the PrincipalType enum +func PrincipalType_Values() []string { + return []string{ + PrincipalTypeAll, + PrincipalTypeService, + PrincipalTypeOrganizationUnit, + PrincipalTypeAccount, + PrincipalTypeUser, + PrincipalTypeRole, + } +} + const ( // ProductCodeValuesDevpay is a ProductCodeValues enum value ProductCodeValuesDevpay = "devpay" @@ -108898,6 +120365,14 @@ const ( ProductCodeValuesMarketplace = "marketplace" ) +// ProductCodeValues_Values returns all elements of the ProductCodeValues enum +func ProductCodeValues_Values() []string { + return []string{ + ProductCodeValuesDevpay, + ProductCodeValuesMarketplace, + } +} + const ( // RIProductDescriptionLinuxUnix is a RIProductDescription enum value RIProductDescriptionLinuxUnix = "Linux/UNIX" @@ -108912,11 +120387,40 @@ const ( RIProductDescriptionWindowsAmazonVpc = "Windows (Amazon VPC)" ) +// RIProductDescription_Values returns all elements of the RIProductDescription enum +func RIProductDescription_Values() []string { + return []string{ + RIProductDescriptionLinuxUnix, + RIProductDescriptionLinuxUnixamazonVpc, + RIProductDescriptionWindows, + RIProductDescriptionWindowsAmazonVpc, + } +} + const ( // RecurringChargeFrequencyHourly is a RecurringChargeFrequency enum value RecurringChargeFrequencyHourly = "Hourly" ) +// RecurringChargeFrequency_Values returns all elements of the RecurringChargeFrequency enum +func RecurringChargeFrequency_Values() []string { + return []string{ + RecurringChargeFrequencyHourly, + } +} + +const ( + // ReplacementStrategyLaunch is a ReplacementStrategy enum value + ReplacementStrategyLaunch 
= "launch" +) + +// ReplacementStrategy_Values returns all elements of the ReplacementStrategy enum +func ReplacementStrategy_Values() []string { + return []string{ + ReplacementStrategyLaunch, + } +} + const ( // ReportInstanceReasonCodesInstanceStuckInState is a ReportInstanceReasonCodes enum value ReportInstanceReasonCodesInstanceStuckInState = "instance-stuck-in-state" @@ -108946,6 +120450,21 @@ const ( ReportInstanceReasonCodesOther = "other" ) +// ReportInstanceReasonCodes_Values returns all elements of the ReportInstanceReasonCodes enum +func ReportInstanceReasonCodes_Values() []string { + return []string{ + ReportInstanceReasonCodesInstanceStuckInState, + ReportInstanceReasonCodesUnresponsive, + ReportInstanceReasonCodesNotAcceptingCredentials, + ReportInstanceReasonCodesPasswordNotAvailable, + ReportInstanceReasonCodesPerformanceNetwork, + ReportInstanceReasonCodesPerformanceInstanceStore, + ReportInstanceReasonCodesPerformanceEbsVolume, + ReportInstanceReasonCodesPerformanceOther, + ReportInstanceReasonCodesOther, + } +} + const ( // ReportStatusTypeOk is a ReportStatusType enum value ReportStatusTypeOk = "ok" @@ -108954,6 +120473,14 @@ const ( ReportStatusTypeImpaired = "impaired" ) +// ReportStatusType_Values returns all elements of the ReportStatusType enum +func ReportStatusType_Values() []string { + return []string{ + ReportStatusTypeOk, + ReportStatusTypeImpaired, + } +} + const ( // ReservationStatePaymentPending is a ReservationState enum value ReservationStatePaymentPending = "payment-pending" @@ -108968,6 +120495,16 @@ const ( ReservationStateRetired = "retired" ) +// ReservationState_Values returns all elements of the ReservationState enum +func ReservationState_Values() []string { + return []string{ + ReservationStatePaymentPending, + ReservationStatePaymentFailed, + ReservationStateActive, + ReservationStateRetired, + } +} + const ( // ReservedInstanceStatePaymentPending is a ReservedInstanceState enum value ReservedInstanceStatePaymentPending = "payment-pending" @@ -108988,16 +120525,42 @@ const ( ReservedInstanceStateQueuedDeleted = "queued-deleted" ) +// ReservedInstanceState_Values returns all elements of the ReservedInstanceState enum +func ReservedInstanceState_Values() []string { + return []string{ + ReservedInstanceStatePaymentPending, + ReservedInstanceStateActive, + ReservedInstanceStatePaymentFailed, + ReservedInstanceStateRetired, + ReservedInstanceStateQueued, + ReservedInstanceStateQueuedDeleted, + } +} + const ( // ResetFpgaImageAttributeNameLoadPermission is a ResetFpgaImageAttributeName enum value ResetFpgaImageAttributeNameLoadPermission = "loadPermission" ) +// ResetFpgaImageAttributeName_Values returns all elements of the ResetFpgaImageAttributeName enum +func ResetFpgaImageAttributeName_Values() []string { + return []string{ + ResetFpgaImageAttributeNameLoadPermission, + } +} + const ( // ResetImageAttributeNameLaunchPermission is a ResetImageAttributeName enum value ResetImageAttributeNameLaunchPermission = "launchPermission" ) +// ResetImageAttributeName_Values returns all elements of the ResetImageAttributeName enum +func ResetImageAttributeName_Values() []string { + return []string{ + ResetImageAttributeNameLaunchPermission, + } +} + const ( // ResourceTypeClientVpnEndpoint is a ResourceType enum value ResourceTypeClientVpnEndpoint = "client-vpn-endpoint" @@ -109011,9 +120574,21 @@ const ( // ResourceTypeDhcpOptions is a ResourceType enum value ResourceTypeDhcpOptions = "dhcp-options" + // ResourceTypeEgressOnlyInternetGateway is a 
ResourceType enum value + ResourceTypeEgressOnlyInternetGateway = "egress-only-internet-gateway" + // ResourceTypeElasticIp is a ResourceType enum value ResourceTypeElasticIp = "elastic-ip" + // ResourceTypeElasticGpu is a ResourceType enum value + ResourceTypeElasticGpu = "elastic-gpu" + + // ResourceTypeExportImageTask is a ResourceType enum value + ResourceTypeExportImageTask = "export-image-task" + + // ResourceTypeExportInstanceTask is a ResourceType enum value + ResourceTypeExportInstanceTask = "export-instance-task" + // ResourceTypeFleet is a ResourceType enum value ResourceTypeFleet = "fleet" @@ -109026,6 +120601,12 @@ const ( // ResourceTypeImage is a ResourceType enum value ResourceTypeImage = "image" + // ResourceTypeImportImageTask is a ResourceType enum value + ResourceTypeImportImageTask = "import-image-task" + + // ResourceTypeImportSnapshotTask is a ResourceType enum value + ResourceTypeImportSnapshotTask = "import-snapshot-task" + // ResourceTypeInstance is a ResourceType enum value ResourceTypeInstance = "instance" @@ -109038,6 +120619,9 @@ const ( // ResourceTypeLaunchTemplate is a ResourceType enum value ResourceTypeLaunchTemplate = "launch-template" + // ResourceTypeLocalGatewayRouteTableVpcAssociation is a ResourceType enum value + ResourceTypeLocalGatewayRouteTableVpcAssociation = "local-gateway-route-table-vpc-association" + // ResourceTypeNatgateway is a ResourceType enum value ResourceTypeNatgateway = "natgateway" @@ -109106,8 +120690,61 @@ const ( // ResourceTypeVpnGateway is a ResourceType enum value ResourceTypeVpnGateway = "vpn-gateway" + + // ResourceTypeVpcFlowLog is a ResourceType enum value + ResourceTypeVpcFlowLog = "vpc-flow-log" ) +// ResourceType_Values returns all elements of the ResourceType enum +func ResourceType_Values() []string { + return []string{ + ResourceTypeClientVpnEndpoint, + ResourceTypeCustomerGateway, + ResourceTypeDedicatedHost, + ResourceTypeDhcpOptions, + ResourceTypeEgressOnlyInternetGateway, + ResourceTypeElasticIp, + ResourceTypeElasticGpu, + ResourceTypeExportImageTask, + ResourceTypeExportInstanceTask, + ResourceTypeFleet, + ResourceTypeFpgaImage, + ResourceTypeHostReservation, + ResourceTypeImage, + ResourceTypeImportImageTask, + ResourceTypeImportSnapshotTask, + ResourceTypeInstance, + ResourceTypeInternetGateway, + ResourceTypeKeyPair, + ResourceTypeLaunchTemplate, + ResourceTypeLocalGatewayRouteTableVpcAssociation, + ResourceTypeNatgateway, + ResourceTypeNetworkAcl, + ResourceTypeNetworkInterface, + ResourceTypePlacementGroup, + ResourceTypeReservedInstances, + ResourceTypeRouteTable, + ResourceTypeSecurityGroup, + ResourceTypeSnapshot, + ResourceTypeSpotFleetRequest, + ResourceTypeSpotInstancesRequest, + ResourceTypeSubnet, + ResourceTypeTrafficMirrorFilter, + ResourceTypeTrafficMirrorSession, + ResourceTypeTrafficMirrorTarget, + ResourceTypeTransitGateway, + ResourceTypeTransitGatewayAttachment, + ResourceTypeTransitGatewayMulticastDomain, + ResourceTypeTransitGatewayRouteTable, + ResourceTypeVolume, + ResourceTypeVpc, + ResourceTypeVpcPeeringConnection, + ResourceTypeVpnConnection, + ResourceTypeVpnGateway, + ResourceTypeVpcFlowLog, + } +} + const ( // RootDeviceTypeEbs is a RootDeviceType enum value RootDeviceTypeEbs = "ebs" @@ -109116,6 +120753,14 @@ const ( RootDeviceTypeInstanceStore = "instance-store" ) +// RootDeviceType_Values returns all elements of the RootDeviceType enum +func RootDeviceType_Values() []string { + return []string{ + RootDeviceTypeEbs, + RootDeviceTypeInstanceStore, + } +} + const ( // 
RouteOriginCreateRouteTable is a RouteOrigin enum value RouteOriginCreateRouteTable = "CreateRouteTable" @@ -109127,6 +120772,15 @@ const ( RouteOriginEnableVgwRoutePropagation = "EnableVgwRoutePropagation" ) +// RouteOrigin_Values returns all elements of the RouteOrigin enum +func RouteOrigin_Values() []string { + return []string{ + RouteOriginCreateRouteTable, + RouteOriginCreateRoute, + RouteOriginEnableVgwRoutePropagation, + } +} + const ( // RouteStateActive is a RouteState enum value RouteStateActive = "active" @@ -109135,6 +120789,14 @@ const ( RouteStateBlackhole = "blackhole" ) +// RouteState_Values returns all elements of the RouteState enum +func RouteState_Values() []string { + return []string{ + RouteStateActive, + RouteStateBlackhole, + } +} + const ( // RouteTableAssociationStateCodeAssociating is a RouteTableAssociationStateCode enum value RouteTableAssociationStateCodeAssociating = "associating" @@ -109152,6 +120814,17 @@ const ( RouteTableAssociationStateCodeFailed = "failed" ) +// RouteTableAssociationStateCode_Values returns all elements of the RouteTableAssociationStateCode enum +func RouteTableAssociationStateCode_Values() []string { + return []string{ + RouteTableAssociationStateCodeAssociating, + RouteTableAssociationStateCodeAssociated, + RouteTableAssociationStateCodeDisassociating, + RouteTableAssociationStateCodeDisassociated, + RouteTableAssociationStateCodeFailed, + } +} + const ( // RuleActionAllow is a RuleAction enum value RuleActionAllow = "allow" @@ -109160,6 +120833,14 @@ const ( RuleActionDeny = "deny" ) +// RuleAction_Values returns all elements of the RuleAction enum +func RuleAction_Values() []string { + return []string{ + RuleActionAllow, + RuleActionDeny, + } +} + const ( // ScopeAvailabilityZone is a Scope enum value ScopeAvailabilityZone = "Availability Zone" @@ -109168,6 +120849,30 @@ const ( ScopeRegion = "Region" ) +// Scope_Values returns all elements of the Scope enum +func Scope_Values() []string { + return []string{ + ScopeAvailabilityZone, + ScopeRegion, + } +} + +const ( + // SelfServicePortalEnabled is a SelfServicePortal enum value + SelfServicePortalEnabled = "enabled" + + // SelfServicePortalDisabled is a SelfServicePortal enum value + SelfServicePortalDisabled = "disabled" +) + +// SelfServicePortal_Values returns all elements of the SelfServicePortal enum +func SelfServicePortal_Values() []string { + return []string{ + SelfServicePortalEnabled, + SelfServicePortalDisabled, + } +} + const ( // ServiceStatePending is a ServiceState enum value ServiceStatePending = "Pending" @@ -109185,6 +120890,17 @@ const ( ServiceStateFailed = "Failed" ) +// ServiceState_Values returns all elements of the ServiceState enum +func ServiceState_Values() []string { + return []string{ + ServiceStatePending, + ServiceStateAvailable, + ServiceStateDeleting, + ServiceStateDeleted, + ServiceStateFailed, + } +} + const ( // ServiceTypeInterface is a ServiceType enum value ServiceTypeInterface = "Interface" @@ -109193,6 +120909,14 @@ const ( ServiceTypeGateway = "Gateway" ) +// ServiceType_Values returns all elements of the ServiceType enum +func ServiceType_Values() []string { + return []string{ + ServiceTypeInterface, + ServiceTypeGateway, + } +} + const ( // ShutdownBehaviorStop is a ShutdownBehavior enum value ShutdownBehaviorStop = "stop" @@ -109201,6 +120925,14 @@ const ( ShutdownBehaviorTerminate = "terminate" ) +// ShutdownBehavior_Values returns all elements of the ShutdownBehavior enum +func ShutdownBehavior_Values() []string { + return []string{ + 
ShutdownBehaviorStop, + ShutdownBehaviorTerminate, + } +} + const ( // SnapshotAttributeNameProductCodes is a SnapshotAttributeName enum value SnapshotAttributeNameProductCodes = "productCodes" @@ -109209,6 +120941,14 @@ const ( SnapshotAttributeNameCreateVolumePermission = "createVolumePermission" ) +// SnapshotAttributeName_Values returns all elements of the SnapshotAttributeName enum +func SnapshotAttributeName_Values() []string { + return []string{ + SnapshotAttributeNameProductCodes, + SnapshotAttributeNameCreateVolumePermission, + } +} + const ( // SnapshotStatePending is a SnapshotState enum value SnapshotStatePending = "pending" @@ -109220,6 +120960,15 @@ const ( SnapshotStateError = "error" ) +// SnapshotState_Values returns all elements of the SnapshotState enum +func SnapshotState_Values() []string { + return []string{ + SnapshotStatePending, + SnapshotStateCompleted, + SnapshotStateError, + } +} + const ( // SpotAllocationStrategyLowestPrice is a SpotAllocationStrategy enum value SpotAllocationStrategyLowestPrice = "lowest-price" @@ -109231,6 +120980,15 @@ const ( SpotAllocationStrategyCapacityOptimized = "capacity-optimized" ) +// SpotAllocationStrategy_Values returns all elements of the SpotAllocationStrategy enum +func SpotAllocationStrategy_Values() []string { + return []string{ + SpotAllocationStrategyLowestPrice, + SpotAllocationStrategyDiversified, + SpotAllocationStrategyCapacityOptimized, + } +} + const ( // SpotInstanceInterruptionBehaviorHibernate is a SpotInstanceInterruptionBehavior enum value SpotInstanceInterruptionBehaviorHibernate = "hibernate" @@ -109242,6 +121000,15 @@ const ( SpotInstanceInterruptionBehaviorTerminate = "terminate" ) +// SpotInstanceInterruptionBehavior_Values returns all elements of the SpotInstanceInterruptionBehavior enum +func SpotInstanceInterruptionBehavior_Values() []string { + return []string{ + SpotInstanceInterruptionBehaviorHibernate, + SpotInstanceInterruptionBehaviorStop, + SpotInstanceInterruptionBehaviorTerminate, + } +} + const ( // SpotInstanceStateOpen is a SpotInstanceState enum value SpotInstanceStateOpen = "open" @@ -109259,6 +121026,17 @@ const ( SpotInstanceStateFailed = "failed" ) +// SpotInstanceState_Values returns all elements of the SpotInstanceState enum +func SpotInstanceState_Values() []string { + return []string{ + SpotInstanceStateOpen, + SpotInstanceStateActive, + SpotInstanceStateClosed, + SpotInstanceStateCancelled, + SpotInstanceStateFailed, + } +} + const ( // SpotInstanceTypeOneTime is a SpotInstanceType enum value SpotInstanceTypeOneTime = "one-time" @@ -109267,6 +121045,14 @@ const ( SpotInstanceTypePersistent = "persistent" ) +// SpotInstanceType_Values returns all elements of the SpotInstanceType enum +func SpotInstanceType_Values() []string { + return []string{ + SpotInstanceTypeOneTime, + SpotInstanceTypePersistent, + } +} + const ( // StatePendingAcceptance is a State enum value StatePendingAcceptance = "PendingAcceptance" @@ -109293,6 +121079,20 @@ const ( StateExpired = "Expired" ) +// State_Values returns all elements of the State enum +func State_Values() []string { + return []string{ + StatePendingAcceptance, + StatePending, + StateAvailable, + StateDeleting, + StateDeleted, + StateRejected, + StateFailed, + StateExpired, + } +} + const ( // StatusMoveInProgress is a Status enum value StatusMoveInProgress = "MoveInProgress" @@ -109304,11 +121104,27 @@ const ( StatusInClassic = "InClassic" ) +// Status_Values returns all elements of the Status enum +func Status_Values() []string { + return 
[]string{ + StatusMoveInProgress, + StatusInVpc, + StatusInClassic, + } +} + const ( // StatusNameReachability is a StatusName enum value StatusNameReachability = "reachability" ) +// StatusName_Values returns all elements of the StatusName enum +func StatusName_Values() []string { + return []string{ + StatusNameReachability, + } +} + const ( // StatusTypePassed is a StatusType enum value StatusTypePassed = "passed" @@ -109323,6 +121139,16 @@ const ( StatusTypeInitializing = "initializing" ) +// StatusType_Values returns all elements of the StatusType enum +func StatusType_Values() []string { + return []string{ + StatusTypePassed, + StatusTypeFailed, + StatusTypeInsufficientData, + StatusTypeInitializing, + } +} + const ( // SubnetCidrBlockStateCodeAssociating is a SubnetCidrBlockStateCode enum value SubnetCidrBlockStateCodeAssociating = "associating" @@ -109343,6 +121169,18 @@ const ( SubnetCidrBlockStateCodeFailed = "failed" ) +// SubnetCidrBlockStateCode_Values returns all elements of the SubnetCidrBlockStateCode enum +func SubnetCidrBlockStateCode_Values() []string { + return []string{ + SubnetCidrBlockStateCodeAssociating, + SubnetCidrBlockStateCodeAssociated, + SubnetCidrBlockStateCodeDisassociating, + SubnetCidrBlockStateCodeDisassociated, + SubnetCidrBlockStateCodeFailing, + SubnetCidrBlockStateCodeFailed, + } +} + const ( // SubnetStatePending is a SubnetState enum value SubnetStatePending = "pending" @@ -109351,6 +121189,14 @@ const ( SubnetStateAvailable = "available" ) +// SubnetState_Values returns all elements of the SubnetState enum +func SubnetState_Values() []string { + return []string{ + SubnetStatePending, + SubnetStateAvailable, + } +} + const ( // SummaryStatusOk is a SummaryStatus enum value SummaryStatusOk = "ok" @@ -109368,6 +121214,17 @@ const ( SummaryStatusInitializing = "initializing" ) +// SummaryStatus_Values returns all elements of the SummaryStatus enum +func SummaryStatus_Values() []string { + return []string{ + SummaryStatusOk, + SummaryStatusImpaired, + SummaryStatusInsufficientData, + SummaryStatusNotApplicable, + SummaryStatusInitializing, + } +} + const ( // TelemetryStatusUp is a TelemetryStatus enum value TelemetryStatusUp = "UP" @@ -109376,6 +121233,14 @@ const ( TelemetryStatusDown = "DOWN" ) +// TelemetryStatus_Values returns all elements of the TelemetryStatus enum +func TelemetryStatus_Values() []string { + return []string{ + TelemetryStatusUp, + TelemetryStatusDown, + } +} + const ( // TenancyDefault is a Tenancy enum value TenancyDefault = "default" @@ -109387,6 +121252,15 @@ const ( TenancyHost = "host" ) +// Tenancy_Values returns all elements of the Tenancy enum +func Tenancy_Values() []string { + return []string{ + TenancyDefault, + TenancyDedicated, + TenancyHost, + } +} + const ( // TrafficDirectionIngress is a TrafficDirection enum value TrafficDirectionIngress = "ingress" @@ -109395,6 +121269,14 @@ const ( TrafficDirectionEgress = "egress" ) +// TrafficDirection_Values returns all elements of the TrafficDirection enum +func TrafficDirection_Values() []string { + return []string{ + TrafficDirectionIngress, + TrafficDirectionEgress, + } +} + const ( // TrafficMirrorFilterRuleFieldDestinationPortRange is a TrafficMirrorFilterRuleField enum value TrafficMirrorFilterRuleFieldDestinationPortRange = "destination-port-range" @@ -109409,11 +121291,28 @@ const ( TrafficMirrorFilterRuleFieldDescription = "description" ) +// TrafficMirrorFilterRuleField_Values returns all elements of the TrafficMirrorFilterRuleField enum +func 
TrafficMirrorFilterRuleField_Values() []string { + return []string{ + TrafficMirrorFilterRuleFieldDestinationPortRange, + TrafficMirrorFilterRuleFieldSourcePortRange, + TrafficMirrorFilterRuleFieldProtocol, + TrafficMirrorFilterRuleFieldDescription, + } +} + const ( // TrafficMirrorNetworkServiceAmazonDns is a TrafficMirrorNetworkService enum value TrafficMirrorNetworkServiceAmazonDns = "amazon-dns" ) +// TrafficMirrorNetworkService_Values returns all elements of the TrafficMirrorNetworkService enum +func TrafficMirrorNetworkService_Values() []string { + return []string{ + TrafficMirrorNetworkServiceAmazonDns, + } +} + const ( // TrafficMirrorRuleActionAccept is a TrafficMirrorRuleAction enum value TrafficMirrorRuleActionAccept = "accept" @@ -109422,6 +121321,14 @@ const ( TrafficMirrorRuleActionReject = "reject" ) +// TrafficMirrorRuleAction_Values returns all elements of the TrafficMirrorRuleAction enum +func TrafficMirrorRuleAction_Values() []string { + return []string{ + TrafficMirrorRuleActionAccept, + TrafficMirrorRuleActionReject, + } +} + const ( // TrafficMirrorSessionFieldPacketLength is a TrafficMirrorSessionField enum value TrafficMirrorSessionFieldPacketLength = "packet-length" @@ -109433,6 +121340,15 @@ const ( TrafficMirrorSessionFieldVirtualNetworkId = "virtual-network-id" ) +// TrafficMirrorSessionField_Values returns all elements of the TrafficMirrorSessionField enum +func TrafficMirrorSessionField_Values() []string { + return []string{ + TrafficMirrorSessionFieldPacketLength, + TrafficMirrorSessionFieldDescription, + TrafficMirrorSessionFieldVirtualNetworkId, + } +} + const ( // TrafficMirrorTargetTypeNetworkInterface is a TrafficMirrorTargetType enum value TrafficMirrorTargetTypeNetworkInterface = "network-interface" @@ -109441,6 +121357,14 @@ const ( TrafficMirrorTargetTypeNetworkLoadBalancer = "network-load-balancer" ) +// TrafficMirrorTargetType_Values returns all elements of the TrafficMirrorTargetType enum +func TrafficMirrorTargetType_Values() []string { + return []string{ + TrafficMirrorTargetTypeNetworkInterface, + TrafficMirrorTargetTypeNetworkLoadBalancer, + } +} + const ( // TrafficTypeAccept is a TrafficType enum value TrafficTypeAccept = "ACCEPT" @@ -109452,6 +121376,15 @@ const ( TrafficTypeAll = "ALL" ) +// TrafficType_Values returns all elements of the TrafficType enum +func TrafficType_Values() []string { + return []string{ + TrafficTypeAccept, + TrafficTypeReject, + TrafficTypeAll, + } +} + const ( // TransitGatewayAssociationStateAssociating is a TransitGatewayAssociationState enum value TransitGatewayAssociationStateAssociating = "associating" @@ -109466,6 +121399,16 @@ const ( TransitGatewayAssociationStateDisassociated = "disassociated" ) +// TransitGatewayAssociationState_Values returns all elements of the TransitGatewayAssociationState enum +func TransitGatewayAssociationState_Values() []string { + return []string{ + TransitGatewayAssociationStateAssociating, + TransitGatewayAssociationStateAssociated, + TransitGatewayAssociationStateDisassociating, + TransitGatewayAssociationStateDisassociated, + } +} + const ( // TransitGatewayAttachmentResourceTypeVpc is a TransitGatewayAttachmentResourceType enum value TransitGatewayAttachmentResourceTypeVpc = "vpc" @@ -109476,14 +121419,31 @@ const ( // TransitGatewayAttachmentResourceTypeDirectConnectGateway is a TransitGatewayAttachmentResourceType enum value TransitGatewayAttachmentResourceTypeDirectConnectGateway = "direct-connect-gateway" + // TransitGatewayAttachmentResourceTypePeering is a 
TransitGatewayAttachmentResourceType enum value + TransitGatewayAttachmentResourceTypePeering = "peering" + // TransitGatewayAttachmentResourceTypeTgwPeering is a TransitGatewayAttachmentResourceType enum value TransitGatewayAttachmentResourceTypeTgwPeering = "tgw-peering" ) +// TransitGatewayAttachmentResourceType_Values returns all elements of the TransitGatewayAttachmentResourceType enum +func TransitGatewayAttachmentResourceType_Values() []string { + return []string{ + TransitGatewayAttachmentResourceTypeVpc, + TransitGatewayAttachmentResourceTypeVpn, + TransitGatewayAttachmentResourceTypeDirectConnectGateway, + TransitGatewayAttachmentResourceTypePeering, + TransitGatewayAttachmentResourceTypeTgwPeering, + } +} + const ( // TransitGatewayAttachmentStateInitiating is a TransitGatewayAttachmentState enum value TransitGatewayAttachmentStateInitiating = "initiating" + // TransitGatewayAttachmentStateInitiatingRequest is a TransitGatewayAttachmentState enum value + TransitGatewayAttachmentStateInitiatingRequest = "initiatingRequest" + // TransitGatewayAttachmentStatePendingAcceptance is a TransitGatewayAttachmentState enum value TransitGatewayAttachmentStatePendingAcceptance = "pendingAcceptance" @@ -109518,6 +121478,25 @@ const ( TransitGatewayAttachmentStateFailing = "failing" ) +// TransitGatewayAttachmentState_Values returns all elements of the TransitGatewayAttachmentState enum +func TransitGatewayAttachmentState_Values() []string { + return []string{ + TransitGatewayAttachmentStateInitiating, + TransitGatewayAttachmentStateInitiatingRequest, + TransitGatewayAttachmentStatePendingAcceptance, + TransitGatewayAttachmentStateRollingBack, + TransitGatewayAttachmentStatePending, + TransitGatewayAttachmentStateAvailable, + TransitGatewayAttachmentStateModifying, + TransitGatewayAttachmentStateDeleting, + TransitGatewayAttachmentStateDeleted, + TransitGatewayAttachmentStateFailed, + TransitGatewayAttachmentStateRejected, + TransitGatewayAttachmentStateRejecting, + TransitGatewayAttachmentStateFailing, + } +} + const ( // TransitGatewayMulitcastDomainAssociationStateAssociating is a TransitGatewayMulitcastDomainAssociationState enum value TransitGatewayMulitcastDomainAssociationStateAssociating = "associating" @@ -109532,6 +121511,16 @@ const ( TransitGatewayMulitcastDomainAssociationStateDisassociated = "disassociated" ) +// TransitGatewayMulitcastDomainAssociationState_Values returns all elements of the TransitGatewayMulitcastDomainAssociationState enum +func TransitGatewayMulitcastDomainAssociationState_Values() []string { + return []string{ + TransitGatewayMulitcastDomainAssociationStateAssociating, + TransitGatewayMulitcastDomainAssociationStateAssociated, + TransitGatewayMulitcastDomainAssociationStateDisassociating, + TransitGatewayMulitcastDomainAssociationStateDisassociated, + } +} + const ( // TransitGatewayMulticastDomainStatePending is a TransitGatewayMulticastDomainState enum value TransitGatewayMulticastDomainStatePending = "pending" @@ -109546,6 +121535,40 @@ const ( TransitGatewayMulticastDomainStateDeleted = "deleted" ) +// TransitGatewayMulticastDomainState_Values returns all elements of the TransitGatewayMulticastDomainState enum +func TransitGatewayMulticastDomainState_Values() []string { + return []string{ + TransitGatewayMulticastDomainStatePending, + TransitGatewayMulticastDomainStateAvailable, + TransitGatewayMulticastDomainStateDeleting, + TransitGatewayMulticastDomainStateDeleted, + } +} + +const ( + // TransitGatewayPrefixListReferenceStatePending is a 
TransitGatewayPrefixListReferenceState enum value + TransitGatewayPrefixListReferenceStatePending = "pending" + + // TransitGatewayPrefixListReferenceStateAvailable is a TransitGatewayPrefixListReferenceState enum value + TransitGatewayPrefixListReferenceStateAvailable = "available" + + // TransitGatewayPrefixListReferenceStateModifying is a TransitGatewayPrefixListReferenceState enum value + TransitGatewayPrefixListReferenceStateModifying = "modifying" + + // TransitGatewayPrefixListReferenceStateDeleting is a TransitGatewayPrefixListReferenceState enum value + TransitGatewayPrefixListReferenceStateDeleting = "deleting" +) + +// TransitGatewayPrefixListReferenceState_Values returns all elements of the TransitGatewayPrefixListReferenceState enum +func TransitGatewayPrefixListReferenceState_Values() []string { + return []string{ + TransitGatewayPrefixListReferenceStatePending, + TransitGatewayPrefixListReferenceStateAvailable, + TransitGatewayPrefixListReferenceStateModifying, + TransitGatewayPrefixListReferenceStateDeleting, + } +} + const ( // TransitGatewayPropagationStateEnabling is a TransitGatewayPropagationState enum value TransitGatewayPropagationStateEnabling = "enabling" @@ -109560,6 +121583,16 @@ const ( TransitGatewayPropagationStateDisabled = "disabled" ) +// TransitGatewayPropagationState_Values returns all elements of the TransitGatewayPropagationState enum +func TransitGatewayPropagationState_Values() []string { + return []string{ + TransitGatewayPropagationStateEnabling, + TransitGatewayPropagationStateEnabled, + TransitGatewayPropagationStateDisabling, + TransitGatewayPropagationStateDisabled, + } +} + const ( // TransitGatewayRouteStatePending is a TransitGatewayRouteState enum value TransitGatewayRouteStatePending = "pending" @@ -109577,6 +121610,17 @@ const ( TransitGatewayRouteStateDeleted = "deleted" ) +// TransitGatewayRouteState_Values returns all elements of the TransitGatewayRouteState enum +func TransitGatewayRouteState_Values() []string { + return []string{ + TransitGatewayRouteStatePending, + TransitGatewayRouteStateActive, + TransitGatewayRouteStateBlackhole, + TransitGatewayRouteStateDeleting, + TransitGatewayRouteStateDeleted, + } +} + const ( // TransitGatewayRouteTableStatePending is a TransitGatewayRouteTableState enum value TransitGatewayRouteTableStatePending = "pending" @@ -109591,6 +121635,16 @@ const ( TransitGatewayRouteTableStateDeleted = "deleted" ) +// TransitGatewayRouteTableState_Values returns all elements of the TransitGatewayRouteTableState enum +func TransitGatewayRouteTableState_Values() []string { + return []string{ + TransitGatewayRouteTableStatePending, + TransitGatewayRouteTableStateAvailable, + TransitGatewayRouteTableStateDeleting, + TransitGatewayRouteTableStateDeleted, + } +} + const ( // TransitGatewayRouteTypeStatic is a TransitGatewayRouteType enum value TransitGatewayRouteTypeStatic = "static" @@ -109599,6 +121653,14 @@ const ( TransitGatewayRouteTypePropagated = "propagated" ) +// TransitGatewayRouteType_Values returns all elements of the TransitGatewayRouteType enum +func TransitGatewayRouteType_Values() []string { + return []string{ + TransitGatewayRouteTypeStatic, + TransitGatewayRouteTypePropagated, + } +} + const ( // TransitGatewayStatePending is a TransitGatewayState enum value TransitGatewayStatePending = "pending" @@ -109616,6 +121678,17 @@ const ( TransitGatewayStateDeleted = "deleted" ) +// TransitGatewayState_Values returns all elements of the TransitGatewayState enum +func TransitGatewayState_Values() []string { + 
return []string{ + TransitGatewayStatePending, + TransitGatewayStateAvailable, + TransitGatewayStateModifying, + TransitGatewayStateDeleting, + TransitGatewayStateDeleted, + } +} + const ( // TransportProtocolTcp is a TransportProtocol enum value TransportProtocolTcp = "tcp" @@ -109624,6 +121697,30 @@ const ( TransportProtocolUdp = "udp" ) +// TransportProtocol_Values returns all elements of the TransportProtocol enum +func TransportProtocol_Values() []string { + return []string{ + TransportProtocolTcp, + TransportProtocolUdp, + } +} + +const ( + // TunnelInsideIpVersionIpv4 is a TunnelInsideIpVersion enum value + TunnelInsideIpVersionIpv4 = "ipv4" + + // TunnelInsideIpVersionIpv6 is a TunnelInsideIpVersion enum value + TunnelInsideIpVersionIpv6 = "ipv6" +) + +// TunnelInsideIpVersion_Values returns all elements of the TunnelInsideIpVersion enum +func TunnelInsideIpVersion_Values() []string { + return []string{ + TunnelInsideIpVersionIpv4, + TunnelInsideIpVersionIpv6, + } +} + const ( // UnlimitedSupportedInstanceFamilyT2 is a UnlimitedSupportedInstanceFamily enum value UnlimitedSupportedInstanceFamilyT2 = "t2" @@ -109633,8 +121730,21 @@ const ( // UnlimitedSupportedInstanceFamilyT3a is a UnlimitedSupportedInstanceFamily enum value UnlimitedSupportedInstanceFamilyT3a = "t3a" + + // UnlimitedSupportedInstanceFamilyT4g is a UnlimitedSupportedInstanceFamily enum value + UnlimitedSupportedInstanceFamilyT4g = "t4g" ) +// UnlimitedSupportedInstanceFamily_Values returns all elements of the UnlimitedSupportedInstanceFamily enum +func UnlimitedSupportedInstanceFamily_Values() []string { + return []string{ + UnlimitedSupportedInstanceFamilyT2, + UnlimitedSupportedInstanceFamilyT3, + UnlimitedSupportedInstanceFamilyT3a, + UnlimitedSupportedInstanceFamilyT4g, + } +} + const ( // UnsuccessfulInstanceCreditSpecificationErrorCodeInvalidInstanceIdMalformed is a UnsuccessfulInstanceCreditSpecificationErrorCode enum value UnsuccessfulInstanceCreditSpecificationErrorCodeInvalidInstanceIdMalformed = "InvalidInstanceID.Malformed" @@ -109649,6 +121759,16 @@ const ( UnsuccessfulInstanceCreditSpecificationErrorCodeInstanceCreditSpecificationNotSupported = "InstanceCreditSpecification.NotSupported" ) +// UnsuccessfulInstanceCreditSpecificationErrorCode_Values returns all elements of the UnsuccessfulInstanceCreditSpecificationErrorCode enum +func UnsuccessfulInstanceCreditSpecificationErrorCode_Values() []string { + return []string{ + UnsuccessfulInstanceCreditSpecificationErrorCodeInvalidInstanceIdMalformed, + UnsuccessfulInstanceCreditSpecificationErrorCodeInvalidInstanceIdNotFound, + UnsuccessfulInstanceCreditSpecificationErrorCodeIncorrectInstanceState, + UnsuccessfulInstanceCreditSpecificationErrorCodeInstanceCreditSpecificationNotSupported, + } +} + const ( // UsageClassTypeSpot is a UsageClassType enum value UsageClassTypeSpot = "spot" @@ -109657,6 +121777,14 @@ const ( UsageClassTypeOnDemand = "on-demand" ) +// UsageClassType_Values returns all elements of the UsageClassType enum +func UsageClassType_Values() []string { + return []string{ + UsageClassTypeSpot, + UsageClassTypeOnDemand, + } +} + const ( // VirtualizationTypeHvm is a VirtualizationType enum value VirtualizationTypeHvm = "hvm" @@ -109665,6 +121793,14 @@ const ( VirtualizationTypeParavirtual = "paravirtual" ) +// VirtualizationType_Values returns all elements of the VirtualizationType enum +func VirtualizationType_Values() []string { + return []string{ + VirtualizationTypeHvm, + VirtualizationTypeParavirtual, + } +} + const ( // 
VolumeAttachmentStateAttaching is a VolumeAttachmentState enum value VolumeAttachmentStateAttaching = "attaching" @@ -109682,6 +121818,17 @@ const ( VolumeAttachmentStateBusy = "busy" ) +// VolumeAttachmentState_Values returns all elements of the VolumeAttachmentState enum +func VolumeAttachmentState_Values() []string { + return []string{ + VolumeAttachmentStateAttaching, + VolumeAttachmentStateAttached, + VolumeAttachmentStateDetaching, + VolumeAttachmentStateDetached, + VolumeAttachmentStateBusy, + } +} + const ( // VolumeAttributeNameAutoEnableIo is a VolumeAttributeName enum value VolumeAttributeNameAutoEnableIo = "autoEnableIO" @@ -109690,6 +121837,14 @@ const ( VolumeAttributeNameProductCodes = "productCodes" ) +// VolumeAttributeName_Values returns all elements of the VolumeAttributeName enum +func VolumeAttributeName_Values() []string { + return []string{ + VolumeAttributeNameAutoEnableIo, + VolumeAttributeNameProductCodes, + } +} + const ( // VolumeModificationStateModifying is a VolumeModificationState enum value VolumeModificationStateModifying = "modifying" @@ -109704,6 +121859,16 @@ const ( VolumeModificationStateFailed = "failed" ) +// VolumeModificationState_Values returns all elements of the VolumeModificationState enum +func VolumeModificationState_Values() []string { + return []string{ + VolumeModificationStateModifying, + VolumeModificationStateOptimizing, + VolumeModificationStateCompleted, + VolumeModificationStateFailed, + } +} + const ( // VolumeStateCreating is a VolumeState enum value VolumeStateCreating = "creating" @@ -109724,6 +121889,18 @@ const ( VolumeStateError = "error" ) +// VolumeState_Values returns all elements of the VolumeState enum +func VolumeState_Values() []string { + return []string{ + VolumeStateCreating, + VolumeStateAvailable, + VolumeStateInUse, + VolumeStateDeleting, + VolumeStateDeleted, + VolumeStateError, + } +} + const ( // VolumeStatusInfoStatusOk is a VolumeStatusInfoStatus enum value VolumeStatusInfoStatusOk = "ok" @@ -109735,6 +121912,15 @@ const ( VolumeStatusInfoStatusInsufficientData = "insufficient-data" ) +// VolumeStatusInfoStatus_Values returns all elements of the VolumeStatusInfoStatus enum +func VolumeStatusInfoStatus_Values() []string { + return []string{ + VolumeStatusInfoStatusOk, + VolumeStatusInfoStatusImpaired, + VolumeStatusInfoStatusInsufficientData, + } +} + const ( // VolumeStatusNameIoEnabled is a VolumeStatusName enum value VolumeStatusNameIoEnabled = "io-enabled" @@ -109743,6 +121929,14 @@ const ( VolumeStatusNameIoPerformance = "io-performance" ) +// VolumeStatusName_Values returns all elements of the VolumeStatusName enum +func VolumeStatusName_Values() []string { + return []string{ + VolumeStatusNameIoEnabled, + VolumeStatusNameIoPerformance, + } +} + const ( // VolumeTypeStandard is a VolumeType enum value VolumeTypeStandard = "standard" @@ -109750,6 +121944,9 @@ const ( // VolumeTypeIo1 is a VolumeType enum value VolumeTypeIo1 = "io1" + // VolumeTypeIo2 is a VolumeType enum value + VolumeTypeIo2 = "io2" + // VolumeTypeGp2 is a VolumeType enum value VolumeTypeGp2 = "gp2" @@ -109760,6 +121957,18 @@ const ( VolumeTypeSt1 = "st1" ) +// VolumeType_Values returns all elements of the VolumeType enum +func VolumeType_Values() []string { + return []string{ + VolumeTypeStandard, + VolumeTypeIo1, + VolumeTypeIo2, + VolumeTypeGp2, + VolumeTypeSc1, + VolumeTypeSt1, + } +} + const ( // VpcAttributeNameEnableDnsSupport is a VpcAttributeName enum value VpcAttributeNameEnableDnsSupport = "enableDnsSupport" @@ -109768,6 
+121977,14 @@ const ( VpcAttributeNameEnableDnsHostnames = "enableDnsHostnames" ) +// VpcAttributeName_Values returns all elements of the VpcAttributeName enum +func VpcAttributeName_Values() []string { + return []string{ + VpcAttributeNameEnableDnsSupport, + VpcAttributeNameEnableDnsHostnames, + } +} + const ( // VpcCidrBlockStateCodeAssociating is a VpcCidrBlockStateCode enum value VpcCidrBlockStateCodeAssociating = "associating" @@ -109788,6 +122005,18 @@ const ( VpcCidrBlockStateCodeFailed = "failed" ) +// VpcCidrBlockStateCode_Values returns all elements of the VpcCidrBlockStateCode enum +func VpcCidrBlockStateCode_Values() []string { + return []string{ + VpcCidrBlockStateCodeAssociating, + VpcCidrBlockStateCodeAssociated, + VpcCidrBlockStateCodeDisassociating, + VpcCidrBlockStateCodeDisassociated, + VpcCidrBlockStateCodeFailing, + VpcCidrBlockStateCodeFailed, + } +} + const ( // VpcEndpointTypeInterface is a VpcEndpointType enum value VpcEndpointTypeInterface = "Interface" @@ -109796,6 +122025,14 @@ const ( VpcEndpointTypeGateway = "Gateway" ) +// VpcEndpointType_Values returns all elements of the VpcEndpointType enum +func VpcEndpointType_Values() []string { + return []string{ + VpcEndpointTypeInterface, + VpcEndpointTypeGateway, + } +} + const ( // VpcPeeringConnectionStateReasonCodeInitiatingRequest is a VpcPeeringConnectionStateReasonCode enum value VpcPeeringConnectionStateReasonCodeInitiatingRequest = "initiating-request" @@ -109825,6 +122062,21 @@ const ( VpcPeeringConnectionStateReasonCodeDeleting = "deleting" ) +// VpcPeeringConnectionStateReasonCode_Values returns all elements of the VpcPeeringConnectionStateReasonCode enum +func VpcPeeringConnectionStateReasonCode_Values() []string { + return []string{ + VpcPeeringConnectionStateReasonCodeInitiatingRequest, + VpcPeeringConnectionStateReasonCodePendingAcceptance, + VpcPeeringConnectionStateReasonCodeActive, + VpcPeeringConnectionStateReasonCodeDeleted, + VpcPeeringConnectionStateReasonCodeRejected, + VpcPeeringConnectionStateReasonCodeFailed, + VpcPeeringConnectionStateReasonCodeExpired, + VpcPeeringConnectionStateReasonCodeProvisioning, + VpcPeeringConnectionStateReasonCodeDeleting, + } +} + const ( // VpcStatePending is a VpcState enum value VpcStatePending = "pending" @@ -109833,11 +122085,26 @@ const ( VpcStateAvailable = "available" ) +// VpcState_Values returns all elements of the VpcState enum +func VpcState_Values() []string { + return []string{ + VpcStatePending, + VpcStateAvailable, + } +} + const ( // VpcTenancyDefault is a VpcTenancy enum value VpcTenancyDefault = "default" ) +// VpcTenancy_Values returns all elements of the VpcTenancy enum +func VpcTenancy_Values() []string { + return []string{ + VpcTenancyDefault, + } +} + const ( // VpnEcmpSupportValueEnable is a VpnEcmpSupportValue enum value VpnEcmpSupportValueEnable = "enable" @@ -109846,11 +122113,26 @@ const ( VpnEcmpSupportValueDisable = "disable" ) +// VpnEcmpSupportValue_Values returns all elements of the VpnEcmpSupportValue enum +func VpnEcmpSupportValue_Values() []string { + return []string{ + VpnEcmpSupportValueEnable, + VpnEcmpSupportValueDisable, + } +} + const ( // VpnProtocolOpenvpn is a VpnProtocol enum value VpnProtocolOpenvpn = "openvpn" ) +// VpnProtocol_Values returns all elements of the VpnProtocol enum +func VpnProtocol_Values() []string { + return []string{ + VpnProtocolOpenvpn, + } +} + const ( // VpnStatePending is a VpnState enum value VpnStatePending = "pending" @@ -109865,7 +122147,24 @@ const ( VpnStateDeleted = "deleted" ) +// 
VpnState_Values returns all elements of the VpnState enum +func VpnState_Values() []string { + return []string{ + VpnStatePending, + VpnStateAvailable, + VpnStateDeleting, + VpnStateDeleted, + } +} + const ( // VpnStaticRouteSourceStatic is a VpnStaticRouteSource enum value VpnStaticRouteSourceStatic = "Static" ) + +// VpnStaticRouteSource_Values returns all elements of the VpnStaticRouteSource enum +func VpnStaticRouteSource_Values() []string { + return []string{ + VpnStaticRouteSourceStatic, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go index efec8d8a94e4..3ad30591894c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go @@ -26,8 +26,12 @@ func init() { // only set the retryer on request if config doesn't have a retryer if r.Config.Retryer == nil && (r.Operation.Name == opModifyNetworkInterfaceAttribute || r.Operation.Name == opAssignPrivateIpAddresses) { + maxRetries := client.DefaultRetryerMaxNumRetries + if m := r.Config.MaxRetries; m != nil && *m != aws.UseServiceDefaultRetries { + maxRetries = *m + } r.Retryer = client.DefaultRetryer{ - NumMaxRetries: client.DefaultRetryerMaxNumRetries, + NumMaxRetries: maxRetries, MinRetryDelay: customRetryerMinRetryDelay, MinThrottleDelay: customRetryerMinRetryDelay, MaxRetryDelay: customRetryerMaxRetryDelay, diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go b/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go index 5fcf63e6bb3a..462b049cc3c5 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go @@ -57,12 +57,15 @@ func (c *ECR) BatchCheckLayerAvailabilityRequest(input *BatchCheckLayerAvailabil // BatchCheckLayerAvailability API operation for Amazon EC2 Container Registry. // -// Check the availability of multiple image layers in a specified registry and -// repository. +// Checks the availability of one or more image layers in a repository. +// +// When an image is pushed to a repository, each image layer is checked to verify +// if it has been uploaded before. If it has been uploaded, then the image layer +// is skipped. // -// This operation is used by the Amazon ECR proxy, and it is not intended for -// general use by customers for pulling and pushing images. In most cases, you -// should use the docker CLI to pull, tag, and push images. +// This operation is used by the Amazon ECR proxy and is not generally used +// by customers for pulling and pushing images. In most cases, you should use +// the docker CLI to pull, tag, and push images. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -149,8 +152,8 @@ func (c *ECR) BatchDeleteImageRequest(input *BatchDeleteImageInput) (req *reques // BatchDeleteImage API operation for Amazon EC2 Container Registry. // -// Deletes a list of specified images within a specified repository. Images -// are specified with either imageTag or imageDigest. +// Deletes a list of specified images within a repository. Images are specified +// with either an imageTag or imageDigest. // // You can remove a tag from an image by specifying the image's tag in your // request. 
When you remove the last tag from an image, the image is deleted @@ -244,8 +247,11 @@ func (c *ECR) BatchGetImageRequest(input *BatchGetImageInput) (req *request.Requ // BatchGetImage API operation for Amazon EC2 Container Registry. // -// Gets detailed information for specified images within a specified repository. -// Images are specified with either imageTag or imageDigest. +// Gets detailed information for an image. Images are specified with either +// an imageTag or imageDigest. +// +// When an image is pulled, the BatchGetImage API is called once to retrieve +// the image manifest. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -336,9 +342,12 @@ func (c *ECR) CompleteLayerUploadRequest(input *CompleteLayerUploadInput) (req * // registry, repository name, and upload ID. You can optionally provide a sha256 // digest of the image layer for data validation purposes. // -// This operation is used by the Amazon ECR proxy, and it is not intended for -// general use by customers for pulling and pushing images. In most cases, you -// should use the docker CLI to pull, tag, and push images. +// When an image is pushed, the CompleteLayerUpload API is called once per each +// new image layer to verify that the upload has completed. +// +// This operation is used by the Amazon ECR proxy and is not generally used +// by customers for pulling and pushing images. In most cases, you should use +// the docker CLI to pull, tag, and push images. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -360,7 +369,7 @@ func (c *ECR) CompleteLayerUploadRequest(input *CompleteLayerUploadInput) (req * // repository and ensure that you are performing operations on the correct registry. // // * UploadNotFoundException -// The upload could not be found, or the specified upload id is not valid for +// The upload could not be found, or the specified upload ID is not valid for // this repository. // // * InvalidLayerException @@ -376,6 +385,9 @@ func (c *ECR) CompleteLayerUploadRequest(input *CompleteLayerUploadInput) (req * // * EmptyUploadException // The specified layer upload does not contain any layer parts. // +// * KmsException +// The operation failed due to a KMS exception. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/CompleteLayerUpload func (c *ECR) CompleteLayerUpload(input *CompleteLayerUploadInput) (*CompleteLayerUploadOutput, error) { req, out := c.CompleteLayerUploadRequest(input) @@ -442,9 +454,7 @@ func (c *ECR) CreateRepositoryRequest(input *CreateRepositoryInput) (req *reques // CreateRepository API operation for Amazon EC2 Container Registry. // -// Creates an Amazon Elastic Container Registry (Amazon ECR) repository, where -// users can push and pull Docker images. For more information, see Amazon ECR -// Repositories (https://docs.aws.amazon.com/AmazonECR/latest/userguide/Repositories.html) +// Creates a repository. For more information, see Amazon ECR Repositories (https://docs.aws.amazon.com/AmazonECR/latest/userguide/Repositories.html) // in the Amazon Elastic Container Registry User Guide. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions @@ -476,10 +486,12 @@ func (c *ECR) CreateRepositoryRequest(input *CreateRepositoryInput) (req *reques // // * LimitExceededException // The operation did not succeed because it would have exceeded a service limit -// for your account. For more information, see Amazon ECR Default Service Limits -// (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service_limits.html) +// for your account. For more information, see Amazon ECR Service Quotas (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service-quotas.html) // in the Amazon Elastic Container Registry User Guide. // +// * KmsException +// The operation failed due to a KMS exception. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/CreateRepository func (c *ECR) CreateRepository(input *CreateRepositoryInput) (*CreateRepositoryOutput, error) { req, out := c.CreateRepositoryRequest(input) @@ -546,7 +558,7 @@ func (c *ECR) DeleteLifecyclePolicyRequest(input *DeleteLifecyclePolicyInput) (r // DeleteLifecyclePolicy API operation for Amazon EC2 Container Registry. // -// Deletes the specified lifecycle policy. +// Deletes the lifecycle policy associated with the specified repository. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -636,8 +648,9 @@ func (c *ECR) DeleteRepositoryRequest(input *DeleteRepositoryInput) (req *reques // DeleteRepository API operation for Amazon EC2 Container Registry. // -// Deletes an existing image repository. If a repository contains images, you -// must use the force option to delete it. +// Deletes a repository. If the repository contains images, you must either +// delete all images in the repository or use the force option to delete the +// repository. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -662,6 +675,9 @@ func (c *ECR) DeleteRepositoryRequest(input *DeleteRepositoryInput) (req *reques // The specified repository contains images. To delete a repository that contains // images, you must force the deletion with the force parameter. // +// * KmsException +// The operation failed due to a KMS exception. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteRepository func (c *ECR) DeleteRepository(input *DeleteRepositoryInput) (*DeleteRepositoryOutput, error) { req, out := c.DeleteRepositoryRequest(input) @@ -728,7 +744,7 @@ func (c *ECR) DeleteRepositoryPolicyRequest(input *DeleteRepositoryPolicyInput) // DeleteRepositoryPolicy API operation for Amazon EC2 Container Registry. // -// Deletes the repository policy from a specified repository. +// Deletes the repository policy associated with the specified repository. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -825,7 +841,7 @@ func (c *ECR) DescribeImageScanFindingsRequest(input *DescribeImageScanFindingsI // DescribeImageScanFindings API operation for Amazon EC2 Container Registry. // -// Describes the image scan findings for the specified image. +// Returns the scan findings for the specified image. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -977,8 +993,7 @@ func (c *ECR) DescribeImagesRequest(input *DescribeImagesInput) (req *request.Re // DescribeImages API operation for Amazon EC2 Container Registry. // -// Returns metadata about the images in a repository, including image size, -// image tags, and creation date. +// Returns metadata about the images in a repository. // // Beginning with Docker version 1.9, the Docker client compresses image layers // before pushing them to a V2 Docker registry. The output of the docker images @@ -1270,14 +1285,16 @@ func (c *ECR) GetAuthorizationTokenRequest(input *GetAuthorizationTokenInput) (r // GetAuthorizationToken API operation for Amazon EC2 Container Registry. // -// Retrieves a token that is valid for a specified registry for 12 hours. This -// command allows you to use the docker CLI to push and pull images with Amazon -// ECR. If you do not specify a registry, the default registry is assumed. +// Retrieves an authorization token. An authorization token represents your +// IAM authentication credentials and can be used to access any Amazon ECR registry +// that your IAM principal has access to. The authorization token is valid for +// 12 hours. // -// The authorizationToken returned for each registry specified is a base64 encoded -// string that can be decoded and used in a docker login command to authenticate -// to a registry. The AWS CLI offers an aws ecr get-login command that simplifies -// the login process. +// The authorizationToken returned is a base64 encoded string that can be decoded +// and used in a docker login command to authenticate to a registry. The AWS +// CLI offers a get-login-password command that simplifies the login process. +// For more information, see Registry Authentication (https://docs.aws.amazon.com/AmazonECR/latest/userguide/Registries.html#registry_auth) +// in the Amazon Elastic Container Registry User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1363,9 +1380,12 @@ func (c *ECR) GetDownloadUrlForLayerRequest(input *GetDownloadUrlForLayerInput) // Retrieves the pre-signed Amazon S3 download URL corresponding to an image // layer. You can only get URLs for image layers that are referenced in an image. // -// This operation is used by the Amazon ECR proxy, and it is not intended for -// general use by customers for pulling and pushing images. In most cases, you -// should use the docker CLI to pull, tag, and push images. +// When an image is pulled, the GetDownloadUrlForLayer API is called once per +// image layer that is not already cached. +// +// This operation is used by the Amazon ECR proxy and is not generally used +// by customers for pulling and pushing images. In most cases, you should use +// the docker CLI to pull, tag, and push images. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1460,7 +1480,7 @@ func (c *ECR) GetLifecyclePolicyRequest(input *GetLifecyclePolicyInput) (req *re // GetLifecyclePolicy API operation for Amazon EC2 Container Registry. // -// Retrieves the specified lifecycle policy. +// Retrieves the lifecycle policy for the specified repository. // // Returns awserr.Error for service API and SDK errors.
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1556,7 +1576,8 @@ func (c *ECR) GetLifecyclePolicyPreviewRequest(input *GetLifecyclePolicyPreviewI // GetLifecyclePolicyPreview API operation for Amazon EC2 Container Registry. // -// Retrieves the results of the specified lifecycle policy preview request. +// Retrieves the results of the lifecycle policy preview request for the specified +// repository. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1698,7 +1719,7 @@ func (c *ECR) GetRepositoryPolicyRequest(input *GetRepositoryPolicyInput) (req * // GetRepositoryPolicy API operation for Amazon EC2 Container Registry. // -// Retrieves the repository policy for a specified repository. +// Retrieves the repository policy for the specified repository. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1789,11 +1810,15 @@ func (c *ECR) InitiateLayerUploadRequest(input *InitiateLayerUploadInput) (req * // InitiateLayerUpload API operation for Amazon EC2 Container Registry. // -// Notify Amazon ECR that you intend to upload an image layer. +// Notifies Amazon ECR that you intend to upload an image layer. +// +// When an image is pushed, the InitiateLayerUpload API is called once per image +// layer that has not already been uploaded. Whether or not an image layer has +// been uploaded is determined by the BatchCheckLayerAvailability API action. // -// This operation is used by the Amazon ECR proxy, and it is not intended for -// general use by customers for pulling and pushing images. In most cases, you -// should use the docker CLI to pull, tag, and push images. +// This operation is used by the Amazon ECR proxy and is not generally used +// by customers for pulling and pushing images. In most cases, you should use +// the docker CLI to pull, tag, and push images. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1814,6 +1839,9 @@ func (c *ECR) InitiateLayerUploadRequest(input *InitiateLayerUploadInput) (req * // The specified repository could not be found. Check the spelling of the specified // repository and ensure that you are performing operations on the correct registry. // +// * KmsException +// The operation failed due to a KMS exception. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/InitiateLayerUpload func (c *ECR) InitiateLayerUpload(input *InitiateLayerUploadInput) (*InitiateLayerUploadOutput, error) { req, out := c.InitiateLayerUploadRequest(input) @@ -1886,13 +1914,14 @@ func (c *ECR) ListImagesRequest(input *ListImagesInput) (req *request.Request, o // ListImages API operation for Amazon EC2 Container Registry. // -// Lists all the image IDs for a given repository. +// Lists all the image IDs for the specified repository. // -// You can filter images based on whether or not they are tagged by setting -// the tagStatus parameter to TAGGED or UNTAGGED. For example, you can filter -// your results to return only UNTAGGED images and then pipe that result to -// a BatchDeleteImage operation to delete them. Or, you can filter your results -// to return only TAGGED images to list all of the tags in your repository. 
+// You can filter images based on whether or not they are tagged by using the +// tagStatus filter and specifying either TAGGED, UNTAGGED or ANY. For example, +// you can filter your results to return only UNTAGGED images and then pipe +// that result to a BatchDeleteImage operation to delete them. Or, you can filter +// your results to return only TAGGED images to list all of the tags in your +// repository. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2120,9 +2149,13 @@ func (c *ECR) PutImageRequest(input *PutImageInput) (req *request.Request, outpu // // Creates or updates the image manifest and tags associated with an image. // -// This operation is used by the Amazon ECR proxy, and it is not intended for -// general use by customers for pulling and pushing images. In most cases, you -// should use the docker CLI to pull, tag, and push images. +// When an image is pushed and all new image layers have been uploaded, the +// PutImage API is called once to create or update the image manifest and the +// tags associated with the image. +// +// This operation is used by the Amazon ECR proxy and is not generally used +// by customers for pulling and pushing images. In most cases, you should use +// the docker CLI to pull, tag, and push images. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2151,16 +2184,25 @@ func (c *ECR) PutImageRequest(input *PutImageInput) (req *request.Request, outpu // The specified layers could not be found, or the specified layer is not valid // for this repository. // +// * ReferencedImagesNotFoundException +// The manifest list is referencing an image that does not exist. +// // * LimitExceededException // The operation did not succeed because it would have exceeded a service limit -// for your account. For more information, see Amazon ECR Default Service Limits -// (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service_limits.html) +// for your account. For more information, see Amazon ECR Service Quotas (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service-quotas.html) // in the Amazon Elastic Container Registry User Guide. // // * ImageTagAlreadyExistsException // The specified image is tagged with a tag that already exists. The repository // is configured for tag immutability. // +// * ImageDigestDoesNotMatchException +// The specified image digest does not match the digest that Amazon ECR calculated +// for the image. +// +// * KmsException +// The operation failed due to a KMS exception. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutImage func (c *ECR) PutImage(input *PutImageInput) (*PutImageOutput, error) { req, out := c.PutImageRequest(input) @@ -2227,7 +2269,7 @@ func (c *ECR) PutImageScanningConfigurationRequest(input *PutImageScanningConfig // PutImageScanningConfiguration API operation for Amazon EC2 Container Registry. // -// Updates the image scanning configuration for a repository. +// Updates the image scanning configuration for the specified repository. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2314,10 +2356,8 @@ func (c *ECR) PutImageTagMutabilityRequest(input *PutImageTagMutabilityInput) (r // PutImageTagMutability API operation for Amazon EC2 Container Registry. // -// Updates the image tag mutability settings for a repository. When a repository -// is configured with tag immutability, all image tags within the repository -// will be prevented them from being overwritten. For more information, see -// Image Tag Mutability (https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-tag-mutability.html) +// Updates the image tag mutability settings for the specified repository. For +// more information, see Image Tag Mutability (https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-tag-mutability.html) // in the Amazon Elastic Container Registry User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2405,8 +2445,8 @@ func (c *ECR) PutLifecyclePolicyRequest(input *PutLifecyclePolicyInput) (req *re // PutLifecyclePolicy API operation for Amazon EC2 Container Registry. // -// Creates or updates a lifecycle policy. For information about lifecycle policy -// syntax, see Lifecycle Policy Template (https://docs.aws.amazon.com/AmazonECR/latest/userguide/LifecyclePolicies.html). +// Creates or updates the lifecycle policy for the specified repository. For +// more information, see Lifecycle Policy Template (https://docs.aws.amazon.com/AmazonECR/latest/userguide/LifecyclePolicies.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2493,8 +2533,8 @@ func (c *ECR) SetRepositoryPolicyRequest(input *SetRepositoryPolicyInput) (req * // SetRepositoryPolicy API operation for Amazon EC2 Container Registry. // -// Applies a repository policy on a specified repository to control access permissions. -// For more information, see Amazon ECR Repository Policies (https://docs.aws.amazon.com/AmazonECR/latest/userguide/RepositoryPolicies.html) +// Applies a repository policy to the specified repository to control access +// permissions. For more information, see Amazon ECR Repository Policies (https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-policies.html) // in the Amazon Elastic Container Registry User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2602,6 +2642,14 @@ func (c *ECR) StartImageScanRequest(input *StartImageScanInput) (req *request.Re // The specified parameter is invalid. Review the available parameters for the // API request. // +// * UnsupportedImageTypeException +// The image is of a type that cannot be scanned. +// +// * LimitExceededException +// The operation did not succeed because it would have exceeded a service limit +// for your account. For more information, see Amazon ECR Service Quotas (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service-quotas.html) +// in the Amazon Elastic Container Registry User Guide. +// // * RepositoryNotFoundException // The specified repository could not be found. Check the spelling of the specified // repository and ensure that you are performing operations on the correct registry. @@ -2675,8 +2723,9 @@ func (c *ECR) StartLifecyclePolicyPreviewRequest(input *StartLifecyclePolicyPrev // StartLifecyclePolicyPreview API operation for Amazon EC2 Container Registry. 
// -// Starts a preview of the specified lifecycle policy. This allows you to see -// the results before creating the lifecycle policy. +// Starts a preview of a lifecycle policy for the specified repository. This +// allows you to see the results before associating the lifecycle policy with +// the repository. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2701,8 +2750,8 @@ func (c *ECR) StartLifecyclePolicyPreviewRequest(input *StartLifecyclePolicyPrev // The lifecycle policy could not be found, and no policy is set to the repository. // // * LifecyclePolicyPreviewInProgressException -// The previous lifecycle policy preview request has not completed. Please try -// again later. +// The previous lifecycle policy preview request has not completed. Wait and +// try again. // // See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/StartLifecyclePolicyPreview func (c *ECR) StartLifecyclePolicyPreview(input *StartLifecyclePolicyPreviewInput) (*StartLifecyclePolicyPreviewOutput, error) { req, out := c.StartLifecyclePolicyPreviewRequest(input) @@ -2967,9 +3016,13 @@ func (c *ECR) UploadLayerPartRequest(input *UploadLayerPartInput) (req *request. // // Uploads an image layer part to Amazon ECR. // -// This operation is used by the Amazon ECR proxy, and it is not intended for -// general use by customers for pulling and pushing images. In most cases, you -// should use the docker CLI to pull, tag, and push images. +// When an image is pushed, each new image layer is uploaded in parts. The maximum +// size of each image layer part is 20971520 bytes (about 20 MB). The +// UploadLayerPart API is called once for each new image layer part. +// +// This operation is used by the Amazon ECR proxy and is not generally used +// by customers for pulling and pushing images. In most cases, you should use +// the docker CLI to pull, tag, and push images. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2995,15 +3048,17 @@ func (c *ECR) UploadLayerPartRequest(input *UploadLayerPartInput) (req *request. // repository and ensure that you are performing operations on the correct registry. // // * UploadNotFoundException -// The upload could not be found, or the specified upload id is not valid for +// The upload could not be found, or the specified upload ID is not valid for // this repository. // // * LimitExceededException // The operation did not succeed because it would have exceeded a service limit -// for your account. For more information, see Amazon ECR Default Service Limits -// (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service_limits.html) +// for your account. For more information, see Amazon ECR Service Quotas (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service-quotas.html) // in the Amazon Elastic Container Registry User Guide. // +// * KmsException +// The operation failed due to a KMS exception. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/UploadLayerPart func (c *ECR) UploadLayerPart(input *UploadLayerPartInput) (*UploadLayerPartOutput, error) { req, out := c.UploadLayerPartRequest(input) @@ -3584,9 +3639,12 @@ func (s *CompleteLayerUploadOutput) SetUploadId(v string) *CompleteLayerUploadOu type CreateRepositoryInput struct { _ struct{} `type:"structure"` - // The image scanning configuration for the repository.
This setting determines - // whether images are scanned for known vulnerabilities after being pushed to - // the repository. + // The encryption configuration for the repository. This determines how the + // contents of your repository are encrypted at rest. + EncryptionConfiguration *EncryptionConfiguration `locationName:"encryptionConfiguration" type:"structure"` + + // The image scanning configuration for the repository. This determines whether + // images are scanned for known vulnerabilities after being pushed to the repository. ImageScanningConfiguration *ImageScanningConfiguration `locationName:"imageScanningConfiguration" type:"structure"` // The tag mutability setting for the repository. If this parameter is omitted, @@ -3628,6 +3686,11 @@ func (s *CreateRepositoryInput) Validate() error { if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) } + if s.EncryptionConfiguration != nil { + if err := s.EncryptionConfiguration.Validate(); err != nil { + invalidParams.AddNested("EncryptionConfiguration", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -3635,6 +3698,12 @@ func (s *CreateRepositoryInput) Validate() error { return nil } +// SetEncryptionConfiguration sets the EncryptionConfiguration field's value. +func (s *CreateRepositoryInput) SetEncryptionConfiguration(v *EncryptionConfiguration) *CreateRepositoryInput { + s.EncryptionConfiguration = v + return s +} + // SetImageScanningConfiguration sets the ImageScanningConfiguration field's value. func (s *CreateRepositoryInput) SetImageScanningConfiguration(v *ImageScanningConfiguration) *CreateRepositoryInput { s.ImageScanningConfiguration = v @@ -4430,8 +4499,8 @@ func (s *DescribeRepositoriesOutput) SetRepositories(v []*Repository) *DescribeR // The specified layer upload does not contain any layer parts. type EmptyUploadException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The error message associated with the exception. Message_ *string `locationName:"message" type:"string"` @@ -4449,17 +4518,17 @@ func (s EmptyUploadException) GoString() string { func newErrorEmptyUploadException(v protocol.ResponseMetadata) error { return &EmptyUploadException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s EmptyUploadException) Code() string { +func (s *EmptyUploadException) Code() string { return "EmptyUploadException" } // Message returns the exception's message. -func (s EmptyUploadException) Message() string { +func (s *EmptyUploadException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4467,29 +4536,110 @@ func (s EmptyUploadException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s EmptyUploadException) OrigErr() error { +func (s *EmptyUploadException) OrigErr() error { return nil } -func (s EmptyUploadException) Error() string { +func (s *EmptyUploadException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s EmptyUploadException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *EmptyUploadException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s EmptyUploadException) RequestID() string { - return s.respMetadata.RequestID +func (s *EmptyUploadException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The encryption configuration for the repository. This determines how the +// contents of your repository are encrypted at rest. +// +// By default, when no encryption configuration is set or the AES256 encryption +// type is used, Amazon ECR uses server-side encryption with Amazon S3-managed +// encryption keys, which encrypts your data at rest using an AES-256 encryption +// algorithm. This does not require any action on your part. +// +// For more control over the encryption of the contents of your repository, +// you can use server-side encryption with customer master keys (CMKs) stored +// in AWS Key Management Service (AWS KMS) to encrypt your images. For more +// information, see Amazon ECR encryption at rest (https://docs.aws.amazon.com/AmazonECR/latest/userguide/encryption-at-rest.html) +// in the Amazon Elastic Container Registry User Guide. +type EncryptionConfiguration struct { + _ struct{} `type:"structure"` + + // The encryption type to use. + // + // If you use the KMS encryption type, the contents of the repository will be + // encrypted using server-side encryption with customer master keys (CMKs) stored + // in AWS KMS. When you use AWS KMS to encrypt your data, you can either use + // the default AWS managed CMK for Amazon ECR, or specify your own CMK that + // you have already created. For more information, see Protecting Data Using Server-Side + // Encryption with CMKs Stored in AWS Key Management Service (SSE-KMS) (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // If you use the AES256 encryption type, Amazon ECR uses server-side encryption + // with Amazon S3-managed encryption keys, which encrypts the images in the repository + // using an AES-256 encryption algorithm. For more information, see Protecting + // Data Using Server-Side Encryption with Amazon S3-Managed Encryption Keys + // (SSE-S3) (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // EncryptionType is a required field + EncryptionType *string `locationName:"encryptionType" type:"string" required:"true" enum:"EncryptionType"` + + // If you use the KMS encryption type, specify the CMK to use for encryption. + // The alias, key ID, or full ARN of the CMK can be specified. The key must + // exist in the same Region as the repository. If no key is specified, the default + // AWS managed CMK for Amazon ECR will be used. + KmsKey *string `locationName:"kmsKey" min:"1" type:"string"` +} + +// String returns the string representation +func (s EncryptionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EncryptionConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid.
+func (s *EncryptionConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EncryptionConfiguration"} + if s.EncryptionType == nil { + invalidParams.Add(request.NewErrParamRequired("EncryptionType")) + } + if s.KmsKey != nil && len(*s.KmsKey) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KmsKey", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEncryptionType sets the EncryptionType field's value. +func (s *EncryptionConfiguration) SetEncryptionType(v string) *EncryptionConfiguration { + s.EncryptionType = &v + return s +} + +// SetKmsKey sets the KmsKey field's value. +func (s *EncryptionConfiguration) SetKmsKey(v string) *EncryptionConfiguration { + s.KmsKey = &v + return s } type GetAuthorizationTokenInput struct { _ struct{} `type:"structure"` // A list of AWS account IDs that are associated with the registries for which - // to get authorization tokens. If you do not specify a registry, the default + // to get AuthorizationData objects. If you do not specify a registry, the default // registry is assumed. RegistryIds []*string `locationName:"registryIds" min:"1" type:"list"` } @@ -5043,6 +5193,9 @@ type Image struct { // The image manifest associated with the image. ImageManifest *string `locationName:"imageManifest" min:"1" type:"string"` + // The manifest media type of the image. + ImageManifestMediaType *string `locationName:"imageManifestMediaType" type:"string"` + // The AWS account ID associated with the registry containing the image. RegistryId *string `locationName:"registryId" type:"string"` @@ -5072,6 +5225,12 @@ func (s *Image) SetImageManifest(v string) *Image { return s } +// SetImageManifestMediaType sets the ImageManifestMediaType field's value. +func (s *Image) SetImageManifestMediaType(v string) *Image { + s.ImageManifestMediaType = &v + return s +} + // SetRegistryId sets the RegistryId field's value. func (s *Image) SetRegistryId(v string) *Image { s.RegistryId = &v @@ -5087,8 +5246,8 @@ func (s *Image) SetRepositoryName(v string) *Image { // The specified image has already been pushed, and there were no changes to // the manifest or image tag after the last push. type ImageAlreadyExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The error message associated with the exception. Message_ *string `locationName:"message" type:"string"` @@ -5106,17 +5265,17 @@ func (s ImageAlreadyExistsException) GoString() string { func newErrorImageAlreadyExistsException(v protocol.ResponseMetadata) error { return &ImageAlreadyExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ImageAlreadyExistsException) Code() string { +func (s *ImageAlreadyExistsException) Code() string { return "ImageAlreadyExistsException" } // Message returns the exception's message. -func (s ImageAlreadyExistsException) Message() string { +func (s *ImageAlreadyExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5124,31 +5283,37 @@ func (s ImageAlreadyExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
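// A minimal sketch (not part of the generated SDK) using the
// EncryptionConfiguration type defined above: create a repository whose
// contents are encrypted with a customer managed CMK. The repository name and
// key alias are illustrative assumptions, and ecr.EncryptionTypeKms is assumed
// to be the "KMS" value of the EncryptionType enum generated with this change.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecr"
)

func main() {
	svc := ecr.New(session.Must(session.NewSession()))
	out, err := svc.CreateRepository(&ecr.CreateRepositoryInput{
		RepositoryName: aws.String("my-encrypted-repo"),
		EncryptionConfiguration: &ecr.EncryptionConfiguration{
			EncryptionType: aws.String(ecr.EncryptionTypeKms),
			KmsKey:         aws.String("alias/my-ecr-key"), // alias, key ID, or full ARN
		},
	})
	if err != nil {
		fmt.Println(err)
		return
	}
	// CreateRepositoryInput.Validate (shown in this diff) checks the nested
	// EncryptionConfiguration before the request is sent.
	fmt.Println(aws.StringValue(out.Repository.RepositoryUri))
}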
-func (s ImageAlreadyExistsException) OrigErr() error { +func (s *ImageAlreadyExistsException) OrigErr() error { return nil } -func (s ImageAlreadyExistsException) Error() string { +func (s *ImageAlreadyExistsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ImageAlreadyExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ImageAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ImageAlreadyExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *ImageAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID } // An object that describes an image returned by a DescribeImages operation. type ImageDetail struct { _ struct{} `type:"structure"` + // The artifact media type of the image. + ArtifactMediaType *string `locationName:"artifactMediaType" type:"string"` + // The sha256 digest of the image manifest. ImageDigest *string `locationName:"imageDigest" type:"string"` + // The media type of the image manifest. + ImageManifestMediaType *string `locationName:"imageManifestMediaType" type:"string"` + // The date and time, expressed in standard JavaScript date format, at which // the current image was pushed to the repository. ImagePushedAt *time.Time `locationName:"imagePushedAt" type:"timestamp"` @@ -5161,6 +5326,9 @@ type ImageDetail struct { // The size, in bytes, of the image in the repository. // + // If the image is a manifest list, this will be the max size of all manifests + // in the list. + // // Beginning with Docker version 1.9, the Docker client compresses image layers // before pushing them to a V2 Docker registry. The output of the docker images // command shows the uncompressed image size, so it may return a larger image @@ -5187,12 +5355,24 @@ func (s ImageDetail) GoString() string { return s.String() } +// SetArtifactMediaType sets the ArtifactMediaType field's value. +func (s *ImageDetail) SetArtifactMediaType(v string) *ImageDetail { + s.ArtifactMediaType = &v + return s +} + // SetImageDigest sets the ImageDigest field's value. func (s *ImageDetail) SetImageDigest(v string) *ImageDetail { s.ImageDigest = &v return s } +// SetImageManifestMediaType sets the ImageManifestMediaType field's value. +func (s *ImageDetail) SetImageManifestMediaType(v string) *ImageDetail { + s.ImageManifestMediaType = &v + return s +} + // SetImagePushedAt sets the ImagePushedAt field's value. func (s *ImageDetail) SetImagePushedAt(v time.Time) *ImageDetail { s.ImagePushedAt = &v @@ -5235,6 +5415,63 @@ func (s *ImageDetail) SetRepositoryName(v string) *ImageDetail { return s } +// The specified image digest does not match the digest that Amazon ECR calculated +// for the image. 
+type ImageDigestDoesNotMatchException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ImageDigestDoesNotMatchException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImageDigestDoesNotMatchException) GoString() string { + return s.String() +} + +func newErrorImageDigestDoesNotMatchException(v protocol.ResponseMetadata) error { + return &ImageDigestDoesNotMatchException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ImageDigestDoesNotMatchException) Code() string { + return "ImageDigestDoesNotMatchException" +} + +// Message returns the exception's message. +func (s *ImageDigestDoesNotMatchException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ImageDigestDoesNotMatchException) OrigErr() error { + return nil +} + +func (s *ImageDigestDoesNotMatchException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ImageDigestDoesNotMatchException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ImageDigestDoesNotMatchException) RequestID() string { + return s.RespMetadata.RequestID +} + // An object representing an Amazon ECR image failure. type ImageFailure struct { _ struct{} `type:"structure"` @@ -5325,8 +5562,8 @@ func (s *ImageIdentifier) SetImageTag(v string) *ImageIdentifier { // The image requested does not exist in the specified repository. type ImageNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5343,17 +5580,17 @@ func (s ImageNotFoundException) GoString() string { func newErrorImageNotFoundException(v protocol.ResponseMetadata) error { return &ImageNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ImageNotFoundException) Code() string { +func (s *ImageNotFoundException) Code() string { return "ImageNotFoundException" } // Message returns the exception's message. -func (s ImageNotFoundException) Message() string { +func (s *ImageNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5361,22 +5598,22 @@ func (s ImageNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ImageNotFoundException) OrigErr() error { +func (s *ImageNotFoundException) OrigErr() error { return nil } -func (s ImageNotFoundException) Error() string { +func (s *ImageNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ImageNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ImageNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
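// A minimal sketch (not part of the generated SDK) that reads the new
// ImageDetail fields added in this diff (ArtifactMediaType and
// ImageManifestMediaType) from a DescribeImages call. The repository name is
// an illustrative assumption.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecr"
)

func main() {
	svc := ecr.New(session.Must(session.NewSession()))
	out, err := svc.DescribeImages(&ecr.DescribeImagesInput{
		RepositoryName: aws.String("my-repo"),
	})
	if err != nil {
		fmt.Println(err)
		return
	}
	for _, d := range out.ImageDetails {
		// For a manifest list, ImageSizeInBytes is the max size of all
		// manifests in the list, per the field docs above.
		fmt.Println(aws.StringValue(d.ImageDigest),
			aws.StringValue(d.ImageManifestMediaType),
			aws.StringValue(d.ArtifactMediaType),
			aws.Int64Value(d.ImageSizeInBytes))
	}
}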
-func (s ImageNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ImageNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Contains information about an image scan finding. @@ -5596,8 +5833,8 @@ func (s *ImageScanningConfiguration) SetScanOnPush(v bool) *ImageScanningConfigu // The specified image is tagged with a tag that already exists. The repository // is configured for tag immutability. type ImageTagAlreadyExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5614,17 +5851,17 @@ func (s ImageTagAlreadyExistsException) GoString() string { func newErrorImageTagAlreadyExistsException(v protocol.ResponseMetadata) error { return &ImageTagAlreadyExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ImageTagAlreadyExistsException) Code() string { +func (s *ImageTagAlreadyExistsException) Code() string { return "ImageTagAlreadyExistsException" } // Message returns the exception's message. -func (s ImageTagAlreadyExistsException) Message() string { +func (s *ImageTagAlreadyExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5632,22 +5869,22 @@ func (s ImageTagAlreadyExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ImageTagAlreadyExistsException) OrigErr() error { +func (s *ImageTagAlreadyExistsException) OrigErr() error { return nil } -func (s ImageTagAlreadyExistsException) Error() string { +func (s *ImageTagAlreadyExistsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ImageTagAlreadyExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ImageTagAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ImageTagAlreadyExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *ImageTagAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID } type InitiateLayerUploadInput struct { @@ -5738,8 +5975,8 @@ func (s *InitiateLayerUploadOutput) SetUploadId(v string) *InitiateLayerUploadOu // The layer digest calculation performed by Amazon ECR upon receipt of the // image layer does not match the digest specified. type InvalidLayerException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The error message associated with the exception. Message_ *string `locationName:"message" type:"string"` @@ -5757,17 +5994,17 @@ func (s InvalidLayerException) GoString() string { func newErrorInvalidLayerException(v protocol.ResponseMetadata) error { return &InvalidLayerException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidLayerException) Code() string { +func (s *InvalidLayerException) Code() string { return "InvalidLayerException" } // Message returns the exception's message. 
-func (s InvalidLayerException) Message() string { +func (s *InvalidLayerException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5775,29 +6012,29 @@ func (s InvalidLayerException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidLayerException) OrigErr() error { +func (s *InvalidLayerException) OrigErr() error { return nil } -func (s InvalidLayerException) Error() string { +func (s *InvalidLayerException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidLayerException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidLayerException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidLayerException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidLayerException) RequestID() string { + return s.RespMetadata.RequestID } // The layer part size is not valid, or the first byte specified is not consecutive // to the last byte of a previous layer part upload. type InvalidLayerPartException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The last valid byte received from the layer part upload that is associated // with the exception. @@ -5828,17 +6065,17 @@ func (s InvalidLayerPartException) GoString() string { func newErrorInvalidLayerPartException(v protocol.ResponseMetadata) error { return &InvalidLayerPartException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidLayerPartException) Code() string { +func (s *InvalidLayerPartException) Code() string { return "InvalidLayerPartException" } // Message returns the exception's message. -func (s InvalidLayerPartException) Message() string { +func (s *InvalidLayerPartException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5846,29 +6083,29 @@ func (s InvalidLayerPartException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidLayerPartException) OrigErr() error { +func (s *InvalidLayerPartException) OrigErr() error { return nil } -func (s InvalidLayerPartException) Error() string { +func (s *InvalidLayerPartException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidLayerPartException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidLayerPartException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidLayerPartException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidLayerPartException) RequestID() string { + return s.RespMetadata.RequestID } // The specified parameter is invalid. Review the available parameters for the // API request. type InvalidParameterException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The error message associated with the exception. 
Message_ *string `locationName:"message" type:"string"` @@ -5886,17 +6123,17 @@ func (s InvalidParameterException) GoString() string { func newErrorInvalidParameterException(v protocol.ResponseMetadata) error { return &InvalidParameterException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidParameterException) Code() string { +func (s *InvalidParameterException) Code() string { return "InvalidParameterException" } // Message returns the exception's message. -func (s InvalidParameterException) Message() string { +func (s *InvalidParameterException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5904,30 +6141,30 @@ func (s InvalidParameterException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidParameterException) OrigErr() error { +func (s *InvalidParameterException) OrigErr() error { return nil } -func (s InvalidParameterException) Error() string { +func (s *InvalidParameterException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidParameterException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidParameterException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidParameterException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidParameterException) RequestID() string { + return s.RespMetadata.RequestID } // An invalid parameter has been specified. Tag keys can have a maximum character // length of 128 characters, and tag values can have a maximum length of 256 // characters. type InvalidTagParameterException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5944,17 +6181,17 @@ func (s InvalidTagParameterException) GoString() string { func newErrorInvalidTagParameterException(v protocol.ResponseMetadata) error { return &InvalidTagParameterException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidTagParameterException) Code() string { +func (s *InvalidTagParameterException) Code() string { return "InvalidTagParameterException" } // Message returns the exception's message. -func (s InvalidTagParameterException) Message() string { +func (s *InvalidTagParameterException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5962,22 +6199,81 @@ func (s InvalidTagParameterException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidTagParameterException) OrigErr() error { +func (s *InvalidTagParameterException) OrigErr() error { return nil } -func (s InvalidTagParameterException) Error() string { +func (s *InvalidTagParameterException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidTagParameterException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidTagParameterException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s InvalidTagParameterException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidTagParameterException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The operation failed due to a KMS exception. +type KmsException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // The error code returned by AWS KMS. + KmsError *string `locationName:"kmsError" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s KmsException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KmsException) GoString() string { + return s.String() +} + +func newErrorKmsException(v protocol.ResponseMetadata) error { + return &KmsException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *KmsException) Code() string { + return "KmsException" +} + +// Message returns the exception's message. +func (s *KmsException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *KmsException) OrigErr() error { + return nil +} + +func (s *KmsException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *KmsException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *KmsException) RequestID() string { + return s.RespMetadata.RequestID } // An object representing an Amazon ECR image layer. @@ -6034,8 +6330,8 @@ func (s *Layer) SetMediaType(v string) *Layer { // The image layer already exists in the associated repository. type LayerAlreadyExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The error message associated with the exception. Message_ *string `locationName:"message" type:"string"` @@ -6053,17 +6349,17 @@ func (s LayerAlreadyExistsException) GoString() string { func newErrorLayerAlreadyExistsException(v protocol.ResponseMetadata) error { return &LayerAlreadyExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LayerAlreadyExistsException) Code() string { +func (s *LayerAlreadyExistsException) Code() string { return "LayerAlreadyExistsException" } // Message returns the exception's message. -func (s LayerAlreadyExistsException) Message() string { +func (s *LayerAlreadyExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6071,22 +6367,22 @@ func (s LayerAlreadyExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LayerAlreadyExistsException) OrigErr() error { +func (s *LayerAlreadyExistsException) OrigErr() error { return nil } -func (s LayerAlreadyExistsException) Error() string { +func (s *LayerAlreadyExistsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s LayerAlreadyExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LayerAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LayerAlreadyExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *LayerAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID } // An object representing an Amazon ECR image layer failure. @@ -6134,8 +6430,8 @@ func (s *LayerFailure) SetLayerDigest(v string) *LayerFailure { // The specified layer is not available because it is not associated with an // image. Unassociated image layers may be cleaned up at any time. type LayerInaccessibleException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The error message associated with the exception. Message_ *string `locationName:"message" type:"string"` @@ -6153,17 +6449,17 @@ func (s LayerInaccessibleException) GoString() string { func newErrorLayerInaccessibleException(v protocol.ResponseMetadata) error { return &LayerInaccessibleException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LayerInaccessibleException) Code() string { +func (s *LayerInaccessibleException) Code() string { return "LayerInaccessibleException" } // Message returns the exception's message. -func (s LayerInaccessibleException) Message() string { +func (s *LayerInaccessibleException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6171,28 +6467,28 @@ func (s LayerInaccessibleException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LayerInaccessibleException) OrigErr() error { +func (s *LayerInaccessibleException) OrigErr() error { return nil } -func (s LayerInaccessibleException) Error() string { +func (s *LayerInaccessibleException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LayerInaccessibleException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LayerInaccessibleException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LayerInaccessibleException) RequestID() string { - return s.respMetadata.RequestID +func (s *LayerInaccessibleException) RequestID() string { + return s.RespMetadata.RequestID } // Layer parts must be at least 5 MiB in size. type LayerPartTooSmallException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The error message associated with the exception. Message_ *string `locationName:"message" type:"string"` @@ -6210,17 +6506,17 @@ func (s LayerPartTooSmallException) GoString() string { func newErrorLayerPartTooSmallException(v protocol.ResponseMetadata) error { return &LayerPartTooSmallException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LayerPartTooSmallException) Code() string { +func (s *LayerPartTooSmallException) Code() string { return "LayerPartTooSmallException" } // Message returns the exception's message. 
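// A minimal sketch (not part of the generated SDK) of the low-level layer
// upload flow whose error types appear around here: initiate an upload, send
// the blob in parts of at most 20971520 bytes (parts other than the last must
// be at least 5 MiB, per LayerPartTooSmallException), then complete the upload
// with the layer digest. As the UploadLayerPart docs note, most users should
// let the docker CLI do this; parameter values here are illustrative.
package ecrexamples

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecr"
)

func uploadLayer(svc *ecr.ECR, repo string, blob []byte, digest string) error {
	init, err := svc.InitiateLayerUpload(&ecr.InitiateLayerUploadInput{
		RepositoryName: aws.String(repo),
	})
	if err != nil {
		return err
	}
	const maxPart = 20971520 // maximum layer part size in bytes
	for off := 0; off < len(blob); off += maxPart {
		end := off + maxPart
		if end > len(blob) {
			end = len(blob)
		}
		_, err = svc.UploadLayerPart(&ecr.UploadLayerPartInput{
			RepositoryName: aws.String(repo),
			UploadId:       init.UploadId,
			PartFirstByte:  aws.Int64(int64(off)),
			PartLastByte:   aws.Int64(int64(end - 1)),
			LayerPartBlob:  blob[off:end],
		})
		if err != nil {
			return err
		}
	}
	_, err = svc.CompleteLayerUpload(&ecr.CompleteLayerUploadInput{
		RepositoryName: aws.String(repo),
		UploadId:       init.UploadId,
		LayerDigests:   []*string{aws.String(digest)},
	})
	return err
}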
-func (s LayerPartTooSmallException) Message() string { +func (s *LayerPartTooSmallException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6228,29 +6524,29 @@ func (s LayerPartTooSmallException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LayerPartTooSmallException) OrigErr() error { +func (s *LayerPartTooSmallException) OrigErr() error { return nil } -func (s LayerPartTooSmallException) Error() string { +func (s *LayerPartTooSmallException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LayerPartTooSmallException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LayerPartTooSmallException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LayerPartTooSmallException) RequestID() string { - return s.respMetadata.RequestID +func (s *LayerPartTooSmallException) RequestID() string { + return s.RespMetadata.RequestID } // The specified layers could not be found, or the specified layer is not valid // for this repository. type LayersNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The error message associated with the exception. Message_ *string `locationName:"message" type:"string"` @@ -6268,17 +6564,17 @@ func (s LayersNotFoundException) GoString() string { func newErrorLayersNotFoundException(v protocol.ResponseMetadata) error { return &LayersNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LayersNotFoundException) Code() string { +func (s *LayersNotFoundException) Code() string { return "LayersNotFoundException" } // Message returns the exception's message. -func (s LayersNotFoundException) Message() string { +func (s *LayersNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6286,28 +6582,28 @@ func (s LayersNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LayersNotFoundException) OrigErr() error { +func (s *LayersNotFoundException) OrigErr() error { return nil } -func (s LayersNotFoundException) Error() string { +func (s *LayersNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LayersNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LayersNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LayersNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *LayersNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // The lifecycle policy could not be found, and no policy is set to the repository. 
type LifecyclePolicyNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6324,17 +6620,17 @@ func (s LifecyclePolicyNotFoundException) GoString() string { func newErrorLifecyclePolicyNotFoundException(v protocol.ResponseMetadata) error { return &LifecyclePolicyNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LifecyclePolicyNotFoundException) Code() string { +func (s *LifecyclePolicyNotFoundException) Code() string { return "LifecyclePolicyNotFoundException" } // Message returns the exception's message. -func (s LifecyclePolicyNotFoundException) Message() string { +func (s *LifecyclePolicyNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6342,22 +6638,22 @@ func (s LifecyclePolicyNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LifecyclePolicyNotFoundException) OrigErr() error { +func (s *LifecyclePolicyNotFoundException) OrigErr() error { return nil } -func (s LifecyclePolicyNotFoundException) Error() string { +func (s *LifecyclePolicyNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LifecyclePolicyNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LifecyclePolicyNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LifecyclePolicyNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *LifecyclePolicyNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // The filter for the lifecycle policy preview. @@ -6384,11 +6680,11 @@ func (s *LifecyclePolicyPreviewFilter) SetTagStatus(v string) *LifecyclePolicyPr return s } -// The previous lifecycle policy preview request has not completed. Please try -// again later. +// The previous lifecycle policy preview request has not completed. Wait and +// try again. type LifecyclePolicyPreviewInProgressException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6405,17 +6701,17 @@ func (s LifecyclePolicyPreviewInProgressException) GoString() string { func newErrorLifecyclePolicyPreviewInProgressException(v protocol.ResponseMetadata) error { return &LifecyclePolicyPreviewInProgressException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LifecyclePolicyPreviewInProgressException) Code() string { +func (s *LifecyclePolicyPreviewInProgressException) Code() string { return "LifecyclePolicyPreviewInProgressException" } // Message returns the exception's message. -func (s LifecyclePolicyPreviewInProgressException) Message() string { +func (s *LifecyclePolicyPreviewInProgressException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6423,28 +6719,28 @@ func (s LifecyclePolicyPreviewInProgressException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s LifecyclePolicyPreviewInProgressException) OrigErr() error { +func (s *LifecyclePolicyPreviewInProgressException) OrigErr() error { return nil } -func (s LifecyclePolicyPreviewInProgressException) Error() string { +func (s *LifecyclePolicyPreviewInProgressException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LifecyclePolicyPreviewInProgressException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LifecyclePolicyPreviewInProgressException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LifecyclePolicyPreviewInProgressException) RequestID() string { - return s.respMetadata.RequestID +func (s *LifecyclePolicyPreviewInProgressException) RequestID() string { + return s.RespMetadata.RequestID } // There is no dry run for this repository. type LifecyclePolicyPreviewNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6461,17 +6757,17 @@ func (s LifecyclePolicyPreviewNotFoundException) GoString() string { func newErrorLifecyclePolicyPreviewNotFoundException(v protocol.ResponseMetadata) error { return &LifecyclePolicyPreviewNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LifecyclePolicyPreviewNotFoundException) Code() string { +func (s *LifecyclePolicyPreviewNotFoundException) Code() string { return "LifecyclePolicyPreviewNotFoundException" } // Message returns the exception's message. -func (s LifecyclePolicyPreviewNotFoundException) Message() string { +func (s *LifecyclePolicyPreviewNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6479,22 +6775,22 @@ func (s LifecyclePolicyPreviewNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LifecyclePolicyPreviewNotFoundException) OrigErr() error { +func (s *LifecyclePolicyPreviewNotFoundException) OrigErr() error { return nil } -func (s LifecyclePolicyPreviewNotFoundException) Error() string { +func (s *LifecyclePolicyPreviewNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LifecyclePolicyPreviewNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LifecyclePolicyPreviewNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LifecyclePolicyPreviewNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *LifecyclePolicyPreviewNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // The result of the lifecycle policy preview. @@ -6607,12 +6903,11 @@ func (s *LifecyclePolicyRuleAction) SetType(v string) *LifecyclePolicyRuleAction } // The operation did not succeed because it would have exceeded a service limit -// for your account. For more information, see Amazon ECR Default Service Limits -// (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service_limits.html) +// for your account. 
For more information, see Amazon ECR Service Quotas (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service-quotas.html) // in the Amazon Elastic Container Registry User Guide. type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The error message associated with the exception. Message_ *string `locationName:"message" type:"string"` @@ -6630,17 +6925,17 @@ func (s LimitExceededException) GoString() string { func newErrorLimitExceededException(v protocol.ResponseMetadata) error { return &LimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LimitExceededException) Code() string { +func (s *LimitExceededException) Code() string { return "LimitExceededException" } // Message returns the exception's message. -func (s LimitExceededException) Message() string { +func (s *LimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6648,22 +6943,22 @@ func (s LimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededException) OrigErr() error { +func (s *LimitExceededException) OrigErr() error { return nil } -func (s LimitExceededException) Error() string { +func (s *LimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // An object representing a filter on a ListImages operation. @@ -6885,13 +7180,22 @@ func (s *ListTagsForResourceOutput) SetTags(v []*Tag) *ListTagsForResourceOutput type PutImageInput struct { _ struct{} `type:"structure"` + // The image digest of the image manifest corresponding to the image. + ImageDigest *string `locationName:"imageDigest" type:"string"` + // The image manifest corresponding to the image to be uploaded. // // ImageManifest is a required field ImageManifest *string `locationName:"imageManifest" min:"1" type:"string" required:"true"` + // The media type of the image manifest. If you push an image manifest that + // does not contain the mediaType field, you must specify the imageManifestMediaType + // in the request. + ImageManifestMediaType *string `locationName:"imageManifestMediaType" type:"string"` + // The tag to associate with the image. This parameter is required for images - // that use the Docker Image Manifest V2 Schema 2 or OCI formats. + // that use the Docker Image Manifest V2 Schema 2 or Open Container Initiative + // (OCI) formats. ImageTag *string `locationName:"imageTag" min:"1" type:"string"` // The AWS account ID associated with the registry that contains the repository @@ -6940,12 +7244,24 @@ func (s *PutImageInput) Validate() error { return nil } +// SetImageDigest sets the ImageDigest field's value. +func (s *PutImageInput) SetImageDigest(v string) *PutImageInput { + s.ImageDigest = &v + return s +} + // SetImageManifest sets the ImageManifest field's value. 
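// A minimal sketch (not part of the generated SDK) that exercises the new
// PutImageInput fields from this diff through their fluent setters. The
// manifest string, digest, tag, and media type are illustrative assumptions;
// per the field docs above, the media type is only required when the manifest
// itself lacks a mediaType field.
package ecrexamples

import "github.com/aws/aws-sdk-go/service/ecr"

func putManifest(svc *ecr.ECR, repo, manifest, digest string) error {
	input := (&ecr.PutImageInput{}).
		SetRepositoryName(repo).
		SetImageManifest(manifest).
		SetImageManifestMediaType("application/vnd.docker.distribution.manifest.v2+json").
		SetImageDigest(digest).
		SetImageTag("latest")
	_, err := svc.PutImage(input)
	return err
}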
func (s *PutImageInput) SetImageManifest(v string) *PutImageInput { s.ImageManifest = &v return s } +// SetImageManifestMediaType sets the ImageManifestMediaType field's value. +func (s *PutImageInput) SetImageManifestMediaType(v string) *PutImageInput { + s.ImageManifestMediaType = &v + return s +} + // SetImageTag sets the ImageTag field's value. func (s *PutImageInput) SetImageTag(v string) *PutImageInput { s.ImageTag = &v @@ -7315,6 +7631,62 @@ func (s *PutLifecyclePolicyOutput) SetRepositoryName(v string) *PutLifecyclePoli return s } +// The manifest list is referencing an image that does not exist. +type ReferencedImagesNotFoundException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ReferencedImagesNotFoundException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReferencedImagesNotFoundException) GoString() string { + return s.String() +} + +func newErrorReferencedImagesNotFoundException(v protocol.ResponseMetadata) error { + return &ReferencedImagesNotFoundException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ReferencedImagesNotFoundException) Code() string { + return "ReferencedImagesNotFoundException" +} + +// Message returns the exception's message. +func (s *ReferencedImagesNotFoundException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ReferencedImagesNotFoundException) OrigErr() error { + return nil +} + +func (s *ReferencedImagesNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ReferencedImagesNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ReferencedImagesNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + // An object representing a repository. type Repository struct { _ struct{} `type:"structure"` @@ -7322,6 +7694,10 @@ type Repository struct { // The date and time, in JavaScript date format, when the repository was created. CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"` + // The encryption configuration for the repository. This determines how the + // contents of your repository are encrypted at rest. + EncryptionConfiguration *EncryptionConfiguration `locationName:"encryptionConfiguration" type:"structure"` + // The image scanning configuration for a repository. ImageScanningConfiguration *ImageScanningConfiguration `locationName:"imageScanningConfiguration" type:"structure"` @@ -7340,8 +7716,8 @@ type Repository struct { // The name of the repository. RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` - // The URI for the repository. You can use this URI for Docker push or pull - // operations. + // The URI for the repository. You can use this URI for container image push + // and pull operations. RepositoryUri *string `locationName:"repositoryUri" type:"string"` } @@ -7361,6 +7737,12 @@ func (s *Repository) SetCreatedAt(v time.Time) *Repository { return s } +// SetEncryptionConfiguration sets the EncryptionConfiguration field's value. 
+func (s *Repository) SetEncryptionConfiguration(v *EncryptionConfiguration) *Repository { + s.EncryptionConfiguration = v + return s +} + // SetImageScanningConfiguration sets the ImageScanningConfiguration field's value. func (s *Repository) SetImageScanningConfiguration(v *ImageScanningConfiguration) *Repository { s.ImageScanningConfiguration = v @@ -7399,8 +7781,8 @@ func (s *Repository) SetRepositoryUri(v string) *Repository { // The specified repository already exists in the specified registry. type RepositoryAlreadyExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The error message associated with the exception. Message_ *string `locationName:"message" type:"string"` @@ -7418,17 +7800,17 @@ func (s RepositoryAlreadyExistsException) GoString() string { func newErrorRepositoryAlreadyExistsException(v protocol.ResponseMetadata) error { return &RepositoryAlreadyExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s RepositoryAlreadyExistsException) Code() string { +func (s *RepositoryAlreadyExistsException) Code() string { return "RepositoryAlreadyExistsException" } // Message returns the exception's message. -func (s RepositoryAlreadyExistsException) Message() string { +func (s *RepositoryAlreadyExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7436,29 +7818,29 @@ func (s RepositoryAlreadyExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s RepositoryAlreadyExistsException) OrigErr() error { +func (s *RepositoryAlreadyExistsException) OrigErr() error { return nil } -func (s RepositoryAlreadyExistsException) Error() string { +func (s *RepositoryAlreadyExistsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s RepositoryAlreadyExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *RepositoryAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s RepositoryAlreadyExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *RepositoryAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID } // The specified repository contains images. To delete a repository that contains // images, you must force the deletion with the force parameter. type RepositoryNotEmptyException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The error message associated with the exception. Message_ *string `locationName:"message" type:"string"` @@ -7476,17 +7858,17 @@ func (s RepositoryNotEmptyException) GoString() string { func newErrorRepositoryNotEmptyException(v protocol.ResponseMetadata) error { return &RepositoryNotEmptyException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s RepositoryNotEmptyException) Code() string { +func (s *RepositoryNotEmptyException) Code() string { return "RepositoryNotEmptyException" } // Message returns the exception's message. 
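// A minimal sketch (not part of the generated SDK) of handling the
// RepositoryNotEmptyException documented above: retry the delete with the
// force flag when the repository still contains images. Names are illustrative.
package ecrexamples

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/ecr"
)

func deleteRepository(svc *ecr.ECR, repo string) error {
	_, err := svc.DeleteRepository(&ecr.DeleteRepositoryInput{
		RepositoryName: aws.String(repo),
	})
	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == ecr.ErrCodeRepositoryNotEmptyException {
		// The repository still contains images; force the deletion.
		_, err = svc.DeleteRepository(&ecr.DeleteRepositoryInput{
			RepositoryName: aws.String(repo),
			Force:          aws.Bool(true),
		})
	}
	return err
}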
-func (s RepositoryNotEmptyException) Message() string { +func (s *RepositoryNotEmptyException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7494,29 +7876,29 @@ func (s RepositoryNotEmptyException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s RepositoryNotEmptyException) OrigErr() error { +func (s *RepositoryNotEmptyException) OrigErr() error { return nil } -func (s RepositoryNotEmptyException) Error() string { +func (s *RepositoryNotEmptyException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s RepositoryNotEmptyException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *RepositoryNotEmptyException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s RepositoryNotEmptyException) RequestID() string { - return s.respMetadata.RequestID +func (s *RepositoryNotEmptyException) RequestID() string { + return s.RespMetadata.RequestID } // The specified repository could not be found. Check the spelling of the specified // repository and ensure that you are performing operations on the correct registry. type RepositoryNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The error message associated with the exception. Message_ *string `locationName:"message" type:"string"` @@ -7534,17 +7916,17 @@ func (s RepositoryNotFoundException) GoString() string { func newErrorRepositoryNotFoundException(v protocol.ResponseMetadata) error { return &RepositoryNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s RepositoryNotFoundException) Code() string { +func (s *RepositoryNotFoundException) Code() string { return "RepositoryNotFoundException" } // Message returns the exception's message. -func (s RepositoryNotFoundException) Message() string { +func (s *RepositoryNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7552,29 +7934,29 @@ func (s RepositoryNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s RepositoryNotFoundException) OrigErr() error { +func (s *RepositoryNotFoundException) OrigErr() error { return nil } -func (s RepositoryNotFoundException) Error() string { +func (s *RepositoryNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s RepositoryNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *RepositoryNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s RepositoryNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *RepositoryNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // The specified repository and registry combination does not have an associated // repository policy. 
type RepositoryPolicyNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The error message associated with the exception. Message_ *string `locationName:"message" type:"string"` @@ -7592,17 +7974,17 @@ func (s RepositoryPolicyNotFoundException) GoString() string { func newErrorRepositoryPolicyNotFoundException(v protocol.ResponseMetadata) error { return &RepositoryPolicyNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s RepositoryPolicyNotFoundException) Code() string { +func (s *RepositoryPolicyNotFoundException) Code() string { return "RepositoryPolicyNotFoundException" } // Message returns the exception's message. -func (s RepositoryPolicyNotFoundException) Message() string { +func (s *RepositoryPolicyNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7610,29 +7992,29 @@ func (s RepositoryPolicyNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s RepositoryPolicyNotFoundException) OrigErr() error { +func (s *RepositoryPolicyNotFoundException) OrigErr() error { return nil } -func (s RepositoryPolicyNotFoundException) Error() string { +func (s *RepositoryPolicyNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s RepositoryPolicyNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *RepositoryPolicyNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s RepositoryPolicyNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *RepositoryPolicyNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // The specified image scan could not be found. Ensure that image scanning is // enabled on the repository and try again. type ScanNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7649,17 +8031,17 @@ func (s ScanNotFoundException) GoString() string { func newErrorScanNotFoundException(v protocol.ResponseMetadata) error { return &ScanNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ScanNotFoundException) Code() string { +func (s *ScanNotFoundException) Code() string { return "ScanNotFoundException" } // Message returns the exception's message. -func (s ScanNotFoundException) Message() string { +func (s *ScanNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7667,28 +8049,28 @@ func (s ScanNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ScanNotFoundException) OrigErr() error { +func (s *ScanNotFoundException) OrigErr() error { return nil } -func (s ScanNotFoundException) Error() string { +func (s *ScanNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s ScanNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ScanNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ScanNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ScanNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // These errors are usually caused by a server-side issue. type ServerException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The error message associated with the exception. Message_ *string `locationName:"message" type:"string"` @@ -7706,17 +8088,17 @@ func (s ServerException) GoString() string { func newErrorServerException(v protocol.ResponseMetadata) error { return &ServerException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ServerException) Code() string { +func (s *ServerException) Code() string { return "ServerException" } // Message returns the exception's message. -func (s ServerException) Message() string { +func (s *ServerException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7724,22 +8106,22 @@ func (s ServerException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ServerException) OrigErr() error { +func (s *ServerException) OrigErr() error { return nil } -func (s ServerException) Error() string { +func (s *ServerException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ServerException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ServerException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ServerException) RequestID() string { - return s.respMetadata.RequestID +func (s *ServerException) RequestID() string { + return s.RespMetadata.RequestID } type SetRepositoryPolicyInput struct { @@ -7751,7 +8133,7 @@ type SetRepositoryPolicyInput struct { Force *bool `locationName:"force" type:"boolean"` // The JSON repository policy text to apply to the repository. For more information, - // see Amazon ECR Repository Policy Examples (https://docs.aws.amazon.com/AmazonECR/latest/userguide/RepositoryPolicyExamples.html) + // see Amazon ECR Repository Policies (https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-policy-examples.html) // in the Amazon Elastic Container Registry User Guide. // // PolicyText is a required field @@ -8206,8 +8588,8 @@ func (s TagResourceOutput) GoString() string { // The list of tags on the repository is over the limit. The maximum number // of tags that can be applied to a repository is 50. type TooManyTagsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -8224,17 +8606,17 @@ func (s TooManyTagsException) GoString() string { func newErrorTooManyTagsException(v protocol.ResponseMetadata) error { return &TooManyTagsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s TooManyTagsException) Code() string { +func (s *TooManyTagsException) Code() string { return "TooManyTagsException" } // Message returns the exception's message. -func (s TooManyTagsException) Message() string { +func (s *TooManyTagsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8242,22 +8624,78 @@ func (s TooManyTagsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TooManyTagsException) OrigErr() error { +func (s *TooManyTagsException) OrigErr() error { return nil } -func (s TooManyTagsException) Error() string { +func (s *TooManyTagsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TooManyTagsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TooManyTagsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TooManyTagsException) RequestID() string { - return s.respMetadata.RequestID +func (s *TooManyTagsException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The image is of a type that cannot be scanned. +type UnsupportedImageTypeException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s UnsupportedImageTypeException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnsupportedImageTypeException) GoString() string { + return s.String() +} + +func newErrorUnsupportedImageTypeException(v protocol.ResponseMetadata) error { + return &UnsupportedImageTypeException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *UnsupportedImageTypeException) Code() string { + return "UnsupportedImageTypeException" +} + +// Message returns the exception's message. +func (s *UnsupportedImageTypeException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *UnsupportedImageTypeException) OrigErr() error { + return nil +} + +func (s *UnsupportedImageTypeException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *UnsupportedImageTypeException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *UnsupportedImageTypeException) RequestID() string { + return s.RespMetadata.RequestID } type UntagResourceInput struct { @@ -8337,12 +8775,14 @@ type UploadLayerPartInput struct { // LayerPartBlob is a required field LayerPartBlob []byte `locationName:"layerPartBlob" type:"blob" required:"true"` - // The integer value of the first byte of the layer part. + // The position of the first byte of the layer part within the overall image + // layer. // // PartFirstByte is a required field PartFirstByte *int64 `locationName:"partFirstByte" type:"long" required:"true"` - // The integer value of the last byte of the layer part. + // The position of the last byte of the layer part within the overall image + // layer.
// // PartLastByte is a required field PartLastByte *int64 `locationName:"partLastByte" type:"long" required:"true"` @@ -8487,11 +8927,11 @@ func (s *UploadLayerPartOutput) SetUploadId(v string) *UploadLayerPartOutput { return s } -// The upload could not be found, or the specified upload id is not valid for +// The upload could not be found, or the specified upload ID is not valid for // this repository. type UploadNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The error message associated with the exception. Message_ *string `locationName:"message" type:"string"` @@ -8509,17 +8949,17 @@ func (s UploadNotFoundException) GoString() string { func newErrorUploadNotFoundException(v protocol.ResponseMetadata) error { return &UploadNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UploadNotFoundException) Code() string { +func (s *UploadNotFoundException) Code() string { return "UploadNotFoundException" } // Message returns the exception's message. -func (s UploadNotFoundException) Message() string { +func (s *UploadNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8527,22 +8967,38 @@ func (s UploadNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UploadNotFoundException) OrigErr() error { +func (s *UploadNotFoundException) OrigErr() error { return nil } -func (s UploadNotFoundException) Error() string { +func (s *UploadNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s UploadNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UploadNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s UploadNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *UploadNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + +const ( + // EncryptionTypeAes256 is a EncryptionType enum value + EncryptionTypeAes256 = "AES256" + + // EncryptionTypeKms is a EncryptionType enum value + EncryptionTypeKms = "KMS" +) + +// EncryptionType_Values returns all elements of the EncryptionType enum +func EncryptionType_Values() []string { + return []string{ + EncryptionTypeAes256, + EncryptionTypeKms, + } } const ( @@ -8565,11 +9021,30 @@ const ( FindingSeverityUndefined = "UNDEFINED" ) +// FindingSeverity_Values returns all elements of the FindingSeverity enum +func FindingSeverity_Values() []string { + return []string{ + FindingSeverityInformational, + FindingSeverityLow, + FindingSeverityMedium, + FindingSeverityHigh, + FindingSeverityCritical, + FindingSeverityUndefined, + } +} + const ( // ImageActionTypeExpire is a ImageActionType enum value ImageActionTypeExpire = "EXPIRE" ) +// ImageActionType_Values returns all elements of the ImageActionType enum +func ImageActionType_Values() []string { + return []string{ + ImageActionTypeExpire, + } +} + const ( // ImageFailureCodeInvalidImageDigest is a ImageFailureCode enum value ImageFailureCodeInvalidImageDigest = "InvalidImageDigest" @@ -8585,8 +9060,27 @@ const ( // ImageFailureCodeMissingDigestAndTag is a ImageFailureCode enum value ImageFailureCodeMissingDigestAndTag = "MissingDigestAndTag" + + // ImageFailureCodeImageReferencedByManifestList is a ImageFailureCode enum value + ImageFailureCodeImageReferencedByManifestList = "ImageReferencedByManifestList" + + // ImageFailureCodeKmsError is a ImageFailureCode enum value + ImageFailureCodeKmsError = "KmsError" ) +// ImageFailureCode_Values returns all elements of the ImageFailureCode enum +func ImageFailureCode_Values() []string { + return []string{ + ImageFailureCodeInvalidImageDigest, + ImageFailureCodeInvalidImageTag, + ImageFailureCodeImageTagDoesNotMatchDigest, + ImageFailureCodeImageNotFound, + ImageFailureCodeMissingDigestAndTag, + ImageFailureCodeImageReferencedByManifestList, + ImageFailureCodeKmsError, + } +} + const ( // ImageTagMutabilityMutable is a ImageTagMutability enum value ImageTagMutabilityMutable = "MUTABLE" @@ -8595,6 +9089,14 @@ const ( ImageTagMutabilityImmutable = "IMMUTABLE" ) +// ImageTagMutability_Values returns all elements of the ImageTagMutability enum +func ImageTagMutability_Values() []string { + return []string{ + ImageTagMutabilityMutable, + ImageTagMutabilityImmutable, + } +} + const ( // LayerAvailabilityAvailable is a LayerAvailability enum value LayerAvailabilityAvailable = "AVAILABLE" @@ -8603,6 +9105,14 @@ const ( LayerAvailabilityUnavailable = "UNAVAILABLE" ) +// LayerAvailability_Values returns all elements of the LayerAvailability enum +func LayerAvailability_Values() []string { + return []string{ + LayerAvailabilityAvailable, + LayerAvailabilityUnavailable, + } +} + const ( // LayerFailureCodeInvalidLayerDigest is a LayerFailureCode enum value LayerFailureCodeInvalidLayerDigest = "InvalidLayerDigest" @@ -8611,6 +9121,14 @@ const ( LayerFailureCodeMissingLayerDigest = "MissingLayerDigest" ) +// LayerFailureCode_Values returns all elements of the LayerFailureCode enum +func LayerFailureCode_Values() []string { + return []string{ + LayerFailureCodeInvalidLayerDigest, + LayerFailureCodeMissingLayerDigest, + } +} + const ( // LifecyclePolicyPreviewStatusInProgress is a LifecyclePolicyPreviewStatus enum 
value LifecyclePolicyPreviewStatusInProgress = "IN_PROGRESS" @@ -8625,6 +9143,16 @@ const ( LifecyclePolicyPreviewStatusFailed = "FAILED" ) +// LifecyclePolicyPreviewStatus_Values returns all elements of the LifecyclePolicyPreviewStatus enum +func LifecyclePolicyPreviewStatus_Values() []string { + return []string{ + LifecyclePolicyPreviewStatusInProgress, + LifecyclePolicyPreviewStatusComplete, + LifecyclePolicyPreviewStatusExpired, + LifecyclePolicyPreviewStatusFailed, + } +} + const ( // ScanStatusInProgress is a ScanStatus enum value ScanStatusInProgress = "IN_PROGRESS" @@ -8636,6 +9164,15 @@ const ( ScanStatusFailed = "FAILED" ) +// ScanStatus_Values returns all elements of the ScanStatus enum +func ScanStatus_Values() []string { + return []string{ + ScanStatusInProgress, + ScanStatusComplete, + ScanStatusFailed, + } +} + const ( // TagStatusTagged is a TagStatus enum value TagStatusTagged = "TAGGED" @@ -8646,3 +9183,12 @@ const ( // TagStatusAny is a TagStatus enum value TagStatusAny = "ANY" ) + +// TagStatus_Values returns all elements of the TagStatus enum +func TagStatus_Values() []string { + return []string{ + TagStatusTagged, + TagStatusUntagged, + TagStatusAny, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecr/doc.go b/vendor/github.com/aws/aws-sdk-go/service/ecr/doc.go index d970974bcda6..3c3843ae3e57 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ecr/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ecr/doc.go @@ -3,12 +3,13 @@ // Package ecr provides the client and types for making API // requests to Amazon EC2 Container Registry. // -// Amazon Elastic Container Registry (Amazon ECR) is a managed Docker registry -// service. Customers can use the familiar Docker CLI to push, pull, and manage -// images. Amazon ECR provides a secure, scalable, and reliable registry. Amazon -// ECR supports private Docker repositories with resource-based permissions +// Amazon Elastic Container Registry (Amazon ECR) is a managed container image +// registry service. Customers can use the familiar Docker CLI, or their preferred +// client, to push, pull, and manage images. Amazon ECR provides a secure, scalable, +// and reliable registry for your Docker or Open Container Initiative (OCI) +// images. Amazon ECR supports private repositories with resource-based permissions // using IAM so that specific users or Amazon EC2 instances can access repositories -// and images. Developers can use the Docker CLI to author and manage images. +// and images. // // See https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21 for more information on this service. // diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecr/errors.go b/vendor/github.com/aws/aws-sdk-go/service/ecr/errors.go index 732d865bf404..8191013264f1 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ecr/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ecr/errors.go @@ -21,6 +21,13 @@ const ( // the manifest or image tag after the last push. ErrCodeImageAlreadyExistsException = "ImageAlreadyExistsException" + // ErrCodeImageDigestDoesNotMatchException for service response error code + // "ImageDigestDoesNotMatchException". + // + // The specified image digest does not match the digest that Amazon ECR calculated + // for the image. + ErrCodeImageDigestDoesNotMatchException = "ImageDigestDoesNotMatchException" + // ErrCodeImageNotFoundException for service response error code // "ImageNotFoundException". // @@ -63,6 +70,12 @@ const ( // characters. 
ErrCodeInvalidTagParameterException = "InvalidTagParameterException" + // ErrCodeKmsException for service response error code + // "KmsException". + // + // The operation failed due to a KMS exception. + ErrCodeKmsException = "KmsException" + // ErrCodeLayerAlreadyExistsException for service response error code // "LayerAlreadyExistsException". // @@ -98,8 +111,8 @@ const ( // ErrCodeLifecyclePolicyPreviewInProgressException for service response error code // "LifecyclePolicyPreviewInProgressException". // - // The previous lifecycle policy preview request has not completed. Please try - // again later. + // The previous lifecycle policy preview request has not completed. Wait and + // try again. ErrCodeLifecyclePolicyPreviewInProgressException = "LifecyclePolicyPreviewInProgressException" // ErrCodeLifecyclePolicyPreviewNotFoundException for service response error code @@ -112,11 +125,16 @@ const ( // "LimitExceededException". // // The operation did not succeed because it would have exceeded a service limit - // for your account. For more information, see Amazon ECR Default Service Limits - // (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service_limits.html) + // for your account. For more information, see Amazon ECR Service Quotas (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service-quotas.html) // in the Amazon Elastic Container Registry User Guide. ErrCodeLimitExceededException = "LimitExceededException" + // ErrCodeReferencedImagesNotFoundException for service response error code + // "ReferencedImagesNotFoundException". + // + // The manifest list is referencing an image that does not exist. + ErrCodeReferencedImagesNotFoundException = "ReferencedImagesNotFoundException" + // ErrCodeRepositoryAlreadyExistsException for service response error code // "RepositoryAlreadyExistsException". // @@ -164,10 +182,16 @@ const ( // of tags that can be applied to a repository is 50. ErrCodeTooManyTagsException = "TooManyTagsException" + // ErrCodeUnsupportedImageTypeException for service response error code + // "UnsupportedImageTypeException". + // + // The image is of a type that cannot be scanned. + ErrCodeUnsupportedImageTypeException = "UnsupportedImageTypeException" + // ErrCodeUploadNotFoundException for service response error code // "UploadNotFoundException". // - // The upload could not be found, or the specified upload id is not valid for + // The upload could not be found, or the specified upload ID is not valid for // this repository. 
ErrCodeUploadNotFoundException = "UploadNotFoundException" ) @@ -175,12 +199,14 @@ const ( var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ "EmptyUploadException": newErrorEmptyUploadException, "ImageAlreadyExistsException": newErrorImageAlreadyExistsException, + "ImageDigestDoesNotMatchException": newErrorImageDigestDoesNotMatchException, "ImageNotFoundException": newErrorImageNotFoundException, "ImageTagAlreadyExistsException": newErrorImageTagAlreadyExistsException, "InvalidLayerException": newErrorInvalidLayerException, "InvalidLayerPartException": newErrorInvalidLayerPartException, "InvalidParameterException": newErrorInvalidParameterException, "InvalidTagParameterException": newErrorInvalidTagParameterException, + "KmsException": newErrorKmsException, "LayerAlreadyExistsException": newErrorLayerAlreadyExistsException, "LayerInaccessibleException": newErrorLayerInaccessibleException, "LayerPartTooSmallException": newErrorLayerPartTooSmallException, @@ -189,6 +215,7 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ "LifecyclePolicyPreviewInProgressException": newErrorLifecyclePolicyPreviewInProgressException, "LifecyclePolicyPreviewNotFoundException": newErrorLifecyclePolicyPreviewNotFoundException, "LimitExceededException": newErrorLimitExceededException, + "ReferencedImagesNotFoundException": newErrorReferencedImagesNotFoundException, "RepositoryAlreadyExistsException": newErrorRepositoryAlreadyExistsException, "RepositoryNotEmptyException": newErrorRepositoryNotEmptyException, "RepositoryNotFoundException": newErrorRepositoryNotFoundException, @@ -196,5 +223,6 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ "ScanNotFoundException": newErrorScanNotFoundException, "ServerException": newErrorServerException, "TooManyTagsException": newErrorTooManyTagsException, + "UnsupportedImageTypeException": newErrorUnsupportedImageTypeException, "UploadNotFoundException": newErrorUploadNotFoundException, } diff --git a/vendor/github.com/aws/aws-sdk-go/service/elb/api.go b/vendor/github.com/aws/aws-sdk-go/service/elb/api.go index c2e93fa72d33..00eba43323ab 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elb/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elb/api.go @@ -64,7 +64,7 @@ func (c *ELB) AddTagsRequest(input *AddTagsInput) (req *request.Request, output // Each tag consists of a key and an optional value. If a tag with the same // key is already associated with the load balancer, AddTags updates its value. // -// For more information, see Tag Your Classic Load Balancer (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/add-remove-tags.html) +// For more information, see Tag Your Classic Load Balancer (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/add-remove-tags.html) // in the Classic Load Balancers Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -155,7 +155,7 @@ func (c *ELB) ApplySecurityGroupsToLoadBalancerRequest(input *ApplySecurityGroup // private cloud (VPC). The specified security groups override the previously // associated security groups. 
// -// For more information, see Security Groups for Load Balancers in a VPC (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-security-groups.html#elb-vpc-security-groups) +// For more information, see Security Groups for Load Balancers in a VPC (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-security-groups.html#elb-vpc-security-groups) // in the Classic Load Balancers Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -246,7 +246,7 @@ func (c *ELB) AttachLoadBalancerToSubnetsRequest(input *AttachLoadBalancerToSubn // // The load balancer evenly distributes requests across all registered subnets. // For more information, see Add or Remove Subnets for Your Load Balancer in -// a VPC (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-manage-subnets.html) +// a VPC (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-manage-subnets.html) // in the Classic Load Balancers Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -339,7 +339,7 @@ func (c *ELB) ConfigureHealthCheckRequest(input *ConfigureHealthCheckInput) (req // of your EC2 instances. // // For more information, see Configure Health Checks for Your Load Balancer -// (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-healthchecks.html) +// (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-healthchecks.html) // in the Classic Load Balancers Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -433,7 +433,7 @@ func (c *ELB) CreateAppCookieStickinessPolicyRequest(input *CreateAppCookieStick // If the application cookie is explicitly removed or expires, the session stops // being sticky until a new application cookie is issued. // -// For more information, see Application-Controlled Session Stickiness (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-sticky-sessions.html#enable-sticky-sessions-application) +// For more information, see Application-Controlled Session Stickiness (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-sticky-sessions.html#enable-sticky-sessions-application) // in the Classic Load Balancers Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -538,7 +538,7 @@ func (c *ELB) CreateLBCookieStickinessPolicyRequest(input *CreateLBCookieStickin // the same user to that server. The validity of the cookie is based on the // cookie expiration time, which is specified in the policy configuration. // -// For more information, see Duration-Based Session Stickiness (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-sticky-sessions.html#enable-sticky-sessions-duration) +// For more information, see Duration-Based Session Stickiness (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-sticky-sessions.html#enable-sticky-sessions-duration) // in the Classic Load Balancers Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -638,7 +638,7 @@ func (c *ELB) CreateLoadBalancerRequest(input *CreateLoadBalancerInput) (req *re // // You can create up to 20 load balancers per region per account. You can request // an increase for the number of load balancers for your account. 
For more information, -// see Limits for Your Classic Load Balancer (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-limits.html) +// see Limits for Your Classic Load Balancer (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-limits.html) // in the Classic Load Balancers Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -762,7 +762,7 @@ func (c *ELB) CreateLoadBalancerListenersRequest(input *CreateLoadBalancerListen // the properties of the new listener must match the properties of the existing // listener. // -// For more information, see Listeners for Your Classic Load Balancer (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-listener-config.html) +// For more information, see Listeners for Your Classic Load Balancer (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-listener-config.html) // in the Classic Load Balancers Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1209,7 +1209,7 @@ func (c *ELB) DeregisterInstancesFromLoadBalancerRequest(input *DeregisterInstan // You can use DescribeLoadBalancers to verify that the instance is deregistered // from the load balancer. // -// For more information, see Register or De-Register EC2 Instances (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-deregister-register-instances.html) +// For more information, see Register or De-Register EC2 Instances (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-deregister-register-instances.html) // in the Classic Load Balancers Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1295,7 +1295,7 @@ func (c *ELB) DescribeAccountLimitsRequest(input *DescribeAccountLimitsInput) (r // Describes the current Elastic Load Balancing resource limits for your AWS // account. // -// For more information, see Limits for Your Classic Load Balancer (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-limits.html) +// For more information, see Limits for Your Classic Load Balancer (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-limits.html) // in the Classic Load Balancers Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2037,7 +2037,7 @@ func (c *ELB) DisableAvailabilityZonesForLoadBalancerRequest(input *DisableAvail // the OutOfService state. Then, the load balancer attempts to equally balance // the traffic among its remaining Availability Zones. // -// For more information, see Add or Remove Availability Zones (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-disable-az.html) +// For more information, see Add or Remove Availability Zones (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-disable-az.html) // in the Classic Load Balancers Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2127,7 +2127,7 @@ func (c *ELB) EnableAvailabilityZonesForLoadBalancerRequest(input *EnableAvailab // // The load balancer evenly distributes requests across all its registered Availability // Zones that contain instances. For more information, see Add or Remove Availability -// Zones (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-disable-az.html) +// Zones (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-disable-az.html) // in the Classic Load Balancers Guide. 
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2216,13 +2216,13 @@ func (c *ELB) ModifyLoadBalancerAttributesRequest(input *ModifyLoadBalancerAttri // // For more information, see the following in the Classic Load Balancers Guide: // -// * Cross-Zone Load Balancing (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-disable-crosszone-lb.html) +// * Cross-Zone Load Balancing (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-disable-crosszone-lb.html) // -// * Connection Draining (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/config-conn-drain.html) +// * Connection Draining (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/config-conn-drain.html) // -// * Access Logs (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/access-log-collection.html) +// * Access Logs (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/access-log-collection.html) // -// * Idle Connection Timeout (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/config-idle-timeout.html) +// * Idle Connection Timeout (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/config-idle-timeout.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2328,7 +2328,7 @@ func (c *ELB) RegisterInstancesWithLoadBalancerRequest(input *RegisterInstancesW // // To deregister instances from a load balancer, use DeregisterInstancesFromLoadBalancer. // -// For more information, see Register or De-Register EC2 Instances (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-deregister-register-instances.html) +// For more information, see Register or De-Register EC2 Instances (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-deregister-register-instances.html) // in the Classic Load Balancers Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2497,7 +2497,7 @@ func (c *ELB) SetLoadBalancerListenerSSLCertificateRequest(input *SetLoadBalance // the same load balancer and port. // // For more information about updating your SSL certificate, see Replace the -// SSL Certificate for Your Load Balancer (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-update-ssl-cert.html) +// SSL Certificate for Your Load Balancer (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-update-ssl-cert.html) // in the Classic Load Balancers Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2605,9 +2605,9 @@ func (c *ELB) SetLoadBalancerPoliciesForBackendServerRequest(input *SetLoadBalan // that the policy is associated with the EC2 instance. // // For more information about enabling back-end instance authentication, see -// Configure Back-end Instance Authentication (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-create-https-ssl-load-balancer.html#configure_backendauth_clt) +// Configure Back-end Instance Authentication (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-create-https-ssl-load-balancer.html#configure_backendauth_clt) // in the Classic Load Balancers Guide. 
For more information about Proxy Protocol, -// see Configure Proxy Protocol Support (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-proxy-protocol.html) +// see Configure Proxy Protocol Support (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-proxy-protocol.html) // in the Classic Load Balancers Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2700,9 +2700,9 @@ func (c *ELB) SetLoadBalancerPoliciesOfListenerRequest(input *SetLoadBalancerPol // To enable back-end server authentication, use SetLoadBalancerPoliciesForBackendServer. // // For more information about setting policies, see Update the SSL Negotiation -// Configuration (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/ssl-config-update.html), -// Duration-Based Session Stickiness (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-sticky-sessions.html#enable-sticky-sessions-duration), -// and Application-Controlled Session Stickiness (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-sticky-sessions.html#enable-sticky-sessions-application) +// Configuration (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/ssl-config-update.html), +// Duration-Based Session Stickiness (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-sticky-sessions.html#enable-sticky-sessions-duration), +// and Application-Controlled Session Stickiness (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-sticky-sessions.html#enable-sticky-sessions-application) // in the Classic Load Balancers Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2899,14 +2899,20 @@ func (s AddTagsOutput) GoString() string { return s.String() } -// This data type is reserved. +// Information about additional load balancer attributes. type AdditionalAttribute struct { _ struct{} `type:"structure"` - // This parameter is reserved. + // The name of the attribute. + // + // The following attribute is supported. + // + // * elb.http.desyncmitigationmode - Determines how the load balancer handles + // requests that might pose a security risk to your application. The possible + // values are monitor, defensive, and strictest. The default is defensive. Key *string `type:"string"` - // This parameter is reserved. + // The value of the attribute. Value *string `type:"string"` } @@ -3509,7 +3515,7 @@ type CreateLoadBalancerInput struct { // The listeners. // - // For more information, see Listeners for Your Classic Load Balancer (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-listener-config.html) + // For more information, see Listeners for Your Classic Load Balancer (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-listener-config.html) // in the Classic Load Balancers Guide. // // Listeners is a required field @@ -3529,7 +3535,7 @@ type CreateLoadBalancerInput struct { // By default, Elastic Load Balancing creates an Internet-facing load balancer // with a DNS name that resolves to public IP addresses. For more information // about Internet-facing and Internal load balancers, see Load Balancer Scheme - // (http://docs.aws.amazon.com/elasticloadbalancing/latest/userguide/how-elastic-load-balancing-works.html#load-balancer-scheme) + // (https://docs.aws.amazon.com/elasticloadbalancing/latest/userguide/how-elastic-load-balancing-works.html#load-balancer-scheme) // in the Elastic Load Balancing User Guide.
// // Specify internal to create a load balancer with a DNS name that resolves @@ -3546,7 +3552,7 @@ type CreateLoadBalancerInput struct { // A list of tags to assign to the load balancer. // // For more information about tagging your load balancer, see Tag Your Classic - // Load Balancer (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/add-remove-tags.html) + // Load Balancer (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/add-remove-tags.html) // in the Classic Load Balancers Guide. Tags []*Tag `min:"1" type:"list"` } @@ -5166,7 +5172,7 @@ func (s *Limit) SetName(v string) *Limit { // Information about a listener. // // For information about the protocols and the ports supported by Elastic Load -// Balancing, see Listeners for Your Classic Load Balancer (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-listener-config.html) +// Balancing, see Listeners for Your Classic Load Balancer (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-listener-config.html) // in the Classic Load Balancers Guide. type Listener struct { _ struct{} `type:"structure"` @@ -5179,8 +5185,9 @@ type Listener struct { // The protocol to use for routing traffic to instances: HTTP, HTTPS, TCP, or // SSL. // - // If the front-end protocol is HTTP, HTTPS, TCP, or SSL, InstanceProtocol must - // be at the same protocol. + // If the front-end protocol is TCP or SSL, the back-end protocol must be TCP + // or SSL. If the front-end protocol is HTTP or HTTPS, the back-end protocol + // must be HTTP or HTTPS. // // If there is another listener with the same InstancePort whose InstanceProtocol // is secure, (HTTPS or SSL), the listener's InstanceProtocol must also be secure. @@ -5308,17 +5315,17 @@ type LoadBalancerAttributes struct { // If enabled, the load balancer captures detailed information of all requests // and delivers the information to the Amazon S3 bucket that you specify. // - // For more information, see Enable Access Logs (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-access-logs.html) + // For more information, see Enable Access Logs (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-access-logs.html) // in the Classic Load Balancers Guide. AccessLog *AccessLog `type:"structure"` - // This parameter is reserved. + // Any additional attributes. AdditionalAttributes []*AdditionalAttribute `type:"list"` // If enabled, the load balancer allows existing requests to complete before // the load balancer shifts traffic away from a deregistered or unhealthy instance. // - // For more information, see Configure Connection Draining (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/config-conn-drain.html) + // For more information, see Configure Connection Draining (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/config-conn-drain.html) // in the Classic Load Balancers Guide. ConnectionDraining *ConnectionDraining `type:"structure"` @@ -5327,14 +5334,14 @@ type LoadBalancerAttributes struct { // // By default, Elastic Load Balancing maintains a 60-second idle connection // timeout for both front-end and back-end connections of your load balancer. 
- // For more information, see Configure Idle Connection Timeout (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/config-idle-timeout.html) + // For more information, see Configure Idle Connection Timeout (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/config-idle-timeout.html) // in the Classic Load Balancers Guide. ConnectionSettings *ConnectionSettings `type:"structure"` // If enabled, the load balancer routes the request traffic evenly across all // instances regardless of the Availability Zones. // - // For more information, see Configure Cross-Zone Load Balancing (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-disable-crosszone-lb.html) + // For more information, see Configure Cross-Zone Load Balancing (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-disable-crosszone-lb.html) // in the Classic Load Balancers Guide. CrossZoneLoadBalancing *CrossZoneLoadBalancing `type:"structure"` } @@ -5421,7 +5428,7 @@ type LoadBalancerDescription struct { // The DNS name of the load balancer. // - // For more information, see Configure a Custom Domain Name (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/using-domain-names-with-elb.html) + // For more information, see Configure a Custom Domain Name (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/using-domain-names-with-elb.html) // in the Classic Load Balancers Guide. CanonicalHostedZoneName *string `type:"string"` diff --git a/vendor/github.com/aws/aws-sdk-go/service/elb/doc.go b/vendor/github.com/aws/aws-sdk-go/service/elb/doc.go index 0b93ed4740be..5fa8e5154e58 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elb/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elb/doc.go @@ -15,7 +15,7 @@ // Elastic Load Balancing supports three types of load balancers: Application // Load Balancers, Network Load Balancers, and Classic Load Balancers. You can // select a load balancer based on your application needs. For more information, -// see the Elastic Load Balancing User Guide (http://docs.aws.amazon.com/elasticloadbalancing/latest/userguide/). +// see the Elastic Load Balancing User Guide (https://docs.aws.amazon.com/elasticloadbalancing/latest/userguide/). // // This reference covers the 2012-06-01 API, which supports Classic Load Balancers. // The 2015-12-01 API supports Application Load Balancers and Network Load Balancers. diff --git a/vendor/github.com/aws/aws-sdk-go/service/elbv2/api.go b/vendor/github.com/aws/aws-sdk-go/service/elbv2/api.go index 2e2c18c04bd5..254ddb44f5d1 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elbv2/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elbv2/api.go @@ -155,8 +155,8 @@ func (c *ELBV2) AddTagsRequest(input *AddTagsInput) (req *request.Request, outpu // AddTags API operation for Elastic Load Balancing. // // Adds the specified tags to the specified Elastic Load Balancing resource. -// You can tag your Application Load Balancers, Network Load Balancers, and -// your target groups. +// You can tag your Application Load Balancers, Network Load Balancers, target +// groups, listeners, and rules. // // Each tag consists of a key and an optional value. If a resource already has // a tag with the same key, AddTags updates its value. @@ -325,6 +325,12 @@ func (c *ELBV2) CreateListenerRequest(input *CreateListenerInput) (req *request. // across all listeners. If a target group is used by multiple actions for a // load balancer, it is counted as only one use. 
// +// * ErrCodeALPNPolicyNotSupportedException "ALPNPolicyNotFound" +// The specified ALPN policy is not supported. +// +// * ErrCodeTooManyTagsException "TooManyTags" +// You've reached the limit on the number of tags per load balancer. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/CreateListener func (c *ELBV2) CreateListener(input *CreateListenerInput) (*CreateListenerOutput, error) { req, out := c.CreateListenerRequest(input) @@ -530,6 +536,7 @@ func (c *ELBV2) CreateRuleRequest(input *CreateRuleInput) (req *request.Request, // Creates a rule for the specified listener. The listener must be associated // with an Application Load Balancer. // +// Each rule consists of a priority, one or more actions, and one or more conditions. // Rules are evaluated in priority order, from the lowest value to the highest // value. When the conditions for a rule are met, its actions are performed. // If the conditions for no rules are met, the actions for the default rule @@ -593,6 +600,9 @@ func (c *ELBV2) CreateRuleRequest(input *CreateRuleInput) (req *request.Request, // across all listeners. If a target group is used by multiple actions for a // load balancer, it is counted as only one use. // +// * ErrCodeTooManyTagsException "TooManyTags" +// You've reached the limit on the number of tags per load balancer. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/CreateRule func (c *ELBV2) CreateRule(input *CreateRuleInput) (*CreateRuleOutput, error) { req, out := c.CreateRuleRequest(input) @@ -697,6 +707,9 @@ func (c *ELBV2) CreateTargetGroupRequest(input *CreateTargetGroupInput) (req *re // * ErrCodeInvalidConfigurationRequestException "InvalidConfigurationRequest" // The requested configuration is not valid. // +// * ErrCodeTooManyTagsException "TooManyTags" +// You've reached the limit on the number of tags per load balancer. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/CreateTargetGroup func (c *ELBV2) CreateTargetGroup(input *CreateTargetGroupInput) (*CreateTargetGroupOutput, error) { req, out := c.CreateTargetGroupRequest(input) @@ -944,6 +957,8 @@ func (c *ELBV2) DeleteRuleRequest(input *DeleteRuleInput) (req *request.Request, // // Deletes the specified rule. // +// You can't delete the default rule. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -1901,9 +1916,9 @@ func (c *ELBV2) DescribeTagsRequest(input *DescribeTagsInput) (req *request.Requ // DescribeTags API operation for Elastic Load Balancing. // -// Describes the tags for the specified resources. You can describe the tags -// for one or more Application Load Balancers, Network Load Balancers, and target -// groups. +// Describes the tags for the specified Elastic Load Balancing resources. You +// can describe the tags for one or more Application Load Balancers, Network +// Load Balancers, target groups, listeners, or rules. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2378,6 +2393,9 @@ func (c *ELBV2) ModifyListenerRequest(input *ModifyListenerInput) (req *request. // across all listeners. If a target group is used by multiple actions for a // load balancer, it is counted as only one use. 
// +// * ErrCodeALPNPolicyNotSupportedException "ALPNPolicyNotFound" +// The specified ALPN policy is not supported. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/ModifyListener func (c *ELBV2) ModifyListener(input *ModifyListenerInput) (*ModifyListenerOutput, error) { req, out := c.ModifyListenerRequest(input) @@ -3013,7 +3031,9 @@ func (c *ELBV2) RemoveTagsRequest(input *RemoveTagsInput) (req *request.Request, // RemoveTags API operation for Elastic Load Balancing. // -// Removes the specified tags from the specified Elastic Load Balancing resource. +// Removes the specified tags from the specified Elastic Load Balancing resources. +// You can remove the tags for one or more Application Load Balancers, Network +// Load Balancers, target groups, listeners, or rules. // // To list the current tags for your resources, use DescribeTags. // @@ -3426,6 +3446,9 @@ func (c *ELBV2) SetSubnetsWithContext(ctx aws.Context, input *SetSubnetsInput, o } // Information about an action. +// +// Each rule must include exactly one of the following types of actions: forward, +// fixed-response, or redirect, and it must be the last action to be performed. type Action struct { _ struct{} `type:"structure"` @@ -3449,9 +3472,7 @@ type Action struct { ForwardConfig *ForwardActionConfig `type:"structure"` // The order for the action. This value is required for rules with multiple - // actions. The action with the lowest value for order is performed first. The - // last action to be performed must be one of the following types of actions: - // a forward, fixed-response, or redirect. + // actions. The action with the lowest value for order is performed first. Order *int64 `min:"1" type:"integer"` // [Application Load Balancer] Information for creating a redirect action. Specify @@ -3647,7 +3668,7 @@ type AddTagsInput struct { // ResourceArns is a required field ResourceArns []*string `type:"list" required:"true"` - // The tags. Each resource can have a maximum of 10 tags. + // The tags. // // Tags is a required field Tags []*Tag `min:"1" type:"list" required:"true"` @@ -4036,6 +4057,9 @@ type AvailabilityZone struct { // a private IP address from the IPv4 range of the subnet. LoadBalancerAddresses []*LoadBalancerAddress `type:"list"` + // [Application Load Balancers on Outposts] The ID of the Outpost. + OutpostId *string `type:"string"` + // The ID of the subnet. You can specify one subnet per Availability Zone. SubnetId *string `type:"string"` @@ -4059,6 +4083,12 @@ func (s *AvailabilityZone) SetLoadBalancerAddresses(v []*LoadBalancerAddress) *A return s } +// SetOutpostId sets the OutpostId field's value. +func (s *AvailabilityZone) SetOutpostId(v string) *AvailabilityZone { + s.OutpostId = &v + return s +} + // SetSubnetId sets the SubnetId field's value. func (s *AvailabilityZone) SetSubnetId(v string) *AvailabilityZone { s.SubnetId = &v @@ -4143,6 +4173,23 @@ func (s *Cipher) SetPriority(v int64) *Cipher { type CreateListenerInput struct { _ struct{} `type:"structure"` + // [TLS listeners] The name of the Application-Layer Protocol Negotiation (ALPN) + // policy. You can specify one policy name. The following are the possible values: + // + // * HTTP1Only + // + // * HTTP2Only + // + // * HTTP2Optional + // + // * HTTP2Preferred + // + // * None + // + // For more information, see ALPN Policies (https://docs.aws.amazon.com/elasticloadbalancing/latest/network/create-tls-listener.html#alpn-policies) + // in the Network Load Balancers Guide. 
+ AlpnPolicy []*string `type:"list"` + // [HTTPS and TLS listeners] The default certificate for the listener. You must // provide exactly one certificate. Set CertificateArn to the certificate ARN // but do not set IsDefault. @@ -4150,25 +4197,7 @@ type CreateListenerInput struct { // To create a certificate list for the listener, use AddListenerCertificates. Certificates []*Certificate `type:"list"` - // The actions for the default rule. The rule must include one forward action - // or one or more fixed-response actions. - // - // If the action type is forward, you specify one or more target groups. The - // protocol of the target group must be HTTP or HTTPS for an Application Load - // Balancer. The protocol of the target group must be TCP, TLS, UDP, or TCP_UDP - // for a Network Load Balancer. - // - // [HTTPS listeners] If the action type is authenticate-oidc, you authenticate - // users through an identity provider that is OpenID Connect (OIDC) compliant. - // - // [HTTPS listeners] If the action type is authenticate-cognito, you authenticate - // users through the user pools supported by Amazon Cognito. - // - // [Application Load Balancer] If the action type is redirect, you redirect - // specified client requests from one URL to another. - // - // [Application Load Balancer] If the action type is fixed-response, you drop - // specified client requests and return a custom HTTP response. + // The actions for the default rule. // // DefaultActions is a required field DefaultActions []*Action `type:"list" required:"true"` @@ -4190,10 +4219,34 @@ type CreateListenerInput struct { // Protocol is a required field Protocol *string `type:"string" required:"true" enum:"ProtocolEnum"` - // [HTTPS and TLS listeners] The security policy that defines which ciphers - // and protocols are supported. The default is the current predefined security - // policy. + // [HTTPS and TLS listeners] The security policy that defines which protocols + // and ciphers are supported. The following are the possible values: + // + // * ELBSecurityPolicy-2016-08 + // + // * ELBSecurityPolicy-TLS-1-0-2015-04 + // + // * ELBSecurityPolicy-TLS-1-1-2017-01 + // + // * ELBSecurityPolicy-TLS-1-2-2017-01 + // + // * ELBSecurityPolicy-TLS-1-2-Ext-2018-06 + // + // * ELBSecurityPolicy-FS-2018-06 + // + // * ELBSecurityPolicy-FS-1-1-2019-08 + // + // * ELBSecurityPolicy-FS-1-2-2019-08 + // + // * ELBSecurityPolicy-FS-1-2-Res-2019-08 + // + // For more information, see Security Policies (https://docs.aws.amazon.com/elasticloadbalancing/latest/application/create-https-listener.html#describe-ssl-policies) + // in the Application Load Balancers Guide and Security Policies (https://docs.aws.amazon.com/elasticloadbalancing/latest/network/create-tls-listener.html#describe-ssl-policies) + // in the Network Load Balancers Guide. SslPolicy *string `type:"string"` + + // The tags to assign to the listener. 
+ Tags []*Tag `min:"1" type:"list"` } // String returns the string representation @@ -4224,6 +4277,9 @@ func (s *CreateListenerInput) Validate() error { if s.Protocol == nil { invalidParams.Add(request.NewErrParamRequired("Protocol")) } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } if s.DefaultActions != nil { for i, v := range s.DefaultActions { if v == nil { @@ -4234,6 +4290,16 @@ func (s *CreateListenerInput) Validate() error { } } } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -4241,6 +4307,12 @@ func (s *CreateListenerInput) Validate() error { return nil } +// SetAlpnPolicy sets the AlpnPolicy field's value. +func (s *CreateListenerInput) SetAlpnPolicy(v []*string) *CreateListenerInput { + s.AlpnPolicy = v + return s +} + // SetCertificates sets the Certificates field's value. func (s *CreateListenerInput) SetCertificates(v []*Certificate) *CreateListenerInput { s.Certificates = v @@ -4277,6 +4349,12 @@ func (s *CreateListenerInput) SetSslPolicy(v string) *CreateListenerInput { return s } +// SetTags sets the Tags field's value. +func (s *CreateListenerInput) SetTags(v []*Tag) *CreateListenerInput { + s.Tags = v + return s +} + type CreateListenerOutput struct { _ struct{} `type:"structure"` @@ -4303,6 +4381,10 @@ func (s *CreateListenerOutput) SetListeners(v []*Listener) *CreateListenerOutput type CreateLoadBalancerInput struct { _ struct{} `type:"structure"` + // [Application Load Balancers on Outposts] The ID of the customer-owned address + // pool (CoIP pool). + CustomerOwnedIpv4Pool *string `type:"string"` + // [Application Load Balancers] The type of IP addresses used by the subnets // for your load balancer. The possible values are ipv4 (for IPv4 addresses) // and dualstack (for IPv4 and IPv6 addresses). Internal load balancers must @@ -4341,6 +4423,11 @@ type CreateLoadBalancerInput struct { // [Application Load Balancers] You must specify subnets from at least two Availability // Zones. You cannot specify Elastic IP addresses for your subnets. // + // [Application Load Balancers on Outposts] You must specify one Outpost subnet. + // + // [Application Load Balancers on Local Zones] You can specify subnets from + // one or more Local Zones. + // // [Network Load Balancers] You can specify subnets from one or more Availability // Zones. You can specify one Elastic IP address per subnet if you need static // IP addresses for your internet-facing load balancer. For internal load balancers, @@ -4354,11 +4441,16 @@ type CreateLoadBalancerInput struct { // [Application Load Balancers] You must specify subnets from at least two Availability // Zones. // + // [Application Load Balancers on Outposts] You must specify one Outpost subnet. + // + // [Application Load Balancers on Local Zones] You can specify subnets from + // one or more Local Zones. + // // [Network Load Balancers] You can specify subnets from one or more Availability // Zones. Subnets []*string `type:"list"` - // One or more tags to assign to the load balancer. + // The tags to assign to the load balancer. Tags []*Tag `min:"1" type:"list"` // The type of load balancer. The default is application. 
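The new AlpnPolicy and Tags fields on CreateListenerInput shown above can be exercised together in one call. A minimal sketch against this SDK version; the ARNs and tag values are placeholders, and the session is assumed to resolve region and credentials from the environment:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elbv2"
)

func main() {
	svc := elbv2.New(session.Must(session.NewSession()))

	out, err := svc.CreateListener(&elbv2.CreateListenerInput{
		// Placeholder ARNs; substitute resources from your own account.
		LoadBalancerArn: aws.String("arn:aws:elasticloadbalancing:region:account:loadbalancer/net/example/123"),
		Port:            aws.Int64(443),
		Protocol:        aws.String(elbv2.ProtocolEnumTls),
		Certificates: []*elbv2.Certificate{
			{CertificateArn: aws.String("arn:aws:acm:region:account:certificate/example")},
		},
		// One of the documented policy names: HTTP1Only, HTTP2Only,
		// HTTP2Optional, HTTP2Preferred, or None.
		AlpnPolicy: aws.StringSlice([]string{"HTTP2Preferred"}),
		DefaultActions: []*elbv2.Action{{
			Type:           aws.String(elbv2.ActionTypeEnumForward),
			TargetGroupArn: aws.String("arn:aws:elasticloadbalancing:region:account:targetgroup/example/456"),
		}},
		// Tags can now be attached at creation time instead of a
		// follow-up AddTags call.
		Tags: []*elbv2.Tag{{Key: aws.String("team"), Value: aws.String("ingress")}},
	})
	if err != nil {
		fmt.Println("CreateListener failed:", err)
		return
	}
	fmt.Println("created:", aws.StringValue(out.Listeners[0].ListenerArn))
}

Because ALPN policies apply to TLS listeners, the sketch targets a Network Load Balancer listener on port 443; ModifyListenerInput accepts the same AlpnPolicy list.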
@@ -4401,6 +4493,12 @@ func (s *CreateLoadBalancerInput) Validate() error { return nil } +// SetCustomerOwnedIpv4Pool sets the CustomerOwnedIpv4Pool field's value. +func (s *CreateLoadBalancerInput) SetCustomerOwnedIpv4Pool(v string) *CreateLoadBalancerInput { + s.CustomerOwnedIpv4Pool = &v + return s +} + // SetIpAddressType sets the IpAddressType field's value. func (s *CreateLoadBalancerInput) SetIpAddressType(v string) *CreateLoadBalancerInput { s.IpAddressType = &v @@ -4475,33 +4573,12 @@ func (s *CreateLoadBalancerOutput) SetLoadBalancers(v []*LoadBalancer) *CreateLo type CreateRuleInput struct { _ struct{} `type:"structure"` - // The actions. Each rule must include exactly one of the following types of - // actions: forward, fixed-response, or redirect, and it must be the last action - // to be performed. - // - // If the action type is forward, you specify one or more target groups. The - // protocol of the target group must be HTTP or HTTPS for an Application Load - // Balancer. The protocol of the target group must be TCP, TLS, UDP, or TCP_UDP - // for a Network Load Balancer. - // - // [HTTPS listeners] If the action type is authenticate-oidc, you authenticate - // users through an identity provider that is OpenID Connect (OIDC) compliant. - // - // [HTTPS listeners] If the action type is authenticate-cognito, you authenticate - // users through the user pools supported by Amazon Cognito. - // - // [Application Load Balancer] If the action type is redirect, you redirect - // specified client requests from one URL to another. - // - // [Application Load Balancer] If the action type is fixed-response, you drop - // specified client requests and return a custom HTTP response. + // The actions. // // Actions is a required field Actions []*Action `type:"list" required:"true"` - // The conditions. Each rule can include zero or one of the following conditions: - // http-request-method, host-header, path-pattern, and source-ip, and zero or - // more of the following conditions: http-header and query-string. + // The conditions. // // Conditions is a required field Conditions []*RuleCondition `type:"list" required:"true"` @@ -4515,6 +4592,9 @@ type CreateRuleInput struct { // // Priority is a required field Priority *int64 `min:"1" type:"integer" required:"true"` + + // The tags to assign to the rule. + Tags []*Tag `min:"1" type:"list"` } // String returns the string representation @@ -4545,6 +4625,9 @@ func (s *CreateRuleInput) Validate() error { if s.Priority != nil && *s.Priority < 1 { invalidParams.Add(request.NewErrParamMinValue("Priority", 1)) } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } if s.Actions != nil { for i, v := range s.Actions { if v == nil { @@ -4555,6 +4638,16 @@ func (s *CreateRuleInput) Validate() error { } } } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -4586,6 +4679,12 @@ func (s *CreateRuleInput) SetPriority(v int64) *CreateRuleInput { return s } +// SetTags sets the Tags field's value. +func (s *CreateRuleInput) SetTags(v []*Tag) *CreateRuleInput { + s.Tags = v + return s +} + type CreateRuleOutput struct { _ struct{} `type:"structure"` @@ -4624,8 +4723,12 @@ type CreateTargetGroupInput struct { // lambda, the default is 35 seconds. 
HealthCheckIntervalSeconds *int64 `min:"5" type:"integer"` - // [HTTP/HTTPS health checks] The ping path that is the destination on the targets - // for health checks. The default is /. + // [HTTP/HTTPS health checks] The destination for health checks on the targets. + // + // [HTTP1 or HTTP2 protocol version] The ping path. The default is /. + // + // [GRPC protocol version] The path of a custom health check method with the + // format /package.service/method. The default is /AWS.ALB/healthcheck. HealthCheckPath *string `min:"1" type:"string"` // The port the load balancer uses when performing health checks on targets. @@ -4654,8 +4757,8 @@ type CreateTargetGroupInput struct { // the default is 3. If the target type is lambda, the default is 5. HealthyThresholdCount *int64 `min:"2" type:"integer"` - // [HTTP/HTTPS health checks] The HTTP codes to use when checking for a successful - // response from a target. + // [HTTP/HTTPS health checks] The HTTP or gRPC codes to use when checking for + // a successful response from a target. Matcher *Matcher `type:"structure"` // The name of the target group. @@ -4679,13 +4782,20 @@ type CreateTargetGroupInput struct { // function, this parameter does not apply. Protocol *string `type:"string" enum:"ProtocolEnum"` + // [HTTP/HTTPS protocol] The protocol version. Specify GRPC to send requests + // to targets using gRPC. Specify HTTP2 to send requests to targets using HTTP/2. + // The default is HTTP1, which sends requests to targets using HTTP/1.1. + ProtocolVersion *string `type:"string"` + + // The tags to assign to the target group. + Tags []*Tag `min:"1" type:"list"` + // The type of target that you must specify when registering targets with this // target group. You can't specify targets for a target group using more than // one target type. // // * instance - Targets are specified by instance ID. This is the default - // value. If the target group protocol is UDP or TCP_UDP, the target type - // must be instance. + // value. // // * ip - Targets are specified by IP address. You can specify IP addresses // from the subnets of the virtual private cloud (VPC) for the target group, @@ -4739,12 +4849,20 @@ func (s *CreateTargetGroupInput) Validate() error { if s.Port != nil && *s.Port < 1 { invalidParams.Add(request.NewErrParamMinValue("Port", 1)) } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } if s.UnhealthyThresholdCount != nil && *s.UnhealthyThresholdCount < 2 { invalidParams.Add(request.NewErrParamMinValue("UnhealthyThresholdCount", 2)) } - if s.Matcher != nil { - if err := s.Matcher.Validate(); err != nil { - invalidParams.AddNested("Matcher", err.(request.ErrInvalidParams)) + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } } } @@ -4820,6 +4938,18 @@ func (s *CreateTargetGroupInput) SetProtocol(v string) *CreateTargetGroupInput { return s } +// SetProtocolVersion sets the ProtocolVersion field's value. +func (s *CreateTargetGroupInput) SetProtocolVersion(v string) *CreateTargetGroupInput { + s.ProtocolVersion = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateTargetGroupInput) SetTags(v []*Tag) *CreateTargetGroupInput { + s.Tags = v + return s +} + // SetTargetType sets the TargetType field's value. 
func (s *CreateTargetGroupInput) SetTargetType(v string) *CreateTargetGroupInput { s.TargetType = &v @@ -5733,7 +5863,7 @@ type DescribeSSLPoliciesOutput struct { // Otherwise, this is null. NextMarker *string `type:"string"` - // Information about the policies. + // Information about the security policies. SslPolicies []*SslPolicy `type:"list"` } @@ -5762,7 +5892,8 @@ func (s *DescribeSSLPoliciesOutput) SetSslPolicies(v []*SslPolicy) *DescribeSSLP type DescribeTagsInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Names (ARN) of the resources. + // The Amazon Resource Names (ARN) of the resources. You can specify up to 20 + // resources in a single call. // // ResourceArns is a required field ResourceArns []*string `type:"list" required:"true"` @@ -6338,6 +6469,10 @@ func (s *Limit) SetName(v string) *Limit { type Listener struct { _ struct{} `type:"structure"` + // [TLS listener] The name of the Application-Layer Protocol Negotiation (ALPN) + // policy. + AlpnPolicy []*string `type:"list"` + // [HTTPS or TLS listener] The default certificate for the listener. Certificates []*Certificate `type:"list"` @@ -6356,8 +6491,8 @@ type Listener struct { // The protocol for connections from clients to the load balancer. Protocol *string `type:"string" enum:"ProtocolEnum"` - // [HTTPS or TLS listener] The security policy that defines which ciphers and - // protocols are supported. The default is the current predefined security policy. + // [HTTPS or TLS listener] The security policy that defines which protocols + // and ciphers are supported. SslPolicy *string `type:"string"` } @@ -6371,6 +6506,12 @@ func (s Listener) GoString() string { return s.String() } +// SetAlpnPolicy sets the AlpnPolicy field's value. +func (s *Listener) SetAlpnPolicy(v []*string) *Listener { + s.AlpnPolicy = v + return s +} + // SetCertificates sets the Certificates field's value. func (s *Listener) SetCertificates(v []*Certificate) *Listener { s.Certificates = v @@ -6417,7 +6558,7 @@ func (s *Listener) SetSslPolicy(v string) *Listener { type LoadBalancer struct { _ struct{} `type:"structure"` - // The Availability Zones for the load balancer. + // The subnets for the load balancer. AvailabilityZones []*AvailabilityZone `type:"list"` // The ID of the Amazon Route 53 hosted zone associated with the load balancer. @@ -6426,6 +6567,10 @@ type LoadBalancer struct { // The date and time the load balancer was created. CreatedTime *time.Time `type:"timestamp"` + // [Application Load Balancers on Outposts] The ID of the customer-owned address + // pool. + CustomerOwnedIpv4Pool *string `type:"string"` + // The public DNS name of the load balancer. DNSName *string `type:"string"` @@ -6492,6 +6637,12 @@ func (s *LoadBalancer) SetCreatedTime(v time.Time) *LoadBalancer { return s } +// SetCustomerOwnedIpv4Pool sets the CustomerOwnedIpv4Pool field's value. +func (s *LoadBalancer) SetCustomerOwnedIpv4Pool(v string) *LoadBalancer { + s.CustomerOwnedIpv4Pool = &v + return s +} + // SetDNSName sets the DNSName field's value. func (s *LoadBalancer) SetDNSName(v string) *LoadBalancer { s.DNSName = &v @@ -6617,12 +6768,18 @@ type LoadBalancerAttribute struct { // * idle_timeout.timeout_seconds - The idle timeout value, in seconds. The // valid range is 1-4000 seconds. The default is 60 seconds. // + // * routing.http.desync_mitigation_mode - Determines how the load balancer + // handles requests that might pose a security risk to your application. + // The possible values are monitor, defensive, and strictest. 
The default + // is defensive. + // // * routing.http.drop_invalid_header_fields.enabled - Indicates whether // HTTP headers with invalid header fields are removed by the load balancer // (true) or routed to targets (false). The default is false. // // * routing.http2.enabled - Indicates whether HTTP/2 is enabled. The value - // is true or false. The default is true. + // is true or false. The default is true. Elastic Load Balancing requires + // that message header names contain only alphanumeric characters and hyphens. // // The following attributes are supported by only Network Load Balancers: // @@ -6691,20 +6848,23 @@ func (s *LoadBalancerState) SetReason(v string) *LoadBalancerState { return s } -// Information to use when checking for a successful response from a target. +// The codes to use when checking for a successful response from a target. If +// the protocol version is gRPC, these are gRPC codes. Otherwise, these are +// HTTP codes. type Matcher struct { _ struct{} `type:"structure"` - // The HTTP codes. - // + // You can specify values between 0 and 99. You can specify multiple values + // (for example, "0,1") or a range of values (for example, "0-5"). The default + // value is 12. + GrpcCode *string `type:"string"` + // For Application Load Balancers, you can specify values between 200 and 499, // and the default value is 200. You can specify multiple values (for example, // "200,202") or a range of values (for example, "200-299"). // - // For Network Load Balancers, this is 200–399. - // - // HttpCode is a required field - HttpCode *string `type:"string" required:"true"` + // For Network Load Balancers, this is "200–399". + HttpCode *string `type:"string"` } // String returns the string representation @@ -6717,17 +6877,10 @@ func (s Matcher) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *Matcher) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Matcher"} - if s.HttpCode == nil { - invalidParams.Add(request.NewErrParamRequired("HttpCode")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetGrpcCode sets the GrpcCode field's value. +func (s *Matcher) SetGrpcCode(v string) *Matcher { + s.GrpcCode = &v + return s } // SetHttpCode sets the HttpCode field's value. @@ -6739,6 +6892,23 @@ func (s *Matcher) SetHttpCode(v string) *Matcher { type ModifyListenerInput struct { _ struct{} `type:"structure"` + // [TLS listeners] The name of the Application-Layer Protocol Negotiation (ALPN) + // policy. You can specify one policy name. The following are the possible values: + // + // * HTTP1Only + // + // * HTTP2Only + // + // * HTTP2Optional + // + // * HTTP2Preferred + // + // * None + // + // For more information, see ALPN Policies (https://docs.aws.amazon.com/elasticloadbalancing/latest/network/create-tls-listener.html#alpn-policies) + // in the Network Load Balancers Guide. + AlpnPolicy []*string `type:"list"` + // [HTTPS and TLS listeners] The default certificate for the listener. You must // provide exactly one certificate. Set CertificateArn to the certificate ARN // but do not set IsDefault. @@ -6746,25 +6916,7 @@ type ModifyListenerInput struct { // To create a certificate list, use AddListenerCertificates. Certificates []*Certificate `type:"list"` - // The actions for the default rule. The rule must include one forward action - // or one or more fixed-response actions. 
- // - // If the action type is forward, you specify one or more target groups. The - // protocol of the target group must be HTTP or HTTPS for an Application Load - // Balancer. The protocol of the target group must be TCP, TLS, UDP, or TCP_UDP - // for a Network Load Balancer. - // - // [HTTPS listeners] If the action type is authenticate-oidc, you authenticate - // users through an identity provider that is OpenID Connect (OIDC) compliant. - // - // [HTTPS listeners] If the action type is authenticate-cognito, you authenticate - // users through the user pools supported by Amazon Cognito. - // - // [Application Load Balancer] If the action type is redirect, you redirect - // specified client requests from one URL to another. - // - // [Application Load Balancer] If the action type is fixed-response, you drop - // specified client requests and return a custom HTTP response. + // The actions for the default rule. DefaultActions []*Action `type:"list"` // The Amazon Resource Name (ARN) of the listener. @@ -6781,8 +6933,29 @@ type ModifyListenerInput struct { Protocol *string `type:"string" enum:"ProtocolEnum"` // [HTTPS and TLS listeners] The security policy that defines which protocols - // and ciphers are supported. For more information, see Security Policies (https://docs.aws.amazon.com/elasticloadbalancing/latest/application/create-https-listener.html#describe-ssl-policies) - // in the Application Load Balancers Guide. + // and ciphers are supported. The following are the possible values: + // + // * ELBSecurityPolicy-2016-08 + // + // * ELBSecurityPolicy-TLS-1-0-2015-04 + // + // * ELBSecurityPolicy-TLS-1-1-2017-01 + // + // * ELBSecurityPolicy-TLS-1-2-2017-01 + // + // * ELBSecurityPolicy-TLS-1-2-Ext-2018-06 + // + // * ELBSecurityPolicy-FS-2018-06 + // + // * ELBSecurityPolicy-FS-1-1-2019-08 + // + // * ELBSecurityPolicy-FS-1-2-2019-08 + // + // * ELBSecurityPolicy-FS-1-2-Res-2019-08 + // + // For more information, see Security Policies (https://docs.aws.amazon.com/elasticloadbalancing/latest/application/create-https-listener.html#describe-ssl-policies) + // in the Application Load Balancers Guide and Security Policies (https://docs.aws.amazon.com/elasticloadbalancing/latest/network/create-tls-listener.html#describe-ssl-policies) + // in the Network Load Balancers Guide. SslPolicy *string `type:"string"` } @@ -6822,6 +6995,12 @@ func (s *ModifyListenerInput) Validate() error { return nil } +// SetAlpnPolicy sets the AlpnPolicy field's value. +func (s *ModifyListenerInput) SetAlpnPolicy(v []*string) *ModifyListenerInput { + s.AlpnPolicy = v + return s +} + // SetCertificates sets the Certificates field's value. func (s *ModifyListenerInput) SetCertificates(v []*Certificate) *ModifyListenerInput { s.Certificates = v @@ -6959,31 +7138,10 @@ func (s *ModifyLoadBalancerAttributesOutput) SetAttributes(v []*LoadBalancerAttr type ModifyRuleInput struct { _ struct{} `type:"structure"` - // The actions. Each rule must include exactly one of the following types of - // actions: forward, fixed-response, or redirect, and it must be the last action - // to be performed. - // - // If the action type is forward, you specify one or more target groups. The - // protocol of the target group must be HTTP or HTTPS for an Application Load - // Balancer. The protocol of the target group must be TCP, TLS, UDP, or TCP_UDP - // for a Network Load Balancer. 
- // - // [HTTPS listeners] If the action type is authenticate-oidc, you authenticate - // users through an identity provider that is OpenID Connect (OIDC) compliant. - // - // [HTTPS listeners] If the action type is authenticate-cognito, you authenticate - // users through the user pools supported by Amazon Cognito. - // - // [Application Load Balancer] If the action type is redirect, you redirect - // specified client requests from one URL to another. - // - // [Application Load Balancer] If the action type is fixed-response, you drop - // specified client requests and return a custom HTTP response. + // The actions. Actions []*Action `type:"list"` - // The conditions. Each rule can include zero or one of the following conditions: - // http-request-method, host-header, path-pattern, and source-ip, and zero or - // more of the following conditions: http-header and query-string. + // The conditions. Conditions []*RuleCondition `type:"list"` // The Amazon Resource Name (ARN) of the rule. @@ -7148,14 +7306,18 @@ type ModifyTargetGroupInput struct { HealthCheckEnabled *bool `type:"boolean"` // The approximate amount of time, in seconds, between health checks of an individual - // target. For Application Load Balancers, the range is 5 to 300 seconds. For - // Network Load Balancers, the supported values are 10 or 30 seconds. + // target. For HTTP and HTTPS health checks, the range is 5 to 300 seconds. + // For TCP health checks, the supported values are 10 or 30 seconds. // // With Network Load Balancers, you can't modify this setting. HealthCheckIntervalSeconds *int64 `min:"5" type:"integer"` - // [HTTP/HTTPS health checks] The ping path that is the destination for the - // health check request. + // [HTTP/HTTPS health checks] The destination for health checks on the targets. + // + // [HTTP1 or HTTP2 protocol version] The ping path. The default is /. + // + // [GRPC protocol version] The path of a custom health check method with the + // format /package.service/method. The default is /AWS.ALB/healthcheck. HealthCheckPath *string `min:"1" type:"string"` // The port the load balancer uses when performing health checks on targets. @@ -7179,8 +7341,8 @@ type ModifyTargetGroupInput struct { // an unhealthy target healthy. HealthyThresholdCount *int64 `min:"2" type:"integer"` - // [HTTP/HTTPS health checks] The HTTP codes to use when checking for a successful - // response from a target. + // [HTTP/HTTPS health checks] The HTTP or gRPC codes to use when checking for + // a successful response from a target. // // With Network Load Balancers, you can't modify this setting. Matcher *Matcher `type:"structure"` @@ -7191,8 +7353,8 @@ type ModifyTargetGroupInput struct { TargetGroupArn *string `type:"string" required:"true"` // The number of consecutive health check failures required before considering - // the target unhealthy. For Network Load Balancers, this value must be the - // same as the healthy threshold count. + // the target unhealthy. For target groups with a protocol of TCP or TLS, this + // value must be the same as the healthy threshold count. 
UnhealthyThresholdCount *int64 `min:"2" type:"integer"` } @@ -7227,11 +7389,6 @@ func (s *ModifyTargetGroupInput) Validate() error { if s.UnhealthyThresholdCount != nil && *s.UnhealthyThresholdCount < 2 { invalidParams.Add(request.NewErrParamMinValue("UnhealthyThresholdCount", 2)) } - if s.Matcher != nil { - if err := s.Matcher.Validate(); err != nil { - invalidParams.AddNested("Matcher", err.(request.ErrInvalidParams)) - } - } if invalidParams.Len() > 0 { return invalidParams @@ -7551,10 +7708,6 @@ type RegisterTargetsInput struct { // The targets. // - // To register a target by instance ID, specify the instance ID. To register - // a target by IP address, specify the IP address. To register a Lambda function, - // specify the ARN of the Lambda function. - // // Targets is a required field Targets []*TargetDescription `type:"list" required:"true"` } @@ -7819,6 +7972,11 @@ func (s *Rule) SetRuleArn(v string) *Rule { } // Information about a condition for a rule. +// +// Each rule can optionally include up to one of each of the following conditions: +// http-request-method, host-header, path-pattern, and source-ip. Each rule +// can also optionally include one or more of each of the following conditions: +// http-header and query-string. type RuleCondition struct { _ struct{} `type:"structure"` @@ -7855,13 +8013,14 @@ type RuleCondition struct { // Information for a source IP condition. Specify only when Field is source-ip. SourceIpConfig *SourceIpConditionConfig `type:"structure"` - // The condition value. You can use Values if the rule contains only host-header - // and path-pattern conditions. Otherwise, you can use HostHeaderConfig for - // host-header conditions and PathPatternConfig for path-pattern conditions. + // The condition value. Specify only when Field is host-header or path-pattern. + // Alternatively, to specify multiple host names or multiple path patterns, + // use HostHeaderConfig or PathPatternConfig. // - // If Field is host-header, you can specify a single host name (for example, - // my.example.com). A host name is case insensitive, can be up to 128 characters - // in length, and can contain any of the following characters. + // If Field is host-header and you are not using HostHeaderConfig, you can specify + // a single host name (for example, my.example.com) in Values. A host name is + // case insensitive, can be up to 128 characters in length, and can contain + // any of the following characters. // // * A-Z, a-z, 0-9 // @@ -7871,9 +8030,10 @@ type RuleCondition struct { // // * ? (matches exactly 1 character) // - // If Field is path-pattern, you can specify a single path pattern (for example, - // /img/*). A path pattern is case-sensitive, can be up to 128 characters in - // length, and can contain any of the following characters. + // If Field is path-pattern and you are not using PathPatternConfig, you can + // specify a single path pattern (for example, /img/*) in Values. A path pattern + // is case-sensitive, can be up to 128 characters in length, and can contain + // any of the following characters. // // * A-Z, a-z, 0-9 // @@ -8285,7 +8445,7 @@ func (s *SetSubnetsInput) SetSubnets(v []*string) *SetSubnetsInput { type SetSubnetsOutput struct { _ struct{} `type:"structure"` - // Information about the subnet and Availability Zone. + // Information about the subnets. AvailabilityZones []*AvailabilityZone `type:"list"` } @@ -8598,7 +8758,7 @@ type TargetGroup struct { // target. 
HealthCheckIntervalSeconds *int64 `min:"5" type:"integer"` - // The destination for the health check request. + // The destination for health checks on the targets. HealthCheckPath *string `min:"1" type:"string"` // The port to use to connect with the target. @@ -8619,7 +8779,8 @@ type TargetGroup struct { // to this target group. LoadBalancerArns []*string `type:"list"` - // The HTTP codes to use when checking for a successful response from a target. + // The HTTP or gRPC codes to use when checking for a successful response from + // a target. Matcher *Matcher `type:"structure"` // The port on which the targets are listening. Not used if the target is a @@ -8629,6 +8790,10 @@ type TargetGroup struct { // The protocol to use for routing traffic to the targets. Protocol *string `type:"string" enum:"ProtocolEnum"` + // [HTTP/HTTPS protocol] The protocol version. The possible values are GRPC, + // HTTP1, and HTTP2. + ProtocolVersion *string `type:"string"` + // The Amazon Resource Name (ARN) of the target group. TargetGroupArn *string `type:"string"` @@ -8724,6 +8889,12 @@ func (s *TargetGroup) SetProtocol(v string) *TargetGroup { return s } +// SetProtocolVersion sets the ProtocolVersion field's value. +func (s *TargetGroup) SetProtocolVersion(v string) *TargetGroup { + s.ProtocolVersion = &v + return s +} + // SetTargetGroupArn sets the TargetGroupArn field's value. func (s *TargetGroup) SetTargetGroupArn(v string) *TargetGroup { s.TargetGroupArn = &v @@ -8760,8 +8931,8 @@ type TargetGroupAttribute struct { // The name of the attribute. // - // The following attribute is supported by both Application Load Balancers and - // Network Load Balancers: + // The following attributes are supported by both Application Load Balancers + // and Network Load Balancers: // // * deregistration_delay.timeout_seconds - The amount of time, in seconds, // for Elastic Load Balancing to wait before changing the state of a deregistering @@ -8769,24 +8940,25 @@ type TargetGroupAttribute struct { // value is 300 seconds. If the target is a Lambda function, this attribute // is not supported. // - // The following attributes are supported by Application Load Balancers if the - // target is not a Lambda function: + // * stickiness.enabled - Indicates whether sticky sessions are enabled. + // The value is true or false. The default is false. + // + // * stickiness.type - The type of sticky sessions. The possible values are + // lb_cookie for Application Load Balancers or source_ip for Network Load + // Balancers. + // + // The following attributes are supported only if the load balancer is an Application + // Load Balancer and the target is an instance or an IP address: // // * load_balancing.algorithm.type - The load balancing algorithm determines // how the load balancer selects targets when routing requests. The value // is round_robin or least_outstanding_requests. The default is round_robin. // // * slow_start.duration_seconds - The time period, in seconds, during which - // a newly registered target receives a linearly increasing share of the - // traffic to the target group. After this time period ends, the target receives - // its full share of traffic. The range is 30-900 seconds (15 minutes). Slow - // start mode is disabled by default. - // - // * stickiness.enabled - Indicates whether sticky sessions are enabled. - // The value is true or false. The default is false. - // - // * stickiness.type - The type of sticky sessions. The possible value is - // lb_cookie. 
+ // a newly registered target receives an increasing share of the traffic + // to the target group. After this time period ends, the target receives + // its full share of traffic. The range is 30-900 seconds (15 minutes). The + // default is 0 seconds (disabled). // // * stickiness.lb_cookie.duration_seconds - The time period, in seconds, // during which requests from a client should be routed to the same target. @@ -8794,14 +8966,15 @@ type TargetGroupAttribute struct { // considered stale. The range is 1 second to 1 week (604800 seconds). The // default value is 1 day (86400 seconds). // - // The following attribute is supported only if the target is a Lambda function. + // The following attribute is supported only if the load balancer is an Application + // Load Balancer and the target is a Lambda function: // // * lambda.multi_value_headers.enabled - Indicates whether the request and - // response headers exchanged between the load balancer and the Lambda function - // include arrays of values or strings. The value is true or false. The default - // is false. If the value is false and the request contains a duplicate header - // field name or query parameter key, the load balancer uses the last value - // sent by the client. + // response headers that are exchanged between the load balancer and the + // Lambda function include arrays of values or strings. The value is true + // or false. The default is false. If the value is false and the request + // contains a duplicate header field name or query parameter key, the load + // balancer uses the last value sent by the client. // // The following attribute is supported only by Network Load Balancers: // @@ -9060,6 +9233,17 @@ const ( ActionTypeEnumFixedResponse = "fixed-response" ) +// ActionTypeEnum_Values returns all elements of the ActionTypeEnum enum +func ActionTypeEnum_Values() []string { + return []string{ + ActionTypeEnumForward, + ActionTypeEnumAuthenticateOidc, + ActionTypeEnumAuthenticateCognito, + ActionTypeEnumRedirect, + ActionTypeEnumFixedResponse, + } +} + const ( // AuthenticateCognitoActionConditionalBehaviorEnumDeny is a AuthenticateCognitoActionConditionalBehaviorEnum enum value AuthenticateCognitoActionConditionalBehaviorEnumDeny = "deny" @@ -9071,6 +9255,15 @@ const ( AuthenticateCognitoActionConditionalBehaviorEnumAuthenticate = "authenticate" ) +// AuthenticateCognitoActionConditionalBehaviorEnum_Values returns all elements of the AuthenticateCognitoActionConditionalBehaviorEnum enum +func AuthenticateCognitoActionConditionalBehaviorEnum_Values() []string { + return []string{ + AuthenticateCognitoActionConditionalBehaviorEnumDeny, + AuthenticateCognitoActionConditionalBehaviorEnumAllow, + AuthenticateCognitoActionConditionalBehaviorEnumAuthenticate, + } +} + const ( // AuthenticateOidcActionConditionalBehaviorEnumDeny is a AuthenticateOidcActionConditionalBehaviorEnum enum value AuthenticateOidcActionConditionalBehaviorEnumDeny = "deny" @@ -9082,6 +9275,15 @@ const ( AuthenticateOidcActionConditionalBehaviorEnumAuthenticate = "authenticate" ) +// AuthenticateOidcActionConditionalBehaviorEnum_Values returns all elements of the AuthenticateOidcActionConditionalBehaviorEnum enum +func AuthenticateOidcActionConditionalBehaviorEnum_Values() []string { + return []string{ + AuthenticateOidcActionConditionalBehaviorEnumDeny, + AuthenticateOidcActionConditionalBehaviorEnumAllow, + AuthenticateOidcActionConditionalBehaviorEnumAuthenticate, + } +} + const ( // IpAddressTypeIpv4 is a IpAddressType enum value 
IpAddressTypeIpv4 = "ipv4" @@ -9090,6 +9292,14 @@ const ( IpAddressTypeDualstack = "dualstack" ) +// IpAddressType_Values returns all elements of the IpAddressType enum +func IpAddressType_Values() []string { + return []string{ + IpAddressTypeIpv4, + IpAddressTypeDualstack, + } +} + const ( // LoadBalancerSchemeEnumInternetFacing is a LoadBalancerSchemeEnum enum value LoadBalancerSchemeEnumInternetFacing = "internet-facing" @@ -9098,6 +9308,14 @@ const ( LoadBalancerSchemeEnumInternal = "internal" ) +// LoadBalancerSchemeEnum_Values returns all elements of the LoadBalancerSchemeEnum enum +func LoadBalancerSchemeEnum_Values() []string { + return []string{ + LoadBalancerSchemeEnumInternetFacing, + LoadBalancerSchemeEnumInternal, + } +} + const ( // LoadBalancerStateEnumActive is a LoadBalancerStateEnum enum value LoadBalancerStateEnumActive = "active" @@ -9112,6 +9330,16 @@ const ( LoadBalancerStateEnumFailed = "failed" ) +// LoadBalancerStateEnum_Values returns all elements of the LoadBalancerStateEnum enum +func LoadBalancerStateEnum_Values() []string { + return []string{ + LoadBalancerStateEnumActive, + LoadBalancerStateEnumProvisioning, + LoadBalancerStateEnumActiveImpaired, + LoadBalancerStateEnumFailed, + } +} + const ( // LoadBalancerTypeEnumApplication is a LoadBalancerTypeEnum enum value LoadBalancerTypeEnumApplication = "application" @@ -9120,6 +9348,14 @@ const ( LoadBalancerTypeEnumNetwork = "network" ) +// LoadBalancerTypeEnum_Values returns all elements of the LoadBalancerTypeEnum enum +func LoadBalancerTypeEnum_Values() []string { + return []string{ + LoadBalancerTypeEnumApplication, + LoadBalancerTypeEnumNetwork, + } +} + const ( // ProtocolEnumHttp is a ProtocolEnum enum value ProtocolEnumHttp = "HTTP" @@ -9140,6 +9376,18 @@ const ( ProtocolEnumTcpUdp = "TCP_UDP" ) +// ProtocolEnum_Values returns all elements of the ProtocolEnum enum +func ProtocolEnum_Values() []string { + return []string{ + ProtocolEnumHttp, + ProtocolEnumHttps, + ProtocolEnumTcp, + ProtocolEnumTls, + ProtocolEnumUdp, + ProtocolEnumTcpUdp, + } +} + const ( // RedirectActionStatusCodeEnumHttp301 is a RedirectActionStatusCodeEnum enum value RedirectActionStatusCodeEnumHttp301 = "HTTP_301" @@ -9148,6 +9396,14 @@ const ( RedirectActionStatusCodeEnumHttp302 = "HTTP_302" ) +// RedirectActionStatusCodeEnum_Values returns all elements of the RedirectActionStatusCodeEnum enum +func RedirectActionStatusCodeEnum_Values() []string { + return []string{ + RedirectActionStatusCodeEnumHttp301, + RedirectActionStatusCodeEnumHttp302, + } +} + const ( // TargetHealthReasonEnumElbRegistrationInProgress is a TargetHealthReasonEnum enum value TargetHealthReasonEnumElbRegistrationInProgress = "Elb.RegistrationInProgress" @@ -9186,6 +9442,24 @@ const ( TargetHealthReasonEnumElbInternalError = "Elb.InternalError" ) +// TargetHealthReasonEnum_Values returns all elements of the TargetHealthReasonEnum enum +func TargetHealthReasonEnum_Values() []string { + return []string{ + TargetHealthReasonEnumElbRegistrationInProgress, + TargetHealthReasonEnumElbInitialHealthChecking, + TargetHealthReasonEnumTargetResponseCodeMismatch, + TargetHealthReasonEnumTargetTimeout, + TargetHealthReasonEnumTargetFailedHealthChecks, + TargetHealthReasonEnumTargetNotRegistered, + TargetHealthReasonEnumTargetNotInUse, + TargetHealthReasonEnumTargetDeregistrationInProgress, + TargetHealthReasonEnumTargetInvalidState, + TargetHealthReasonEnumTargetIpUnusable, + TargetHealthReasonEnumTargetHealthCheckDisabled, + TargetHealthReasonEnumElbInternalError, + } +} + 
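The generated *_Values() helpers added throughout this file return the complete set of enum strings for their type, which is handy for validating input before a request is sent instead of maintaining a hand-written list. A small sketch using ProtocolEnum_Values; the sample protocol strings are arbitrary:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/elbv2"
)

// validProtocol reports whether p is a protocol this SDK version accepts,
// iterating the generated enum list rather than a hard-coded copy.
func validProtocol(p string) bool {
	for _, v := range elbv2.ProtocolEnum_Values() {
		if v == p {
			return true
		}
	}
	return false
}

func main() {
	for _, p := range []string{"TLS", "SPDY"} {
		fmt.Printf("%-4s accepted: %v\n", p, validProtocol(p))
	}
}

The same loop works with any of the new helpers, such as TargetTypeEnum_Values or LoadBalancerStateEnum_Values.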
const ( // TargetHealthStateEnumInitial is a TargetHealthStateEnum enum value TargetHealthStateEnumInitial = "initial" @@ -9206,6 +9480,18 @@ const ( TargetHealthStateEnumUnavailable = "unavailable" ) +// TargetHealthStateEnum_Values returns all elements of the TargetHealthStateEnum enum +func TargetHealthStateEnum_Values() []string { + return []string{ + TargetHealthStateEnumInitial, + TargetHealthStateEnumHealthy, + TargetHealthStateEnumUnhealthy, + TargetHealthStateEnumUnused, + TargetHealthStateEnumDraining, + TargetHealthStateEnumUnavailable, + } +} + const ( // TargetTypeEnumInstance is a TargetTypeEnum enum value TargetTypeEnumInstance = "instance" @@ -9216,3 +9502,12 @@ const ( // TargetTypeEnumLambda is a TargetTypeEnum enum value TargetTypeEnumLambda = "lambda" ) + +// TargetTypeEnum_Values returns all elements of the TargetTypeEnum enum +func TargetTypeEnum_Values() []string { + return []string{ + TargetTypeEnumInstance, + TargetTypeEnumIp, + TargetTypeEnumLambda, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/elbv2/errors.go b/vendor/github.com/aws/aws-sdk-go/service/elbv2/errors.go index fa10830ff9c2..3a9ab86f1b90 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elbv2/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elbv2/errors.go @@ -4,6 +4,12 @@ package elbv2 const ( + // ErrCodeALPNPolicyNotSupportedException for service response error code + // "ALPNPolicyNotFound". + // + // The specified ALPN policy is not supported. + ErrCodeALPNPolicyNotSupportedException = "ALPNPolicyNotFound" + // ErrCodeAllocationIdNotFoundException for service response error code // "AllocationIdNotFound". // diff --git a/vendor/github.com/aws/aws-sdk-go/service/kms/api.go b/vendor/github.com/aws/aws-sdk-go/service/kms/api.go index caa94ee95a63..8e2aae8e5d63 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/kms/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/kms/api.go @@ -179,7 +179,9 @@ func (c *KMS) ConnectCustomKeyStoreRequest(input *ConnectCustomKeyStoreInput) (r // at least one active HSM. To get the number of active HSMs in a cluster, use // the DescribeClusters (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html) // operation. To add HSMs to the cluster, use the CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html) -// operation. +// operation. Also, the kmsuser crypto user (https://docs.aws.amazon.com/kms/latest/developerguide/key-store-concepts.html#concept-kmsuser) +// (CU) must not be logged into the cluster. This prevents AWS KMS from using +// this account to log in. // // The connection process can take an extended amount of time to complete; up // to 20 minutes. This operation starts the connection process, but it does @@ -192,8 +194,7 @@ func (c *KMS) ConnectCustomKeyStoreRequest(input *ConnectCustomKeyStoreInput) (r // During the connection process, AWS KMS finds the AWS CloudHSM cluster that // is associated with the custom key store, creates the connection infrastructure, // connects to the cluster, logs into the AWS CloudHSM client as the kmsuser -// crypto user (https://docs.aws.amazon.com/kms/latest/developerguide/key-store-concepts.html#concept-kmsuser) -// (CU), and rotates its password. +// CU, and rotates its password. // // The ConnectCustomKeyStore operation might fail for various reasons. 
To find // the reason, use the DescribeCustomKeyStores operation and see the ConnectionErrorCode @@ -349,9 +350,9 @@ func (c *KMS) CreateAliasRequest(input *CreateAliasInput) (req *request.Request, // CreateAlias API operation for AWS Key Management Service. // // Creates a display name for a customer managed customer master key (CMK). -// You can use an alias to identify a CMK in cryptographic operations, such -// as Encrypt and GenerateDataKey. You can change the CMK associated with the -// alias at any time. +// You can use an alias to identify a CMK in cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations), +// such as Encrypt and GenerateDataKey. You can change the CMK associated with +// the alias at any time. // // Aliases are easier to remember than key IDs. They can also help to simplify // your applications. For example, if you use an alias in your code, you can @@ -398,11 +399,12 @@ func (c *KMS) CreateAliasRequest(input *CreateAliasInput) (req *request.Request, // a new alias with the desired name. // // * You can use an alias name or alias ARN to identify a CMK in AWS KMS -// cryptographic operations and in the DescribeKey operation. However, you -// cannot use alias names or alias ARNs in API operations that manage CMKs, -// such as DisableKey or GetKeyPolicy. For information about the valid CMK -// identifiers for each AWS KMS API operation, see the descriptions of the -// KeyId parameter in the API operation documentation. +// cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) +// and in the DescribeKey operation. However, you cannot use alias names +// or alias ARNs in API operations that manage CMKs, such as DisableKey or +// GetKeyPolicy. For information about the valid CMK identifiers for each +// AWS KMS API operation, see the descriptions of the KeyId parameter in +// the API operation documentation. // // Because an alias is not a property of a CMK, you can delete and change the // aliases of a CMK without affecting the CMK. Also, aliases do not appear in @@ -441,8 +443,8 @@ func (c *KMS) CreateAliasRequest(input *CreateAliasInput) (req *request.Request, // can be retried. // // * LimitExceededException -// The request was rejected because a limit was exceeded. For more information, -// see Limits (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html) +// The request was rejected because a quota was exceeded. For more information, +// see Quotas (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html) // in the AWS Key Management Service Developer Guide. // // * InvalidStateException @@ -692,8 +694,8 @@ func (c *KMS) CreateGrantRequest(input *CreateGrantInput) (req *request.Request, // principal to use the CMK when the conditions specified in the grant are met. // When setting permissions, grants are an alternative to key policies. // -// To create a grant that allows a cryptographic operation only when the request -// includes a particular encryption context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context), +// To create a grant that allows a cryptographic operation (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) +// only when the request includes a particular encryption context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context), // use the Constraints parameter. 
For details, see GrantConstraints. // // You can create grants on symmetric and asymmetric CMKs. However, if the grant @@ -763,8 +765,8 @@ func (c *KMS) CreateGrantRequest(input *CreateGrantInput) (req *request.Request, // The request was rejected because the specified grant token is not valid. // // * LimitExceededException -// The request was rejected because a limit was exceeded. For more information, -// see Limits (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html) +// The request was rejected because a quota was exceeded. For more information, +// see Quotas (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html) // in the AWS Key Management Service Developer Guide. // // * InvalidStateException @@ -851,7 +853,8 @@ func (c *KMS) CreateKeyRequest(input *CreateKeyInput) (req *request.Request, out // KMS unencrypted. To use the CMK, you must call AWS KMS. You can use a // symmetric CMK to encrypt and decrypt small amounts of data, but they are // typically used to generate data keys (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#data-keys) -// or data key pairs. For details, see GenerateDataKey and GenerateDataKeyPair. +// and data key pairs (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#data-key-pairs). +// For details, see GenerateDataKey and GenerateDataKeyPair. // // * Asymmetric CMKs can contain an RSA key pair or an Elliptic Curve (ECC) // key pair. The private key in an asymmetric CMK never leaves AWS KMS unencrypted. @@ -932,8 +935,8 @@ func (c *KMS) CreateKeyRequest(input *CreateKeyInput) (req *request.Request, out // can be retried. // // * LimitExceededException -// The request was rejected because a limit was exceeded. For more information, -// see Limits (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html) +// The request was rejected because a quota was exceeded. For more information, +// see Quotas (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html) // in the AWS Key Management Service Developer Guide. // // * TagException @@ -1355,12 +1358,12 @@ func (c *KMS) DeleteCustomKeyStoreRequest(input *DeleteCustomKeyStoreInput) (req // The custom key store that you delete cannot contain any AWS KMS customer // master keys (CMKs) (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#master_keys). // Before deleting the key store, verify that you will never need to use any -// of the CMKs in the key store for any cryptographic operations. Then, use -// ScheduleKeyDeletion to delete the AWS KMS customer master keys (CMKs) from -// the key store. When the scheduled waiting period expires, the ScheduleKeyDeletion -// operation deletes the CMKs. Then it makes a best effort to delete the key -// material from the associated cluster. However, you might need to manually -// delete the orphaned key material (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-orphaned-key) +// of the CMKs in the key store for any cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations). +// Then, use ScheduleKeyDeletion to delete the AWS KMS customer master keys +// (CMKs) from the key store. When the scheduled waiting period expires, the +// ScheduleKeyDeletion operation deletes the CMKs. Then it makes a best effort +// to delete the key material from the associated cluster. 
However, you might +// need to manually delete the orphaned key material (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-orphaned-key) // from the cluster and its backups. // // After all CMKs are deleted from AWS KMS, use DisconnectCustomKeyStore to @@ -1842,8 +1845,8 @@ func (c *KMS) DisableKeyRequest(input *DisableKeyInput) (req *request.Request, o // DisableKey API operation for AWS Key Management Service. // // Sets the state of a customer master key (CMK) to disabled, thereby preventing -// its use for cryptographic operations. You cannot perform this operation on -// a CMK in a different AWS account. +// its use for cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations). +// You cannot perform this operation on a CMK in a different AWS account. // // For more information about how key state affects the use of a CMK, see How // Key State Affects the Use of a Customer Master Key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) @@ -2077,8 +2080,9 @@ func (c *KMS) DisconnectCustomKeyStoreRequest(input *DisconnectCustomKeyStoreInp // // While a custom key store is disconnected, all attempts to create customer // master keys (CMKs) in the custom key store or to use existing CMKs in cryptographic -// operations will fail. This action can prevent users from storing and accessing -// sensitive data. +// operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) +// will fail. This action can prevent users from storing and accessing sensitive +// data. // // To find the connection state of a custom key store, use the DescribeCustomKeyStores // operation. To reconnect a custom key store, use the ConnectCustomKeyStore @@ -2193,8 +2197,8 @@ func (c *KMS) EnableKeyRequest(input *EnableKeyInput) (req *request.Request, out // EnableKey API operation for AWS Key Management Service. // // Sets the key state of a customer master key (CMK) to enabled. This allows -// you to use the CMK for cryptographic operations. You cannot perform this -// operation on a CMK in a different AWS account. +// you to use the CMK for cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations). +// You cannot perform this operation on a CMK in a different AWS account. // // The CMK that you use for this operation must be in a compatible key state. // For details, see How Key State Affects Use of a Customer Master Key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) @@ -2225,8 +2229,8 @@ func (c *KMS) EnableKeyRequest(input *EnableKeyInput) (req *request.Request, out // can be retried. // // * LimitExceededException -// The request was rejected because a limit was exceeded. For more information, -// see Limits (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html) +// The request was rejected because a quota was exceeded. For more information, +// see Quotas (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html) // in the AWS Key Management Service Developer Guide. // // * InvalidStateException @@ -2427,11 +2431,12 @@ func (c *KMS) EncryptRequest(input *EncryptInput) (req *request.Request, output // identifier or database password, or other sensitive information. // // * You can use the Encrypt operation to move encrypted data from one AWS -// region to another. 
In the first region, generate a data key and use the -// plaintext key to encrypt the data. Then, in the new region, call the Encrypt -// method on same plaintext data key. Now, you can safely move the encrypted -// data and encrypted data key to the new region, and decrypt in the new -// region when necessary. +// Region to another. For example, in Region A, generate a data key and use +// the plaintext key to encrypt your data. Then, in Region A, use the Encrypt +// operation to encrypt the plaintext data key under a CMK in Region B. Now, +// you can move the encrypted data and the encrypted data key to Region B. +// When necessary, you can decrypt the encrypted data key and the encrypted +// data entirely within Region B. // // You don't need to use the Encrypt operation to encrypt a data key. The GenerateDataKey // and GenerateDataKeyPair operations return a plaintext data key and an encrypted @@ -2601,26 +2606,20 @@ func (c *KMS) GenerateDataKeyRequest(input *GenerateDataKeyInput) (req *request. // GenerateDataKey API operation for AWS Key Management Service. // -// Generates a unique symmetric data key. This operation returns a plaintext -// copy of the data key and a copy that is encrypted under a customer master -// key (CMK) that you specify. You can use the plaintext key to encrypt your -// data outside of AWS KMS and store the encrypted data key with the encrypted -// data. +// Generates a unique symmetric data key for client-side encryption. This operation +// returns a plaintext copy of the data key and a copy that is encrypted under +// a customer master key (CMK) that you specify. You can use the plaintext key +// to encrypt your data outside of AWS KMS and store the encrypted data key +// with the encrypted data. // // GenerateDataKey returns a unique data key for each request. The bytes in -// the key are not related to the caller or CMK that is used to encrypt the -// data key. +// the plaintext key are not related to the caller or the CMK. // // To generate a data key, specify the symmetric CMK that will be used to encrypt -// the data key. You cannot use an asymmetric CMK to generate data keys. -// -// You must also specify the length of the data key. Use either the KeySpec -// or NumberOfBytes parameters (but not both). For 128-bit and 256-bit data -// keys, use the KeySpec parameter. -// -// If the operation succeeds, the plaintext copy of the data key is in the Plaintext -// field of the response, and the encrypted copy of the data key in the CiphertextBlob -// field. +// the data key. You cannot use an asymmetric CMK to generate data keys. To +// get the type of your CMK, use the DescribeKey operation. You must also specify +// the length of the data key. Use either the KeySpec or NumberOfBytes parameters +// (but not both). For 128-bit and 256-bit data keys, use the KeySpec parameter. // // To get only an encrypted copy of the data key, use GenerateDataKeyWithoutPlaintext. // To generate an asymmetric data key pair, use the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext @@ -2637,24 +2636,32 @@ func (c *KMS) GenerateDataKeyRequest(input *GenerateDataKeyInput) (req *request. // For details, see How Key State Affects Use of a Customer Master Key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) // in the AWS Key Management Service Developer Guide. // +// How to use your data key +// // We recommend that you use the following pattern to encrypt data locally in -// your application. 
You can write your own code or use a client-side encryption +// library, such as the AWS Encryption SDK (https://docs.aws.amazon.com/encryption-sdk/latest/developer-guide/), +// the Amazon DynamoDB Encryption Client (https://docs.aws.amazon.com/dynamodb-encryption-client/latest/devguide/), +// or Amazon S3 client-side encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html) +// to do these tasks for you. +// +// To encrypt data outside of AWS KMS: // -// Use the GenerateDataKey operation to get a data encryption key. +// Use the GenerateDataKey operation to get a data key. // -// Use the plaintext data key (returned in the Plaintext field of the response) -// to encrypt data locally, then erase the plaintext data key from memory. +// Use the plaintext data key (in the Plaintext field of the response) to encrypt +// your data outside of AWS KMS. Then erase the plaintext data key from memory. // -// Store the encrypted data key (returned in the CiphertextBlob field of the -// response) alongside the locally encrypted data. +// Store the encrypted data key (in the CiphertextBlob field of the response) +// with the encrypted data. // -// To decrypt data locally: +// To decrypt data outside of AWS KMS: // // Use the Decrypt operation to decrypt the encrypted data key. The operation // returns a plaintext copy of the data key. // -// Use the plaintext data key to decrypt data locally, then erase the plaintext -// data key from memory. +// Use the plaintext data key to decrypt data outside of AWS KMS, then erase +// the plaintext data key from memory. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2791,7 +2798,8 @@ func (c *KMS) GenerateDataKeyPairRequest(input *GenerateDataKeyPairInput) (req * // // To generate a data key pair, you must specify a symmetric customer master // key (CMK) to encrypt the private key in a data key pair. You cannot use an -// asymmetric CMK. To get the type of your CMK, use the DescribeKey operation. +// asymmetric CMK or a CMK in a custom key store. To get the type and origin +// of your CMK, use the DescribeKey operation. // // If you are using the data key pair to encrypt data, or for any operation // where you don't immediately need a private key, consider using the GenerateDataKeyPairWithoutPlaintext @@ -2865,6 +2873,10 @@ func (c *KMS) GenerateDataKeyPairRequest(input *GenerateDataKeyPairInput) (req * // Key State Affects Use of a Customer Master Key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) // in the AWS Key Management Service Developer Guide . // +// * UnsupportedOperationException +// The request was rejected because a specified parameter is not supported or +// a specified resource is not valid for this operation. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateDataKeyPair func (c *KMS) GenerateDataKeyPair(input *GenerateDataKeyPairInput) (*GenerateDataKeyPairOutput, error) { req, out := c.GenerateDataKeyPairRequest(input) @@ -2938,8 +2950,8 @@ func (c *KMS) GenerateDataKeyPairWithoutPlaintextRequest(input *GenerateDataKeyP // // To generate a data key pair, you must specify a symmetric customer master // key (CMK) to encrypt the private key in the data key pair. You cannot use -// an asymmetric CMK. To get the type of your CMK, use the KeySpec field in -// the DescribeKey response. +// an asymmetric CMK or a CMK in a custom key store. 
To get the type and origin +// of your CMK, use the KeySpec field in the DescribeKey response. // // You can use the public key that GenerateDataKeyPairWithoutPlaintext returns // to encrypt data or verify a signature outside of AWS KMS. Then, store the @@ -3015,6 +3027,10 @@ func (c *KMS) GenerateDataKeyPairWithoutPlaintextRequest(input *GenerateDataKeyP // Key State Affects Use of a Customer Master Key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) // in the AWS Key Management Service Developer Guide . // +// * UnsupportedOperationException +// The request was rejected because a specified parameter is not supported or +// a specified resource is not valid for this operation. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateDataKeyPairWithoutPlaintext func (c *KMS) GenerateDataKeyPairWithoutPlaintext(input *GenerateDataKeyPairWithoutPlaintextInput) (*GenerateDataKeyPairWithoutPlaintextOutput, error) { req, out := c.GenerateDataKeyPairWithoutPlaintextRequest(input) @@ -3107,15 +3123,11 @@ func (c *KMS) GenerateDataKeyWithoutPlaintextRequest(input *GenerateDataKeyWitho // // To generate a data key, you must specify the symmetric customer master key // (CMK) that is used to encrypt the data key. You cannot use an asymmetric -// CMK to generate a data key. To get the type of your CMK, use the KeySpec -// field in the DescribeKey response. You must also specify the length of the -// data key using either the KeySpec or NumberOfBytes field (but not both). -// For common key lengths (128-bit and 256-bit symmetric keys), use the KeySpec -// parameter. +// CMK to generate a data key. To get the type of your CMK, use the DescribeKey +// operation. // -// If the operation succeeds, you will find the plaintext copy of the data key -// in the Plaintext field of the response, and the encrypted copy of the data -// key in the CiphertextBlob field. +// If the operation succeeds, you will find the encrypted copy of the data key +// in the CiphertextBlob field. // // You can use the optional encryption context to add additional security to // the encryption operation. If you specify an EncryptionContext, you must specify @@ -4079,7 +4091,7 @@ func (c *KMS) ListAliasesRequest(input *ListAliasesInput) (req *request.Request, // The response might also include aliases that have no TargetKeyId field. These // are predefined aliases that AWS has created but has not yet associated with // a CMK. Aliases that AWS creates in your account, including predefined aliases, -// do not count against your AWS KMS aliases limit (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html#aliases-limit). +// do not count against your AWS KMS aliases quota (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html#aliases-limit). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4238,6 +4250,12 @@ func (c *KMS) ListGrantsRequest(input *ListGrantsInput) (req *request.Request, o // To perform this operation on a CMK in a different AWS account, specify the // key ARN in the value of the KeyId parameter. // +// The GranteePrincipal field in the ListGrants response usually contains the +// user or role designated as the grantee principal in the grant. 
However, when +// the grantee principal in the grant is an AWS service, the GranteePrincipal +// field contains the service principal (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html#principal-services), +// which might represent several different grantee principals. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -4934,8 +4952,8 @@ func (c *KMS) PutKeyPolicyRequest(input *PutKeyPolicyInput) (req *request.Reques // can be retried. // // * LimitExceededException -// The request was rejected because a limit was exceeded. For more information, -// see Limits (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html) +// The request was rejected because a quota was exceeded. For more information, +// see Quotas (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html) // in the AWS Key Management Service Developer Guide. // // * InvalidStateException @@ -5017,13 +5035,15 @@ func (c *KMS) ReEncryptRequest(input *ReEncryptInput) (req *request.Request, out // is encrypted, such as when you manually rotate (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html#rotate-keys-manually) // a CMK or change the CMK that protects a ciphertext. You can also use it to // reencrypt ciphertext under the same CMK, such as to change the encryption -// context of a ciphertext. +// context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) +// of a ciphertext. // // The ReEncrypt operation can decrypt ciphertext that was encrypted by using // an AWS KMS CMK in an AWS KMS operation, such as Encrypt or GenerateDataKey. // It can also decrypt ciphertext that was encrypted by using the public key -// of an asymmetric CMK outside of AWS KMS. However, it cannot decrypt ciphertext -// produced by other libraries, such as the AWS Encryption SDK (https://docs.aws.amazon.com/encryption-sdk/latest/developer-guide/) +// of an asymmetric CMK (https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-concepts.html#asymmetric-cmks) +// outside of AWS KMS. However, it cannot decrypt ciphertext produced by other +// libraries, such as the AWS Encryption SDK (https://docs.aws.amazon.com/encryption-sdk/latest/developer-guide/) // or Amazon S3 client-side encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html). // These libraries return a ciphertext format that is incompatible with AWS // KMS. @@ -5058,17 +5078,16 @@ func (c *KMS) ReEncryptRequest(input *ReEncryptInput) (req *request.Request, out // // Unlike other AWS KMS API operations, ReEncrypt callers must have two permissions: // -// * kms:EncryptFrom permission on the source CMK -// -// * kms:EncryptTo permission on the destination CMK +// * kms:ReEncryptFrom permission on the source CMK // -// To permit reencryption from +// * kms:ReEncryptTo permission on the destination CMK // -// or to a CMK, include the "kms:ReEncrypt*" permission in your key policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html). +// To permit reencryption from or to a CMK, include the "kms:ReEncrypt*" permission +// in your key policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html). // This permission is automatically included in the key policy when you use // the console to create a CMK. 
But you must include it manually when you create -// a CMK programmatically or when you use the PutKeyPolicy operation set a key -// policy. +// a CMK programmatically or when you use the PutKeyPolicy operation to set +// a key policy. // // The CMK that you use for this operation must be in a compatible key state. // For details, see How Key State Affects Use of a Customer Master Key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) @@ -5770,8 +5789,8 @@ func (c *KMS) TagResourceRequest(input *TagResourceInput) (req *request.Request, // in the AWS Key Management Service Developer Guide . // // * LimitExceededException -// The request was rejected because a limit was exceeded. For more information, -// see Limits (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html) +// The request was rejected because a quota was exceeded. For more information, +// see Quotas (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html) // in the AWS Key Management Service Developer Guide. // // * TagException @@ -5996,6 +6015,11 @@ func (c *KMS) UpdateAliasRequest(input *UpdateAliasInput) (req *request.Request, // The request was rejected because an internal exception occurred. The request // can be retried. // +// * LimitExceededException +// The request was rejected because a quota was exceeded. For more information, +// see Quotas (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html) +// in the AWS Key Management Service Developer Guide. +// // * InvalidStateException // The request was rejected because the state of the specified resource is not // valid for this request. @@ -6534,8 +6558,8 @@ func (s *AliasListEntry) SetTargetKeyId(v string) *AliasListEntry { // The request was rejected because it attempted to create a resource that already // exists. type AlreadyExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6552,17 +6576,17 @@ func (s AlreadyExistsException) GoString() string { func newErrorAlreadyExistsException(v protocol.ResponseMetadata) error { return &AlreadyExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AlreadyExistsException) Code() string { +func (s *AlreadyExistsException) Code() string { return "AlreadyExistsException" } // Message returns the exception's message. -func (s AlreadyExistsException) Message() string { +func (s *AlreadyExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6570,22 +6594,22 @@ func (s AlreadyExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AlreadyExistsException) OrigErr() error { +func (s *AlreadyExistsException) OrigErr() error { return nil } -func (s AlreadyExistsException) Error() string { +func (s *AlreadyExistsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s AlreadyExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s AlreadyExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *AlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID } type CancelKeyDeletionInput struct { @@ -6643,7 +6667,8 @@ func (s *CancelKeyDeletionInput) SetKeyId(v string) *CancelKeyDeletionInput { type CancelKeyDeletionOutput struct { _ struct{} `type:"structure"` - // The unique identifier of the master key for which deletion is canceled. + // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // of the CMK whose deletion is canceled. KeyId *string `min:"1" type:"string"` } @@ -6672,8 +6697,8 @@ func (s *CancelKeyDeletionOutput) SetKeyId(v string) *CancelKeyDeletionOutput { // view the cluster certificate of a cluster, use the DescribeClusters (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html) // operation. type CloudHsmClusterInUseException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6690,17 +6715,17 @@ func (s CloudHsmClusterInUseException) GoString() string { func newErrorCloudHsmClusterInUseException(v protocol.ResponseMetadata) error { return &CloudHsmClusterInUseException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CloudHsmClusterInUseException) Code() string { +func (s *CloudHsmClusterInUseException) Code() string { return "CloudHsmClusterInUseException" } // Message returns the exception's message. -func (s CloudHsmClusterInUseException) Message() string { +func (s *CloudHsmClusterInUseException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6708,22 +6733,22 @@ func (s CloudHsmClusterInUseException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CloudHsmClusterInUseException) OrigErr() error { +func (s *CloudHsmClusterInUseException) OrigErr() error { return nil } -func (s CloudHsmClusterInUseException) Error() string { +func (s *CloudHsmClusterInUseException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CloudHsmClusterInUseException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CloudHsmClusterInUseException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CloudHsmClusterInUseException) RequestID() string { - return s.respMetadata.RequestID +func (s *CloudHsmClusterInUseException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because the associated AWS CloudHSM cluster did @@ -6757,8 +6782,8 @@ func (s CloudHsmClusterInUseException) RequestID() string { // see Configure a Default Security Group (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html) // in the AWS CloudHSM User Guide . 
type CloudHsmClusterInvalidConfigurationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6775,17 +6800,17 @@ func (s CloudHsmClusterInvalidConfigurationException) GoString() string { func newErrorCloudHsmClusterInvalidConfigurationException(v protocol.ResponseMetadata) error { return &CloudHsmClusterInvalidConfigurationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CloudHsmClusterInvalidConfigurationException) Code() string { +func (s *CloudHsmClusterInvalidConfigurationException) Code() string { return "CloudHsmClusterInvalidConfigurationException" } // Message returns the exception's message. -func (s CloudHsmClusterInvalidConfigurationException) Message() string { +func (s *CloudHsmClusterInvalidConfigurationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6793,22 +6818,22 @@ func (s CloudHsmClusterInvalidConfigurationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CloudHsmClusterInvalidConfigurationException) OrigErr() error { +func (s *CloudHsmClusterInvalidConfigurationException) OrigErr() error { return nil } -func (s CloudHsmClusterInvalidConfigurationException) Error() string { +func (s *CloudHsmClusterInvalidConfigurationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CloudHsmClusterInvalidConfigurationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CloudHsmClusterInvalidConfigurationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CloudHsmClusterInvalidConfigurationException) RequestID() string { - return s.respMetadata.RequestID +func (s *CloudHsmClusterInvalidConfigurationException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because the AWS CloudHSM cluster that is associated @@ -6817,8 +6842,8 @@ func (s CloudHsmClusterInvalidConfigurationException) RequestID() string { // (https://docs.aws.amazon.com/cloudhsm/latest/userguide/getting-started.html) // in the AWS CloudHSM User Guide. type CloudHsmClusterNotActiveException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6835,17 +6860,17 @@ func (s CloudHsmClusterNotActiveException) GoString() string { func newErrorCloudHsmClusterNotActiveException(v protocol.ResponseMetadata) error { return &CloudHsmClusterNotActiveException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CloudHsmClusterNotActiveException) Code() string { +func (s *CloudHsmClusterNotActiveException) Code() string { return "CloudHsmClusterNotActiveException" } // Message returns the exception's message. -func (s CloudHsmClusterNotActiveException) Message() string { +func (s *CloudHsmClusterNotActiveException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6853,30 +6878,30 @@ func (s CloudHsmClusterNotActiveException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s CloudHsmClusterNotActiveException) OrigErr() error { +func (s *CloudHsmClusterNotActiveException) OrigErr() error { return nil } -func (s CloudHsmClusterNotActiveException) Error() string { +func (s *CloudHsmClusterNotActiveException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CloudHsmClusterNotActiveException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CloudHsmClusterNotActiveException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CloudHsmClusterNotActiveException) RequestID() string { - return s.respMetadata.RequestID +func (s *CloudHsmClusterNotActiveException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because AWS KMS cannot find the AWS CloudHSM cluster // with the specified cluster ID. Retry the request with a different cluster // ID. type CloudHsmClusterNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6893,17 +6918,17 @@ func (s CloudHsmClusterNotFoundException) GoString() string { func newErrorCloudHsmClusterNotFoundException(v protocol.ResponseMetadata) error { return &CloudHsmClusterNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CloudHsmClusterNotFoundException) Code() string { +func (s *CloudHsmClusterNotFoundException) Code() string { return "CloudHsmClusterNotFoundException" } // Message returns the exception's message. -func (s CloudHsmClusterNotFoundException) Message() string { +func (s *CloudHsmClusterNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6911,22 +6936,22 @@ func (s CloudHsmClusterNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CloudHsmClusterNotFoundException) OrigErr() error { +func (s *CloudHsmClusterNotFoundException) OrigErr() error { return nil } -func (s CloudHsmClusterNotFoundException) Error() string { +func (s *CloudHsmClusterNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CloudHsmClusterNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CloudHsmClusterNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CloudHsmClusterNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *CloudHsmClusterNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because the specified AWS CloudHSM cluster has a @@ -6942,8 +6967,8 @@ func (s CloudHsmClusterNotFoundException) RequestID() string { // view the cluster certificate of a cluster, use the DescribeClusters (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html) // operation. 
type CloudHsmClusterNotRelatedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6960,17 +6985,17 @@ func (s CloudHsmClusterNotRelatedException) GoString() string { func newErrorCloudHsmClusterNotRelatedException(v protocol.ResponseMetadata) error { return &CloudHsmClusterNotRelatedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CloudHsmClusterNotRelatedException) Code() string { +func (s *CloudHsmClusterNotRelatedException) Code() string { return "CloudHsmClusterNotRelatedException" } // Message returns the exception's message. -func (s CloudHsmClusterNotRelatedException) Message() string { +func (s *CloudHsmClusterNotRelatedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6978,22 +7003,22 @@ func (s CloudHsmClusterNotRelatedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CloudHsmClusterNotRelatedException) OrigErr() error { +func (s *CloudHsmClusterNotRelatedException) OrigErr() error { return nil } -func (s CloudHsmClusterNotRelatedException) Error() string { +func (s *CloudHsmClusterNotRelatedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CloudHsmClusterNotRelatedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CloudHsmClusterNotRelatedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CloudHsmClusterNotRelatedException) RequestID() string { - return s.respMetadata.RequestID +func (s *CloudHsmClusterNotRelatedException) RequestID() string { + return s.RespMetadata.RequestID } type ConnectCustomKeyStoreInput struct { @@ -7150,11 +7175,13 @@ type CreateCustomKeyStoreInput struct { // in the specified AWS CloudHSM cluster. AWS KMS logs into the cluster as this // user to manage key material on your behalf. // + // The password must be a string of 7 to 32 characters. Its value is case sensitive. + // // This parameter tells AWS KMS the kmsuser account password; it does not change // the password in the AWS CloudHSM cluster. // // KeyStorePassword is a required field - KeyStorePassword *string `min:"1" type:"string" required:"true" sensitive:"true"` + KeyStorePassword *string `min:"7" type:"string" required:"true" sensitive:"true"` // Enter the content of the trust anchor certificate for the cluster. 
This is // the content of the customerCA.crt file that you created when you initialized @@ -7192,8 +7219,8 @@ func (s *CreateCustomKeyStoreInput) Validate() error { if s.KeyStorePassword == nil { invalidParams.Add(request.NewErrParamRequired("KeyStorePassword")) } - if s.KeyStorePassword != nil && len(*s.KeyStorePassword) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyStorePassword", 1)) + if s.KeyStorePassword != nil && len(*s.KeyStorePassword) < 7 { + invalidParams.Add(request.NewErrParamMinLen("KeyStorePassword", 7)) } if s.TrustAnchorCertificate == nil { invalidParams.Add(request.NewErrParamRequired("TrustAnchorCertificate")) @@ -7258,9 +7285,10 @@ func (s *CreateCustomKeyStoreOutput) SetCustomKeyStoreId(v string) *CreateCustom type CreateGrantInput struct { _ struct{} `type:"structure"` - // Allows a cryptographic operation only when the encryption context matches - // or includes the encryption context specified in this structure. For more - // information about encryption context, see Encryption Context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) + // Allows a cryptographic operation (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) + // only when the encryption context matches or includes the encryption context + // specified in this structure. For more information about encryption context, + // see Encryption Context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) // in the AWS Key Management Service Developer Guide . Constraints *GrantConstraints `type:"structure"` @@ -7489,19 +7517,26 @@ type CreateKeyInput struct { // of AWS KMS with the isolation and control of a single-tenant key store. CustomKeyStoreId *string `min:"1" type:"string"` - // Specifies the type of CMK to create. The CustomerMasterKeySpec determines - // whether the CMK contains a symmetric key or an asymmetric key pair. It also - // determines the encryption algorithms or signing algorithms that the CMK supports. - // You can't change the CustomerMasterKeySpec after the CMK is created. To further - // restrict the algorithms that can be used with the CMK, use its key policy - // or IAM policy. + // Specifies the type of CMK to create. The default value, SYMMETRIC_DEFAULT, + // creates a CMK with a 256-bit symmetric key for encryption and decryption. + // For help choosing a key spec for your CMK, see How to Choose Your CMK Configuration + // (https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-choose.html) + // in the AWS Key Management Service Developer Guide. // - // For help with choosing a key spec for your CMK, see Selecting a Customer - // Master Key Spec (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html#cmk-key-spec) + // The CustomerMasterKeySpec determines whether the CMK contains a symmetric + // key or an asymmetric key pair. It also determines the encryption algorithms + // or signing algorithms that the CMK supports. You can't change the CustomerMasterKeySpec + // after the CMK is created. To further restrict the algorithms that can be + // used with the CMK, use a condition key in its key policy or IAM policy. 
For
+	// more information, see kms:EncryptionAlgorithm (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-encryption-algorithm)
+	// or kms:SigningAlgorithm (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-signing-algorithm)
 	// in the AWS Key Management Service Developer Guide.
 	//
-	// The default value, SYMMETRIC_DEFAULT, creates a CMK with a 256-bit symmetric
-	// key.
+	// AWS services that are integrated with AWS KMS (http://aws.amazon.com/kms/features/#AWS_Service_Integration)
+	// use symmetric CMKs to protect your data. These services do not support asymmetric
+	// CMKs. For help determining whether a CMK is symmetric or asymmetric, see
+	// Identifying Symmetric and Asymmetric CMKs (https://docs.aws.amazon.com/kms/latest/developerguide/find-symm-asymm.html)
+	// in the AWS Key Management Service Developer Guide.
 	//
 	// AWS KMS supports the following key specs for CMKs:
 	//
@@ -7522,9 +7557,10 @@ type CreateKeyInput struct {
 	// a task.
 	Description *string `type:"string"`
 
-	// Determines the cryptographic operations for which you can use the CMK. The
-	// default value is ENCRYPT_DECRYPT. This parameter is required only for asymmetric
-	// CMKs. You can't change the KeyUsage value after the CMK is created.
+	// Determines the cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations)
+	// for which you can use the CMK. The default value is ENCRYPT_DECRYPT. This
+	// parameter is required only for asymmetric CMKs. You can't change the KeyUsage
+	// value after the CMK is created.
 	//
 	// Select only one valid value.
 	//
@@ -7578,7 +7614,7 @@ type CreateKeyInput struct {
 	// to the CMK. For more information, see Default Key Policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default)
 	// in the AWS Key Management Service Developer Guide.
 	//
-	// The key policy size limit is 32 kilobytes (32768 bytes).
+	// The key policy size quota is 32 kilobytes (32768 bytes).
 	Policy *string `min:"1" type:"string"`
 
 	// One or more tags. Each tag consists of a tag key and a tag value. Both the
@@ -7706,8 +7742,8 @@ func (s *CreateKeyOutput) SetKeyMetadata(v *KeyMetadata) *CreateKeyOutput {
 // use the ScheduleKeyDeletion operation to delete the CMKs. After they are
 // deleted, you can delete the custom key store.
 type CustomKeyStoreHasCMKsException struct {
-	_ struct{} `type:"structure"`
-	respMetadata protocol.ResponseMetadata
+	_ struct{} `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
 
 	Message_ *string `locationName:"message" type:"string"`
 }
@@ -7724,17 +7760,17 @@ func (s CustomKeyStoreHasCMKsException) GoString() string {
 
 func newErrorCustomKeyStoreHasCMKsException(v protocol.ResponseMetadata) error {
 	return &CustomKeyStoreHasCMKsException{
-		respMetadata: v,
+		RespMetadata: v,
 	}
 }
 
 // Code returns the exception type name.
-func (s CustomKeyStoreHasCMKsException) Code() string {
+func (s *CustomKeyStoreHasCMKsException) Code() string {
 	return "CustomKeyStoreHasCMKsException"
 }
 
 // Message returns the exception's message.
-func (s CustomKeyStoreHasCMKsException) Message() string {
+func (s *CustomKeyStoreHasCMKsException) Message() string {
 	if s.Message_ != nil {
 		return *s.Message_
 	}
@@ -7742,22 +7778,22 @@ func (s CustomKeyStoreHasCMKsException) Message() string {
 }
 
 // OrigErr always returns nil, satisfies awserr.Error interface.
-func (s CustomKeyStoreHasCMKsException) OrigErr() error { +func (s *CustomKeyStoreHasCMKsException) OrigErr() error { return nil } -func (s CustomKeyStoreHasCMKsException) Error() string { +func (s *CustomKeyStoreHasCMKsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CustomKeyStoreHasCMKsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CustomKeyStoreHasCMKsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CustomKeyStoreHasCMKsException) RequestID() string { - return s.respMetadata.RequestID +func (s *CustomKeyStoreHasCMKsException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because of the ConnectionState of the custom key @@ -7778,8 +7814,8 @@ func (s CustomKeyStoreHasCMKsException) RequestID() string { // with a ConnectionState of DISCONNECTING or FAILED. This operation is valid // for all other ConnectionState values. type CustomKeyStoreInvalidStateException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7796,17 +7832,17 @@ func (s CustomKeyStoreInvalidStateException) GoString() string { func newErrorCustomKeyStoreInvalidStateException(v protocol.ResponseMetadata) error { return &CustomKeyStoreInvalidStateException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CustomKeyStoreInvalidStateException) Code() string { +func (s *CustomKeyStoreInvalidStateException) Code() string { return "CustomKeyStoreInvalidStateException" } // Message returns the exception's message. -func (s CustomKeyStoreInvalidStateException) Message() string { +func (s *CustomKeyStoreInvalidStateException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7814,30 +7850,30 @@ func (s CustomKeyStoreInvalidStateException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CustomKeyStoreInvalidStateException) OrigErr() error { +func (s *CustomKeyStoreInvalidStateException) OrigErr() error { return nil } -func (s CustomKeyStoreInvalidStateException) Error() string { +func (s *CustomKeyStoreInvalidStateException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CustomKeyStoreInvalidStateException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CustomKeyStoreInvalidStateException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CustomKeyStoreInvalidStateException) RequestID() string { - return s.respMetadata.RequestID +func (s *CustomKeyStoreInvalidStateException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because the specified custom key store name is already // assigned to another custom key store in the account. Try again with a custom // key store name that is unique in the account. 
type CustomKeyStoreNameInUseException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7854,17 +7890,17 @@ func (s CustomKeyStoreNameInUseException) GoString() string { func newErrorCustomKeyStoreNameInUseException(v protocol.ResponseMetadata) error { return &CustomKeyStoreNameInUseException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CustomKeyStoreNameInUseException) Code() string { +func (s *CustomKeyStoreNameInUseException) Code() string { return "CustomKeyStoreNameInUseException" } // Message returns the exception's message. -func (s CustomKeyStoreNameInUseException) Message() string { +func (s *CustomKeyStoreNameInUseException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7872,29 +7908,29 @@ func (s CustomKeyStoreNameInUseException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CustomKeyStoreNameInUseException) OrigErr() error { +func (s *CustomKeyStoreNameInUseException) OrigErr() error { return nil } -func (s CustomKeyStoreNameInUseException) Error() string { +func (s *CustomKeyStoreNameInUseException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CustomKeyStoreNameInUseException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CustomKeyStoreNameInUseException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CustomKeyStoreNameInUseException) RequestID() string { - return s.respMetadata.RequestID +func (s *CustomKeyStoreNameInUseException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because AWS KMS cannot find a custom key store with // the specified key store name or ID. type CustomKeyStoreNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7911,17 +7947,17 @@ func (s CustomKeyStoreNotFoundException) GoString() string { func newErrorCustomKeyStoreNotFoundException(v protocol.ResponseMetadata) error { return &CustomKeyStoreNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CustomKeyStoreNotFoundException) Code() string { +func (s *CustomKeyStoreNotFoundException) Code() string { return "CustomKeyStoreNotFoundException" } // Message returns the exception's message. -func (s CustomKeyStoreNotFoundException) Message() string { +func (s *CustomKeyStoreNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7929,22 +7965,22 @@ func (s CustomKeyStoreNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CustomKeyStoreNotFoundException) OrigErr() error { +func (s *CustomKeyStoreNotFoundException) OrigErr() error { return nil } -func (s CustomKeyStoreNotFoundException) Error() string { +func (s *CustomKeyStoreNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s CustomKeyStoreNotFoundException) StatusCode() int {
-	return s.respMetadata.StatusCode
+func (s *CustomKeyStoreNotFoundException) StatusCode() int {
+	return s.RespMetadata.StatusCode
 }
 
 // RequestID returns the service's response RequestID for request.
-func (s CustomKeyStoreNotFoundException) RequestID() string {
-	return s.respMetadata.RequestID
+func (s *CustomKeyStoreNotFoundException) RequestID() string {
+	return s.RespMetadata.RequestID
 }
 
 // Contains information about each custom key store in the custom key store
@@ -7956,7 +7992,12 @@ type CustomKeyStoresListEntry struct {
 	// the custom key store.
 	CloudHsmClusterId *string `min:"19" type:"string"`
 
-	// Describes the connection error. Valid values are:
+	// Describes the connection error. This field appears in the response only when
+	// the ConnectionState is FAILED. For help resolving these errors, see How to
+	// Fix a Connection Failure (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-failed)
+	// in the AWS Key Management Service Developer Guide.
+	//
+	// Valid values are:
 	//
 	// * CLUSTER_NOT_FOUND - AWS KMS cannot find the AWS CloudHSM cluster with
 	// the specified cluster ID.
@@ -7970,20 +8011,43 @@ type CustomKeyStoresListEntry struct {
 	// the custom key store before trying to connect again.
 	//
 	// * INVALID_CREDENTIALS - AWS KMS does not have the correct password for
-	// the kmsuser crypto user in the AWS CloudHSM cluster.
+	// the kmsuser crypto user in the AWS CloudHSM cluster. Before you can connect
+	// your custom key store to its AWS CloudHSM cluster, you must change the
+	// kmsuser account password and update the key store password value for the
+	// custom key store.
 	//
 	// * NETWORK_ERRORS - Network errors are preventing AWS KMS from connecting
 	// to the custom key store.
 	//
+	// * SUBNET_NOT_FOUND - A subnet in the AWS CloudHSM cluster configuration
+	// was deleted. If AWS KMS cannot find all of the subnets in the cluster
+	// configuration, attempts to connect the custom key store to the AWS CloudHSM
+	// cluster fail. To fix this error, create a cluster from a recent backup
+	// and associate it with your custom key store. (This process creates a new
+	// cluster configuration with a VPC and private subnets.) For details, see
+	// How to Fix a Connection Failure (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-failed)
+	// in the AWS Key Management Service Developer Guide.
+	//
 	// * USER_LOCKED_OUT - The kmsuser CU account is locked out of the associated
 	// AWS CloudHSM cluster due to too many failed password attempts. Before
 	// you can connect your custom key store to its AWS CloudHSM cluster, you
-	// must change the kmsuser account password and update the password value
-	// for the custom key store.
-	//
-	// For help with connection failures, see Troubleshooting Custom Key Stores
-	// (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html)
-	// in the AWS Key Management Service Developer Guide.
+	// must change the kmsuser account password and update the key store password
+	// value for the custom key store.
+	//
+	// * USER_LOGGED_IN - The kmsuser CU account is logged into the associated
+	// AWS CloudHSM cluster. This prevents AWS KMS from rotating the kmsuser
+	// account password and logging into the cluster. Before you can connect
+	// your custom key store to its AWS CloudHSM cluster, you must log the kmsuser
+	// CU out of the cluster.
+	// If you changed the kmsuser password to log into the cluster, you must
+	// also update the key store password value for the custom key store. For
+	// help, see How to Log Out and Reconnect (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#login-kmsuser-2)
+	// in the AWS Key Management Service Developer Guide.
+	//
+	// * USER_NOT_FOUND - AWS KMS cannot find a kmsuser CU account in the associated
+	// AWS CloudHSM cluster. Before you can connect your custom key store to
+	// its AWS CloudHSM cluster, you must create a kmsuser CU account in the
+	// cluster, and then update the key store password value for the custom key
+	// store.
	ConnectionErrorCode *string `type:"string" enum:"ConnectionErrorCodeType"`
 
 	// Indicates whether the custom key store is connected to its AWS CloudHSM cluster.
@@ -7998,8 +8062,9 @@ type CustomKeyStoresListEntry struct {
 	// one active HSM.
 	//
 	// A value of FAILED indicates that an attempt to connect was unsuccessful.
-	// For help resolving a connection failure, see Troubleshooting a Custom Key
-	// Store (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html)
+	// The ConnectionErrorCode field in the response indicates the cause of the
+	// failure. For help resolving a connection failure, see Troubleshooting a Custom
+	// Key Store (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html)
 	// in the AWS Key Management Service Developer Guide.
 	ConnectionState *string `type:"string" enum:"ConnectionStateType"`
@@ -8090,9 +8155,9 @@ type DecryptInput struct {
 	EncryptionAlgorithm *string `type:"string" enum:"EncryptionAlgorithmSpec"`
 
 	// Specifies the encryption context to use when decrypting the data. An encryption
-	// context is valid only for cryptographic operations with a symmetric CMK.
-	// The standard asymmetric encryption algorithms that AWS KMS uses do not support
-	// an encryption context.
+	// context is valid only for cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations)
+	// with a symmetric CMK. The standard asymmetric encryption algorithms that
+	// AWS KMS uses do not support an encryption context.
 	//
 	// An encryption context is a collection of non-secret key-value pairs that
 	// represents additional authenticated data. When you use an encryption context
@@ -8205,7 +8270,8 @@ type DecryptOutput struct {
 	// The encryption algorithm that was used to decrypt the ciphertext.
 	EncryptionAlgorithm *string `type:"string" enum:"EncryptionAlgorithmSpec"`
 
-	// The ARN of the customer master key that was used to perform the decryption.
+	// The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN))
+	// of the CMK that was used to decrypt the ciphertext.
 	KeyId *string `min:"1" type:"string"`
 
 	// Decrypted plaintext data. When you use the HTTP API or the AWS CLI, the value
@@ -8424,8 +8490,8 @@ func (s DeleteImportedKeyMaterialOutput) GoString() string {
 // The system timed out while trying to fulfill the request. The request can
 // be retried.
type DependencyTimeoutException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -8442,17 +8508,17 @@ func (s DependencyTimeoutException) GoString() string { func newErrorDependencyTimeoutException(v protocol.ResponseMetadata) error { return &DependencyTimeoutException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DependencyTimeoutException) Code() string { +func (s *DependencyTimeoutException) Code() string { return "DependencyTimeoutException" } // Message returns the exception's message. -func (s DependencyTimeoutException) Message() string { +func (s *DependencyTimeoutException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8460,22 +8526,22 @@ func (s DependencyTimeoutException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DependencyTimeoutException) OrigErr() error { +func (s *DependencyTimeoutException) OrigErr() error { return nil } -func (s DependencyTimeoutException) Error() string { +func (s *DependencyTimeoutException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DependencyTimeoutException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DependencyTimeoutException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DependencyTimeoutException) RequestID() string { - return s.respMetadata.RequestID +func (s *DependencyTimeoutException) RequestID() string { + return s.RespMetadata.RequestID } type DescribeCustomKeyStoresInput struct { @@ -8843,8 +8909,8 @@ func (s DisableKeyRotationOutput) GoString() string { // The request was rejected because the specified CMK is not enabled. type DisabledException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -8861,17 +8927,17 @@ func (s DisabledException) GoString() string { func newErrorDisabledException(v protocol.ResponseMetadata) error { return &DisabledException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DisabledException) Code() string { +func (s *DisabledException) Code() string { return "DisabledException" } // Message returns the exception's message. -func (s DisabledException) Message() string { +func (s *DisabledException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8879,22 +8945,22 @@ func (s DisabledException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DisabledException) OrigErr() error { +func (s *DisabledException) OrigErr() error { return nil } -func (s DisabledException) Error() string { +func (s *DisabledException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DisabledException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DisabledException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s DisabledException) RequestID() string { - return s.respMetadata.RequestID +func (s *DisabledException) RequestID() string { + return s.RespMetadata.RequestID } type DisconnectCustomKeyStoreInput struct { @@ -9097,9 +9163,9 @@ type EncryptInput struct { EncryptionAlgorithm *string `type:"string" enum:"EncryptionAlgorithmSpec"` // Specifies the encryption context that will be used to encrypt the data. An - // encryption context is valid only for cryptographic operations with a symmetric - // CMK. The standard asymmetric encryption algorithms that AWS KMS uses do not - // support an encryption context. + // encryption context is valid only for cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) + // with a symmetric CMK. The standard asymmetric encryption algorithms that + // AWS KMS uses do not support an encryption context. // // An encryption context is a collection of non-secret key-value pairs that // represents additional authenticated data. When you use an encryption context @@ -9221,7 +9287,8 @@ type EncryptOutput struct { // The encryption algorithm that was used to encrypt the plaintext. EncryptionAlgorithm *string `type:"string" enum:"EncryptionAlgorithmSpec"` - // The ID of the key used during encryption. + // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // of the CMK that was used to encrypt the plaintext. KeyId *string `min:"1" type:"string"` } @@ -9257,8 +9324,8 @@ func (s *EncryptOutput) SetKeyId(v string) *EncryptOutput { // GetParametersForImport to get a new import token and public key, use the // new public key to encrypt the key material, and then try the request again. type ExpiredImportTokenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -9275,17 +9342,17 @@ func (s ExpiredImportTokenException) GoString() string { func newErrorExpiredImportTokenException(v protocol.ResponseMetadata) error { return &ExpiredImportTokenException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ExpiredImportTokenException) Code() string { +func (s *ExpiredImportTokenException) Code() string { return "ExpiredImportTokenException" } // Message returns the exception's message. -func (s ExpiredImportTokenException) Message() string { +func (s *ExpiredImportTokenException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9293,22 +9360,22 @@ func (s ExpiredImportTokenException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ExpiredImportTokenException) OrigErr() error { +func (s *ExpiredImportTokenException) OrigErr() error { return nil } -func (s ExpiredImportTokenException) Error() string { +func (s *ExpiredImportTokenException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ExpiredImportTokenException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ExpiredImportTokenException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s ExpiredImportTokenException) RequestID() string { - return s.respMetadata.RequestID +func (s *ExpiredImportTokenException) RequestID() string { + return s.RespMetadata.RequestID } type GenerateDataKeyInput struct { @@ -9439,7 +9506,8 @@ type GenerateDataKeyOutput struct { // CiphertextBlob is automatically base64 encoded/decoded by the SDK. CiphertextBlob []byte `min:"1" type:"blob"` - // The identifier of the CMK that encrypted the data key. + // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // of the CMK that encrypted the data key. KeyId *string `min:"1" type:"string"` // The plaintext data key. When you use the HTTP API or the AWS CLI, the value @@ -9502,7 +9570,8 @@ type GenerateDataKeyPairInput struct { GrantTokens []*string `type:"list"` // Specifies the symmetric CMK that encrypts the private key in the data key - // pair. You cannot specify an asymmetric CMKs. + // pair. You cannot specify an asymmetric CMK or a CMK in a custom key store. + // To get the type and origin of your CMK, use the DescribeKey operation. // // To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, // or alias ARN. When using an alias name, prefix it with "alias/". To specify @@ -9591,7 +9660,8 @@ func (s *GenerateDataKeyPairInput) SetKeyPairSpec(v string) *GenerateDataKeyPair type GenerateDataKeyPairOutput struct { _ struct{} `type:"structure"` - // The identifier of the CMK that encrypted the private key. + // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // of the CMK that encrypted the private key. KeyId *string `min:"1" type:"string"` // The type of data key pair that was generated. @@ -9678,7 +9748,9 @@ type GenerateDataKeyPairWithoutPlaintextInput struct { GrantTokens []*string `type:"list"` // Specifies the CMK that encrypts the private key in the data key pair. You - // must specify a symmetric CMK. You cannot use an asymmetric CMK. + // must specify a symmetric CMK. You cannot use an asymmetric CMK or a CMK in + // a custom key store. To get the type and origin of your CMK, use the DescribeKey + // operation. // // To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, // or alias ARN. When using an alias name, prefix it with "alias/". @@ -9766,24 +9838,8 @@ func (s *GenerateDataKeyPairWithoutPlaintextInput) SetKeyPairSpec(v string) *Gen type GenerateDataKeyPairWithoutPlaintextOutput struct { _ struct{} `type:"structure"` - // Specifies the CMK that encrypted the private key in the data key pair. You - // must specify a symmetric CMK. You cannot use an asymmetric CMK. - // - // To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, - // or alias ARN. When using an alias name, prefix it with "alias/". - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Alias name: alias/ExampleAlias - // - // * Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias - // - // To get the key ID and key ARN for a CMK, use ListKeys or DescribeKey. To - // get the alias name and alias ARN, use ListAliases. + // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // of the CMK that encrypted the private key. KeyId *string `min:"1" type:"string"` // The type of data key pair that was generated. 
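The exception rework running through this file moves every KMS error type to pointer receivers and an exported RespMetadata field while still satisfying awserr.Error, so caller-side handling is unchanged. A minimal sketch of that handling, assuming a placeholder key ID and abbreviated output:

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func describeKey(keyID string) {
	svc := kms.New(session.Must(session.NewSession()))
	_, err := svc.DescribeKey(&kms.DescribeKeyInput{KeyId: aws.String(keyID)})
	if err == nil {
		return
	}
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case kms.ErrCodeNotFoundException:
			fmt.Println("no such CMK:", aerr.Message())
		case kms.ErrCodeDisabledException:
			fmt.Println("CMK is disabled:", aerr.Message())
		default:
			fmt.Println(aerr.Code(), aerr.Message())
		}
	}
	// The StatusCode and RequestID methods generated for each exception
	// type surface through the awserr.RequestFailure interface.
	if rf, ok := err.(awserr.RequestFailure); ok {
		fmt.Println("status:", rf.StatusCode(), "request:", rf.RequestID())
	}
}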
@@ -9959,7 +10015,8 @@ type GenerateDataKeyWithoutPlaintextOutput struct { // CiphertextBlob is automatically base64 encoded/decoded by the SDK. CiphertextBlob []byte `min:"1" type:"blob"` - // The identifier of the CMK that encrypted the data key. + // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // of the CMK that encrypted the data key. KeyId *string `min:"1" type:"string"` } @@ -10320,8 +10377,9 @@ type GetParametersForImportOutput struct { // ImportToken is automatically base64 encoded/decoded by the SDK. ImportToken []byte `min:"1" type:"blob"` - // The identifier of the CMK to use in a subsequent ImportKeyMaterial request. - // This is the same CMK specified in the GetParametersForImport request. + // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // of the CMK to use in a subsequent ImportKeyMaterial request. This is the + // same CMK specified in the GetParametersForImport request. KeyId *string `min:"1" type:"string"` // The time at which the import token and public key are no longer valid. After @@ -10456,7 +10514,8 @@ type GetPublicKeyOutput struct { // is ENCRYPT_DECRYPT. EncryptionAlgorithms []*string `type:"list"` - // The identifier of the asymmetric CMK from which the public key was downloaded. + // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // of the asymmetric CMK from which the public key was downloaded. KeyId *string `min:"1" type:"string"` // The permitted use of the public key. Valid values are ENCRYPT_DECRYPT or @@ -10468,9 +10527,10 @@ type GetPublicKeyOutput struct { // The exported public key. // - // This value is returned as a binary Distinguished Encoding Rules (https://www.itu.int/ITU-T/studygroups/com17/languages/X.690-0207.pdf) - // (DER)-encoded object. To decode it, use an ASN.1 parsing tool, such as OpenSSL - // asn1parse (https://www.openssl.org/docs/man1.0.2/man1/asn1parse.html). + // The value is a DER-encoded X.509 public key, also known as SubjectPublicKeyInfo + // (SPKI), as defined in RFC 5280 (https://tools.ietf.org/html/rfc5280). When + // you use the HTTP API or the AWS CLI, the value is Base64-encoded. Otherwise, + // it is not Base64-encoded. // // PublicKey is automatically base64 encoded/decoded by the SDK. PublicKey []byte `min:"1" type:"blob"` @@ -10528,24 +10588,16 @@ func (s *GetPublicKeyOutput) SetSigningAlgorithms(v []*string) *GetPublicKeyOutp return s } -// Use this structure to allow cryptographic operations in the grant only when -// the operation request includes the specified encryption context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context). -// -// AWS KMS applies the grant constraints only when the grant allows a cryptographic -// operation that accepts an encryption context as input, such as the following. -// -// * Encrypt -// -// * Decrypt -// -// * GenerateDataKey -// -// * GenerateDataKeyWithoutPlaintext +// Use this structure to allow cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) +// in the grant only when the operation request includes the specified encryption +// context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context). 
// -// * ReEncrypt -// -// AWS KMS does not apply the grant constraints to other operations, such as -// DescribeKey or ScheduleKeyDeletion. +// AWS KMS applies the grant constraints only to cryptographic operations that +// support an encryption context, that is, all cryptographic operations with +// a symmetric CMK (https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-concepts.html#symmetric-cmks). +// Grant constraints are not applied to operations that do not support an encryption +// context, such as cryptographic operations with asymmetric CMKs and management +// operations, such as DescribeKey or ScheduleKeyDeletion. // // In a cryptographic operation, the encryption context in the decryption operation // must be an exact, case-sensitive match for the keys and values in the encryption @@ -10563,16 +10615,16 @@ type GrantConstraints struct { _ struct{} `type:"structure"` // A list of key-value pairs that must match the encryption context in the cryptographic - // operation request. The grant allows the operation only when the encryption - // context in the request is the same as the encryption context specified in - // this constraint. + // operation (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) + // request. The grant allows the operation only when the encryption context + // in the request is the same as the encryption context specified in this constraint. EncryptionContextEquals map[string]*string `type:"map"` // A list of key-value pairs that must be included in the encryption context - // of the cryptographic operation request. The grant allows the cryptographic - // operation only when the encryption context in the request includes the key-value - // pairs specified in this constraint, although it can include additional key-value - // pairs. + // of the cryptographic operation (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) + // request. The grant allows the cryptographic operation only when the encryption + // context in the request includes the key-value pairs specified in this constraint, + // although it can include additional key-value pairs. EncryptionContextSubset map[string]*string `type:"map"` } @@ -10598,7 +10650,7 @@ func (s *GrantConstraints) SetEncryptionContextSubset(v map[string]*string) *Gra return s } -// Contains information about an entry in a list of grants. +// Contains information about a grant. type GrantListEntry struct { _ struct{} `type:"structure"` @@ -10612,7 +10664,13 @@ type GrantListEntry struct { // The unique identifier for the grant. GrantId *string `min:"1" type:"string"` - // The principal that receives the grant's permissions. + // The identity that gets the permissions in the grant. + // + // The GranteePrincipal field in the ListGrants response usually contains the + // user or role designated as the grantee principal in the grant. However, when + // the grantee principal in the grant is an AWS service, the GranteePrincipal + // field contains the service principal (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html#principal-services), + // which might represent several different grantee principals. GranteePrincipal *string `min:"1" type:"string"` // The AWS account under which the grant was issued. 
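// Editor's sketch (illustrative only, not part of the vendored diff): the
// revised GrantConstraints docs above stress that constraints apply only to
// cryptographic operations with symmetric CMKs, never to management
// operations such as DescribeKey. A grant that permits Decrypt only when the
// request's encryption context includes a given key-value pair; the package
// name, ARN parameters, and the example pair are assumptions:
package kmsexamples

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/kms"
)

// createConstrainedGrant issues a grant allowing Decrypt only when the
// caller's encryption context includes {"department": "finance"}.
func createConstrainedGrant(svc *kms.KMS, keyARN, granteeARN string) (string, error) {
	out, err := svc.CreateGrant(&kms.CreateGrantInput{
		KeyId:            aws.String(keyARN),
		GranteePrincipal: aws.String(granteeARN),
		Operations:       []*string{aws.String(kms.GrantOperationDecrypt)},
		Constraints: &kms.GrantConstraints{
			// EncryptionContextSubset: the request context must contain this
			// pair but may carry additional pairs; EncryptionContextEquals
			// would instead require an exact, case-sensitive match.
			EncryptionContextSubset: map[string]*string{
				"department": aws.String("finance"), // assumed example pair
			},
		},
	})
	if err != nil {
		return "", err
	}
	return aws.StringValue(out.GrantId), nil
}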
@@ -10834,8 +10892,8 @@ func (s ImportKeyMaterialOutput) GoString() string { // The KeyId in a Decrypt request and the SourceKeyId in a ReEncrypt request // must identify the same CMK that was used to encrypt the ciphertext. type IncorrectKeyException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -10852,17 +10910,17 @@ func (s IncorrectKeyException) GoString() string { func newErrorIncorrectKeyException(v protocol.ResponseMetadata) error { return &IncorrectKeyException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s IncorrectKeyException) Code() string { +func (s *IncorrectKeyException) Code() string { return "IncorrectKeyException" } // Message returns the exception's message. -func (s IncorrectKeyException) Message() string { +func (s *IncorrectKeyException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10870,30 +10928,30 @@ func (s IncorrectKeyException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s IncorrectKeyException) OrigErr() error { +func (s *IncorrectKeyException) OrigErr() error { return nil } -func (s IncorrectKeyException) Error() string { +func (s *IncorrectKeyException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s IncorrectKeyException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *IncorrectKeyException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s IncorrectKeyException) RequestID() string { - return s.respMetadata.RequestID +func (s *IncorrectKeyException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because the key material in the request is, expired, // invalid, or is not the same key material that was previously imported into // this customer master key (CMK). type IncorrectKeyMaterialException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -10910,17 +10968,17 @@ func (s IncorrectKeyMaterialException) GoString() string { func newErrorIncorrectKeyMaterialException(v protocol.ResponseMetadata) error { return &IncorrectKeyMaterialException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s IncorrectKeyMaterialException) Code() string { +func (s *IncorrectKeyMaterialException) Code() string { return "IncorrectKeyMaterialException" } // Message returns the exception's message. -func (s IncorrectKeyMaterialException) Message() string { +func (s *IncorrectKeyMaterialException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10928,22 +10986,22 @@ func (s IncorrectKeyMaterialException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s IncorrectKeyMaterialException) OrigErr() error { +func (s *IncorrectKeyMaterialException) OrigErr() error { return nil } -func (s IncorrectKeyMaterialException) Error() string { +func (s *IncorrectKeyMaterialException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s IncorrectKeyMaterialException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *IncorrectKeyMaterialException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s IncorrectKeyMaterialException) RequestID() string { - return s.respMetadata.RequestID +func (s *IncorrectKeyMaterialException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because the trust anchor certificate in the request @@ -10953,8 +11011,8 @@ func (s IncorrectKeyMaterialException) RequestID() string { // you create the trust anchor certificate and save it in the customerCA.crt // file. type IncorrectTrustAnchorException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -10971,17 +11029,17 @@ func (s IncorrectTrustAnchorException) GoString() string { func newErrorIncorrectTrustAnchorException(v protocol.ResponseMetadata) error { return &IncorrectTrustAnchorException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s IncorrectTrustAnchorException) Code() string { +func (s *IncorrectTrustAnchorException) Code() string { return "IncorrectTrustAnchorException" } // Message returns the exception's message. -func (s IncorrectTrustAnchorException) Message() string { +func (s *IncorrectTrustAnchorException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10989,29 +11047,29 @@ func (s IncorrectTrustAnchorException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s IncorrectTrustAnchorException) OrigErr() error { +func (s *IncorrectTrustAnchorException) OrigErr() error { return nil } -func (s IncorrectTrustAnchorException) Error() string { +func (s *IncorrectTrustAnchorException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s IncorrectTrustAnchorException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *IncorrectTrustAnchorException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s IncorrectTrustAnchorException) RequestID() string { - return s.respMetadata.RequestID +func (s *IncorrectTrustAnchorException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because an internal exception occurred. The request // can be retried. 
type InternalException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11028,17 +11086,17 @@ func (s InternalException) GoString() string { func newErrorInternalException(v protocol.ResponseMetadata) error { return &InternalException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalException) Code() string { +func (s *InternalException) Code() string { return "KMSInternalException" } // Message returns the exception's message. -func (s InternalException) Message() string { +func (s *InternalException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11046,28 +11104,28 @@ func (s InternalException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalException) OrigErr() error { +func (s *InternalException) OrigErr() error { return nil } -func (s InternalException) Error() string { +func (s *InternalException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because the specified alias name is not valid. type InvalidAliasNameException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11084,17 +11142,17 @@ func (s InvalidAliasNameException) GoString() string { func newErrorInvalidAliasNameException(v protocol.ResponseMetadata) error { return &InvalidAliasNameException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidAliasNameException) Code() string { +func (s *InvalidAliasNameException) Code() string { return "InvalidAliasNameException" } // Message returns the exception's message. -func (s InvalidAliasNameException) Message() string { +func (s *InvalidAliasNameException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11102,29 +11160,29 @@ func (s InvalidAliasNameException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidAliasNameException) OrigErr() error { +func (s *InvalidAliasNameException) OrigErr() error { return nil } -func (s InvalidAliasNameException) Error() string { +func (s *InvalidAliasNameException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidAliasNameException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidAliasNameException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s InvalidAliasNameException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidAliasNameException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because a specified ARN, or an ARN in a key policy, // is not valid. type InvalidArnException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11141,17 +11199,17 @@ func (s InvalidArnException) GoString() string { func newErrorInvalidArnException(v protocol.ResponseMetadata) error { return &InvalidArnException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidArnException) Code() string { +func (s *InvalidArnException) Code() string { return "InvalidArnException" } // Message returns the exception's message. -func (s InvalidArnException) Message() string { +func (s *InvalidArnException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11159,22 +11217,22 @@ func (s InvalidArnException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidArnException) OrigErr() error { +func (s *InvalidArnException) OrigErr() error { return nil } -func (s InvalidArnException) Error() string { +func (s *InvalidArnException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidArnException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidArnException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidArnException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidArnException) RequestID() string { + return s.RespMetadata.RequestID } // From the Decrypt or ReEncrypt operation, the request was rejected because @@ -11185,8 +11243,8 @@ func (s InvalidArnException) RequestID() string { // From the ImportKeyMaterial operation, the request was rejected because AWS // KMS could not decrypt the encrypted (wrapped) key material. type InvalidCiphertextException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11203,17 +11261,17 @@ func (s InvalidCiphertextException) GoString() string { func newErrorInvalidCiphertextException(v protocol.ResponseMetadata) error { return &InvalidCiphertextException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidCiphertextException) Code() string { +func (s *InvalidCiphertextException) Code() string { return "InvalidCiphertextException" } // Message returns the exception's message. -func (s InvalidCiphertextException) Message() string { +func (s *InvalidCiphertextException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11221,28 +11279,28 @@ func (s InvalidCiphertextException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s InvalidCiphertextException) OrigErr() error { +func (s *InvalidCiphertextException) OrigErr() error { return nil } -func (s InvalidCiphertextException) Error() string { +func (s *InvalidCiphertextException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidCiphertextException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidCiphertextException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidCiphertextException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidCiphertextException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because the specified GrantId is not valid. type InvalidGrantIdException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11259,17 +11317,17 @@ func (s InvalidGrantIdException) GoString() string { func newErrorInvalidGrantIdException(v protocol.ResponseMetadata) error { return &InvalidGrantIdException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidGrantIdException) Code() string { +func (s *InvalidGrantIdException) Code() string { return "InvalidGrantIdException" } // Message returns the exception's message. -func (s InvalidGrantIdException) Message() string { +func (s *InvalidGrantIdException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11277,28 +11335,28 @@ func (s InvalidGrantIdException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidGrantIdException) OrigErr() error { +func (s *InvalidGrantIdException) OrigErr() error { return nil } -func (s InvalidGrantIdException) Error() string { +func (s *InvalidGrantIdException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidGrantIdException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidGrantIdException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidGrantIdException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidGrantIdException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because the specified grant token is not valid. type InvalidGrantTokenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11315,17 +11373,17 @@ func (s InvalidGrantTokenException) GoString() string { func newErrorInvalidGrantTokenException(v protocol.ResponseMetadata) error { return &InvalidGrantTokenException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidGrantTokenException) Code() string { +func (s *InvalidGrantTokenException) Code() string { return "InvalidGrantTokenException" } // Message returns the exception's message. 
-func (s InvalidGrantTokenException) Message() string { +func (s *InvalidGrantTokenException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11333,29 +11391,29 @@ func (s InvalidGrantTokenException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidGrantTokenException) OrigErr() error { +func (s *InvalidGrantTokenException) OrigErr() error { return nil } -func (s InvalidGrantTokenException) Error() string { +func (s *InvalidGrantTokenException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidGrantTokenException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidGrantTokenException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidGrantTokenException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidGrantTokenException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because the provided import token is invalid or // is associated with a different customer master key (CMK). type InvalidImportTokenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11372,17 +11430,17 @@ func (s InvalidImportTokenException) GoString() string { func newErrorInvalidImportTokenException(v protocol.ResponseMetadata) error { return &InvalidImportTokenException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidImportTokenException) Code() string { +func (s *InvalidImportTokenException) Code() string { return "InvalidImportTokenException" } // Message returns the exception's message. -func (s InvalidImportTokenException) Message() string { +func (s *InvalidImportTokenException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11390,22 +11448,22 @@ func (s InvalidImportTokenException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidImportTokenException) OrigErr() error { +func (s *InvalidImportTokenException) OrigErr() error { return nil } -func (s InvalidImportTokenException) Error() string { +func (s *InvalidImportTokenException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidImportTokenException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidImportTokenException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidImportTokenException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidImportTokenException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected for one of the following reasons: @@ -11422,8 +11480,8 @@ func (s InvalidImportTokenException) RequestID() string { // To find the encryption or signing algorithms supported for a particular CMK, // use the DescribeKey operation. 
type InvalidKeyUsageException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11440,17 +11498,17 @@ func (s InvalidKeyUsageException) GoString() string { func newErrorInvalidKeyUsageException(v protocol.ResponseMetadata) error { return &InvalidKeyUsageException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidKeyUsageException) Code() string { +func (s *InvalidKeyUsageException) Code() string { return "InvalidKeyUsageException" } // Message returns the exception's message. -func (s InvalidKeyUsageException) Message() string { +func (s *InvalidKeyUsageException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11458,29 +11516,29 @@ func (s InvalidKeyUsageException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidKeyUsageException) OrigErr() error { +func (s *InvalidKeyUsageException) OrigErr() error { return nil } -func (s InvalidKeyUsageException) Error() string { +func (s *InvalidKeyUsageException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidKeyUsageException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidKeyUsageException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidKeyUsageException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidKeyUsageException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because the marker that specifies where pagination // should next begin is not valid. type InvalidMarkerException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11497,17 +11555,17 @@ func (s InvalidMarkerException) GoString() string { func newErrorInvalidMarkerException(v protocol.ResponseMetadata) error { return &InvalidMarkerException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidMarkerException) Code() string { +func (s *InvalidMarkerException) Code() string { return "InvalidMarkerException" } // Message returns the exception's message. -func (s InvalidMarkerException) Message() string { +func (s *InvalidMarkerException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11515,22 +11573,22 @@ func (s InvalidMarkerException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidMarkerException) OrigErr() error { +func (s *InvalidMarkerException) OrigErr() error { return nil } -func (s InvalidMarkerException) Error() string { +func (s *InvalidMarkerException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidMarkerException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidMarkerException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s InvalidMarkerException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidMarkerException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because the state of the specified resource is not @@ -11540,8 +11598,8 @@ func (s InvalidMarkerException) RequestID() string { // Key State Affects Use of a Customer Master Key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) // in the AWS Key Management Service Developer Guide . type InvalidStateException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11558,17 +11616,17 @@ func (s InvalidStateException) GoString() string { func newErrorInvalidStateException(v protocol.ResponseMetadata) error { return &InvalidStateException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidStateException) Code() string { +func (s *InvalidStateException) Code() string { return "KMSInvalidStateException" } // Message returns the exception's message. -func (s InvalidStateException) Message() string { +func (s *InvalidStateException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11576,30 +11634,30 @@ func (s InvalidStateException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidStateException) OrigErr() error { +func (s *InvalidStateException) OrigErr() error { return nil } -func (s InvalidStateException) Error() string { +func (s *InvalidStateException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidStateException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidStateException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidStateException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidStateException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because the signature verification failed. Signature // verification fails when it cannot confirm that signature was produced by // signing the specified message with the specified CMK and signing algorithm. type KMSInvalidSignatureException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11616,17 +11674,17 @@ func (s KMSInvalidSignatureException) GoString() string { func newErrorKMSInvalidSignatureException(v protocol.ResponseMetadata) error { return &KMSInvalidSignatureException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s KMSInvalidSignatureException) Code() string { +func (s *KMSInvalidSignatureException) Code() string { return "KMSInvalidSignatureException" } // Message returns the exception's message. 
-func (s KMSInvalidSignatureException) Message() string { +func (s *KMSInvalidSignatureException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11634,22 +11692,22 @@ func (s KMSInvalidSignatureException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s KMSInvalidSignatureException) OrigErr() error { +func (s *KMSInvalidSignatureException) OrigErr() error { return nil } -func (s KMSInvalidSignatureException) Error() string { +func (s *KMSInvalidSignatureException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s KMSInvalidSignatureException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *KMSInvalidSignatureException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s KMSInvalidSignatureException) RequestID() string { - return s.respMetadata.RequestID +func (s *KMSInvalidSignatureException) RequestID() string { + return s.RespMetadata.RequestID } // Contains information about each entry in the key list. @@ -11729,8 +11787,8 @@ type KeyMetadata struct { // is true, otherwise it is false. Enabled *bool `type:"boolean"` - // A list of encryption algorithms that the CMK supports. You cannot use the - // CMK with other encryption algorithms within AWS KMS. + // The encryption algorithms that the CMK supports. You cannot use the CMK with + // other encryption algorithms within AWS KMS. // // This field appears only when the KeyUsage of the CMK is ENCRYPT_DECRYPT. EncryptionAlgorithms []*string `type:"list"` @@ -11750,14 +11808,15 @@ type KeyMetadata struct { // in the AWS Key Management Service Developer Guide. KeyManager *string `type:"string" enum:"KeyManagerType"` - // The state of the CMK. + // The current status of the CMK. // - // For more information about how key state affects the use of a CMK, see How - // Key State Affects the Use of a Customer Master Key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) + // For more information about how key state affects the use of a CMK, see Key + // state: Effect on your CMK (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) // in the AWS Key Management Service Developer Guide. KeyState *string `type:"string" enum:"KeyState"` - // The cryptographic operations for which you can use the CMK. + // The cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) + // for which you can use the CMK. KeyUsage *string `type:"string" enum:"KeyUsageType"` // The source of the CMK's key material. When this value is AWS_KMS, AWS KMS @@ -11767,8 +11826,8 @@ type KeyMetadata struct { // in the AWS CloudHSM cluster associated with a custom key store. Origin *string `type:"string" enum:"OriginType"` - // A list of signing algorithms that the CMK supports. You cannot use the CMK - // with other signing algorithms within AWS KMS. + // The signing algorithms that the CMK supports. You cannot use the CMK with + // other signing algorithms within AWS KMS. // // This field appears only when the KeyUsage of the CMK is SIGN_VERIFY. SigningAlgorithms []*string `type:"list"` @@ -11901,8 +11960,8 @@ func (s *KeyMetadata) SetValidTo(v time.Time) *KeyMetadata { // The request was rejected because the specified CMK was not available. You // can retry the request. 
type KeyUnavailableException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11919,17 +11978,17 @@ func (s KeyUnavailableException) GoString() string { func newErrorKeyUnavailableException(v protocol.ResponseMetadata) error { return &KeyUnavailableException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s KeyUnavailableException) Code() string { +func (s *KeyUnavailableException) Code() string { return "KeyUnavailableException" } // Message returns the exception's message. -func (s KeyUnavailableException) Message() string { +func (s *KeyUnavailableException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11937,30 +11996,30 @@ func (s KeyUnavailableException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s KeyUnavailableException) OrigErr() error { +func (s *KeyUnavailableException) OrigErr() error { return nil } -func (s KeyUnavailableException) Error() string { +func (s *KeyUnavailableException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s KeyUnavailableException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *KeyUnavailableException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s KeyUnavailableException) RequestID() string { - return s.respMetadata.RequestID +func (s *KeyUnavailableException) RequestID() string { + return s.RespMetadata.RequestID } -// The request was rejected because a limit was exceeded. For more information, -// see Limits (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html) +// The request was rejected because a quota was exceeded. For more information, +// see Quotas (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html) // in the AWS Key Management Service Developer Guide. type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11977,17 +12036,17 @@ func (s LimitExceededException) GoString() string { func newErrorLimitExceededException(v protocol.ResponseMetadata) error { return &LimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LimitExceededException) Code() string { +func (s *LimitExceededException) Code() string { return "LimitExceededException" } // Message returns the exception's message. -func (s LimitExceededException) Message() string { +func (s *LimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11995,22 +12054,22 @@ func (s LimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededException) OrigErr() error { +func (s *LimitExceededException) OrigErr() error { return nil } -func (s LimitExceededException) Error() string { +func (s *LimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } type ListAliasesInput struct { @@ -12701,8 +12760,8 @@ func (s *ListRetirableGrantsInput) SetRetiringPrincipal(v string) *ListRetirable // The request was rejected because the specified policy is not syntactically // or semantically correct. type MalformedPolicyDocumentException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -12719,17 +12778,17 @@ func (s MalformedPolicyDocumentException) GoString() string { func newErrorMalformedPolicyDocumentException(v protocol.ResponseMetadata) error { return &MalformedPolicyDocumentException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s MalformedPolicyDocumentException) Code() string { +func (s *MalformedPolicyDocumentException) Code() string { return "MalformedPolicyDocumentException" } // Message returns the exception's message. -func (s MalformedPolicyDocumentException) Message() string { +func (s *MalformedPolicyDocumentException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12737,29 +12796,29 @@ func (s MalformedPolicyDocumentException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s MalformedPolicyDocumentException) OrigErr() error { +func (s *MalformedPolicyDocumentException) OrigErr() error { return nil } -func (s MalformedPolicyDocumentException) Error() string { +func (s *MalformedPolicyDocumentException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s MalformedPolicyDocumentException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *MalformedPolicyDocumentException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s MalformedPolicyDocumentException) RequestID() string { - return s.respMetadata.RequestID +func (s *MalformedPolicyDocumentException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because the specified entity or resource could not // be found. type NotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -12776,17 +12835,17 @@ func (s NotFoundException) GoString() string { func newErrorNotFoundException(v protocol.ResponseMetadata) error { return &NotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NotFoundException) Code() string { +func (s *NotFoundException) Code() string { return "NotFoundException" } // Message returns the exception's message. 
-func (s NotFoundException) Message() string { +func (s *NotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12794,22 +12853,22 @@ func (s NotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NotFoundException) OrigErr() error { +func (s *NotFoundException) OrigErr() error { return nil } -func (s NotFoundException) Error() string { +func (s *NotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *NotFoundException) RequestID() string { + return s.RespMetadata.RequestID } type PutKeyPolicyInput struct { @@ -12864,7 +12923,9 @@ type PutKeyPolicyInput struct { // immediately visible (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency) // in the AWS Identity and Access Management User Guide. // - // The key policy size limit is 32 kilobytes (32768 bytes). + // The key policy cannot exceed 32 kilobytes (32768 bytes). For more information, + // see Resource Quotas (https://docs.aws.amazon.com/kms/latest/developerguide/resource-limits.html) + // in the AWS Key Management Service Developer Guide. // // Policy is a required field Policy *string `min:"1" type:"string" required:"true"` @@ -13164,7 +13225,8 @@ type ReEncryptOutput struct { // The encryption algorithm that was used to reencrypt the data. DestinationEncryptionAlgorithm *string `type:"string" enum:"EncryptionAlgorithmSpec"` - // Unique identifier of the CMK used to reencrypt the data. + // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // of the CMK that was used to reencrypt the data. KeyId *string `min:"1" type:"string"` // The encryption algorithm that was used to decrypt the ciphertext before it @@ -13450,8 +13512,8 @@ type ScheduleKeyDeletionOutput struct { // The date and time after which AWS KMS deletes the customer master key (CMK). DeletionDate *time.Time `type:"timestamp"` - // The unique identifier of the customer master key (CMK) for which deletion - // is scheduled. + // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // of the CMK whose deletion is scheduled. KeyId *string `min:"1" type:"string"` } @@ -13522,8 +13584,8 @@ type SignInput struct { Message []byte `min:"1" type:"blob" required:"true" sensitive:"true"` // Tells AWS KMS whether the value of the Message parameter is a message or - // message digest. To indicate a message, enter RAW. To indicate a message digest, - // enter DIGEST. + // message digest. The default value, RAW, indicates a message. To indicate + // a message digest, enter DIGEST. MessageType *string `type:"string" enum:"MessageType"` // Specifies the signing algorithm to use when signing the message. @@ -13603,12 +13665,24 @@ func (s *SignInput) SetSigningAlgorithm(v string) *SignInput { type SignOutput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the asymmetric CMK that was used to sign - // the message. 
+ // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // of the asymmetric CMK that was used to sign the message. KeyId *string `min:"1" type:"string"` // The cryptographic signature that was generated for the message. // + // * When used with the supported RSA signing algorithms, the encoding of + // this value is defined by PKCS #1 in RFC 8017 (https://tools.ietf.org/html/rfc8017). + // + // * When used with the ECDSA_SHA_256, ECDSA_SHA_384, or ECDSA_SHA_512 signing + // algorithms, this value is a DER-encoded object as defined by ANS X9.62–2005 + // and RFC 3279 Section 2.2.3 (https://tools.ietf.org/html/rfc3279#section-2.2.3). + // This is the most commonly used signature format and is appropriate for + // most uses. + // + // When you use the HTTP API or the AWS CLI, the value is Base64-encoded. Otherwise, + // it is not Base64-encoded. + // // Signature is automatically base64 encoded/decoded by the SDK. Signature []byte `min:"1" type:"blob"` @@ -13707,8 +13781,8 @@ func (s *Tag) SetTagValue(v string) *Tag { // The request was rejected because one or more tags are not valid. type TagException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -13725,17 +13799,17 @@ func (s TagException) GoString() string { func newErrorTagException(v protocol.ResponseMetadata) error { return &TagException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TagException) Code() string { +func (s *TagException) Code() string { return "TagException" } // Message returns the exception's message. -func (s TagException) Message() string { +func (s *TagException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13743,22 +13817,22 @@ func (s TagException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TagException) OrigErr() error { +func (s *TagException) OrigErr() error { return nil } -func (s TagException) Error() string { +func (s *TagException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TagException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TagException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TagException) RequestID() string { - return s.respMetadata.RequestID +func (s *TagException) RequestID() string { + return s.RespMetadata.RequestID } type TagResourceInput struct { @@ -13853,8 +13927,8 @@ func (s TagResourceOutput) GoString() string { // The request was rejected because a specified parameter is not supported or // a specified resource is not valid for this operation. type UnsupportedOperationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -13871,17 +13945,17 @@ func (s UnsupportedOperationException) GoString() string { func newErrorUnsupportedOperationException(v protocol.ResponseMetadata) error { return &UnsupportedOperationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s UnsupportedOperationException) Code() string { +func (s *UnsupportedOperationException) Code() string { return "UnsupportedOperationException" } // Message returns the exception's message. -func (s UnsupportedOperationException) Message() string { +func (s *UnsupportedOperationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13889,22 +13963,22 @@ func (s UnsupportedOperationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UnsupportedOperationException) OrigErr() error { +func (s *UnsupportedOperationException) OrigErr() error { return nil } -func (s UnsupportedOperationException) Error() string { +func (s *UnsupportedOperationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s UnsupportedOperationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UnsupportedOperationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s UnsupportedOperationException) RequestID() string { - return s.respMetadata.RequestID +func (s *UnsupportedOperationException) RequestID() string { + return s.RespMetadata.RequestID } type UntagResourceInput struct { @@ -14105,7 +14179,7 @@ type UpdateCustomKeyStoreInput struct { // This parameter tells AWS KMS the current password of the kmsuser crypto user // (CU). It does not set or change the password of any users in the AWS CloudHSM // cluster. - KeyStorePassword *string `min:"1" type:"string" sensitive:"true"` + KeyStorePassword *string `min:"7" type:"string" sensitive:"true"` // Changes the friendly name of the custom key store to the value that you specify. // The custom key store name must be unique in the AWS account. @@ -14134,8 +14208,8 @@ func (s *UpdateCustomKeyStoreInput) Validate() error { if s.CustomKeyStoreId != nil && len(*s.CustomKeyStoreId) < 1 { invalidParams.Add(request.NewErrParamMinLen("CustomKeyStoreId", 1)) } - if s.KeyStorePassword != nil && len(*s.KeyStorePassword) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyStorePassword", 1)) + if s.KeyStorePassword != nil && len(*s.KeyStorePassword) < 7 { + invalidParams.Add(request.NewErrParamMinLen("KeyStorePassword", 7)) } if s.NewCustomKeyStoreName != nil && len(*s.NewCustomKeyStoreName) < 1 { invalidParams.Add(request.NewErrParamMinLen("NewCustomKeyStoreName", 1)) @@ -14297,12 +14371,13 @@ type VerifyInput struct { // KeyId is a required field KeyId *string `min:"1" type:"string" required:"true"` - // Specifies the message that was signed, or a hash digest of that message. - // Messages can be 0-4096 bytes. To verify a larger message, provide a hash - // digest of the message. + // Specifies the message that was signed. You can submit a raw message of up + // to 4096 bytes, or a hash digest of the message. If you submit a digest, use + // the MessageType parameter with a value of DIGEST. // - // If the digest of the message specified here is different from the message - // digest that was signed, the signature verification fails. + // If the message specified here is different from the message that was signed, + // the signature verification fails. A message and its hash digest are considered + // to be the same message. // // Message is automatically base64 encoded/decoded by the SDK. 
// @@ -14310,8 +14385,12 @@ type VerifyInput struct { Message []byte `min:"1" type:"blob" required:"true" sensitive:"true"` // Tells AWS KMS whether the value of the Message parameter is a message or - // message digest. To indicate a message, enter RAW. To indicate a message digest, - // enter DIGEST. + // message digest. The default value, RAW, indicates a message. To indicate + // a message digest, enter DIGEST. + // + // Use the DIGEST value only when the value of the Message parameter is a message + // digest. If you use the DIGEST value with a raw message, the security of the + // verification operation can be compromised. MessageType *string `type:"string" enum:"MessageType"` // The signature that the Sign operation generated. @@ -14408,8 +14487,8 @@ func (s *VerifyInput) SetSigningAlgorithm(v string) *VerifyInput { type VerifyOutput struct { _ struct{} `type:"structure"` - // The unique identifier for the asymmetric CMK that was used to verify the - // signature. + // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // of the asymmetric CMK that was used to verify the signature. KeyId *string `min:"1" type:"string"` // A Boolean value that indicates whether the signature was verified. A value @@ -14461,6 +14540,15 @@ const ( AlgorithmSpecRsaesOaepSha256 = "RSAES_OAEP_SHA_256" ) +// AlgorithmSpec_Values returns all elements of the AlgorithmSpec enum +func AlgorithmSpec_Values() []string { + return []string{ + AlgorithmSpecRsaesPkcs1V15, + AlgorithmSpecRsaesOaepSha1, + AlgorithmSpecRsaesOaepSha256, + } +} + const ( // ConnectionErrorCodeTypeInvalidCredentials is a ConnectionErrorCodeType enum value ConnectionErrorCodeTypeInvalidCredentials = "INVALID_CREDENTIALS" @@ -14479,8 +14567,32 @@ const ( // ConnectionErrorCodeTypeUserLockedOut is a ConnectionErrorCodeType enum value ConnectionErrorCodeTypeUserLockedOut = "USER_LOCKED_OUT" + + // ConnectionErrorCodeTypeUserNotFound is a ConnectionErrorCodeType enum value + ConnectionErrorCodeTypeUserNotFound = "USER_NOT_FOUND" + + // ConnectionErrorCodeTypeUserLoggedIn is a ConnectionErrorCodeType enum value + ConnectionErrorCodeTypeUserLoggedIn = "USER_LOGGED_IN" + + // ConnectionErrorCodeTypeSubnetNotFound is a ConnectionErrorCodeType enum value + ConnectionErrorCodeTypeSubnetNotFound = "SUBNET_NOT_FOUND" ) +// ConnectionErrorCodeType_Values returns all elements of the ConnectionErrorCodeType enum +func ConnectionErrorCodeType_Values() []string { + return []string{ + ConnectionErrorCodeTypeInvalidCredentials, + ConnectionErrorCodeTypeClusterNotFound, + ConnectionErrorCodeTypeNetworkErrors, + ConnectionErrorCodeTypeInternalError, + ConnectionErrorCodeTypeInsufficientCloudhsmHsms, + ConnectionErrorCodeTypeUserLockedOut, + ConnectionErrorCodeTypeUserNotFound, + ConnectionErrorCodeTypeUserLoggedIn, + ConnectionErrorCodeTypeSubnetNotFound, + } +} + const ( // ConnectionStateTypeConnected is a ConnectionStateType enum value ConnectionStateTypeConnected = "CONNECTED" @@ -14498,6 +14610,17 @@ const ( ConnectionStateTypeDisconnecting = "DISCONNECTING" ) +// ConnectionStateType_Values returns all elements of the ConnectionStateType enum +func ConnectionStateType_Values() []string { + return []string{ + ConnectionStateTypeConnected, + ConnectionStateTypeConnecting, + ConnectionStateTypeFailed, + ConnectionStateTypeDisconnected, + ConnectionStateTypeDisconnecting, + } +} + const ( // CustomerMasterKeySpecRsa2048 is a CustomerMasterKeySpec enum value CustomerMasterKeySpecRsa2048 = 
"RSA_2048" @@ -14524,6 +14647,20 @@ const ( CustomerMasterKeySpecSymmetricDefault = "SYMMETRIC_DEFAULT" ) +// CustomerMasterKeySpec_Values returns all elements of the CustomerMasterKeySpec enum +func CustomerMasterKeySpec_Values() []string { + return []string{ + CustomerMasterKeySpecRsa2048, + CustomerMasterKeySpecRsa3072, + CustomerMasterKeySpecRsa4096, + CustomerMasterKeySpecEccNistP256, + CustomerMasterKeySpecEccNistP384, + CustomerMasterKeySpecEccNistP521, + CustomerMasterKeySpecEccSecgP256k1, + CustomerMasterKeySpecSymmetricDefault, + } +} + const ( // DataKeyPairSpecRsa2048 is a DataKeyPairSpec enum value DataKeyPairSpecRsa2048 = "RSA_2048" @@ -14547,6 +14684,19 @@ const ( DataKeyPairSpecEccSecgP256k1 = "ECC_SECG_P256K1" ) +// DataKeyPairSpec_Values returns all elements of the DataKeyPairSpec enum +func DataKeyPairSpec_Values() []string { + return []string{ + DataKeyPairSpecRsa2048, + DataKeyPairSpecRsa3072, + DataKeyPairSpecRsa4096, + DataKeyPairSpecEccNistP256, + DataKeyPairSpecEccNistP384, + DataKeyPairSpecEccNistP521, + DataKeyPairSpecEccSecgP256k1, + } +} + const ( // DataKeySpecAes256 is a DataKeySpec enum value DataKeySpecAes256 = "AES_256" @@ -14555,6 +14705,14 @@ const ( DataKeySpecAes128 = "AES_128" ) +// DataKeySpec_Values returns all elements of the DataKeySpec enum +func DataKeySpec_Values() []string { + return []string{ + DataKeySpecAes256, + DataKeySpecAes128, + } +} + const ( // EncryptionAlgorithmSpecSymmetricDefault is a EncryptionAlgorithmSpec enum value EncryptionAlgorithmSpecSymmetricDefault = "SYMMETRIC_DEFAULT" @@ -14566,6 +14724,15 @@ const ( EncryptionAlgorithmSpecRsaesOaepSha256 = "RSAES_OAEP_SHA_256" ) +// EncryptionAlgorithmSpec_Values returns all elements of the EncryptionAlgorithmSpec enum +func EncryptionAlgorithmSpec_Values() []string { + return []string{ + EncryptionAlgorithmSpecSymmetricDefault, + EncryptionAlgorithmSpecRsaesOaepSha1, + EncryptionAlgorithmSpecRsaesOaepSha256, + } +} + const ( // ExpirationModelTypeKeyMaterialExpires is a ExpirationModelType enum value ExpirationModelTypeKeyMaterialExpires = "KEY_MATERIAL_EXPIRES" @@ -14574,6 +14741,14 @@ const ( ExpirationModelTypeKeyMaterialDoesNotExpire = "KEY_MATERIAL_DOES_NOT_EXPIRE" ) +// ExpirationModelType_Values returns all elements of the ExpirationModelType enum +func ExpirationModelType_Values() []string { + return []string{ + ExpirationModelTypeKeyMaterialExpires, + ExpirationModelTypeKeyMaterialDoesNotExpire, + } +} + const ( // GrantOperationDecrypt is a GrantOperation enum value GrantOperationDecrypt = "Decrypt" @@ -14618,6 +14793,26 @@ const ( GrantOperationGenerateDataKeyPairWithoutPlaintext = "GenerateDataKeyPairWithoutPlaintext" ) +// GrantOperation_Values returns all elements of the GrantOperation enum +func GrantOperation_Values() []string { + return []string{ + GrantOperationDecrypt, + GrantOperationEncrypt, + GrantOperationGenerateDataKey, + GrantOperationGenerateDataKeyWithoutPlaintext, + GrantOperationReEncryptFrom, + GrantOperationReEncryptTo, + GrantOperationSign, + GrantOperationVerify, + GrantOperationGetPublicKey, + GrantOperationCreateGrant, + GrantOperationRetireGrant, + GrantOperationDescribeKey, + GrantOperationGenerateDataKeyPair, + GrantOperationGenerateDataKeyPairWithoutPlaintext, + } +} + const ( // KeyManagerTypeAws is a KeyManagerType enum value KeyManagerTypeAws = "AWS" @@ -14626,6 +14821,14 @@ const ( KeyManagerTypeCustomer = "CUSTOMER" ) +// KeyManagerType_Values returns all elements of the KeyManagerType enum +func KeyManagerType_Values() []string { + 
 const (
 	// KeyManagerTypeAws is a KeyManagerType enum value
 	KeyManagerTypeAws = "AWS"
@@ -14626,6 +14821,14 @@ const (
 	KeyManagerTypeCustomer = "CUSTOMER"
 )

+// KeyManagerType_Values returns all elements of the KeyManagerType enum
+func KeyManagerType_Values() []string {
+	return []string{
+		KeyManagerTypeAws,
+		KeyManagerTypeCustomer,
+	}
+}
+
 const (
 	// KeyStateEnabled is a KeyState enum value
 	KeyStateEnabled = "Enabled"
@@ -14643,6 +14846,17 @@ const (
 	KeyStateUnavailable = "Unavailable"
 )

+// KeyState_Values returns all elements of the KeyState enum
+func KeyState_Values() []string {
+	return []string{
+		KeyStateEnabled,
+		KeyStateDisabled,
+		KeyStatePendingDeletion,
+		KeyStatePendingImport,
+		KeyStateUnavailable,
+	}
+}
+
 const (
 	// KeyUsageTypeSignVerify is a KeyUsageType enum value
 	KeyUsageTypeSignVerify = "SIGN_VERIFY"
@@ -14651,6 +14865,14 @@ const (
 	KeyUsageTypeEncryptDecrypt = "ENCRYPT_DECRYPT"
 )

+// KeyUsageType_Values returns all elements of the KeyUsageType enum
+func KeyUsageType_Values() []string {
+	return []string{
+		KeyUsageTypeSignVerify,
+		KeyUsageTypeEncryptDecrypt,
+	}
+}
+
 const (
 	// MessageTypeRaw is a MessageType enum value
 	MessageTypeRaw = "RAW"
@@ -14659,6 +14881,14 @@ const (
 	MessageTypeDigest = "DIGEST"
 )

+// MessageType_Values returns all elements of the MessageType enum
+func MessageType_Values() []string {
+	return []string{
+		MessageTypeRaw,
+		MessageTypeDigest,
+	}
+}
+
 const (
 	// OriginTypeAwsKms is a OriginType enum value
 	OriginTypeAwsKms = "AWS_KMS"
@@ -14670,6 +14900,15 @@ const (
 	OriginTypeAwsCloudhsm = "AWS_CLOUDHSM"
 )

+// OriginType_Values returns all elements of the OriginType enum
+func OriginType_Values() []string {
+	return []string{
+		OriginTypeAwsKms,
+		OriginTypeExternal,
+		OriginTypeAwsCloudhsm,
+	}
+}
+
 const (
 	// SigningAlgorithmSpecRsassaPssSha256 is a SigningAlgorithmSpec enum value
 	SigningAlgorithmSpecRsassaPssSha256 = "RSASSA_PSS_SHA_256"
@@ -14699,7 +14938,29 @@ const (
 	SigningAlgorithmSpecEcdsaSha512 = "ECDSA_SHA_512"
 )

+// SigningAlgorithmSpec_Values returns all elements of the SigningAlgorithmSpec enum
+func SigningAlgorithmSpec_Values() []string {
+	return []string{
+		SigningAlgorithmSpecRsassaPssSha256,
+		SigningAlgorithmSpecRsassaPssSha384,
+		SigningAlgorithmSpecRsassaPssSha512,
+		SigningAlgorithmSpecRsassaPkcs1V15Sha256,
+		SigningAlgorithmSpecRsassaPkcs1V15Sha384,
+		SigningAlgorithmSpecRsassaPkcs1V15Sha512,
+		SigningAlgorithmSpecEcdsaSha256,
+		SigningAlgorithmSpecEcdsaSha384,
+		SigningAlgorithmSpecEcdsaSha512,
+	}
+}
+
 const (
 	// WrappingKeySpecRsa2048 is a WrappingKeySpec enum value
 	WrappingKeySpecRsa2048 = "RSA_2048"
 )
+
+// WrappingKeySpec_Values returns all elements of the WrappingKeySpec enum
+func WrappingKeySpec_Values() []string {
+	return []string{
+		WrappingKeySpecRsa2048,
+	}
+}
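The generated *_Values helpers added throughout this file make enum membership checks table-free. A small sketch (my own example, assuming user input is validated before a CreateKey call) using CustomerMasterKeySpec_Values:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/kms"
)

// validCMKSpec reports whether spec is a member of the CustomerMasterKeySpec enum.
func validCMKSpec(spec string) bool {
	for _, v := range kms.CustomerMasterKeySpec_Values() {
		if v == spec {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(validCMKSpec("RSA_4096"))  // true
	fmt.Println(validCMKSpec("RSA_16384")) // false: not a defined spec
}
```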
diff --git a/vendor/github.com/aws/aws-sdk-go/service/kms/errors.go b/vendor/github.com/aws/aws-sdk-go/service/kms/errors.go
index 7d2d6cd1349b..911bf576ebf7 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/kms/errors.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/kms/errors.go
@@ -297,8 +297,8 @@ const (
 	// ErrCodeLimitExceededException for service response error code
 	// "LimitExceededException".
 	//
-	// The request was rejected because a limit was exceeded. For more information,
-	// see Limits (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html)
+	// The request was rejected because a quota was exceeded. For more information,
+	// see Quotas (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html)
 	// in the AWS Key Management Service Developer Guide.
 	ErrCodeLimitExceededException = "LimitExceededException"
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
index 7f60d4aa1851..bfc4372f9fde 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
@@ -207,6 +207,10 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
 // and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
 // in the IAM User Guide.
 //
+// * ErrCodeExpiredTokenException "ExpiredTokenException"
+// The web identity token that was passed is expired or is not valid. Get a
+// new identity token from the identity provider and then retry the request.
+//
 // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
 func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) {
 	req, out := c.AssumeRoleRequest(input)
@@ -626,7 +630,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
 // * Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html)
 // and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
 //
-// * Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html).
+// * Web Identity Federation Playground (https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/).
 // Walk through the process of authenticating through Login with Amazon,
 // Facebook, or Google, getting temporary security credentials, and then
 // using those credentials to make a request to AWS.
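The ExpiredTokenException case documented above is usually handled by refreshing the token and retrying. A minimal sketch with the SDK's awserr type; the role ARN and session name are placeholders:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.Must(session.NewSession()))

	out, err := svc.AssumeRole(&sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::111122223333:role/example"), // placeholder
		RoleSessionName: aws.String("example-session"),
	})
	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == sts.ErrCodeExpiredTokenException {
		// Fetch a fresh token from the identity provider, then retry the request.
		fmt.Println("token expired; refresh and retry:", aerr.Message())
		return
	}
	if err != nil {
		fmt.Println("assume role failed:", err)
		return
	}
	fmt.Println("temporary key id:", aws.StringValue(out.Credentials.AccessKeyId))
}
```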
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go
index fcb720dcac6d..cb1debbaa45c 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go
@@ -3,87 +3,11 @@
 // Package sts provides the client and types for making API
 // requests to AWS Security Token Service.
 //
-// The AWS Security Token Service (STS) is a web service that enables you to
-// request temporary, limited-privilege credentials for AWS Identity and Access
-// Management (IAM) users or for users that you authenticate (federated users).
-// This guide provides descriptions of the STS API. For more detailed information
-// about using this service, go to Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html).
-//
-// For information about setting up signatures and authorization through the
-// API, go to Signing AWS API Requests (https://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html)
-// in the AWS General Reference. For general information about the Query API,
-// go to Making Query Requests (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html)
-// in Using IAM. For information about using security tokens with other AWS
-// products, go to AWS Services That Work with IAM (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html)
-// in the IAM User Guide.
-//
-// If you're new to AWS and need additional technical information about a specific
-// AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/
-// (http://aws.amazon.com/documentation/).
-//
-// Endpoints
-//
-// By default, AWS Security Token Service (STS) is available as a global service,
-// and all AWS STS requests go to a single endpoint at https://sts.amazonaws.com.
-// Global requests map to the US East (N. Virginia) region. AWS recommends using
-// Regional AWS STS endpoints instead of the global endpoint to reduce latency,
-// build in redundancy, and increase session token validity. For more information,
-// see Managing AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
-// in the IAM User Guide.
-//
-// Most AWS Regions are enabled for operations in all AWS services by default.
-// Those Regions are automatically activated for use with AWS STS. Some Regions,
-// such as Asia Pacific (Hong Kong), must be manually enabled. To learn more
-// about enabling and disabling AWS Regions, see Managing AWS Regions (https://docs.aws.amazon.com/general/latest/gr/rande-manage.html)
-// in the AWS General Reference. When you enable these AWS Regions, they are
-// automatically activated for use with AWS STS. You cannot activate the STS
-// endpoint for a Region that is disabled. Tokens that are valid in all AWS
-// Regions are longer than tokens that are valid in Regions that are enabled
-// by default. Changing this setting might affect existing systems where you
-// temporarily store tokens. For more information, see Managing Global Endpoint
-// Session Tokens (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html#sts-regions-manage-tokens)
-// in the IAM User Guide.
-//
-// After you activate a Region for use with AWS STS, you can direct AWS STS
-// API calls to that Region. AWS STS recommends that you provide both the Region
-// and endpoint when you make calls to a Regional endpoint. You can provide
-// the Region alone for manually enabled Regions, such as Asia Pacific (Hong
-// Kong). In this case, the calls are directed to the STS Regional endpoint.
-// However, if you provide the Region alone for Regions enabled by default,
-// the calls are directed to the global endpoint of https://sts.amazonaws.com.
-//
-// To view the list of AWS STS endpoints and whether they are active by default,
-// see Writing Code to Use AWS STS Regions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html#id_credentials_temp_enable-regions_writing_code)
-// in the IAM User Guide.
-//
-// Recording API requests
-//
-// STS supports AWS CloudTrail, which is a service that records AWS calls for
-// your AWS account and delivers log files to an Amazon S3 bucket. By using
-// information collected by CloudTrail, you can determine what requests were
-// successfully made to STS, who made the request, when it was made, and so
-// on.
-//
-// If you activate AWS STS endpoints in Regions other than the default global
-// endpoint, then you must also turn on CloudTrail logging in those Regions.
-// This is necessary to record any AWS STS API calls that are made in those
-// Regions. For more information, see Turning On CloudTrail in Additional Regions
-// (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/aggregating_logs_regions_turn_on_ct.html)
-// in the AWS CloudTrail User Guide.
-//
-// AWS Security Token Service (STS) is a global service with a single endpoint
-// at https://sts.amazonaws.com. Calls to this endpoint are logged as calls
-// to a global service. However, because this endpoint is physically located
-// in the US East (N. Virginia) Region, your logs list us-east-1 as the event
-// Region. CloudTrail does not write these logs to the US East (Ohio) Region
-// unless you choose to include global service logs in that Region. CloudTrail
-// writes calls to all Regional endpoints to their respective Regions. For example,
-// calls to sts.us-east-2.amazonaws.com are published to the US East (Ohio)
-// Region and calls to sts.eu-central-1.amazonaws.com are published to the EU
-// (Frankfurt) Region.
-//
-// To learn more about CloudTrail, including how to turn it on and find your
-// log files, see the AWS CloudTrail User Guide (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html).
+// AWS Security Token Service (STS) enables you to request temporary, limited-privilege
+// credentials for AWS Identity and Access Management (IAM) users or for users
+// that you authenticate (federated users). This guide provides descriptions
+// of the STS API. For more information about using this service, see Temporary
+// Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html).
 //
 // See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service.
 //
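The endpoint guidance removed above still applies when configuring clients. A hedged sketch pinning an STS client to a regional endpoint rather than the global https://sts.amazonaws.com; the region and endpoint are example values:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	sess := session.Must(session.NewSession())

	// Direct calls at a regional endpoint to reduce latency and keep
	// CloudTrail events in the expected Region.
	svc := sts.New(sess, aws.NewConfig().
		WithRegion("eu-central-1").
		WithEndpoint("https://sts.eu-central-1.amazonaws.com"))

	out, err := svc.GetCallerIdentity(&sts.GetCallerIdentityInput{})
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("account:", aws.StringValue(out.Account))
}
```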
diff --git a/vendor/github.com/checkpoint-restore/go-criu/v4/.travis.yml b/vendor/github.com/checkpoint-restore/go-criu/v4/.travis.yml
index 9b5b025582cf..85e0cde3790a 100644
--- a/vendor/github.com/checkpoint-restore/go-criu/v4/.travis.yml
+++ b/vendor/github.com/checkpoint-restore/go-criu/v4/.travis.yml
@@ -1,5 +1,5 @@
 language: go
-sudo: required
+dist: bionic
 os:
   - linux
 go:
@@ -13,6 +13,7 @@ env:
 install:
   - sudo apt-get update
   - sudo apt-get install -y libprotobuf-dev libprotobuf-c0-dev protobuf-c-compiler protobuf-compiler python-protobuf libnl-3-dev libnet-dev libcap-dev
+  - make install.tools
   - go get github.com/checkpoint-restore/go-criu
   - git clone --single-branch -b ${CRIU_BRANCH} https://github.com/checkpoint-restore/criu.git
   - cd criu; make
@@ -20,6 +21,8 @@ install:
   - cd ..
 script:
   # This builds the code without running the tests.
-  - make build phaul test/test test/phaul test/piggie
+  - make lint build phaul test/test test/phaul test/piggie
   # Run actual test as root as it uses CRIU.
   - sudo make test phaul-test
+  # This builds crit-go
+  - make -C crit-go/magic-gen lint build magicgen test
diff --git a/vendor/github.com/checkpoint-restore/go-criu/v4/Makefile b/vendor/github.com/checkpoint-restore/go-criu/v4/Makefile
index ee44ee448629..10356304b1f0 100644
--- a/vendor/github.com/checkpoint-restore/go-criu/v4/Makefile
+++ b/vendor/github.com/checkpoint-restore/go-criu/v4/Makefile
@@ -12,7 +12,7 @@ endif
 all: build test phaul phaul-test

 lint:
-	@golint . test phaul
+	@golint -set_exit_status . test phaul
 build:
 	@$(GO) build -v
diff --git a/vendor/github.com/checkpoint-restore/go-criu/v4/go.sum b/vendor/github.com/checkpoint-restore/go-criu/v4/go.sum
index bf6db1068851..6124ed3e451b 100644
--- a/vendor/github.com/checkpoint-restore/go-criu/v4/go.sum
+++ b/vendor/github.com/checkpoint-restore/go-criu/v4/go.sum
@@ -1,20 +1,2 @@
 github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls=
 github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=
-google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
diff --git a/vendor/github.com/checkpoint-restore/go-criu/v4/main.go b/vendor/github.com/checkpoint-restore/go-criu/v4/main.go
index d152bfc535ec..d0a6da47837b 100644
--- a/vendor/github.com/checkpoint-restore/go-criu/v4/main.go
+++ b/vendor/github.com/checkpoint-restore/go-criu/v4/main.go
@@ -14,13 +14,22 @@ import (

 // Criu struct
 type Criu struct {
-	swrkCmd *exec.Cmd
-	swrkSk  *os.File
+	swrkCmd  *exec.Cmd
+	swrkSk   *os.File
+	swrkPath string
 }

 // MakeCriu returns the Criu object required for most operations
 func MakeCriu() *Criu {
-	return &Criu{}
+	return &Criu{
+		swrkPath: "criu",
+	}
+}
+
+// SetCriuPath allows setting the path to the CRIU binary
+// if it is in a non standard location
+func (c *Criu) SetCriuPath(path string) {
+	c.swrkPath = path
 }

 // Prepare sets up everything for the RPC communication to CRIU
@@ -36,7 +45,7 @@ func (c *Criu) Prepare() error {
 	defer srv.Close()

 	args := []string{"swrk", strconv.Itoa(fds[1])}
-	cmd := exec.Command("criu", args...)
+	cmd := exec.Command(c.swrkPath, args...)

 	err = cmd.Start()
 	if err != nil {
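A short usage sketch for the new SetCriuPath hook: the binary path is an example, and the GetCriuVersion call assumes that helper is present in this release of go-criu:

```go
package main

import (
	"fmt"

	criu "github.com/checkpoint-restore/go-criu/v4"
)

func main() {
	c := criu.MakeCriu()
	// Point the library at a non-standard CRIU binary before any RPC call;
	// MakeCriu defaults swrkPath to "criu" on $PATH.
	c.SetCriuPath("/usr/local/sbin/criu") // example path, adjust for your host

	version, err := c.GetCriuVersion() // assumed helper in this go-criu release
	if err != nil {
		fmt.Println("criu not usable:", err)
		return
	}
	fmt.Println("criu version:", version)
}
```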
diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS
index 851e81014b21..c5f725bc19d3 100644
--- a/vendor/github.com/docker/docker/AUTHORS
+++ b/vendor/github.com/docker/docker/AUTHORS
@@ -44,7 +44,7 @@ Ajey Charantimath
 ajneu
 Akash Gupta
 Akihiro Matsushima
-Akihiro Suda
+Akihiro Suda
 Akim Demaille
 Akira Koyasu
 Akshay Karle
@@ -81,6 +81,7 @@ Alexandre Garnier
 Alexandre González
 Alexandre Jomin
 Alexandru Sfirlogea
+Alexei Margasov
 Alexey Guskov
 Alexey Kotlyarov
 Alexey Shamrin
@@ -153,6 +154,7 @@ Andy Wilson
 Anes Hasicic
 Anil Belur
 Anil Madhavapeddy
+Ankit Jain
 Ankush Agarwal
 Anonmily
 Anran Qiao
@@ -184,6 +186,7 @@ Asad Saeeduddin
 Asbjørn Enge
 averagehuman
 Avi Das
+Avi Kivity
 Avi Miller
 Avi Vaid
 ayoshitake
@@ -507,6 +510,7 @@ Dmitri Shuralyov
 Dmitry Demeshchuk
 Dmitry Gusev
 Dmitry Kononenko
+Dmitry Sharshakov
 Dmitry Shyshkin
 Dmitry Smirnov
 Dmitry V. Krivenok
@@ -656,6 +660,7 @@ Frederik Loeffert
 Frederik Nordahl Jul Sabroe
 Freek Kalter
 Frieder Bluemle
+Fu JinLin
 Félix Baylac-Jacqué
 Félix Cantournet
 Gabe Rosenhouse
@@ -688,6 +693,7 @@ Ghislain Bourgeois
 Giampaolo Mancini
 Gianluca Borello
 Gildas Cuisinier
+Giovan Isa Musthofa
 gissehel
 Giuseppe Mazzotta
 Gleb Fotengauer-Malinovskiy
@@ -898,6 +904,7 @@ Jimmy Cuadra
 Jimmy Puckett
 Jimmy Song
 Jinsoo Park
+Jintao Zhang
 Jiri Appl
 Jiri Popelka
 Jiuyue Ma
@@ -1079,6 +1086,7 @@ Kunal Kushwaha
 Kunal Tyagi
 Kyle Conroy
 Kyle Linden
+Kyle Wuolle
 kyu
 Lachlan Coote
 Lai Jiangshan
@@ -1255,6 +1263,7 @@ Maxim Kulkin
 Maxim Treskin
 Maxime Petazzoni
 Maximiliano Maccanti
+Maxwell
 Meaglith Ma
 meejah
 Megan Kostick
@@ -1519,6 +1528,7 @@ Quentin Brossard
 Quentin Perez
 Quentin Tayssier
 r0n22
+Radostin Stoyanov
 Rafal Jeczalik
 Rafe Colton
 Raghavendra K T
@@ -1976,6 +1986,7 @@ xamyzhao
 Xian Chaobo
 Xianglin Gao
 Xianlu Bird
+Xiao YongBiao
 XiaoBing Jiang
 Xiaodong Zhang
 Xiaoxi He
diff --git a/vendor/github.com/docker/docker/NOTICE b/vendor/github.com/docker/docker/NOTICE
index 0c74e15b057f..58b19b6d15b9 100644
--- a/vendor/github.com/docker/docker/NOTICE
+++ b/vendor/github.com/docker/docker/NOTICE
@@ -3,7 +3,7 @@ Copyright 2012-2017 Docker, Inc.

 This product includes software developed at Docker, Inc. (https://www.docker.com).

-This product contains software (https://github.com/kr/pty) developed
+This product contains software (https://github.com/creack/pty) developed
 by Keith Rarick, licensed under the MIT License.

 The following is courtesy of our legal counsel:
diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml
index 78c576cef25e..70b09a0e3b73 100644
--- a/vendor/github.com/docker/docker/api/swagger.yaml
+++ b/vendor/github.com/docker/docker/api/swagger.yaml
@@ -26,13 +26,19 @@ info:
   x-logo:
     url: "https://docs.docker.com/images/logo-docker-main.png"
  description: |
-    The Engine API is an HTTP API served by Docker Engine. It is the API the Docker client uses to communicate with the Engine, so everything the Docker client can do can be done with the API.
+    The Engine API is an HTTP API served by Docker Engine. It is the API the
+    Docker client uses to communicate with the Engine, so everything the Docker
+    client can do can be done with the API.

-    Most of the client's commands map directly to API endpoints (e.g. `docker ps` is `GET /containers/json`). The notable exception is running containers, which consists of several API calls.
+    Most of the client's commands map directly to API endpoints (e.g. `docker ps`
+    is `GET /containers/json`).
The notable exception is running containers, + which consists of several API calls. # Errors - The API uses standard HTTP status codes to indicate the success or failure of the API call. The body of the response will be JSON in the following format: + The API uses standard HTTP status codes to indicate the success or failure + of the API call. The body of the response will be JSON in the following + format: ``` { @@ -65,7 +71,11 @@ info: # Authentication - Authentication for registries is handled client side. The client has to send authentication details to various endpoints that need to communicate with registries, such as `POST /images/(name)/push`. These are sent as `X-Registry-Auth` header as a Base64 encoded (JSON) string with the following structure: + Authentication for registries is handled client side. The client has to send + authentication details to various endpoints that need to communicate with + registries, such as `POST /images/(name)/push`. These are sent as + `X-Registry-Auth` header as a [base64url encoded](https://tools.ietf.org/html/rfc4648#section-5) + (JSON) string with the following structure: ``` { @@ -76,9 +86,11 @@ info: } ``` - The `serveraddress` is a domain/IP without a protocol. Throughout this structure, double quotes are required. + The `serveraddress` is a domain/IP without a protocol. Throughout this + structure, double quotes are required. - If you have already got an identity token from the [`/auth` endpoint](#operation/SystemAuth), you can just pass this instead of credentials: + If you have already got an identity token from the [`/auth` endpoint](#operation/SystemAuth), + you can just pass this instead of credentials: ``` { @@ -104,7 +116,9 @@ tags: - name: "Network" x-displayName: "Networks" description: | - Networks are user-defined networks that containers can be attached to. See the [networking documentation](https://docs.docker.com/engine/userguide/networking/) for more information. + Networks are user-defined networks that containers can be attached to. + See the [networking documentation](https://docs.docker.com/network/) + for more information. - name: "Volume" x-displayName: "Volumes" description: | @@ -112,34 +126,46 @@ tags: - name: "Exec" x-displayName: "Exec" description: | - Run new commands inside running containers. See the [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/) for more information. + Run new commands inside running containers. Refer to the + [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/) + for more information. + + To exec a command in a container, you first need to create an exec instance, + then start it. These two API endpoints are wrapped up in a single command-line + command, `docker exec`. - To exec a command in a container, you first need to create an exec instance, then start it. These two API endpoints are wrapped up in a single command-line command, `docker exec`. # Swarm things - name: "Swarm" x-displayName: "Swarm" description: | - Engines can be clustered together in a swarm. See [the swarm mode documentation](https://docs.docker.com/engine/swarm/) for more information. + Engines can be clustered together in a swarm. Refer to the + [swarm mode documentation](https://docs.docker.com/engine/swarm/) + for more information. - name: "Node" x-displayName: "Nodes" description: | - Nodes are instances of the Engine participating in a swarm. Swarm mode must be enabled for these endpoints to work. 
+ Nodes are instances of the Engine participating in a swarm. Swarm mode + must be enabled for these endpoints to work. - name: "Service" x-displayName: "Services" description: | - Services are the definitions of tasks to run on a swarm. Swarm mode must be enabled for these endpoints to work. + Services are the definitions of tasks to run on a swarm. Swarm mode must + be enabled for these endpoints to work. - name: "Task" x-displayName: "Tasks" description: | - A task is a container running on a swarm. It is the atomic scheduling unit of swarm. Swarm mode must be enabled for these endpoints to work. + A task is a container running on a swarm. It is the atomic scheduling unit + of swarm. Swarm mode must be enabled for these endpoints to work. - name: "Secret" x-displayName: "Secrets" description: | - Secrets are sensitive data that can be used by services. Swarm mode must be enabled for these endpoints to work. + Secrets are sensitive data that can be used by services. Swarm mode must + be enabled for these endpoints to work. - name: "Config" x-displayName: "Configs" description: | - Configs are application configurations that can be used by services. Swarm mode must be enabled for these endpoints to work. + Configs are application configurations that can be used by services. Swarm + mode must be enabled for these endpoints to work. # System things - name: "Plugin" x-displayName: "Plugins" @@ -345,9 +371,11 @@ definitions: RestartPolicy: description: | - The behavior to apply when the container exits. The default is not to restart. + The behavior to apply when the container exits. The default is not to + restart. - An ever increasing delay (double the previous delay, starting at 100ms) is added before each restart to prevent flooding the server. + An ever increasing delay (double the previous delay, starting at 100ms) is + added before each restart to prevent flooding the server. type: "object" properties: Name: @@ -364,7 +392,8 @@ definitions: - "on-failure" MaximumRetryCount: type: "integer" - description: "If `on-failure` is used, the number of times to retry before giving up" + description: | + If `on-failure` is used, the number of times to retry before giving up. Resources: description: "A container's resources (cgroups config, ulimits, etc)" @@ -372,7 +401,9 @@ definitions: properties: # Applicable to all platforms CpuShares: - description: "An integer value representing this container's relative CPU weight versus other containers." + description: | + An integer value representing this container's relative CPU weight + versus other containers. type: "integer" Memory: description: "Memory limit in bytes." @@ -381,7 +412,11 @@ definitions: default: 0 # Applicable to UNIX platforms CgroupParent: - description: "Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist." + description: | + Path to `cgroups` under which the container's `cgroup` is created. If + the path is not absolute, the path is considered to be relative to the + `cgroups` path of the init process. Cgroups are created if they do not + already exist. type: "string" BlkioWeight: description: "Block IO weight (relative weight)." @@ -390,7 +425,11 @@ definitions: maximum: 1000 BlkioWeightDevice: description: | - Block IO weight (relative device weight) in the form `[{"Path": "device_path", "Weight": weight}]`. 
+ Block IO weight (relative device weight) in the form: + + ``` + [{"Path": "device_path", "Weight": weight}] + ``` type: "array" items: type: "object" @@ -402,25 +441,41 @@ definitions: minimum: 0 BlkioDeviceReadBps: description: | - Limit read rate (bytes per second) from a device, in the form `[{"Path": "device_path", "Rate": rate}]`. + Limit read rate (bytes per second) from a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` type: "array" items: $ref: "#/definitions/ThrottleDevice" BlkioDeviceWriteBps: description: | - Limit write rate (bytes per second) to a device, in the form `[{"Path": "device_path", "Rate": rate}]`. + Limit write rate (bytes per second) to a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` type: "array" items: $ref: "#/definitions/ThrottleDevice" BlkioDeviceReadIOps: description: | - Limit read rate (IO per second) from a device, in the form `[{"Path": "device_path", "Rate": rate}]`. + Limit read rate (IO per second) from a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` type: "array" items: $ref: "#/definitions/ThrottleDevice" BlkioDeviceWriteIOps: description: | - Limit write rate (IO per second) to a device, in the form `[{"Path": "device_path", "Rate": rate}]`. + Limit write rate (IO per second) to a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` type: "array" items: $ref: "#/definitions/ThrottleDevice" @@ -429,23 +484,31 @@ definitions: type: "integer" format: "int64" CpuQuota: - description: "Microseconds of CPU time that the container can get in a CPU period." + description: | + Microseconds of CPU time that the container can get in a CPU period. type: "integer" format: "int64" CpuRealtimePeriod: - description: "The length of a CPU real-time period in microseconds. Set to 0 to allocate no time allocated to real-time tasks." + description: | + The length of a CPU real-time period in microseconds. Set to 0 to + allocate no time allocated to real-time tasks. type: "integer" format: "int64" CpuRealtimeRuntime: - description: "The length of a CPU real-time runtime in microseconds. Set to 0 to allocate no time allocated to real-time tasks." + description: | + The length of a CPU real-time runtime in microseconds. Set to 0 to + allocate no time allocated to real-time tasks. type: "integer" format: "int64" CpusetCpus: - description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)" + description: | + CPUs in which to allow execution (e.g., `0-3`, `0,1`). type: "string" example: "0-3" CpusetMems: - description: "Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems." + description: | + Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only + effective on NUMA systems. type: "string" Devices: description: "A list of devices to add to the container." @@ -459,14 +522,11 @@ definitions: type: "string" example: "c 13:* rwm" DeviceRequests: - description: "a list of requests for devices to be sent to device drivers" + description: | + A list of requests for devices to be sent to device drivers. type: "array" items: $ref: "#/definitions/DeviceRequest" - DiskQuota: - description: "Disk limit (in bytes)." - type: "integer" - format: "int64" KernelMemory: description: "Kernel memory limit in bytes." type: "integer" @@ -481,11 +541,15 @@ definitions: type: "integer" format: "int64" MemorySwap: - description: "Total memory limit (memory + swap). Set as `-1` to enable unlimited swap." 
+ description: | + Total memory limit (memory + swap). Set as `-1` to enable unlimited + swap. type: "integer" format: "int64" MemorySwappiness: - description: "Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100." + description: | + Tune a container's memory swappiness behavior. Accepts an integer + between 0 and 100. type: "integer" format: "int64" minimum: 0 @@ -498,18 +562,26 @@ definitions: description: "Disable OOM Killer for the container." type: "boolean" Init: - description: "Run an init inside the container that forwards signals and reaps processes. This field is omitted if empty, and the default (as configured on the daemon) is used." + description: | + Run an init inside the container that forwards signals and reaps + processes. This field is omitted if empty, and the default (as + configured on the daemon) is used. type: "boolean" x-nullable: true PidsLimit: description: | - Tune a container's PIDs limit. Set `0` or `-1` for unlimited, or `null` to not change. + Tune a container's PIDs limit. Set `0` or `-1` for unlimited, or `null` + to not change. type: "integer" format: "int64" x-nullable: true Ulimits: description: | - A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`" + A list of resource limits to set in the container. For example: + + ``` + {"Name": "nofile", "Soft": 1024, "Hard": 2048} + ``` type: "array" items: type: "object" @@ -528,14 +600,18 @@ definitions: description: | The number of usable CPUs (Windows only). - On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last. + On Windows Server containers, the processor resource controls are + mutually exclusive. The order of precedence is `CPUCount` first, then + `CPUShares`, and `CPUPercent` last. type: "integer" format: "int64" CpuPercent: description: | The usable percentage of the available CPUs (Windows only). - On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last. + On Windows Server containers, the processor resource controls are + mutually exclusive. The order of precedence is `CPUCount` first, then + `CPUShares`, and `CPUPercent` last. type: "integer" format: "int64" IOMaximumIOps: @@ -543,12 +619,16 @@ definitions: type: "integer" format: "int64" IOMaximumBandwidth: - description: "Maximum IO in bytes per second for the container system drive (Windows only)" + description: | + Maximum IO in bytes per second for the container system drive + (Windows only). type: "integer" format: "int64" ResourceObject: - description: "An object describing the resources which can be advertised by a node and requested by a task" + description: | + An object describing the resources which can be advertised by a node and + requested by a task. type: "object" properties: NanoCPUs: @@ -563,7 +643,9 @@ definitions: $ref: "#/definitions/GenericResources" GenericResources: - description: "User-defined resources can be either Integer resources (e.g, `SSD=3`) or String resources (e.g, `GPU=UUID1`)" + description: | + User-defined resources can be either Integer resources (e.g, `SSD=3`) or + String resources (e.g, `GPU=UUID1`). type: "array" items: type: "object" @@ -610,18 +692,92 @@ definitions: items: type: "string" Interval: - description: "The time to wait between checks in nanoseconds. 
It should be 0 or at least 1000000 (1 ms). 0 means inherit." + description: | + The time to wait between checks in nanoseconds. It should be 0 or at + least 1000000 (1 ms). 0 means inherit. type: "integer" Timeout: - description: "The time to wait before considering the check to have hung. It should be 0 or at least 1000000 (1 ms). 0 means inherit." + description: | + The time to wait before considering the check to have hung. It should + be 0 or at least 1000000 (1 ms). 0 means inherit. type: "integer" Retries: - description: "The number of consecutive failures needed to consider a container as unhealthy. 0 means inherit." + description: | + The number of consecutive failures needed to consider a container as + unhealthy. 0 means inherit. type: "integer" StartPeriod: - description: "Start period for the container to initialize before starting health-retries countdown in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit." + description: | + Start period for the container to initialize before starting + health-retries countdown in nanoseconds. It should be 0 or at least + 1000000 (1 ms). 0 means inherit. type: "integer" + Health: + description: | + Health stores information about the container's healthcheck results. + type: "object" + properties: + Status: + description: | + Status is one of `none`, `starting`, `healthy` or `unhealthy` + + - "none" Indicates there is no healthcheck + - "starting" Starting indicates that the container is not yet ready + - "healthy" Healthy indicates that the container is running correctly + - "unhealthy" Unhealthy indicates that the container has a problem + type: "string" + enum: + - "none" + - "starting" + - "healthy" + - "unhealthy" + example: "healthy" + FailingStreak: + description: "FailingStreak is the number of consecutive failures" + type: "integer" + example: 0 + Log: + type: "array" + description: | + Log contains the last few results (oldest first) + items: + x-nullable: true + $ref: "#/definitions/HealthcheckResult" + + HealthcheckResult: + description: | + HealthcheckResult stores information about a single run of a healthcheck probe + type: "object" + properties: + Start: + description: | + Date and time at which this check started in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "date-time" + example: "2020-01-04T10:44:24.496525531Z" + End: + description: | + Date and time at which this check ended in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2020-01-04T10:45:21.364524523Z" + ExitCode: + description: | + ExitCode meanings: + + - `0` healthy + - `1` unhealthy + - `2` reserved (considered unhealthy) + - other values: error running probe + type: "integer" + example: 0 + Output: + description: "Output from last check" + type: "string" + HostConfig: description: "Container configuration that depends on the host we are running on" allOf: @@ -632,12 +788,44 @@ definitions: Binds: type: "array" description: | - A list of volume bindings for this container. Each volume binding is a string in one of these forms: - - - `host-src:container-dest` to bind-mount a host path into the container. Both `host-src`, and `container-dest` must be an _absolute_ path. - - `host-src:container-dest:ro` to make the bind mount read-only inside the container. Both `host-src`, and `container-dest` must be an _absolute_ path. 
- - `volume-name:container-dest` to bind-mount a volume managed by a volume driver into the container. `container-dest` must be an _absolute_ path. - - `volume-name:container-dest:ro` to mount the volume read-only inside the container. `container-dest` must be an _absolute_ path. + A list of volume bindings for this container. Each volume binding + is a string in one of these forms: + + - `host-src:container-dest[:options]` to bind-mount a host path + into the container. Both `host-src`, and `container-dest` must + be an _absolute_ path. + - `volume-name:container-dest[:options]` to bind-mount a volume + managed by a volume driver into the container. `container-dest` + must be an _absolute_ path. + + `options` is an optional, comma-delimited list of: + + - `nocopy` disables automatic copying of data from the container + path to the volume. The `nocopy` flag only applies to named volumes. + - `[ro|rw]` mounts a volume read-only or read-write, respectively. + If omitted or set to `rw`, volumes are mounted read-write. + - `[z|Z]` applies SELinux labels to allow or deny multiple containers + to read and write to the same volume. + - `z`: a _shared_ content label is applied to the content. This + label indicates that multiple containers can share the volume + content, for both reading and writing. + - `Z`: a _private unshared_ label is applied to the content. + This label indicates that only the current container can use + a private volume. Labeling systems such as SELinux require + proper labels to be placed on volume content that is mounted + into a container. Without a label, the security system can + prevent a container's processes from using the content. By + default, the labels set by the host operating system are not + modified. + - `[[r]shared|[r]slave|[r]private]` specifies mount + [propagation behavior](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt). + This only applies to bind-mounted volumes, not internal volumes + or named volumes. Mount propagation requires the source mount + point (the location where the source directory is mounted in the + host operating system) to have the correct propagation properties. + For shared volumes, the source mount point must be set to `shared`. + For slave volumes, the mount must be set to either `shared` or + `slave`. items: type: "string" ContainerIDFile: @@ -665,25 +853,33 @@ definitions: type: "string" NetworkMode: type: "string" - description: "Network mode to use for this container. Supported standard values are: `bridge`, `host`, `none`, and `container:`. Any other value is taken - as a custom network's name to which this container should connect to." + description: | + Network mode to use for this container. Supported standard values + are: `bridge`, `host`, `none`, and `container:`. Any + other value is taken as a custom network's name to which this + container should connect to. PortBindings: $ref: "#/definitions/PortMap" RestartPolicy: $ref: "#/definitions/RestartPolicy" AutoRemove: type: "boolean" - description: "Automatically remove the container when the container's process exits. This has no effect if `RestartPolicy` is set." + description: | + Automatically remove the container when the container's process + exits. This has no effect if `RestartPolicy` is set. VolumeDriver: type: "string" description: "Driver that this container uses to mount volumes." VolumesFrom: type: "array" - description: "A list of volumes to inherit from another container, specified in the form `[:]`." 
+ description: | + A list of volumes to inherit from another container, specified in + the form `[:]`. items: type: "string" Mounts: - description: "Specification for mounts to be added to the container." + description: | + Specification for mounts to be added to the container. type: "array" items: $ref: "#/definitions/Mount" @@ -692,19 +888,24 @@ definitions: Capabilities: type: "array" description: | - A list of kernel capabilities to be available for container (this overrides the default set). + A list of kernel capabilities to be available for container (this + overrides the default set). Conflicts with options 'CapAdd' and 'CapDrop'" items: type: "string" CapAdd: type: "array" - description: "A list of kernel capabilities to add to the container. Conflicts with option 'Capabilities'" + description: | + A list of kernel capabilities to add to the container. Conflicts + with option 'Capabilities'. items: type: "string" CapDrop: type: "array" - description: "A list of kernel capabilities to drop from the container. Conflicts with option 'Capabilities'" + description: | + A list of kernel capabilities to drop from the container. Conflicts + with option 'Capabilities'. items: type: "string" Dns: @@ -725,43 +926,49 @@ definitions: ExtraHosts: type: "array" description: | - A list of hostnames/IP mappings to add to the container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. + A list of hostnames/IP mappings to add to the container's `/etc/hosts` + file. Specified in the form `["hostname:IP"]`. items: type: "string" GroupAdd: type: "array" - description: "A list of additional groups that the container process will run as." + description: | + A list of additional groups that the container process will run as. items: type: "string" IpcMode: type: "string" description: | - IPC sharing mode for the container. Possible values are: + IPC sharing mode for the container. Possible values are: - - `"none"`: own private IPC namespace, with /dev/shm not mounted - - `"private"`: own private IPC namespace - - `"shareable"`: own private IPC namespace, with a possibility to share it with other containers - - `"container:"`: join another (shareable) container's IPC namespace - - `"host"`: use the host system's IPC namespace + - `"none"`: own private IPC namespace, with /dev/shm not mounted + - `"private"`: own private IPC namespace + - `"shareable"`: own private IPC namespace, with a possibility to share it with other containers + - `"container:"`: join another (shareable) container's IPC namespace + - `"host"`: use the host system's IPC namespace - If not specified, daemon default is used, which can either be `"private"` - or `"shareable"`, depending on daemon version and configuration. + If not specified, daemon default is used, which can either be `"private"` + or `"shareable"`, depending on daemon version and configuration. Cgroup: type: "string" description: "Cgroup to use for the container." Links: type: "array" - description: "A list of links for the container in the form `container_name:alias`." + description: | + A list of links for the container in the form `container_name:alias`. items: type: "string" OomScoreAdj: type: "integer" - description: "An integer value containing the score given to the container in order to tune OOM killer preferences." + description: | + An integer value containing the score given to the container in + order to tune OOM killer preferences. example: 500 PidMode: type: "string" description: | - Set the PID (Process) Namespace mode for the container. 
It can be either: + Set the PID (Process) Namespace mode for the container. It can be + either: - `"container:"`: joins another container's PID namespace - `"host"`: use the host's PID namespace inside the container @@ -774,11 +981,13 @@ definitions: Allocates an ephemeral host port for all of a container's exposed ports. - Ports are de-allocated when the container stops and allocated when the container starts. - The allocated port might be changed when restarting the container. + Ports are de-allocated when the container stops and allocated when + the container starts. The allocated port might be changed when + restarting the container. - The port is selected from the ephemeral port range that depends on the kernel. - For example, on Linux the range is defined by `/proc/sys/net/ipv4/ip_local_port_range`. + The port is selected from the ephemeral port range that depends on + the kernel. For example, on Linux the range is defined by + `/proc/sys/net/ipv4/ip_local_port_range`. ReadonlyRootfs: type: "boolean" description: "Mount the container's root filesystem as read only." @@ -797,7 +1006,12 @@ definitions: Tmpfs: type: "object" description: | - A map of container directories which should be replaced by tmpfs mounts, and their corresponding mount options. For example: `{ "/run": "rw,noexec,nosuid,size=65536k" }`. + A map of container directories which should be replaced by tmpfs + mounts, and their corresponding mount options. For example: + + ``` + { "/run": "rw,noexec,nosuid,size=65536k" } + ``` additionalProperties: type: "string" UTSMode: @@ -805,15 +1019,23 @@ definitions: description: "UTS namespace to use for the container." UsernsMode: type: "string" - description: "Sets the usernamespace mode for the container when usernamespace remapping option is enabled." + description: | + Sets the usernamespace mode for the container when usernamespace + remapping option is enabled. ShmSize: type: "integer" - description: "Size of `/dev/shm` in bytes. If omitted, the system uses 64MB." + description: | + Size of `/dev/shm` in bytes. If omitted, the system uses 64MB. minimum: 0 Sysctls: type: "object" description: | - A list of kernel parameters (sysctls) to set in the container. For example: `{"net.ipv4.ip_forward": "1"}` + A list of kernel parameters (sysctls) to set in the container. + For example: + + ``` + {"net.ipv4.ip_forward": "1"} + ``` additionalProperties: type: "string" Runtime: @@ -822,7 +1044,8 @@ definitions: # Applicable to Windows ConsoleSize: type: "array" - description: "Initial console size, as an `[height, width]` array. (Windows only)" + description: | + Initial console size, as an `[height, width]` array. (Windows only) minItems: 2 maxItems: 2 items: @@ -830,19 +1053,24 @@ definitions: minimum: 0 Isolation: type: "string" - description: "Isolation technology of the container. (Windows only)" + description: | + Isolation technology of the container. (Windows only) enum: - "default" - "process" - "hyperv" MaskedPaths: type: "array" - description: "The list of paths to be masked inside the container (this overrides the default set of paths)" + description: | + The list of paths to be masked inside the container (this overrides + the default set of paths). items: type: "string" ReadonlyPaths: type: "array" - description: "The list of paths to be set as read-only inside the container (this overrides the default set of paths)" + description: | + The list of paths to be set as read-only inside the container + (this overrides the default set of paths). 
       items:
         type: "string"

@@ -883,7 +1111,8 @@ definitions:
           - {}
         default: {}
       Tty:
-        description: "Attach standard streams to a TTY, including `stdin` if it is not closed."
+        description: |
+          Attach standard streams to a TTY, including `stdin` if it is not closed.
         type: "boolean"
         default: false
       OpenStdin:
@@ -896,12 +1125,15 @@ definitions:
         default: false
       Env:
         description: |
-          A list of environment variables to set inside the container in the form `["VAR=value", ...]`. A variable without `=` is removed from the environment, rather than to have an empty value.
+          A list of environment variables to set inside the container in the
+          form `["VAR=value", ...]`. A variable without `=` is removed from the
+          environment, rather than to have an empty value.
         type: "array"
         items:
           type: "string"
       Cmd:
-        description: "Command to run specified as a string or an array of strings."
+        description: |
+          Command to run specified as a string or an array of strings.
         type: "array"
         items:
           type: "string"
@@ -911,10 +1143,13 @@ definitions:
         description: "Command is already escaped (Windows only)"
         type: "boolean"
       Image:
-        description: "The name of the image to use when creating the container"
+        description: |
+          The name of the image to use when creating the container.
         type: "string"
       Volumes:
-        description: "An object mapping mount point paths inside the container to empty objects."
+        description: |
+          An object mapping mount point paths inside the container to empty
+          objects.
         type: "object"
        additionalProperties:
           type: "object"
@@ -928,7 +1163,9 @@ definitions:
         description: |
           The entry point for the container as a string or an array of strings.

-          If the array consists of exactly one empty string (`[""]`) then the entry point is reset to system default (i.e., the entry point used by docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`).
+          If the array consists of exactly one empty string (`[""]`) then the
+          entry point is reset to system default (i.e., the entry point used by
+          docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`).
         type: "array"
         items:
           type: "string"
@@ -939,7 +1176,8 @@ definitions:
         description: "MAC address of the container."
         type: "string"
       OnBuild:
-        description: "`ONBUILD` metadata that were defined in the image's `Dockerfile`."
+        description: |
+          `ONBUILD` metadata that were defined in the image's `Dockerfile`.
         type: "array"
         items:
           type: "string"
@@ -949,7 +1187,8 @@ definitions:
         additionalProperties:
           type: "string"
       StopSignal:
-        description: "Signal to stop a container as a string or unsigned integer."
+        description: |
+          Signal to stop a container as a string or unsigned integer.
         type: "string"
         default: "SIGTERM"
       StopTimeout:
@@ -957,11 +1196,48 @@ definitions:
         type: "integer"
         default: 10
       Shell:
-        description: "Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell."
+        description: |
+          Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell.
         type: "array"
         items:
           type: "string"

+  NetworkingConfig:
+    description: |
+      NetworkingConfig represents the container's networking configuration for
+      each of its interfaces.
+      It is used for the networking configs specified in the `docker create`
+      and `docker network connect` commands.
+    type: "object"
+    properties:
+      EndpointsConfig:
+        description: |
+          A mapping of network name to endpoint configuration for that network.
+ type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + example: + # putting an example here, instead of using the example values from + # /definitions/EndpointSettings, because containers/create currently + # does not support attaching to multiple networks, so the example request + # would be confusing if it showed that multiple networks can be contained + # in the EndpointsConfig. + # TODO remove once we support multiple networks on container create (see https://github.com/moby/moby/blob/07e6b843594e061f82baa5fa23c2ff7d536c2a05/daemon/create.go#L323) + EndpointsConfig: + isolated_nw: + IPAMConfig: + IPv4Address: "172.20.30.33" + IPv6Address: "2001:db8:abcd::3033" + LinkLocalIPs: + - "169.254.34.68" + - "fe80::3468" + Links: + - "container_1" + - "container_2" + Aliases: + - "server_x" + - "server_y" + NetworkSettings: description: "NetworkSettings exposes the network settings in the API" type: "object" @@ -1145,6 +1421,7 @@ definitions: type: "object" additionalProperties: type: "array" + x-nullable: true items: $ref: "#/definitions/PortBinding" example: @@ -1169,7 +1446,6 @@ definitions: PortBinding represents a binding between a host IP address and a host port. type: "object" - x-nullable: true properties: HostIp: description: "Host IP address that the container's port is mapped to." @@ -1404,13 +1680,16 @@ definitions: type: "string" Scope: type: "string" - description: "The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level." + description: | + The level at which the volume exists. Either `global` for cluster-wide, + or `local` for machine level. default: "local" x-nullable: false enum: ["local", "global"] Options: type: "object" - description: "The driver specific options used when creating the volume." + description: | + The driver specific options used when creating the volume. additionalProperties: type: "string" UsageData: @@ -1528,7 +1807,12 @@ definitions: type: "string" default: "default" Config: - description: "List of IPAM configuration options, specified as a map: `{\"Subnet\": , \"IPRange\": , \"Gateway\": , \"AuxAddress\": }`" + description: | + List of IPAM configuration options, specified as a map: + + ``` + {"Subnet": , "IPRange": , "Gateway": , "AuxAddress": } + ``` type: "array" items: type: "object" @@ -1590,12 +1874,24 @@ definitions: Shared: type: "boolean" Size: + description: | + Amount of disk space used by the build cache (in bytes). type: "integer" CreatedAt: - type: "integer" + description: | + Date and time at which the build cache was created in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" LastUsedAt: - type: "integer" + description: | + Date and time at which the build cache was last used in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" x-nullable: true + example: "2017-08-09T07:09:37.632105588Z" UsageCount: type: "integer" @@ -1874,7 +2170,9 @@ definitions: x-nullable: false example: "tiborvass/sample-volume-plugin" Enabled: - description: "True if the plugin is running. False if the plugin is not running, only installed." + description: + True if the plugin is running. False if the plugin is not running, + only installed. type: "boolean" x-nullable: false example: true @@ -2076,13 +2374,16 @@ definitions: ObjectVersion: description: | - The version number of the object such as node, service, etc. 
This is needed to avoid conflicting writes. - The client must send the version number along with the modified specification when updating these objects. - This approach ensures safe concurrency and determinism in that the change on the object - may not be applied if the version number has changed from the last read. In other words, - if two update requests specify the same base version, only one of the requests can succeed. - As a result, two separate update requests that happen at the same time will not - unintentionally overwrite each other. + The version number of the object such as node, service, etc. This is needed + to avoid conflicting writes. The client must send the version number along + with the modified specification when updating these objects. + + This approach ensures safe concurrency and determinism in that the change + on the object may not be applied if the version number has changed from the + last read. In other words, if two update requests specify the same base + version, only one of the requests can succeed. As a result, two separate + update requests that happen at the same time will not unintentionally + overwrite each other. type: "object" properties: Index: @@ -2251,17 +2552,23 @@ definitions: Name: "vieux/sshfs:latest" TLSInfo: - description: "Information about the issuer of leaf TLS certificates and the trusted root CA certificate" + description: | + Information about the issuer of leaf TLS certificates and the trusted root + CA certificate. type: "object" properties: TrustRoot: - description: "The root CA certificate(s) that are used to validate leaf TLS certificates" + description: | + The root CA certificate(s) that are used to validate leaf TLS + certificates. type: "string" CertIssuerSubject: - description: "The base64-url-safe-encoded raw subject bytes of the issuer" + description: + The base64-url-safe-encoded raw subject bytes of the issuer. type: "string" CertIssuerPublicKey: - description: "The base64-url-safe-encoded raw public key bytes of the issuer" + description: | + The base64-url-safe-encoded raw public key bytes of the issuer. type: "string" example: TrustRoot: | @@ -2357,7 +2664,9 @@ definitions: x-nullable: true properties: TaskHistoryRetentionLimit: - description: "The number of historic tasks to keep per instance or node. If negative, never remove completed or failed tasks." + description: | + The number of historic tasks to keep per instance or node. If + negative, never remove completed or failed tasks. type: "integer" format: "int64" example: 10 @@ -2371,26 +2680,34 @@ definitions: format: "uint64" example: 10000 KeepOldSnapshots: - description: "The number of snapshots to keep beyond the current snapshot." + description: | + The number of snapshots to keep beyond the current snapshot. type: "integer" format: "uint64" LogEntriesForSlowFollowers: - description: "The number of log entries to keep around to sync up slow followers after a snapshot is created." + description: | + The number of log entries to keep around to sync up slow followers + after a snapshot is created. type: "integer" format: "uint64" example: 500 ElectionTick: description: | - The number of ticks that a follower will wait for a message from the leader before becoming a candidate and starting an election. `ElectionTick` must be greater than `HeartbeatTick`. + The number of ticks that a follower will wait for a message from + the leader before becoming a candidate and starting an election. + `ElectionTick` must be greater than `HeartbeatTick`. 
- A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed. + A tick currently defaults to one second, so these translate + directly to seconds currently, but this is NOT guaranteed. type: "integer" example: 3 HeartbeatTick: description: | - The number of ticks between heartbeats. Every HeartbeatTick ticks, the leader will send a heartbeat to the followers. + The number of ticks between heartbeats. Every HeartbeatTick ticks, + the leader will send a heartbeat to the followers. - A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed. + A tick currently defaults to one second, so these translate + directly to seconds currently, but this is NOT guaranteed. type: "integer" example: 1 Dispatcher: @@ -2399,7 +2716,8 @@ definitions: x-nullable: true properties: HeartbeatPeriod: - description: "The delay for an agent to send a heartbeat to the dispatcher." + description: | + The delay for an agent to send a heartbeat to the dispatcher. type: "integer" format: "int64" example: 5000000000 @@ -2414,36 +2732,53 @@ definitions: format: "int64" example: 7776000000000000 ExternalCAs: - description: "Configuration for forwarding signing requests to an external certificate authority." + description: | + Configuration for forwarding signing requests to an external + certificate authority. type: "array" items: type: "object" properties: Protocol: - description: "Protocol for communication with the external CA (currently only `cfssl` is supported)." + description: | + Protocol for communication with the external CA (currently + only `cfssl` is supported). type: "string" enum: - "cfssl" default: "cfssl" URL: - description: "URL where certificate signing requests should be sent." + description: | + URL where certificate signing requests should be sent. type: "string" Options: - description: "An object with key/value pairs that are interpreted as protocol-specific options for the external CA driver." + description: | + An object with key/value pairs that are interpreted as + protocol-specific options for the external CA driver. type: "object" additionalProperties: type: "string" CACert: - description: "The root CA certificate (in PEM format) this external CA uses to issue TLS certificates (assumed to be to the current swarm root CA certificate if not provided)." + description: | + The root CA certificate (in PEM format) this external CA uses + to issue TLS certificates (assumed to be to the current swarm + root CA certificate if not provided). type: "string" SigningCACert: - description: "The desired signing CA certificate for all swarm node TLS leaf certificates, in PEM format." + description: | + The desired signing CA certificate for all swarm node TLS leaf + certificates, in PEM format. type: "string" SigningCAKey: - description: "The desired signing CA key for all swarm node TLS leaf certificates, in PEM format." + description: | + The desired signing CA key for all swarm node TLS leaf certificates, + in PEM format. 
type: "string" ForceRotate: - description: "An integer whose purpose is to force swarm to generate a new signing CA certificate and key, if none have been specified in `SigningCACert` and `SigningCAKey`" + description: | + An integer whose purpose is to force swarm to generate a new + signing CA certificate and key, if none have been specified in + `SigningCACert` and `SigningCAKey` format: "uint64" type: "integer" EncryptionConfig: @@ -2451,7 +2786,9 @@ definitions: type: "object" properties: AutoLockManagers: - description: "If set, generate a key and use it to lock data stored on the managers." + description: | + If set, generate a key and use it to lock data stored on the + managers. type: "boolean" example: false TaskDefaults: @@ -2517,7 +2854,8 @@ definitions: TLSInfo: $ref: "#/definitions/TLSInfo" RootRotationInProgress: - description: "Whether there is currently a root CA rotation in progress for the swarm" + description: | + Whether there is currently a root CA rotation in progress for the swarm type: "boolean" example: false DataPathPort: @@ -2531,7 +2869,8 @@ definitions: example: 4789 DefaultAddrPool: description: | - Default Address Pool specifies default subnet pools for global scope networks. + Default Address Pool specifies default subnet pools for global scope + networks. type: "array" items: type: "string" @@ -2539,7 +2878,8 @@ definitions: example: ["10.10.0.0/16", "20.20.0.0/16"] SubnetSize: description: | - SubnetSize specifies the subnet size of the networks created from the default subnet pool + SubnetSize specifies the subnet size of the networks created from the + default subnet pool. type: "integer" format: "uint32" maximum: 29 @@ -2599,7 +2939,9 @@ definitions: PluginPrivilege: type: "array" items: - description: "Describes a permission accepted by the user upon installing the plugin." + description: | + Describes a permission accepted by the user upon installing the + plugin. type: "object" properties: Name: @@ -2641,10 +2983,13 @@ definitions: items: type: "string" Hostname: - description: "The hostname to use for the container, as a valid RFC 1123 hostname." + description: | + The hostname to use for the container, as a valid + [RFC 1123](https://tools.ietf.org/html/rfc1123) hostname. type: "string" Env: - description: "A list of environment variables in the form `VAR=value`." + description: | + A list of environment variables in the form `VAR=value`. type: "array" items: type: "string" @@ -2656,7 +3001,8 @@ definitions: type: "string" Groups: type: "array" - description: "A list of additional groups that the container process will run as." + description: | + A list of additional groups that the container process will run as. items: type: "string" Privileges: @@ -2672,37 +3018,43 @@ definitions: example: "0bt9dmxjvjiqermk6xrop3ekq" description: | Load credential spec from a Swarm Config with the given ID. - The specified config must also be present in the Configs field with the Runtime property set. + The specified config must also be present in the Configs + field with the Runtime property set.


- > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, and `CredentialSpec.Config` are mutually exclusive. + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. File: type: "string" example: "spec.json" description: | - Load credential spec from this file. The file is read by the daemon, and must be present in the - `CredentialSpecs` subdirectory in the docker data directory, which defaults to - `C:\ProgramData\Docker\` on Windows. + Load credential spec from this file. The file is read by + the daemon, and must be present in the `CredentialSpecs` + subdirectory in the docker data directory, which defaults + to `C:\ProgramData\Docker\` on Windows. - For example, specifying `spec.json` loads `C:\ProgramData\Docker\CredentialSpecs\spec.json`. + For example, specifying `spec.json` loads + `C:\ProgramData\Docker\CredentialSpecs\spec.json`.


- > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, and `CredentialSpec.Config` are mutually exclusive. + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. Registry: type: "string" description: | - Load credential spec from this value in the Windows registry. The specified registry value must be - located in: + Load credential spec from this value in the Windows + registry. The specified registry value must be located in: `HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs`


- > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, and `CredentialSpec.Config` are mutually exclusive. + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. SELinuxContext: type: "object" description: "SELinux labels of the container" @@ -2732,7 +3084,9 @@ definitions: description: "Mount the container's root filesystem as read only." type: "boolean" Mounts: - description: "Specification for mounts to be added to containers created as part of the service." + description: | + Specification for mounts to be added to containers created as part + of the service. type: "array" items: $ref: "#/definitions/Mount" @@ -2740,7 +3094,9 @@ definitions: description: "Signal to stop the container." type: "string" StopGracePeriod: - description: "Amount of time to wait for the container to terminate before forcefully killing it." + description: | + Amount of time to wait for the container to terminate before + forcefully killing it. type: "integer" format: "int64" HealthCheck: @@ -2757,7 +3113,9 @@ definitions: items: type: "string" DNSConfig: - description: "Specification for DNS related configurations in resolver configuration file (`resolv.conf`)." + description: | + Specification for DNS related configurations in resolver configuration + file (`resolv.conf`). type: "object" properties: Nameservers: @@ -2771,22 +3129,28 @@ definitions: items: type: "string" Options: - description: "A list of internal resolver variables to be modified (e.g., `debug`, `ndots:3`, etc.)." + description: | + A list of internal resolver variables to be modified (e.g., + `debug`, `ndots:3`, etc.). type: "array" items: type: "string" Secrets: - description: "Secrets contains references to zero or more secrets that will be exposed to the service." + description: | + Secrets contains references to zero or more secrets that will be + exposed to the service. type: "array" items: type: "object" properties: File: - description: "File represents a specific target that is backed by a file." + description: | + File represents a specific target that is backed by a file. type: "object" properties: Name: - description: "Name represents the final filename in the filesystem." + description: | + Name represents the final filename in the filesystem. type: "string" UID: description: "UID represents the file UID." @@ -2799,15 +3163,20 @@ definitions: type: "integer" format: "uint32" SecretID: - description: "SecretID represents the ID of the specific secret that we're referencing." + description: | + SecretID represents the ID of the specific secret that we're + referencing. type: "string" SecretName: description: | - SecretName is the name of the secret that this references, but this is just provided for - lookup/display purposes. The secret in the reference will be identified by its ID. + SecretName is the name of the secret that this references, + but this is just provided for lookup/display purposes. The + secret in the reference will be identified by its ID. type: "string" Configs: - description: "Configs contains references to zero or more configs that will be exposed to the service." + description: | + Configs contains references to zero or more configs that will be + exposed to the service. type: "array" items: type: "object" @@ -2822,7 +3191,8 @@ definitions: type: "object" properties: Name: - description: "Name represents the final filename in the filesystem." + description: | + Name represents the final filename in the filesystem. 
type: "string" UID: description: "UID represents the file UID." @@ -2836,29 +3206,39 @@ definitions: format: "uint32" Runtime: description: | - Runtime represents a target that is not mounted into the container but is used by the task + Runtime represents a target that is not mounted into the + container but is used by the task


- > **Note**: `Configs.File` and `Configs.Runtime` are mutually exclusive + > **Note**: `Configs.File` and `Configs.Runtime` are mutually + > exclusive type: "object" ConfigID: - description: "ConfigID represents the ID of the specific config that we're referencing." + description: | + ConfigID represents the ID of the specific config that we're + referencing. type: "string" ConfigName: description: | - ConfigName is the name of the config that this references, but this is just provided for - lookup/display purposes. The config in the reference will be identified by its ID. + ConfigName is the name of the config that this references, + but this is just provided for lookup/display purposes. The + config in the reference will be identified by its ID. type: "string" Isolation: type: "string" - description: "Isolation technology of the containers running the service. (Windows only)" + description: | + Isolation technology of the containers running the service. + (Windows only) enum: - "default" - "process" - "hyperv" Init: - description: "Run an init inside the container that forwards signals and reaps processes. This field is omitted if empty, and the default (as configured on the daemon) is used." + description: | + Run an init inside the container that forwards signals and reaps + processes. This field is omitted if empty, and the default (as + configured on the daemon) is used. type: "boolean" x-nullable: true Sysctls: @@ -2890,7 +3270,9 @@ definitions: description: "ID of the container represented by this task" type: "string" Resources: - description: "Resource requirements which apply to each individual container created as part of the service." + description: | + Resource requirements which apply to each individual container created + as part of the service. type: "object" properties: Limits: @@ -2900,7 +3282,9 @@ definitions: description: "Define resources reservation." $ref: "#/definitions/ResourceObject" RestartPolicy: - description: "Specification for the restart policy which applies to containers created as part of this service." + description: | + Specification for the restart policy which applies to containers + created as part of this service. type: "object" properties: Condition: @@ -2915,12 +3299,16 @@ definitions: type: "integer" format: "int64" MaxAttempts: - description: "Maximum attempts to restart a given container before giving up (default value is 0, which is ignored)." + description: | + Maximum attempts to restart a given container before giving up + (default value is 0, which is ignored). type: "integer" format: "int64" default: 0 Window: - description: "Windows is the time window used to evaluate the restart policy (default value is 0, which is unbounded)." + description: | + Windows is the time window used to evaluate the restart policy + (default value is 0, which is unbounded). type: "integer" format: "int64" default: 0 @@ -2928,7 +3316,27 @@ definitions: type: "object" properties: Constraints: - description: "An array of constraints." + description: | + An array of constraint expressions to limit the set of nodes where + a task can be scheduled. Constraint expressions can either use a + _match_ (`==`) or _exclude_ (`!=`) rule. Multiple constraints find + nodes that satisfy every expression (AND match). 
Constraints can
+            match node or Docker Engine labels as follows:
+
+            node attribute       | matches                        | example
+            ---------------------|--------------------------------|-----------------------------------------------
+            `node.id`            | Node ID                        | `node.id==2ivku8v2gvtg4`
+            `node.hostname`      | Node hostname                  | `node.hostname!=node-2`
+            `node.role`          | Node role (`manager`/`worker`) | `node.role==manager`
+            `node.platform.os`   | Node operating system          | `node.platform.os==windows`
+            `node.platform.arch` | Node architecture              | `node.platform.arch==x86_64`
+            `node.labels`        | User-defined node labels       | `node.labels.security==high`
+            `engine.labels`      | Docker Engine's labels         | `engine.labels.operatingsystem==ubuntu-14.04`
+
+            `engine.labels` apply to Docker Engine labels like operating system,
+            drivers, etc. Swarm administrators add `node.labels` for operational
+            purposes by using the [`node update endpoint`](#operation/NodeUpdate).
+
          type: "array"
          items:
            type: "string"
@@ -2936,8 +3344,13 @@
            - "node.hostname!=node3.corp.example.com"
            - "node.role!=manager"
            - "node.labels.type==production"
+            - "node.platform.os==linux"
+            - "node.platform.arch==x86_64"
        Preferences:
-          description: "Preferences provide a way to make the scheduler aware of factors such as topology. They are provided in order from highest to lowest precedence."
+          description: |
+            Preferences provide a way to make the scheduler aware of factors
+            such as topology. They are provided in order from highest to
+            lowest precedence.
          type: "array"
          items:
            type: "object"
@@ -2946,7 +3359,8 @@
              type: "object"
              properties:
                SpreadDescriptor:
-                  description: "label descriptor, such as engine.labels.az"
+                  description: |
+                    label descriptor, such as `engine.labels.az`.
                  type: "string"
          example:
            - Spread:
                SpreadDescriptor: "node.labels.datacenter"
            - Spread:
                SpreadDescriptor: "node.labels.rack"
        MaxReplicas:
-          description: "Maximum number of replicas for per node (default value is 0, which is unlimited)"
+          description: |
+            Maximum number of replicas per node (default value is 0, which
+            is unlimited).
          type: "integer"
          format: "int64"
          default: 0
@@ -2968,24 +3384,24 @@
          items:
            $ref: "#/definitions/Platform"
        ForceUpdate:
-          description: "A counter that triggers an update even if no relevant parameters have been changed."
+          description: |
+            A counter that triggers an update even if no relevant parameters have
+            been changed.
          type: "integer"
        Runtime:
-          description: "Runtime is the type of runtime specified for the task executor."
+          description: |
+            Runtime is the type of runtime specified for the task executor.
          type: "string"
        Networks:
+          description: "Specifies which networks the service should attach to."
          type: "array"
          items:
-            type: "object"
-            properties:
-              Target:
-                type: "string"
-              Aliases:
-                type: "array"
-                items:
-                  type: "string"
+            $ref: "#/definitions/NetworkAttachmentConfig"
        LogDriver:
-          description: "Specifies the log driver to use for tasks created from this spec. If not present, the default one for the swarm will be used, finally falling back to the engine default if not specified."
+          description: |
+            Specifies the log driver to use for tasks created from this spec. If
+            not present, the default one for the swarm will be used, finally
+            falling back to the engine default if not specified.
          type: "object"
          properties:
            Name:
@@ -3168,7 +3584,9 @@ definitions:
      type: "object"
      properties:
        Parallelism:
-          description: "Maximum number of tasks to be updated in one iteration (0 means unlimited parallelism)."
+          description: |
+            Maximum number of tasks to be updated in one iteration (0 means
+            unlimited parallelism).
          type: "integer"
          format: "int64"
        Delay:
          description: "Amount of time between updates, in nanoseconds."
          type: "integer"
          format: "int64"
        FailureAction:
-          description: "Action to take if an updated task fails to run, or stops running during the update."
+          description: |
+            Action to take if an updated task fails to run, or stops running
+            during the update.
          type: "string"
          enum:
            - "continue"
            - "pause"
            - "rollback"
        Monitor:
-          description: "Amount of time to monitor each updated task for failures, in nanoseconds."
+          description: |
+            Amount of time to monitor each updated task for failures, in
+            nanoseconds.
          type: "integer"
          format: "int64"
        MaxFailureRatio:
-          description: "The fraction of tasks that may fail during an update before the failure action is invoked, specified as a floating point number between 0 and 1."
+          description: |
+            The fraction of tasks that may fail during an update before the
+            failure action is invoked, specified as a floating point number
+            between 0 and 1.
          type: "number"
          default: 0
        Order:
-          description: "The order of operations when rolling out an updated task. Either the old task is shut down before the new task is started, or the new task is started before the old task is shut down."
+          description: |
+            The order of operations when rolling out an updated task. Either
+            the old task is shut down before the new task is started, or the
+            new task is started before the old task is shut down.
          type: "string"
          enum:
            - "stop-first"
@@ -3201,45 +3629,52 @@
      type: "object"
      properties:
        Parallelism:
-          description: "Maximum number of tasks to be rolled back in one iteration (0 means unlimited parallelism)."
+          description: |
+            Maximum number of tasks to be rolled back in one iteration (0 means
+            unlimited parallelism).
          type: "integer"
          format: "int64"
        Delay:
-          description: "Amount of time between rollback iterations, in nanoseconds."
+          description: |
+            Amount of time between rollback iterations, in nanoseconds.
          type: "integer"
          format: "int64"
        FailureAction:
-          description: "Action to take if an rolled back task fails to run, or stops running during the rollback."
+          description: |
+            Action to take if a rolled back task fails to run, or stops
+            running during the rollback.
          type: "string"
          enum:
            - "continue"
            - "pause"
        Monitor:
-          description: "Amount of time to monitor each rolled back task for failures, in nanoseconds."
+          description: |
+            Amount of time to monitor each rolled back task for failures, in
+            nanoseconds.
          type: "integer"
          format: "int64"
        MaxFailureRatio:
-          description: "The fraction of tasks that may fail during a rollback before the failure action is invoked, specified as a floating point number between 0 and 1."
+          description: |
+            The fraction of tasks that may fail during a rollback before the
+            failure action is invoked, specified as a floating point number
+            between 0 and 1.
          type: "number"
          default: 0
        Order:
-          description: "The order of operations when rolling back a task. Either the old task is shut down before the new task is started, or the new task is started before the old task is shut down."
+          description: |
+            The order of operations when rolling back a task. Either the old
+            task is shut down before the new task is started, or the new task
+            is started before the old task is shut down.
          type: "string"
          enum:
            - "stop-first"
            - "start-first"
    Networks:
-      description: "Array of network names or IDs to attach the service to."
+ description: "Specifies which networks the service should attach to." type: "array" items: - type: "object" - properties: - Target: - type: "string" - Aliases: - type: "array" - items: - type: "string" + $ref: "#/definitions/NetworkAttachmentConfig" + EndpointSpec: $ref: "#/definitions/EndpointSpec" @@ -3266,7 +3701,7 @@ definitions:


- - "ingress" makes the target port accessible on on every node, + - "ingress" makes the target port accessible on every node, regardless of whether there is a task for the service running on that node or not. - "host" bypasses the routing mesh and publish the port directly on @@ -3284,15 +3719,17 @@ definitions: type: "object" properties: Mode: - description: "The mode of resolution to use for internal load balancing - between tasks." + description: | + The mode of resolution to use for internal load balancing between tasks. type: "string" enum: - "vip" - "dnsrr" default: "vip" Ports: - description: "List of exposed ports that this service is accessible on from the outside. Ports can only be provided if `vip` resolution mode is used." + description: | + List of exposed ports that this service is accessible on from the + outside. Ports can only be provided if `vip` resolution mode is used. type: "array" items: $ref: "#/definitions/EndpointPortConfig" @@ -3536,7 +3973,7 @@ definitions: com.example.some-other-label: "some-other-value" Data: description: | - Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-3.2)) + Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) data to store as secret. This field is only used to _create_ a secret, and is not returned by @@ -3544,7 +3981,9 @@ definitions: type: "string" example: "" Driver: - description: "Name of the secrets driver used to fetch the secret's value from an external secret store" + description: | + Name of the secrets driver used to fetch the secret's value from an + external secret store. $ref: "#/definitions/Driver" Templating: description: | @@ -3586,7 +4025,7 @@ definitions: type: "string" Data: description: | - Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-3.2)) + Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) config data. type: "string" Templating: @@ -3613,6 +4052,168 @@ definitions: Spec: $ref: "#/definitions/ConfigSpec" + ContainerState: + description: | + ContainerState stores container's running state. It's part of ContainerJSONBase + and will be returned by the "inspect" command. + type: "object" + properties: + Status: + description: | + String representation of the container state. Can be one of "created", + "running", "paused", "restarting", "removing", "exited", or "dead". + type: "string" + enum: ["created", "running", "paused", "restarting", "removing", "exited", "dead"] + example: "running" + Running: + description: | + Whether this container is running. + + Note that a running container can be _paused_. The `Running` and `Paused` + booleans are not mutually exclusive: + + When pausing a container (on Linux), the freezer cgroup is used to suspend + all processes in the container. Freezing the process requires the process to + be running. As a result, paused containers are both `Running` _and_ `Paused`. + + Use the `Status` field instead to determine if a container's state is "running". + type: "boolean" + example: true + Paused: + description: "Whether this container is paused." + type: "boolean" + example: false + Restarting: + description: "Whether this container is restarting." + type: "boolean" + example: false + OOMKilled: + description: | + Whether this container has been killed because it ran out of memory. 
+ type: "boolean" + example: false + Dead: + type: "boolean" + example: false + Pid: + description: "The process ID of this container" + type: "integer" + example: 1234 + ExitCode: + description: "The last exit code of this container" + type: "integer" + example: 0 + Error: + type: "string" + StartedAt: + description: "The time when this container was last started." + type: "string" + example: "2020-01-06T09:06:59.461876391Z" + FinishedAt: + description: "The time when this container last exited." + type: "string" + example: "2020-01-06T09:07:59.461876391Z" + Health: + x-nullable: true + $ref: "#/definitions/Health" + + SystemVersion: + type: "object" + description: | + Response of Engine API: GET "/version" + properties: + Platform: + type: "object" + required: [Name] + properties: + Name: + type: "string" + Components: + type: "array" + description: | + Information about system components + items: + type: "object" + x-go-name: ComponentVersion + required: [Name, Version] + properties: + Name: + description: | + Name of the component + type: "string" + example: "Engine" + Version: + description: | + Version of the component + type: "string" + x-nullable: false + example: "19.03.12" + Details: + description: | + Key/value pairs of strings with additional information about the + component. These values are intended for informational purposes + only, and their content is not defined, and not part of the API + specification. + + These messages can be printed by the client as information to the user. + type: "object" + x-nullable: true + Version: + description: "The version of the daemon" + type: "string" + example: "19.03.12" + ApiVersion: + description: | + The default (and highest) API version that is supported by the daemon + type: "string" + example: "1.40" + MinAPIVersion: + description: | + The minimum API version that is supported by the daemon + type: "string" + example: "1.12" + GitCommit: + description: | + The Git commit of the source code that was used to build the daemon + type: "string" + example: "48a66213fe" + GoVersion: + description: | + The version Go used to compile the daemon, and the version of the Go + runtime in use. + type: "string" + example: "go1.13.14" + Os: + description: | + The operating system that the daemon is running on ("linux" or "windows") + type: "string" + example: "linux" + Arch: + description: | + The architecture that the daemon is running on + type: "string" + example: "amd64" + KernelVersion: + description: | + The kernel version (`uname -r`) that the daemon is running on. + + This field is omitted when empty. + type: "string" + example: "4.19.76-linuxkit" + Experimental: + description: | + Indicates if the daemon is started with experimental features enabled. + + This field is omitted when empty / false. + type: "boolean" + example: true + BuildTime: + description: | + The date and time that the daemon was compiled. + type: "string" + example: "2020-06-22T15:49:27.000000000+00:00" + + SystemInfo: type: "object" properties: @@ -3740,15 +4341,20 @@ definitions: type: "boolean" example: true CpuCfsPeriod: - description: "Indicates if CPU CFS(Completely Fair Scheduler) period is supported by the host." + description: | + Indicates if CPU CFS(Completely Fair Scheduler) period is supported by + the host. type: "boolean" example: true CpuCfsQuota: - description: "Indicates if CPU CFS(Completely Fair Scheduler) quota is supported by the host." + description: | + Indicates if CPU CFS(Completely Fair Scheduler) quota is supported by + the host. 
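The `SystemVersion` shape above can be checked against a live daemon with nothing but the standard library. A sketch, assuming the default Unix socket at `/var/run/docker.sock` and decoding only a subset of the fields:

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"log"
	"net"
	"net/http"
)

func main() {
	httpc := http.Client{
		Transport: &http.Transport{
			// Route every request through the daemon's Unix socket.
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
			},
		},
	}

	resp, err := httpc.Get("http://docker/version") // host part is ignored by the dialer
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	var v struct {
		Version       string
		ApiVersion    string
		MinAPIVersion string
		Os, Arch      string
	}
	if err := json.NewDecoder(resp.Body).Decode(&v); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("daemon %s (API %s, min %s) on %s/%s\n",
		v.Version, v.ApiVersion, v.MinAPIVersion, v.Os, v.Arch)
}
```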
type: "boolean" example: true CPUShares: - description: "Indicates if CPU Shares limiting is supported by the host." + description: | + Indicates if CPU Shares limiting is supported by the host. type: "boolean" example: true CPUSet: @@ -3778,7 +4384,9 @@ definitions: type: "boolean" example: true Debug: - description: "Indicates if the daemon is running in debug-mode / with debug-level logging enabled." + description: | + Indicates if the daemon is running in debug-mode / with debug-level + logging enabled. type: "boolean" example: true NFd: @@ -3809,7 +4417,7 @@ definitions: description: | The driver to use for managing cgroups. type: "string" - enum: ["cgroupfs", "systemd"] + enum: ["cgroupfs", "systemd", "none"] default: "cgroupfs" example: "cgroupfs" NEventsListener: @@ -3859,7 +4467,7 @@ definitions: example: 4 MemTotal: description: | - Total amount of physical memory available on the host, in kilobytes (kB). + Total amount of physical memory available on the host, in bytes. type: "integer" format: "int64" example: 2095882240 @@ -4044,7 +4652,7 @@ definitions: SecurityOptions: description: | List of security features that are enabled on the daemon, such as - apparmor, seccomp, SELinux, and user-namespaces (userns). + apparmor, seccomp, SELinux, user-namespaces (userns), and rootless. Additional configuration options for each security feature may be present, and are included as a comma-separated list of key/value @@ -4057,6 +4665,7 @@ definitions: - "name=seccomp,profile=default" - "name=selinux" - "name=userns" + - "name=rootless" ProductLicense: description: | Reports a summary of the product license on the daemon. @@ -4410,37 +5019,69 @@ definitions: IP address and ports at which this node can be reached. type: "string" + NetworkAttachmentConfig: + description: | + Specifies how a service should be attached to a particular network. + type: "object" + properties: + Target: + description: | + The target network for attachment. Must be a network name or ID. + type: "string" + Aliases: + description: | + Discoverable alternate names for the service on this network. + type: "array" + items: + type: "string" + DriverOpts: + description: | + Driver attachment options for the network target. + type: "object" + additionalProperties: + type: "string" + paths: /containers/json: get: summary: "List containers" description: | - Returns a list of containers. For details on the format, see [the inspect endpoint](#operation/ContainerInspect). + Returns a list of containers. For details on the format, see the + [inspect endpoint](#operation/ContainerInspect). - Note that it uses a different, smaller representation of a container than inspecting a single container. For example, - the list of linked containers is not propagated . + Note that it uses a different, smaller representation of a container + than inspecting a single container. For example, the list of linked + containers is not propagated . operationId: "ContainerList" produces: - "application/json" parameters: - name: "all" in: "query" - description: "Return all containers. By default, only running containers are shown" + description: | + Return all containers. By default, only running containers are shown. type: "boolean" default: false - name: "limit" in: "query" - description: "Return this number of most recently created containers, including non-running ones." + description: | + Return this number of most recently created containers, including + non-running ones. 
type: "integer" - name: "size" in: "query" - description: "Return the size of container as fields `SizeRw` and `SizeRootFs`." + description: | + Return the size of container as fields `SizeRw` and `SizeRootFs`. type: "boolean" default: false - name: "filters" in: "query" description: | - Filters to process on the container list, encoded as JSON (a `map[string][]string`). For example, `{"status": ["paused"]}` will only return paused containers. Available filters: + Filters to process on the container list, encoded as JSON (a + `map[string][]string`). For example, `{"status": ["paused"]}` will + only return paused containers. + + Available filters: - `ancestor`=(`[:]`, ``, or ``) - `before`=(`` or ``) @@ -4611,9 +5252,11 @@ paths: parameters: - name: "name" in: "query" - description: "Assign the specified name to the container. Must match `/?[a-zA-Z0-9_-]+`." + description: | + Assign the specified name to the container. Must match + `/?[a-zA-Z0-9][a-zA-Z0-9_.-]+`. type: "string" - pattern: "/?[a-zA-Z0-9_-]+" + pattern: "^/?[a-zA-Z0-9][a-zA-Z0-9_.-]+$" - name: "body" in: "body" description: "Container to create" @@ -4625,14 +5268,7 @@ paths: HostConfig: $ref: "#/definitions/HostConfig" NetworkingConfig: - description: "This container's networking configuration." - type: "object" - properties: - EndpointsConfig: - description: "A mapping of network name to endpoint configuration for that network." - type: "object" - additionalProperties: - $ref: "#/definitions/EndpointSettings" + $ref: "#/definitions/NetworkingConfig" example: Hostname: "" Domainname: "" @@ -4694,6 +5330,14 @@ paths: - {} BlkioDeviceWriteIOps: - {} + DeviceRequests: + - Driver: "nvidia" + Count: -1 + DeviceIDs": ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"] + Capabilities: [["gpu", "nvidia", "compute"]] + Options: + property1: "string" + property2: "string" MemorySwappiness: 60 OomKillDisable: false OomScoreAdj: 500 @@ -4825,54 +5469,10 @@ paths: items: type: "string" State: - description: "The state of the container." - type: "object" - properties: - Status: - description: | - The status of the container. For example, `"running"` or `"exited"`. - type: "string" - enum: ["created", "running", "paused", "restarting", "removing", "exited", "dead"] - Running: - description: | - Whether this container is running. - - Note that a running container can be _paused_. The `Running` and `Paused` - booleans are not mutually exclusive: - - When pausing a container (on Linux), the cgroups freezer is used to suspend - all processes in the container. Freezing the process requires the process to - be running. As a result, paused containers are both `Running` _and_ `Paused`. - - Use the `Status` field instead to determine if a container's state is "running". - type: "boolean" - Paused: - description: "Whether this container is paused." - type: "boolean" - Restarting: - description: "Whether this container is restarting." - type: "boolean" - OOMKilled: - description: "Whether this container has been killed because it ran out of memory." - type: "boolean" - Dead: - type: "boolean" - Pid: - description: "The process ID of this container" - type: "integer" - ExitCode: - description: "The last exit code of this container" - type: "integer" - Error: - type: "string" - StartedAt: - description: "The time when this container was last started." - type: "string" - FinishedAt: - description: "The time when this container last exited." 
- type: "string" + x-nullable: true + $ref: "#/definitions/ContainerState" Image: - description: "The container's image" + description: "The container's image ID" type: "string" ResolvConfPath: type: "string" @@ -4891,6 +5491,8 @@ paths: type: "integer" Driver: type: "string" + Platform: + type: "string" MountLabel: type: "string" ProcessLabel: @@ -4908,7 +5510,9 @@ paths: GraphDriver: $ref: "#/definitions/GraphDriverData" SizeRw: - description: "The size of files that have been created or changed by this container." + description: | + The size of files that have been created or changed by this + container. type: "integer" format: "int64" SizeRootFs: @@ -4940,6 +5544,8 @@ paths: Domainname: "" Env: - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Healthcheck: + Test: ["CMD-SHELL", "exit 0"] Hostname: "ba033ac44011" Image: "ubuntu" Labels: @@ -4985,6 +5591,14 @@ paths: CpuRealtimePeriod: 1000000 CpuRealtimeRuntime: 10000 Devices: [] + DeviceRequests: + - Driver: "nvidia" + Count: -1 + DeviceIDs": ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"] + Capabilities: [["gpu", "nvidia", "compute"]] + Options: + property1: "string" + property2: "string" IpcMode: "" LxcConf: [] Memory: 0 @@ -5051,6 +5665,14 @@ paths: Error: "" ExitCode: 9 FinishedAt: "2015-01-06T15:47:32.080254511Z" + Health: + Status: "healthy" + FailingStreak: 0 + Log: + - Start: "2019-12-22T10:59:05.6385933Z" + End: "2019-12-22T10:59:05.8078452Z" + ExitCode: 0 + Output: "" OOMKilled: false Dead: false Paused: false @@ -5093,7 +5715,9 @@ paths: /containers/{id}/top: get: summary: "List processes running inside a container" - description: "On Unix systems, this is done by running the `ps` command. This endpoint is not supported on Windows." + description: | + On Unix systems, this is done by running the `ps` command. This endpoint + is not supported on Windows. operationId: "ContainerTop" responses: 200: @@ -5109,7 +5733,9 @@ paths: items: type: "string" Processes: - description: "Each process running in the container, where each is process is an array of values corresponding to the titles" + description: | + Each process running in the container, where each is process + is an array of values corresponding to the titles. type: "array" items: type: "array" @@ -5174,18 +5800,19 @@ paths: description: | Get `stdout` and `stderr` logs from a container. - Note: This endpoint works only for containers with the `json-file` or `journald` logging driver. + Note: This endpoint works only for containers with the `json-file` or + `journald` logging driver. operationId: "ContainerLogs" responses: - 101: - description: "logs returned as a stream" - schema: - type: "string" - format: "binary" 200: - description: "logs returned as a string in response body" + description: | + logs returned as a stream in response body. + For the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). + Note that unlike the attach endpoint, the logs endpoint does not + upgrade the connection and does not set Content-Type. schema: type: "string" + format: "binary" 404: description: "no such container" schema: @@ -5205,10 +5832,7 @@ paths: type: "string" - name: "follow" in: "query" - description: | - Return the logs as a stream. - - This will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). 
+ description: "Keep connection after returning logs." type: "boolean" default: false - name: "stdout" @@ -5238,7 +5862,9 @@ paths: default: false - name: "tail" in: "query" - description: "Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines." + description: | + Only return this number of log lines from the end of the logs. + Specify as an integer or `all` to output all log lines. type: "string" default: "all" tags: ["Container"] @@ -5344,6 +5970,16 @@ paths: If either `precpu_stats.online_cpus` or `cpu_stats.online_cpus` is nil then for compatibility with older daemons the length of the corresponding `cpu_usage.percpu_usage` array should be used. + + To calculate the values shown by the `stats` command of the docker cli tool + the following formulas can be used: + * used_memory = `memory_stats.usage - memory_stats.stats.cache` + * available_memory = `memory_stats.limit` + * Memory usage % = `(used_memory / available_memory) * 100.0` + * cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage` + * system_cpu_delta = `cpu_stats.system_cpu_usage - precpu_stats.system_cpu_usage` + * number_cpus = `lenght(cpu_stats.cpu_usage.percpu_usage)` or `cpu_stats.online_cpus` + * CPU usage % = `(cpu_delta / system_cpu_delta) * number_cpus * 100.0` operationId: "ContainerStats" produces: ["application/json"] responses: @@ -5462,14 +6098,16 @@ paths: type: "string" - name: "stream" in: "query" - description: "Stream the output. If false, the stats will be output once and then it will disconnect." + description: | + Stream the output. If false, the stats will be output once and then + it will disconnect. type: "boolean" default: true tags: ["Container"] /containers/{id}/resize: post: summary: "Resize a container TTY" - description: "Resize the TTY for a container. You must restart the container for the resize to take effect." + description: "Resize the TTY for a container." operationId: "ContainerResize" consumes: - "application/octet-stream" @@ -5497,11 +6135,11 @@ paths: type: "string" - name: "h" in: "query" - description: "Height of the tty session in characters" + description: "Height of the TTY session in characters" type: "integer" - name: "w" in: "query" - description: "Width of the tty session in characters" + description: "Width of the TTY session in characters" type: "integer" tags: ["Container"] /containers/{id}/start: @@ -5513,8 +6151,6 @@ paths: description: "no error" 304: description: "container already started" - schema: - $ref: "#/definitions/ErrorResponse" 404: description: "no such container" schema: @@ -5534,7 +6170,10 @@ paths: type: "string" - name: "detachKeys" in: "query" - description: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`." + description: | + Override the key sequence for detaching a container. Format is a + single character `[a-Z]` or `ctrl-` where `` is one + of: `a-z`, `@`, `^`, `[`, `,` or `_`. type: "string" tags: ["Container"] /containers/{id}/stop: @@ -5546,8 +6185,6 @@ paths: description: "no error" 304: description: "container already stopped" - schema: - $ref: "#/definitions/ErrorResponse" 404: description: "no such container" schema: @@ -5602,7 +6239,9 @@ paths: /containers/{id}/kill: post: summary: "Kill a container" - description: "Send a POSIX signal to a container, defaulting to killing to the container." 
+      description: |
+        Send a POSIX signal to a container, defaulting to killing the
+        container.
      operationId: "ContainerKill"
      responses:
        204:
@@ -5640,7 +6279,9 @@
  /containers/{id}/update:
    post:
      summary: "Update a container"
-      description: "Change various configuration options of a container without having to recreate it."
+      description: |
+        Change various configuration options of a container without having to
+        recreate it.
      operationId: "ContainerUpdate"
      consumes: ["application/json"]
      produces: ["application/json"]
@@ -5738,9 +6379,12 @@
    post:
      summary: "Pause a container"
      description: |
-        Use the cgroups freezer to suspend all processes in a container.
+        Use the freezer cgroup to suspend all processes in a container.

-        Traditionally, when suspending a process the `SIGSTOP` signal is used, which is observable by the process being suspended. With the cgroups freezer the process is unaware, and unable to capture, that it is being suspended, and subsequently resumed.
+        Traditionally, when suspending a process the `SIGSTOP` signal is used,
+        which is observable by the process being suspended. With the freezer
+        cgroup the process is unaware, and unable to capture, that it is being
+        suspended, and subsequently resumed.
      operationId: "ContainerPause"
      responses:
        204:
@@ -5793,15 +6437,20 @@
    post:
      summary: "Attach to a container"
      description: |
-        Attach to a container to read its output or send it input. You can attach to the same container multiple times and you can reattach to containers that have been detached.
+        Attach to a container to read its output or send it input. You can attach
+        to the same container multiple times and you can reattach to containers
+        that have been detached.

-        Either the `stream` or `logs` parameter must be `true` for this endpoint to do anything.
+        Either the `stream` or `logs` parameter must be `true` for this endpoint
+        to do anything.

-        See [the documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/) for more details.
+        See the [documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/)
+        for more details.

        ### Hijacking

-        This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`, and `stderr` on the same socket.
+        This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`,
+        and `stderr` on the same socket.

        This is the response from the daemon for an attach request:

        ```
        HTTP/1.1 200 OK
        Content-Type: application/vnd.docker.raw-stream

        [STREAM]
        ```

-        After the headers and two new lines, the TCP connection can now be used for raw, bidirectional communication between the client and server.
+        After the headers and two new lines, the TCP connection can now be used
+        for raw, bidirectional communication between the client and server.

-        To hint potential proxies about connection hijacking, the Docker client can also optionally send connection upgrade headers.
+        To hint potential proxies about connection hijacking, the Docker client
+        can also optionally send connection upgrade headers.
        For example, the client sends this request to upgrade the connection:

        ```
        POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1
        Upgrade: tcp
        Connection: Upgrade
        ```

-        The Docker daemon will respond with a `101 UPGRADED` response, and will similarly follow with the raw stream:
+        The Docker daemon will respond with a `101 UPGRADED` response, and will
+        similarly follow with the raw stream:

        ```
        HTTP/1.1 101 UPGRADED
        Content-Type: application/vnd.docker.raw-stream
        Connection: Upgrade
        Upgrade: tcp

        [STREAM]
        ```

        ### Stream format

-        When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate), the stream over the hijacked connected is multiplexed to separate out `stdout` and `stderr`. The stream consists of a series of frames, each containing a header and a payload.
+        When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate),
+        the stream over the hijacked connection is multiplexed to separate out
+        `stdout` and `stderr`. The stream consists of a series of frames, each
+        containing a header and a payload.

-        The header contains the information which the stream writes (`stdout` or `stderr`). It also contains the size of the associated frame encoded in the last four bytes (`uint32`).
+        The header contains the information which the stream writes (`stdout` or
+        `stderr`). It also contains the size of the associated frame encoded in
+        the last four bytes (`uint32`).

        It is encoded on the first eight bytes like this:
@@ -5853,9 +6510,11 @@
        - 1: `stdout`
        - 2: `stderr`

-        `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size encoded as big endian.
+        `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size
+        encoded as big endian.

-        Following the header is the payload, which is the specified number of bytes of `STREAM_TYPE`.
+        Following the header is the payload, which is the specified number of
+        bytes of `STREAM_TYPE`.

        The simplest way to implement this protocol is the following:
@@ -5867,7 +6526,10 @@

        ### Stream format when using a TTY

-        When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate), the stream is not multiplexed. The data exchanged over the hijacked connection is simply the raw data from the process PTY and client's `stdin`.
+        When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate),
+        the stream is not multiplexed. The data exchanged over the hijacked
+        connection is simply the raw data from the process PTY and client's
+        `stdin`.

      operationId: "ContainerAttach"
      produces:
@@ -5900,21 +6562,28 @@
          type: "string"
        - name: "detachKeys"
          in: "query"
-          description: "Override the key sequence for detaching a container.Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`."
+          description: |
+            Override the key sequence for detaching a container. Format is a single
+            character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`,
+            `@`, `^`, `[`, `,` or `_`.
          type: "string"
        - name: "logs"
          in: "query"
          description: |
            Replay previous logs from the container.

-            This is useful for attaching to a container that has started and you want to output everything since the container started.
+            This is useful for attaching to a container that has started and you
+            want to output everything since the container started.

-            If `stream` is also enabled, once all the previous output has been returned, it will seamlessly transition into streaming current output.
+            If `stream` is also enabled, once all the previous output has been
+            returned, it will seamlessly transition into streaming current
+            output.
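The frame layout above is straightforward to consume with `encoding/binary`. A sketch of a demultiplexer for the non-TTY stream, with error handling reduced to the essentials:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"io"
	"os"
)

// demux splits a multiplexed attach/logs stream into stdout and stderr,
// following the 8-byte header described above:
// [STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4] then SIZE payload bytes.
func demux(r io.Reader, stdout, stderr io.Writer) error {
	header := make([]byte, 8)
	for {
		if _, err := io.ReadFull(r, header); err != nil {
			if err == io.EOF {
				return nil // clean end of stream
			}
			return err
		}
		size := binary.BigEndian.Uint32(header[4:8]) // big-endian uint32 size

		dst := stdout
		if header[0] == 2 { // STREAM_TYPE 2 is stderr; 0 (stdin) and 1 go to stdout
			dst = stderr
		}
		if _, err := io.CopyN(dst, r, int64(size)); err != nil {
			return err
		}
	}
}

func main() {
	// For testing, pipe a captured raw stream in on stdin.
	if err := demux(os.Stdin, os.Stdout, os.Stderr); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```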
type: "boolean" default: false - name: "stream" in: "query" - description: "Stream attached streams from the time the request was made onwards" + description: | + Stream attached streams from the time the request was made onwards. type: "boolean" default: false - name: "stdin" @@ -5965,7 +6634,10 @@ paths: type: "string" - name: "detachKeys" in: "query" - description: "Override the key sequence for detaching a container.Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,`, or `_`." + description: | + Override the key sequence for detaching a container.Format is a single + character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, + `@`, `^`, `[`, `,`, or `_`. type: "string" - name: "logs" in: "query" @@ -6038,7 +6710,9 @@ paths: type: "string" - name: "condition" in: "query" - description: "Wait until a container state reaches the given condition, either 'not-running' (default), 'next-exit', or 'removed'." + description: | + Wait until a container state reaches the given condition, either + 'not-running' (default), 'next-exit', or 'removed'. type: "string" default: "not-running" tags: ["Container"] @@ -6066,7 +6740,9 @@ paths: $ref: "#/definitions/ErrorResponse" examples: application/json: - message: "You cannot remove a running container: c2ada9df5af8. Stop the container before attempting removal or force remove" + message: | + You cannot remove a running container: c2ada9df5af8. Stop the + container before attempting removal or force remove 500: description: "server error" schema: @@ -6096,7 +6772,10 @@ paths: /containers/{id}/archive: head: summary: "Get information about files in a container" - description: "A response header `X-Docker-Container-Path-Stat` is return containing a base64 - encoded JSON object with some filesystem header information about the path." + description: | + A response header `X-Docker-Container-Path-Stat` is returned, containing + a base64 - encoded JSON object with some filesystem header information + about the path. operationId: "ContainerArchiveInfo" responses: 200: @@ -6104,7 +6783,9 @@ paths: headers: X-Docker-Container-Path-Stat: type: "string" - description: "A base64 - encoded JSON object with some filesystem header information about the path" + description: | + A base64 - encoded JSON object with some filesystem header + information about the path 400: description: "Bad parameter" schema: @@ -6113,7 +6794,10 @@ paths: - type: "object" properties: message: - description: "The error message. Either \"must specify path parameter\" (path cannot be empty) or \"not a directory\" (path was asserted to be a directory but exists as a file)." + description: | + The error message. Either "must specify path parameter" + (path cannot be empty) or "not a directory" (path was + asserted to be a directory but exists as a file). type: "string" x-nullable: false 404: @@ -6155,7 +6839,10 @@ paths: - type: "object" properties: message: - description: "The error message. Either \"must specify path parameter\" (path cannot be empty) or \"not a directory\" (path was asserted to be a directory but exists as a file)." + description: | + The error message. Either "must specify path parameter" + (path cannot be empty) or "not a directory" (path was + asserted to be a directory but exists as a file). 
type: "string" x-nullable: false 404: @@ -6221,14 +6908,27 @@ paths: type: "string" - name: "noOverwriteDirNonDir" in: "query" - description: "If “1”, “true”, or “True” then it will be an error if unpacking the given content would cause an existing directory to be replaced with a non-directory and vice versa." + description: | + If `1`, `true`, or `True` then it will be an error if unpacking the + given content would cause an existing directory to be replaced with + a non-directory and vice versa. + type: "string" + - name: "copyUIDGID" + in: "query" + description: | + If `1`, `true`, then it will copy UID/GID maps to the dest file or + dir type: "string" - name: "inputStream" in: "body" required: true - description: "The input stream must be a tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." + description: | + The input stream must be a tar archive compressed with one of the + following algorithms: `identity` (no compression), `gzip`, `bzip2`, + or `xz`. schema: type: "string" + format: "binary" tags: ["Container"] /containers/prune: post: @@ -6323,7 +7023,10 @@ paths: - name: "filters" in: "query" description: | - A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: + A JSON encoded value of the filters (a `map[string][]string`) to + process on the images list. + + Available filters: - `before`=(`[:]`, `` or ``) - `dangling=true` @@ -6458,10 +7161,11 @@ paths: type: "string" - name: "networkmode" in: "query" - description: "Sets the networking mode for the run commands during - build. Supported standard values are: `bridge`, `host`, `none`, and - `container:`. Any other value is taken as a custom network's - name to which this container should connect to." + description: | + Sets the networking mode for the run commands during build. Supported + standard values are: `bridge`, `host`, `none`, and `container:`. + Any other value is taken as a custom network's name or ID to which this + container should connect to. type: "string" - name: "Content-type" in: "header" @@ -6538,7 +7242,11 @@ paths: in: "query" type: "string" description: | - A JSON encoded value of the filters (a `map[string][]string`) to process on the list of build cache objects. Available filters: + A JSON encoded value of the filters (a `map[string][]string`) to + process on the list of build cache objects. + + Available filters: + - `until=`: duration relative to daemon's time, during which build cache was not used, in Go's duration format (e.g., '24h') - `id=` - `parent=` @@ -6606,6 +7314,10 @@ paths: in: "query" description: "Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled." type: "string" + - name: "message" + in: "query" + description: "Set commit message for imported image." + type: "string" - name: "inputImage" in: "body" description: "Image content if the value `-` has been specified in fromSrc query parameter" @@ -6614,7 +7326,11 @@ paths: required: false - name: "X-Registry-Auth" in: "header" - description: "A base64-encoded auth configuration. [See the authentication section for details.](#section/Authentication)" + description: | + A base64url-encoded auth configuration. + + Refer to the [authentication section](#section/Authentication) for + details. type: "string" - name: "platform" in: "query" @@ -6813,7 +7529,9 @@ paths: description: | Push an image to a registry. 
- If you wish to push an image on to a private registry, that image must already have a tag which references the registry. For example, `registry.example.com/myimage:latest`. + If you wish to push an image on to a private registry, that image must + already have a tag which references the registry. For example, + `registry.example.com/myimage:latest`. The push is cancelled if the HTTP connection is closed. operationId: "ImagePush" @@ -6842,7 +7560,11 @@ paths: type: "string" - name: "X-Registry-Auth" in: "header" - description: "A base64-encoded auth configuration. [See the authentication section for details.](#section/Authentication)" + description: | + A base64url-encoded auth configuration. + + Refer to the [authentication section](#section/Authentication) for + details. type: "string" required: true tags: ["Image"] @@ -7046,7 +7768,9 @@ paths: /auth: post: summary: "Check auth configuration" - description: "Validate credentials for a registry and, if available, get an identity token for accessing the registry without password." + description: | + Validate credentials for a registry and, if available, get an identity + token for accessing the registry without password. operationId: "SystemAuth" consumes: ["application/json"] produces: ["application/json"] @@ -7109,63 +7833,7 @@ paths: 200: description: "no error" schema: - type: "object" - title: "SystemVersionResponse" - properties: - Platform: - type: "object" - required: [Name] - properties: - Name: - type: "string" - Components: - type: "array" - items: - type: "object" - x-go-name: ComponentVersion - required: [Name, Version] - properties: - Name: - type: "string" - Version: - type: "string" - x-nullable: false - Details: - type: "object" - x-nullable: true - - Version: - type: "string" - ApiVersion: - type: "string" - MinAPIVersion: - type: "string" - GitCommit: - type: "string" - GoVersion: - type: "string" - Os: - type: "string" - Arch: - type: "string" - KernelVersion: - type: "string" - Experimental: - type: "boolean" - BuildTime: - type: "string" - examples: - application/json: - Version: "17.04.0" - Os: "linux" - KernelVersion: "3.19.0-23-generic" - GoVersion: "go1.7.5" - GitCommit: "deadbee" - Arch: "amd64" - ApiVersion: "1.27" - MinAPIVersion: "1.12" - BuildTime: "2016-06-14T07:09:13.444803460+00:00" - Experimental: true + $ref: "#/definitions/SystemVersion" 500: description: "server error" schema: @@ -7557,11 +8225,16 @@ paths: get: summary: "Export several images" description: | - Get a tarball containing all images and metadata for several image repositories. + Get a tarball containing all images and metadata for several image + repositories. - For each value of the `names` parameter: if it is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned; if it is an image ID, similarly only that image (and its parents) are returned and there would be no names referenced in the 'repositories' file for this image ID. + For each value of the `names` parameter: if it is a specific name and + tag (e.g. `ubuntu:latest`), then only that image (and its parents) are + returned; if it is an image ID, similarly only that image (and its parents) + are returned and there would be no names referenced in the 'repositories' + file for this image ID. - For details on the format, see [the export image endpoint](#operation/ImageGet). + For details on the format, see the [export image endpoint](#operation/ImageGet). 
operationId: "ImageGetAll" produces: - "application/x-tar" @@ -7589,7 +8262,7 @@ paths: description: | Load a set of images and tags into a repository. - For details on the format, see [the export image endpoint](#operation/ImageGet). + For details on the format, see the [export image endpoint](#operation/ImageGet). operationId: "ImageLoad" consumes: - "application/x-tar" @@ -7662,12 +8335,16 @@ paths: description: "Attach to `stderr` of the exec command." DetachKeys: type: "string" - description: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`." + description: | + Override the key sequence for detaching a container. Format is + a single character `[a-Z]` or `ctrl-` where `` + is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. Tty: type: "boolean" description: "Allocate a pseudo-TTY." Env: - description: "A list of environment variables in the form `[\"VAR=value\", ...]`." + description: | + A list of environment variables in the form `["VAR=value", ...]`. type: "array" items: type: "string" @@ -7682,10 +8359,14 @@ paths: default: false User: type: "string" - description: "The user, and optionally, group to run the exec process inside the container. Format is one of: `user`, `user:group`, `uid`, or `uid:gid`." + description: | + The user, and optionally, group to run the exec process inside + the container. Format is one of: `user`, `user:group`, `uid`, + or `uid:gid`. WorkingDir: type: "string" - description: "The working directory for the exec process inside the container." + description: | + The working directory for the exec process inside the container. example: AttachStdin: false AttachStdout: true @@ -7707,7 +8388,10 @@ paths: /exec/{id}/start: post: summary: "Start an exec instance" - description: "Starts a previously set up exec instance. If detach is true, this endpoint returns immediately after starting the command. Otherwise, it sets up an interactive session with the command." + description: | + Starts a previously set up exec instance. If detach is true, this endpoint + returns immediately after starting the command. Otherwise, it sets up an + interactive session with the command. operationId: "ExecStart" consumes: - "application/json" @@ -7748,7 +8432,9 @@ paths: /exec/{id}/resize: post: summary: "Resize an exec instance" - description: "Resize the TTY session used by an exec instance. This endpoint only works if `tty` was specified as part of creating and starting the exec instance." + description: | + Resize the TTY session used by an exec instance. This endpoint only works + if `tty` was specified as part of creating and starting the exec instance. operationId: "ExecResize" responses: 201: @@ -7868,7 +8554,8 @@ paths: Warnings: type: "array" x-nullable: false - description: "Warnings that occurred when fetching the list of volumes" + description: | + Warnings that occurred when fetching the list of volumes. items: type: "string" @@ -7937,7 +8624,8 @@ paths: title: "VolumeConfig" properties: Name: - description: "The new volume's name. If not specified, Docker generates a name." + description: | + The new volume's name. If not specified, Docker generates a name. type: "string" x-nullable: false Driver: @@ -7946,7 +8634,9 @@ paths: default: "local" x-nullable: false DriverOpts: - description: "A mapping of driver options and values. These options are passed directly to the driver and are driver specific." + description: | + A mapping of driver options and values. 
These options are
+ passed directly to the driver and are driver specific.
type: "object"
additionalProperties:
type: "string"
@@ -8060,10 +8750,12 @@ paths:
get:
summary: "List networks"
description: |
- Returns a list of networks. For details on the format, see [the network inspect endpoint](#operation/NetworkInspect).
+ Returns a list of networks. For details on the format, see the
+ [network inspect endpoint](#operation/NetworkInspect).

- Note that it uses a different, smaller representation of a network than inspecting a single network. For example,
- the list of containers attached to the network is not propagated in API versions 1.28 and up.
+ Note that it uses a different, smaller representation of a network than
+ inspecting a single network. For example, the list of containers attached
+ to the network is not propagated in API versions 1.28 and up.
operationId: "NetworkList"
produces:
- "application/json"
responses:
@@ -8133,7 +8825,10 @@ paths:
- name: "filters"
in: "query"
description: |
- JSON encoded value of the filters (a `map[string][]string`) to process on the networks list. Available filters:
+ JSON encoded value of the filters (a `map[string][]string`) to process
+ on the networks list.
+
+ Available filters:

- `dangling=` When set to `true` (or `1`), returns all
networks that are not in use by a container. When set to `false`
@@ -8258,7 +8953,14 @@ paths:
description: "The network's name."
type: "string"
CheckDuplicate:
- description: "Check for networks with duplicate names. Since Network is primarily keyed based on a random ID and not on the name, and network name is strictly a user-friendly alias to the network which is uniquely identified using ID, there is no guaranteed way to check for duplicates. CheckDuplicate is there to provide a best effort checking of any networks which has the same name but it is not guaranteed to catch all name collisions."
+ description: |
+ Check for networks with duplicate names. Since Network is
+ primarily keyed based on a random ID and not on the name, and
+ network name is strictly a user-friendly alias to the network
+ which is uniquely identified using ID, there is no guaranteed
+ way to check for duplicates. CheckDuplicate is there to provide
+ a best-effort check for any networks that have the same name,
+ but it is not guaranteed to catch all name collisions.
type: "boolean"
Driver:
description: "Name of the network driver plugin to use."
@@ -8268,10 +8970,14 @@ paths:
description: "Restrict external access to the network."
type: "boolean"
Attachable:
- description: "Globally scoped network is manually attachable by regular containers from workers in swarm mode."
+ description: |
+ Globally scoped network is manually attachable by regular
+ containers from workers in swarm mode.
type: "boolean"
Ingress:
- description: "Ingress network is the network which provides the routing-mesh in swarm mode."
+ description: |
+ Ingress network is the network which provides the routing-mesh
+ in swarm mode.
type: "boolean"
IPAM:
description: "Optional custom IP scheme for the network."
@@ -8400,10 +9106,12 @@ paths:
properties:
Container:
type: "string"
- description: "The ID or name of the container to disconnect from the network."
+ description: |
+ The ID or name of the container to disconnect from the network.
Force:
type: "boolean"
- description: "Force the container to disconnect from the network."
+ description: |
+ Force the container to disconnect from the network.
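To make the `Container`/`Force` disconnect body just above concrete: with the vendored Go client, that payload is filled in by `NetworkDisconnect`. A minimal sketch, with the network and container names as placeholders:

```go
package main

import (
	"context"
	"log"

	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		log.Fatal(err)
	}

	// Sends POST /networks/{id}/disconnect with {"Container": ..., "Force": true}.
	// "my-network" and "my-container" are placeholder names.
	if err := cli.NetworkDisconnect(context.Background(), "my-network", "my-container", true); err != nil {
		log.Fatal(err)
	}
}
```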
tags: ["Network"] /networks/prune: post: @@ -8460,7 +9168,10 @@ paths: in: "query" type: "string" description: | - A JSON encoded value of the filters (a `map[string][]string`) to process on the plugin list. Available filters: + A JSON encoded value of the filters (a `map[string][]string`) to + process on the plugin list. + + Available filters: - `capability=` - `enable=|` @@ -8476,7 +9187,9 @@ paths: schema: type: "array" items: - description: "Describes a permission the user has to accept upon installing the plugin." + description: | + Describes a permission the user has to accept upon installing + the plugin. type: "object" title: "PluginPrivilegeItem" properties: @@ -8508,7 +9221,9 @@ paths: parameters: - name: "remote" in: "query" - description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. required: true type: "string" tags: @@ -8519,7 +9234,8 @@ paths: summary: "Install a plugin" operationId: "PluginPull" description: | - Pulls and installs a plugin. After the plugin is installed, it can be enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable). + Pulls and installs a plugin. After the plugin is installed, it can be + enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable). produces: - "application/json" responses: @@ -8548,14 +9264,21 @@ paths: type: "string" - name: "X-Registry-Auth" in: "header" - description: "A base64-encoded auth configuration to use when pulling a plugin from a registry. [See the authentication section for details.](#section/Authentication)" + description: | + A base64url-encoded auth configuration to use when pulling a plugin + from a registry. + + Refer to the [authentication section](#section/Authentication) for + details. type: "string" - name: "body" in: "body" schema: type: "array" items: - description: "Describes a permission accepted by the user upon installing the plugin." + description: | + Describes a permission accepted by the user upon installing the + plugin. type: "object" properties: Name: @@ -8600,7 +9323,9 @@ paths: parameters: - name: "name" in: "path" - description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. required: true type: "string" tags: ["Plugin"] @@ -8624,12 +9349,16 @@ paths: parameters: - name: "name" in: "path" - description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. required: true type: "string" - name: "force" in: "query" - description: "Disable the plugin before removing. This may result in issues if the plugin is in use by a container." + description: | + Disable the plugin before removing. This may result in issues if the + plugin is in use by a container. type: "boolean" default: false tags: ["Plugin"] @@ -8651,7 +9380,9 @@ paths: parameters: - name: "name" in: "path" - description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. 
required: true type: "string" - name: "timeout" @@ -8678,7 +9409,9 @@ paths: parameters: - name: "name" in: "path" - description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. required: true type: "string" tags: ["Plugin"] @@ -8700,7 +9433,9 @@ paths: parameters: - name: "name" in: "path" - description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. required: true type: "string" - name: "remote" @@ -8713,14 +9448,21 @@ paths: type: "string" - name: "X-Registry-Auth" in: "header" - description: "A base64-encoded auth configuration to use when pulling a plugin from a registry. [See the authentication section for details.](#section/Authentication)" + description: | + A base64url-encoded auth configuration to use when pulling a plugin + from a registry. + + Refer to the [authentication section](#section/Authentication) for + details. type: "string" - name: "body" in: "body" schema: type: "array" items: - description: "Describes a permission accepted by the user upon installing the plugin." + description: | + Describes a permission accepted by the user upon installing the + plugin. type: "object" properties: Name: @@ -8761,7 +9503,9 @@ paths: parameters: - name: "name" in: "query" - description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. required: true type: "string" - name: "tarContext" @@ -8780,7 +9524,9 @@ paths: parameters: - name: "name" in: "path" - description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. required: true type: "string" responses: @@ -8804,7 +9550,9 @@ paths: parameters: - name: "name" in: "path" - description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. required: true type: "string" - name: "body" @@ -8953,7 +9701,9 @@ paths: $ref: "#/definitions/NodeSpec" - name: "version" in: "query" - description: "The version number of the node object being updated. This is required to avoid conflicting writes." + description: | + The version number of the node object being updated. This is required + to avoid conflicting writes. type: "integer" format: "int64" required: true @@ -9014,20 +9764,35 @@ paths: type: "object" properties: ListenAddr: - description: "Listen address used for inter-manager communication, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the default swarm listening port is used." + description: | + Listen address used for inter-manager communication, as well + as determining the networking interface used for the VXLAN + Tunnel Endpoint (VTEP). This can either be an address/port + combination in the form `192.168.1.1:4567`, or an interface + followed by a port number, like `eth0:4567`. 
If the port number + is omitted, the default swarm listening port is used. type: "string" AdvertiseAddr: - description: "Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible." + description: | + Externally reachable address advertised to other nodes. This + can either be an address/port combination in the form + `192.168.1.1:4567`, or an interface followed by a port number, + like `eth0:4567`. If the port number is omitted, the port + number from the listen address is used. If `AdvertiseAddr` is + not specified, it will be automatically detected when possible. type: "string" DataPathAddr: description: | - Address or interface to use for data path traffic (format: ``), for example, `192.168.1.1`, - or an interface, like `eth0`. If `DataPathAddr` is unspecified, the same address as `AdvertiseAddr` - is used. - - The `DataPathAddr` specifies the address that global scope network drivers will publish towards other - nodes in order to reach the containers running on this node. Using this parameter it is possible to - separate the container data traffic from the management traffic of the cluster. + Address or interface to use for data path traffic (format: + ``), for example, `192.168.1.1`, or an interface, + like `eth0`. If `DataPathAddr` is unspecified, the same address + as `AdvertiseAddr` is used. + + The `DataPathAddr` specifies the address that global scope + network drivers will publish towards other nodes in order to + reach the containers running on this node. Using this parameter + it is possible to separate the container data traffic from the + management traffic of the cluster. type: "string" DataPathPort: description: | @@ -9038,7 +9803,8 @@ paths: format: "uint32" DefaultAddrPool: description: | - Default Address Pool specifies default subnet pools for global scope networks. + Default Address Pool specifies default subnet pools for global + scope networks. type: "array" items: type: "string" @@ -9048,7 +9814,8 @@ paths: type: "boolean" SubnetSize: description: | - SubnetSize specifies the subnet size of the networks created from the default subnet pool + SubnetSize specifies the subnet size of the networks created + from the default subnet pool. type: "integer" format: "uint32" Spec: @@ -9095,25 +9862,40 @@ paths: type: "object" properties: ListenAddr: - description: "Listen address used for inter-manager communication if the node gets promoted to manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP)." + description: | + Listen address used for inter-manager communication if the node + gets promoted to manager, as well as determining the networking + interface used for the VXLAN Tunnel Endpoint (VTEP). type: "string" AdvertiseAddr: - description: "Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible." + description: | + Externally reachable address advertised to other nodes. 
This
+ can either be an address/port combination in the form
+ `192.168.1.1:4567`, or an interface followed by a port number,
+ like `eth0:4567`. If the port number is omitted, the port
+ number from the listen address is used. If `AdvertiseAddr` is
+ not specified, it will be automatically detected when possible.
type: "string"
DataPathAddr:
description: |
- Address or interface to use for data path traffic (format: ``), for example, `192.168.1.1`,
- or an interface, like `eth0`. If `DataPathAddr` is unspecified, the same address as `AdvertiseAddr`
- is used.
+ Address or interface to use for data path traffic (format:
+ ``), for example, `192.168.1.1`, or an interface,
+ like `eth0`. If `DataPathAddr` is unspecified, the same address
+ as `AdvertiseAddr` is used.

- The `DataPathAddr` specifies the address that global scope network drivers will publish towards other
- nodes in order to reach the containers running on this node. Using this parameter it is possible to
- separate the container data traffic from the management traffic of the cluster.
+ The `DataPathAddr` specifies the address that global scope
+ network drivers will publish towards other nodes in order to
+ reach the containers running on this node. Using this parameter
+ it is possible to separate the container data traffic from the
+ management traffic of the cluster.
type: "string"
RemoteAddrs:
- description: "Addresses of manager nodes already participating in the swarm."
- type: "string"
+ description: |
+ Addresses of manager nodes already participating in the swarm.
+ type: "array"
+ items:
+ type: "string"
JoinToken:
description: "Secret token for joining this swarm."
type: "string"
@@ -9141,7 +9923,9 @@ paths:
$ref: "#/definitions/ErrorResponse"
parameters:
- name: "force"
- description: "Force leave swarm, even if this is the last manager or that it will break the cluster."
+ description: |
+ Force leave swarm, even if this is the last manager or if it will
+ break the cluster.
in: "query"
type: "boolean"
default: false
@@ -9173,7 +9957,9 @@ paths:
$ref: "#/definitions/SwarmSpec"
- name: "version"
in: "query"
- description: "The version number of the swarm object being updated. This is required to avoid conflicting writes."
+ description: |
+ The version number of the swarm object being updated. This is
+ required to avoid conflicting writes.
type: "integer"
format: "int64"
required: true
@@ -9276,7 +10062,10 @@ paths:
in: "query"
type: "string"
description: |
- A JSON encoded value of the filters (a `map[string][]string`) to process on the services list. Available filters:
+ A JSON encoded value of the filters (a `map[string][]string`) to
+ process on the services list.
+
+ Available filters:

- `id=`
- `label=`
@@ -9404,7 +10193,12 @@ paths:
foo: "bar"
- name: "X-Registry-Auth"
in: "header"
- description: "A base64-encoded auth configuration for pulling from private registries. [See the authentication section for details.](#section/Authentication)"
+ description: |
+ A base64url-encoded auth configuration for pulling from private
+ registries.
+
+ Refer to the [authentication section](#section/Authentication) for
+ details.
type: "string"
tags: ["Service"]
/services/{id}:
@@ -9540,28 +10334,37 @@ paths:

- name: "version"
in: "query"
- description: "The version number of the service object being updated.
- This is required to avoid conflicting writes.
- This version number should be the value as currently set on the service *before* the update.
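The most consequential fix in the swarm hunks above is `RemoteAddrs` becoming `type: "array"`: the vendored Go client has always sent a list there. A sketch of init and join against these parameters — all addresses, pools, and the join token are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()

	// Initialize a swarm; ListenAddr/AdvertiseAddr follow the
	// address:port or interface:port formats described above.
	nodeID, err := cli.SwarmInit(ctx, swarm.InitRequest{
		ListenAddr:      "0.0.0.0:2377",
		AdvertiseAddr:   "192.168.1.1:2377", // placeholder address
		DefaultAddrPool: []string{"10.20.0.0/16"},
		SubnetSize:      24,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("manager node:", nodeID)

	// On another node, the join request carries RemoteAddrs as a list of
	// manager addresses, matching the corrected `type: "array"` above.
	_ = swarm.JoinRequest{
		ListenAddr:  "0.0.0.0:2377",
		RemoteAddrs: []string{"192.168.1.1:2377"},
		JoinToken:   "SWMTKN-placeholder",
	}
}
```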
- You can find the current version by calling `GET /services/{id}`"
+ description: |
+ The version number of the service object being updated. This is
+ required to avoid conflicting writes.
+ This version number should be the value as currently set on the
+ service *before* the update. You can find the current version by
+ calling `GET /services/{id}`.
required: true
type: "integer"
- name: "registryAuthFrom"
in: "query"
+ description: |
+ If the `X-Registry-Auth` header is not specified, this parameter
+ indicates where to find registry authorization credentials.
type: "string"
- description: "If the X-Registry-Auth header is not specified, this
- parameter indicates where to find registry authorization credentials. The
- valid values are `spec` and `previous-spec`."
+ enum: ["spec", "previous-spec"]
default: "spec"
- name: "rollback"
in: "query"
+ description: |
+ Set this parameter to `previous` to cause a server-side rollback
+ to the previous service spec. The supplied spec will be ignored in
+ this case.
type: "string"
- description: "Set to this parameter to `previous` to cause a
- server-side rollback to the previous service spec. The supplied spec will be
- ignored in this case."
- name: "X-Registry-Auth"
in: "header"
- description: "A base64-encoded auth configuration for pulling from private registries. [See the authentication section for details.](#section/Authentication)"
+ description: |
+ A base64url-encoded auth configuration for pulling from private
+ registries.
+
+ Refer to the [authentication section](#section/Authentication) for
+ details.
type: "string"
tags: ["Service"]

@@ -9569,23 +10372,18 @@ paths:
get:
summary: "Get service logs"
description: |
- Get `stdout` and `stderr` logs from a service.
+ Get `stdout` and `stderr` logs from a service. See also
+ [`/containers/{id}/logs`](#operation/ContainerLogs).

- **Note**: This endpoint works only for services with the `json-file` or `journald` logging drivers.
+ **Note**: This endpoint works only for services with the `local`,
+ `json-file` or `journald` logging drivers.
operationId: "ServiceLogs"
- produces:
- - "application/vnd.docker.raw-stream"
- - "application/json"
responses:
- 101:
- description: "logs returned as a stream"
- schema:
- type: "string"
- format: "binary"
200:
- description: "logs returned as a string in response body"
+ description: "logs returned as a stream in response body"
schema:
type: "string"
+ format: "binary"
404:
description: "no such service"
schema:
@@ -9614,10 +10412,7 @@ paths:
default: false
- name: "follow"
in: "query"
- description: |
- Return the logs as a stream.
-
- This will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach).
+ description: "Keep connection after returning logs."
type: "boolean"
default: false
- name: "stdout"
in: "query"
@@ -9642,7 +10437,9 @@ paths:
default: false
- name: "tail"
in: "query"
- description: "Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines."
+ description: |
+ Only return this number of log lines from the end of the logs.
+ Specify as an integer or `all` to output all log lines.
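Following the logging changes above — logs now always come back as a stream on a `200` response instead of a `101` upgrade — a minimal sketch of tailing a service's logs with the vendored Go client; the service name is a placeholder:

```go
package main

import (
	"context"
	"io"
	"log"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		log.Fatal(err)
	}

	// ServiceLogs reuses ContainerLogsOptions; Tail maps to the `tail`
	// query parameter documented above ("all" or an integer).
	rc, err := cli.ServiceLogs(context.Background(), "my-service", types.ContainerLogsOptions{
		ShowStdout: true,
		ShowStderr: true,
		Follow:     true,  // keep the connection open after returning logs
		Tail:       "100", // last 100 lines
	})
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()
	io.Copy(os.Stdout, rc)
}
```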
type: "string" default: "all" tags: ["Service"] @@ -9783,7 +10580,10 @@ paths: in: "query" type: "string" description: | - A JSON encoded value of the filters (a `map[string][]string`) to process on the tasks list. Available filters: + A JSON encoded value of the filters (a `map[string][]string`) to + process on the tasks list. + + Available filters: - `desired-state=(running | shutdown | accepted)` - `id=` @@ -9827,22 +10627,17 @@ paths: summary: "Get task logs" description: | Get `stdout` and `stderr` logs from a task. + See also [`/containers/{id}/logs`](#operation/ContainerLogs). - **Note**: This endpoint works only for services with the `json-file` or `journald` logging drivers. + **Note**: This endpoint works only for services with the `local`, + `json-file` or `journald` logging drivers. operationId: "TaskLogs" - produces: - - "application/vnd.docker.raw-stream" - - "application/json" responses: - 101: - description: "logs returned as a stream" - schema: - type: "string" - format: "binary" 200: - description: "logs returned as a string in response body" + description: "logs returned as a stream in response body" schema: type: "string" + format: "binary" 404: description: "no such task" schema: @@ -9871,10 +10666,7 @@ paths: default: false - name: "follow" in: "query" - description: | - Return the logs as a stream. - - This will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). + description: "Keep connection after returning logs." type: "boolean" default: false - name: "stdout" @@ -9899,7 +10691,9 @@ paths: default: false - name: "tail" in: "query" - description: "Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines." + description: | + Only return this number of log lines from the end of the logs. + Specify as an integer or `all` to output all log lines. type: "string" default: "all" tags: ["Task"] @@ -9953,7 +10747,10 @@ paths: in: "query" type: "string" description: | - A JSON encoded value of the filters (a `map[string][]string`) to process on the secrets list. Available filters: + A JSON encoded value of the filters (a `map[string][]string`) to + process on the secrets list. + + Available filters: - `id=` - `label= or label==value` @@ -10110,10 +10907,15 @@ paths: in: "body" schema: $ref: "#/definitions/SecretSpec" - description: "The spec of the secret to update. Currently, only the Labels field can be updated. All other fields must remain unchanged from the [SecretInspect endpoint](#operation/SecretInspect) response values." + description: | + The spec of the secret to update. Currently, only the Labels field + can be updated. All other fields must remain unchanged from the + [SecretInspect endpoint](#operation/SecretInspect) response values. - name: "version" in: "query" - description: "The version number of the secret object being updated. This is required to avoid conflicting writes." + description: | + The version number of the secret object being updated. This is + required to avoid conflicting writes. type: "integer" format: "int64" required: true @@ -10152,7 +10954,10 @@ paths: in: "query" type: "string" description: | - A JSON encoded value of the filters (a `map[string][]string`) to process on the configs list. 
Available filters:
+ A JSON encoded value of the filters (a `map[string][]string`) to
+ process on the configs list.
+
+ Available filters:

- `id=`
- `label= or label==value`
@@ -10296,10 +11101,15 @@ paths:
in: "body"
schema:
$ref: "#/definitions/ConfigSpec"
- description: "The spec of the config to update. Currently, only the Labels field can be updated. All other fields must remain unchanged from the [ConfigInspect endpoint](#operation/ConfigInspect) response values."
+ description: |
+ The spec of the config to update. Currently, only the Labels field
+ can be updated. All other fields must remain unchanged from the
+ [ConfigInspect endpoint](#operation/ConfigInspect) response values.
- name: "version"
in: "query"
- description: "The version number of the config object being updated. This is required to avoid conflicting writes."
+ description: |
+ The version number of the config object being updated. This is
+ required to avoid conflicting writes.
type: "integer"
format: "int64"
required: true
@@ -10307,7 +11117,8 @@ paths:
/distribution/{name}/json:
get:
summary: "Get image information from the registry"
- description: "Return image digest and platform information by contacting the registry."
+ description: |
+ Return image digest and platform information by contacting the registry.
operationId: "DistributionInspect"
produces:
- "application/json"
responses:
@@ -10322,7 +11133,8 @@ paths:
properties:
Descriptor:
type: "object"
- description: "A descriptor struct containing digest, media type, and size"
+ description: |
+ A descriptor struct containing digest, media type, and size.
properties:
MediaType:
type: "string"
@@ -10337,7 +11149,8 @@ paths:
type: "string"
Platforms:
type: "array"
- description: "An array containing all platforms supported by the image"
+ description: |
+ An array containing all platforms supported by the image.
items:
type: "object"
properties:
@@ -10396,14 +11209,13 @@ paths:
post:
summary: "Initialize interactive session"
description: |
- Start a new interactive session with a server. Session allows server to call back to the client for advanced capabilities.
-
- > **Note**: This endpoint is *experimental* and only available if the daemon is started with experimental
- > features enabled. The specifications for this endpoint may still change in a future version of the API.
+ Start a new interactive session with a server. A session allows the
+ server to call back to the client for advanced capabilities.

### Hijacking

- This endpoint hijacks the HTTP connection to HTTP2 transport that allows the client to expose gPRC services on that connection.
+ This endpoint hijacks the HTTP connection to HTTP2 transport that allows
+ the client to expose gRPC services on that connection.
For example, the client sends this request to upgrade the connection:

@@ -10413,7 +11225,8 @@ paths:
Connection: Upgrade
```

- The Docker daemon will respond with a `101 UPGRADED` response follow with the raw stream:
+ The Docker daemon responds with a `101 UPGRADED` response, followed by
+ the raw stream:

```
HTTP/1.1 101 UPGRADED
@@ -10434,4 +11247,4 @@ paths:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
- tags: ["Session (experimental)"]
+ tags: ["Session"]
diff --git a/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/docker/docker/api/types/client.go
index 4b9f50282b1f..fe90617eec35 100644
--- a/vendor/github.com/docker/docker/api/types/client.go
+++ b/vendor/github.com/docker/docker/api/types/client.go
@@ -50,7 +50,7 @@ type ContainerCommitOptions struct {
// ContainerExecInspect holds information returned by exec inspect.
type ContainerExecInspect struct {
- ExecID string
+ ExecID string `json:"ID"`
ContainerID string
Running bool
ExitCode int
diff --git a/vendor/github.com/docker/docker/api/types/container/container_changes.go b/vendor/github.com/docker/docker/api/types/container/container_changes.go
index c909d6ca3e9e..222d141007ec 100644
--- a/vendor/github.com/docker/docker/api/types/container/container_changes.go
+++ b/vendor/github.com/docker/docker/api/types/container/container_changes.go
@@ -1,4 +1,4 @@
-package container
+package container // import "github.com/docker/docker/api/types/container"
// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE
diff --git a/vendor/github.com/docker/docker/api/types/container/container_create.go b/vendor/github.com/docker/docker/api/types/container/container_create.go
index 49efa0f2c093..1ec9c3728ba8 100644
--- a/vendor/github.com/docker/docker/api/types/container/container_create.go
+++ b/vendor/github.com/docker/docker/api/types/container/container_create.go
@@ -1,4 +1,4 @@
-package container
+package container // import "github.com/docker/docker/api/types/container"
// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE
diff --git a/vendor/github.com/docker/docker/api/types/container/container_top.go b/vendor/github.com/docker/docker/api/types/container/container_top.go
index ba41edcf3f84..bd34cd6b6928 100644
--- a/vendor/github.com/docker/docker/api/types/container/container_top.go
+++ b/vendor/github.com/docker/docker/api/types/container/container_top.go
@@ -1,4 +1,4 @@
-package container
+package container // import "github.com/docker/docker/api/types/container"
// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE
@@ -11,7 +11,9 @@ package container
// swagger:model ContainerTopOKBody
type ContainerTopOKBody struct {
- // Each process running in the container, where each is process is an array of values corresponding to the titles
+ // Each process running in the container, where each process
+ // is an array of values corresponding to the titles.
+ // // Required: true Processes [][]string `json:"Processes"` diff --git a/vendor/github.com/docker/docker/api/types/container/container_update.go b/vendor/github.com/docker/docker/api/types/container/container_update.go index 7630ae54cd6d..33addedf7791 100644 --- a/vendor/github.com/docker/docker/api/types/container/container_update.go +++ b/vendor/github.com/docker/docker/api/types/container/container_update.go @@ -1,4 +1,4 @@ -package container +package container // import "github.com/docker/docker/api/types/container" // ---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE diff --git a/vendor/github.com/docker/docker/api/types/container/container_wait.go b/vendor/github.com/docker/docker/api/types/container/container_wait.go index 9e3910a6b42e..94b6a20e159b 100644 --- a/vendor/github.com/docker/docker/api/types/container/container_wait.go +++ b/vendor/github.com/docker/docker/api/types/container/container_wait.go @@ -1,4 +1,4 @@ -package container +package container // import "github.com/docker/docker/api/types/container" // ---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE diff --git a/vendor/github.com/docker/docker/api/types/container/host_config.go b/vendor/github.com/docker/docker/api/types/container/host_config.go index c710107702b8..c3de3d976a57 100644 --- a/vendor/github.com/docker/docker/api/types/container/host_config.go +++ b/vendor/github.com/docker/docker/api/types/container/host_config.go @@ -338,7 +338,6 @@ type Resources struct { Devices []DeviceMapping // List of devices to map inside the container DeviceCgroupRules []string // List of rule to be added to the device cgroup DeviceRequests []DeviceRequest // List of device requests for device drivers - DiskQuota int64 // Disk limit (in bytes) KernelMemory int64 // Kernel memory limit (in bytes) KernelMemoryTCP int64 // Hard limit for kernel TCP buffer memory (in bytes) MemoryReservation int64 // Memory soft limit (in bytes) diff --git a/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/docker/docker/api/types/filters/parse.go index d8f19ae227c4..0bd2e1e1853b 100644 --- a/vendor/github.com/docker/docker/api/types/filters/parse.go +++ b/vendor/github.com/docker/docker/api/types/filters/parse.go @@ -5,7 +5,6 @@ package filters // import "github.com/docker/docker/api/types/filters" import ( "encoding/json" - "errors" "regexp" "strings" @@ -37,39 +36,13 @@ func NewArgs(initialArgs ...KeyValuePair) Args { return args } -// ParseFlag parses a key=value string and adds it to an Args. 
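The `filters/parse.go` hunk starting here (and continuing below) deletes the long-deprecated helpers. For callers migrating, the replacements named in the removed comments map one-to-one — a sketch using the surviving API, with the filter keys and values as examples only:

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/api/types/filters"
)

func main() {
	args := filters.NewArgs()

	// ParseFlag("name=value", prev) -> Args.Add("name", "value")
	args.Add("status", "running") // example key/value

	// Include(field) -> Contains(field)
	fmt.Println(args.Contains("status")) // true

	// ToParam(args) -> ToJSON(args); this is the string that goes into
	// the `filters` query parameter of the list endpoints.
	js, err := filters.ToJSON(args)
	if err != nil {
		log.Fatal(err)
	}

	// FromParam(p) -> FromJSON(p)
	back, err := filters.FromJSON(js)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(back.Get("status")) // [running]
}
```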
-// -// Deprecated: Use Args.Add() -func ParseFlag(arg string, prev Args) (Args, error) { - filters := prev - if len(arg) == 0 { - return filters, nil - } - - if !strings.Contains(arg, "=") { - return filters, ErrBadFormat +// Keys returns all the keys in list of Args +func (args Args) Keys() []string { + keys := make([]string, 0, len(args.fields)) + for k := range args.fields { + keys = append(keys, k) } - - f := strings.SplitN(arg, "=", 2) - - name := strings.ToLower(strings.TrimSpace(f[0])) - value := strings.TrimSpace(f[1]) - - filters.Add(name, value) - - return filters, nil -} - -// ErrBadFormat is an error returned when a filter is not in the form key=value -// -// Deprecated: this error will be removed in a future version -var ErrBadFormat = errors.New("bad format of filter (expected name=value)") - -// ToParam encodes the Args as args JSON encoded string -// -// Deprecated: use ToJSON -func ToParam(a Args) (string, error) { - return ToJSON(a) + return keys } // MarshalJSON returns a JSON byte representation of the Args @@ -107,13 +80,6 @@ func ToParamWithVersion(version string, a Args) (string, error) { return ToJSON(a) } -// FromParam decodes a JSON encoded string into Args -// -// Deprecated: use FromJSON -func FromParam(p string) (Args, error) { - return FromJSON(p) -} - // FromJSON decodes a JSON encoded string into Args func FromJSON(p string) (Args, error) { args := NewArgs() @@ -275,14 +241,6 @@ func (args Args) FuzzyMatch(key, source string) bool { return false } -// Include returns true if the key exists in the mapping -// -// Deprecated: use Contains -func (args Args) Include(field string) bool { - _, ok := args.fields[field] - return ok -} - // Contains returns true if the key exists in the mapping func (args Args) Contains(field string) bool { _, ok := args.fields[field] diff --git a/vendor/github.com/docker/docker/api/types/image/image_history.go b/vendor/github.com/docker/docker/api/types/image/image_history.go index d6b354bcdf38..b5a7a0c4901b 100644 --- a/vendor/github.com/docker/docker/api/types/image/image_history.go +++ b/vendor/github.com/docker/docker/api/types/image/image_history.go @@ -1,4 +1,4 @@ -package image +package image // import "github.com/docker/docker/api/types/image" // ---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE diff --git a/vendor/github.com/docker/docker/api/types/volume.go b/vendor/github.com/docker/docker/api/types/volume.go index b5ee96a50058..c69b08448df4 100644 --- a/vendor/github.com/docker/docker/api/types/volume.go +++ b/vendor/github.com/docker/docker/api/types/volume.go @@ -27,10 +27,13 @@ type Volume struct { Name string `json:"Name"` // The driver specific options used when creating the volume. + // // Required: true Options map[string]string `json:"Options"` - // The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level. + // The level at which the volume exists. Either `global` for cluster-wide, + // or `local` for machine level. 
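The volume models being reflowed here and just below (`Volume` in volume.go and `VolumeCreateBody` in volume_create.go) are what the Go client sends and receives for `POST /volumes/create`. A sketch with placeholder driver options for the `local` driver:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/api/types/volume"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		log.Fatal(err)
	}

	// Driver options are passed through to the driver as-is; these
	// tmpfs options are placeholders for illustration.
	vol, err := cli.VolumeCreate(context.Background(), volume.VolumeCreateBody{
		Name:   "my-volume", // if empty, Docker generates a name
		Driver: "local",
		DriverOpts: map[string]string{
			"type":   "tmpfs",
			"device": "tmpfs",
			"o":      "size=100m",
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(vol.Name, vol.Scope)
}
```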
+ // // Required: true Scope string `json:"Scope"` diff --git a/vendor/github.com/docker/docker/api/types/volume/volume_create.go b/vendor/github.com/docker/docker/api/types/volume/volume_create.go index f12e48612c56..700aa804f851 100644 --- a/vendor/github.com/docker/docker/api/types/volume/volume_create.go +++ b/vendor/github.com/docker/docker/api/types/volume/volume_create.go @@ -1,4 +1,4 @@ -package volume +package volume // import "github.com/docker/docker/api/types/volume" // ---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE @@ -15,7 +15,9 @@ type VolumeCreateBody struct { // Required: true Driver string `json:"Driver"` - // A mapping of driver options and values. These options are passed directly to the driver and are driver specific. + // A mapping of driver options and values. These options are + // passed directly to the driver and are driver specific. + // // Required: true DriverOpts map[string]string `json:"DriverOpts"` @@ -24,6 +26,7 @@ type VolumeCreateBody struct { Labels map[string]string `json:"Labels"` // The new volume's name. If not specified, Docker generates a name. + // // Required: true Name string `json:"Name"` } diff --git a/vendor/github.com/docker/docker/api/types/volume/volume_list.go b/vendor/github.com/docker/docker/api/types/volume/volume_list.go index 020198f7371b..5a79ad64370f 100644 --- a/vendor/github.com/docker/docker/api/types/volume/volume_list.go +++ b/vendor/github.com/docker/docker/api/types/volume/volume_list.go @@ -1,4 +1,4 @@ -package volume +package volume // import "github.com/docker/docker/api/types/volume" // ---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE @@ -17,7 +17,8 @@ type VolumeListOKBody struct { // Required: true Volumes []*types.Volume `json:"Volumes"` - // Warnings that occurred when fetching the list of volumes + // Warnings that occurred when fetching the list of volumes. + // // Required: true Warnings []string `json:"Warnings"` } diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go index 0b7b4d953c95..b63d4d6d4986 100644 --- a/vendor/github.com/docker/docker/client/client.go +++ b/vendor/github.com/docker/docker/client/client.go @@ -81,6 +81,15 @@ type Client struct { customHTTPHeaders map[string]string // manualOverride is set to true when the version was set by users. manualOverride bool + + // negotiateVersion indicates if the client should automatically negotiate + // the API version to use when making requests. API version negotiation is + // performed on the first request, after which negotiated is set to "true" + // so that subsequent requests do not re-negotiate. + negotiateVersion bool + + // negotiated indicates that API version negotiation took place + negotiated bool } // CheckRedirect specifies the policy for dealing with redirect responses: @@ -107,7 +116,7 @@ func CheckRedirect(req *http.Request, via []*http.Request) error { // It won't send any version information if the version number is empty. It is // highly recommended that you set a version or your client may break if the // server is upgraded. -func NewClientWithOpts(ops ...func(*Client) error) (*Client, error) { +func NewClientWithOpts(ops ...Opt) (*Client, error) { client, err := defaultHTTPClient(DefaultDockerHost) if err != nil { return nil, err @@ -169,8 +178,11 @@ func (cli *Client) Close() error { // getAPIPath returns the versioned request path to call the api. 
// It appends the query parameters to the path if they are not empty.
-func (cli *Client) getAPIPath(p string, query url.Values) string {
+func (cli *Client) getAPIPath(ctx context.Context, p string, query url.Values) string {
var apiPath string
+ if cli.negotiateVersion && !cli.negotiated {
+ cli.NegotiateAPIVersion(ctx)
+ }
if cli.version != "" {
v := strings.TrimPrefix(cli.version, "v")
apiPath = path.Join(cli.basePath, "/v"+v, p)
@@ -186,19 +198,31 @@ func (cli *Client) ClientVersion() string {
}

// NegotiateAPIVersion queries the API and updates the version to match the
-// API version. Any errors are silently ignored.
+// API version. Any errors are silently ignored. If a manual override is in place,
+// either through the `DOCKER_API_VERSION` environment variable, or if the client
+// was initialized with a fixed version (`opts.WithVersion(xx)`), no negotiation
+// will be performed.
func (cli *Client) NegotiateAPIVersion(ctx context.Context) {
- ping, _ := cli.Ping(ctx)
- cli.NegotiateAPIVersionPing(ping)
+ if !cli.manualOverride {
+ ping, _ := cli.Ping(ctx)
+ cli.negotiateAPIVersionPing(ping)
+ }
}

// NegotiateAPIVersionPing updates the client version to match the Ping.APIVersion
-// if the ping version is less than the default version.
+// if the ping version is less than the default version. If a manual override is
+// in place, either through the `DOCKER_API_VERSION` environment variable, or if
+// the client was initialized with a fixed version (`opts.WithVersion(xx)`), no
+// negotiation is performed.
func (cli *Client) NegotiateAPIVersionPing(p types.Ping) {
- if cli.manualOverride {
- return
+ if !cli.manualOverride {
+ cli.negotiateAPIVersionPing(p)
}
+}
+
+// negotiateAPIVersionPing updates the client version to match the API version
+// in the ping response. Any errors are silently ignored.
+func (cli *Client) negotiateAPIVersionPing(p types.Ping) {
// try the latest version before versioning headers existed
if p.APIVersion == "" {
p.APIVersion = "1.24"
@@ -213,6 +237,12 @@ func (cli *Client) NegotiateAPIVersionPing(p types.Ping) {
if versions.LessThan(p.APIVersion, cli.version) {
cli.version = p.APIVersion
}
+
+ // Store the results, so that automatic API version negotiation (if enabled)
+ // won't be performed on the next request.
+ if cli.negotiateVersion { + cli.negotiated = true + } } // DaemonHost returns the host address used by the client diff --git a/vendor/github.com/docker/docker/client/client_unix.go b/vendor/github.com/docker/docker/client/client_unix.go index 3d24470ba3aa..178ff67409a1 100644 --- a/vendor/github.com/docker/docker/client/client_unix.go +++ b/vendor/github.com/docker/docker/client/client_unix.go @@ -1,4 +1,4 @@ -// +build linux freebsd openbsd darwin +// +build linux freebsd openbsd netbsd darwin dragonfly package client // import "github.com/docker/docker/client" diff --git a/vendor/github.com/docker/docker/client/hijack.go b/vendor/github.com/docker/docker/client/hijack.go index 0ac8248f2c7b..e9c9a752f83f 100644 --- a/vendor/github.com/docker/docker/client/hijack.go +++ b/vendor/github.com/docker/docker/client/hijack.go @@ -23,7 +23,7 @@ func (cli *Client) postHijacked(ctx context.Context, path string, query url.Valu return types.HijackedResponse{}, err } - apiPath := cli.getAPIPath(path, query) + apiPath := cli.getAPIPath(ctx, path, query) req, err := http.NewRequest("POST", apiPath, bodyEncoded) if err != nil { return types.HijackedResponse{}, err @@ -38,6 +38,17 @@ func (cli *Client) postHijacked(ctx context.Context, path string, query url.Valu return types.HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn)}, err } +// DialHijack returns a hijacked connection with negotiated protocol proto. +func (cli *Client) DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error) { + req, err := http.NewRequest("POST", url, nil) + if err != nil { + return nil, err + } + req = cli.addHeaders(req, meta) + + return cli.setupHijackConn(ctx, req, proto) +} + // fallbackDial is used when WithDialer() was not called. // See cli.Dialer(). func fallbackDial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) { diff --git a/vendor/github.com/docker/docker/client/interface.go b/vendor/github.com/docker/docker/client/interface.go index d190f8e58db8..cde64be4b56b 100644 --- a/vendor/github.com/docker/docker/client/interface.go +++ b/vendor/github.com/docker/docker/client/interface.go @@ -38,7 +38,7 @@ type CommonAPIClient interface { ServerVersion(ctx context.Context) (types.Version, error) NegotiateAPIVersion(ctx context.Context) NegotiateAPIVersionPing(types.Ping) - DialSession(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) + DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error) Dialer() func(context.Context) (net.Conn, error) Close() error } diff --git a/vendor/github.com/docker/docker/client/options.go b/vendor/github.com/docker/docker/client/options.go index 089be0d50ddd..6f77f0955f69 100644 --- a/vendor/github.com/docker/docker/client/options.go +++ b/vendor/github.com/docker/docker/client/options.go @@ -6,12 +6,16 @@ import ( "net/http" "os" "path/filepath" + "time" "github.com/docker/go-connections/sockets" "github.com/docker/go-connections/tlsconfig" "github.com/pkg/errors" ) +// Opt is a configuration option to initialize a client +type Opt func(*Client) error + // FromEnv configures the client with values from environment variables. // // Supported environment variables: @@ -55,13 +59,13 @@ func FromEnv(c *Client) error { // WithDialer applies the dialer.DialContext to the client transport. This can be // used to set the Timeout and KeepAlive settings of the client. 
// Deprecated: use WithDialContext -func WithDialer(dialer *net.Dialer) func(*Client) error { +func WithDialer(dialer *net.Dialer) Opt { return WithDialContext(dialer.DialContext) } // WithDialContext applies the dialer to the client transport. This can be // used to set the Timeout and KeepAlive settings of the client. -func WithDialContext(dialContext func(ctx context.Context, network, addr string) (net.Conn, error)) func(*Client) error { +func WithDialContext(dialContext func(ctx context.Context, network, addr string) (net.Conn, error)) Opt { return func(c *Client) error { if transport, ok := c.client.Transport.(*http.Transport); ok { transport.DialContext = dialContext @@ -72,7 +76,7 @@ func WithDialContext(dialContext func(ctx context.Context, network, addr string) } // WithHost overrides the client host with the specified one. -func WithHost(host string) func(*Client) error { +func WithHost(host string) Opt { return func(c *Client) error { hostURL, err := ParseHostURL(host) if err != nil { @@ -90,7 +94,7 @@ func WithHost(host string) func(*Client) error { } // WithHTTPClient overrides the client http client with the specified one -func WithHTTPClient(client *http.Client) func(*Client) error { +func WithHTTPClient(client *http.Client) Opt { return func(c *Client) error { if client != nil { c.client = client @@ -99,8 +103,16 @@ func WithHTTPClient(client *http.Client) func(*Client) error { } } +// WithTimeout configures the time limit for requests made by the HTTP client +func WithTimeout(timeout time.Duration) Opt { + return func(c *Client) error { + c.client.Timeout = timeout + return nil + } +} + // WithHTTPHeaders overrides the client default http headers -func WithHTTPHeaders(headers map[string]string) func(*Client) error { +func WithHTTPHeaders(headers map[string]string) Opt { return func(c *Client) error { c.customHTTPHeaders = headers return nil @@ -108,7 +120,7 @@ func WithHTTPHeaders(headers map[string]string) func(*Client) error { } // WithScheme overrides the client scheme with the specified one -func WithScheme(scheme string) func(*Client) error { +func WithScheme(scheme string) Opt { return func(c *Client) error { c.scheme = scheme return nil @@ -116,7 +128,7 @@ func WithScheme(scheme string) func(*Client) error { } // WithTLSClientConfig applies a tls config to the client transport. -func WithTLSClientConfig(cacertPath, certPath, keyPath string) func(*Client) error { +func WithTLSClientConfig(cacertPath, certPath, keyPath string) Opt { return func(c *Client) error { opts := tlsconfig.Options{ CAFile: cacertPath, @@ -136,11 +148,25 @@ func WithTLSClientConfig(cacertPath, certPath, keyPath string) func(*Client) err } } -// WithVersion overrides the client version with the specified one -func WithVersion(version string) func(*Client) error { +// WithVersion overrides the client version with the specified one. If an empty +// version is specified, the value will be ignored to allow version negotiation. +func WithVersion(version string) Opt { + return func(c *Client) error { + if version != "" { + c.version = version + c.manualOverride = true + } + return nil + } +} + +// WithAPIVersionNegotiation enables automatic API version negotiation for the client. +// With this option enabled, the client automatically negotiates the API version +// to use when making requests. API version negotiation is performed on the first +// request; subsequent requests will not re-negotiate. 
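Taken together, the `Opt` changes in this file mean a client can now be assembled declaratively from functional options. A minimal sketch using only options that appear in these hunks:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/docker/docker/client"
)

func main() {
	// WithAPIVersionNegotiation defers version negotiation to the first
	// request; WithTimeout is the new HTTP-client timeout option above.
	cli, err := client.NewClientWithOpts(
		client.FromEnv,
		client.WithTimeout(30*time.Second),
		client.WithAPIVersionNegotiation(),
	)
	if err != nil {
		log.Fatal(err)
	}

	ping, err := cli.Ping(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("negotiated:", cli.ClientVersion(), "server:", ping.APIVersion)
}
```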
+func WithAPIVersionNegotiation() Opt { return func(c *Client) error { - c.version = version - c.manualOverride = true + c.negotiateVersion = true return nil } } diff --git a/vendor/github.com/docker/docker/client/ping.go b/vendor/github.com/docker/docker/client/ping.go index 4553e0fb718f..90f39ec14f92 100644 --- a/vendor/github.com/docker/docker/client/ping.go +++ b/vendor/github.com/docker/docker/client/ping.go @@ -31,6 +31,8 @@ func (cli *Client) Ping(ctx context.Context) (types.Ping, error) { // Server handled the request, so parse the response return parsePingResponse(cli, serverResp) } + } else if IsErrConnectionFailed(err) { + return ping, err } req, err = cli.buildRequest("GET", path.Join(cli.basePath, "/_ping"), nil, nil) diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go index 0afe26d5880a..44e0a22c4f60 100644 --- a/vendor/github.com/docker/docker/client/request.go +++ b/vendor/github.com/docker/docker/client/request.go @@ -115,7 +115,7 @@ func (cli *Client) buildRequest(method, path string, body io.Reader, headers hea } func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers headers) (serverResponse, error) { - req, err := cli.buildRequest(method, cli.getAPIPath(path, query), body, headers) + req, err := cli.buildRequest(method, cli.getAPIPath(ctx, path, query), body, headers) if err != nil { return serverResponse{}, err } @@ -143,8 +143,7 @@ func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResp // Don't decorate context sentinel errors; users may be comparing to // them directly. - switch err { - case context.Canceled, context.DeadlineExceeded: + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { return serverResp, err } @@ -178,7 +177,13 @@ func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResp // this is localised - for example in French the error would be // `open //./pipe/docker_engine: Le fichier spécifié est introuvable.` if strings.Contains(err.Error(), `open //./pipe/docker_engine`) { - err = errors.New(err.Error() + " In the default daemon configuration on Windows, the docker client must be run elevated to connect. 
This error may also indicate that the docker daemon is not running.")
+ // Checks if client is running with elevated privileges
+ if f, elevatedErr := os.Open("\\\\.\\PHYSICALDRIVE0"); elevatedErr == nil {
+ f.Close()
+ err = errors.Wrap(err, "This error may indicate that the docker daemon is not running.")
+ } else {
+ err = errors.Wrap(err, "In the default daemon configuration on Windows, the docker client must be run with elevated privileges to connect.")
+ }
}

return serverResp, errors.Wrap(err, "error during connect")
diff --git a/vendor/github.com/docker/docker/client/session.go b/vendor/github.com/docker/docker/client/session.go
deleted file mode 100644
index df199f3d03ab..000000000000
--- a/vendor/github.com/docker/docker/client/session.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "net"
- "net/http"
-)
-
-// DialSession returns a connection that can be used communication with daemon
-func (cli *Client) DialSession(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) {
- req, err := http.NewRequest("POST", "/session", nil)
- if err != nil {
- return nil, err
- }
- req = cli.addHeaders(req, meta)
-
- return cli.setupHijackConn(ctx, req, proto)
-}
diff --git a/vendor/github.com/docker/docker/errdefs/http_helpers.go b/vendor/github.com/docker/docker/errdefs/http_helpers.go
index ac9bf6d33e67..07552f1cc1dd 100644
--- a/vendor/github.com/docker/docker/errdefs/http_helpers.go
+++ b/vendor/github.com/docker/docker/errdefs/http_helpers.go
@@ -4,6 +4,7 @@ import (
"fmt"
"net/http"

+ containerderrors "github.com/containerd/containerd/errdefs"
"github.com/docker/distribution/registry/api/errcode"
"github.com/sirupsen/logrus"
"google.golang.org/grpc/codes"
@@ -47,6 +48,10 @@ func GetHTTPErrorStatusCode(err error) int {
if statusCode != http.StatusInternalServerError {
return statusCode
}
+ statusCode = statusCodeFromContainerdError(err)
+ if statusCode != http.StatusInternalServerError {
+ return statusCode
+ }
statusCode = statusCodeFromDistributionError(err)
if statusCode != http.StatusInternalServerError {
return statusCode
@@ -136,9 +141,6 @@ func statusCodeFromGRPCError(err error) int {
case codes.Unavailable: // code 14
return http.StatusServiceUnavailable
default:
- if e, ok := err.(causer); ok {
- return statusCodeFromGRPCError(e.Cause())
- }
// codes.Canceled(1)
// codes.Unknown(2)
// codes.DeadlineExceeded(4)
@@ -163,10 +165,27 @@ func statusCodeFromDistributionError(err error) int {
}
case errcode.ErrorCoder:
return errs.ErrorCode().Descriptor().HTTPStatusCode
- default:
- if e, ok := err.(causer); ok {
- return statusCodeFromDistributionError(e.Cause())
- }
}
return http.StatusInternalServerError
}
+
+// statusCodeFromContainerdError returns status code for containerd errors when
+// consumed directly (not through gRPC)
+func statusCodeFromContainerdError(err error) int {
+ switch {
+ case containerderrors.IsInvalidArgument(err):
+ return http.StatusBadRequest
+ case containerderrors.IsNotFound(err):
+ return http.StatusNotFound
+ case containerderrors.IsAlreadyExists(err):
+ return http.StatusConflict
+ case containerderrors.IsFailedPrecondition(err):
+ return http.StatusPreconditionFailed
+ case containerderrors.IsUnavailable(err):
+ return http.StatusServiceUnavailable
+ case containerderrors.IsNotImplemented(err):
+ return http.StatusNotImplemented
+ default:
+ return http.StatusInternalServerError
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go
b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go index 0601f7b0d1f5..c0f81ac3d68f 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go @@ -151,7 +151,9 @@ func mknodChar0Overlay(cleansedOriginalPath string) error { if err := ioutil.WriteFile(lowerDummy, []byte{}, 0600); err != nil { return errors.Wrapf(err, "failed to create a dummy lower file %s", lowerDummy) } - mOpts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lower, upper, work) + // lowerdir needs ":" to be escaped: https://github.com/moby/moby/issues/40939#issuecomment-627098286 + lowerEscaped := strings.ReplaceAll(lower, ":", "\\:") + mOpts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerEscaped, upper, work) // docker/pkg/mount.Mount() requires procfs to be mounted. So we use syscall.Mount() directly instead. if err := syscall.Mount("overlay", merged, "overlay", uintptr(0), mOpts); err != nil { return errors.Wrapf(err, "failed to mount overlay (%s) on %s", mOpts, merged) @@ -236,7 +238,9 @@ func createDirWithOverlayOpaque(tmp string) (string, error) { if err := os.MkdirAll(lowerDummy, 0700); err != nil { return "", errors.Wrapf(err, "failed to create a dummy lower directory %s", lowerDummy) } - mOpts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lower, upper, work) + // lowerdir needs ":" to be escaped: https://github.com/moby/moby/issues/40939#issuecomment-627098286 + lowerEscaped := strings.ReplaceAll(lower, ":", "\\:") + mOpts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerEscaped, upper, work) // docker/pkg/mount.Mount() requires procfs to be mounted. So we use syscall.Mount() directly instead. if err := syscall.Mount("overlay", merged, "overlay", uintptr(0), mOpts); err != nil { return "", errors.Wrapf(err, "failed to mount overlay (%s) on %s", mOpts, merged) diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go index 1eec912b7b0b..d626336032ba 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go @@ -7,6 +7,7 @@ import ( "errors" "os" "path/filepath" + "strings" "syscall" "github.com/docker/docker/pkg/idtools" @@ -26,7 +27,7 @@ func fixVolumePathPrefix(srcPath string) string { // can't use filepath.Join(srcPath,include) because this will clean away // a trailing "." or "/" which may be important. 
func getWalkRoot(srcPath string, include string) string { - return srcPath + string(filepath.Separator) + include + return strings.TrimSuffix(srcPath, string(filepath.Separator)) + string(filepath.Separator) + include } // CanonicalTarNameForPath returns platform-specific filepath diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools.go b/vendor/github.com/docker/docker/pkg/idtools/idtools.go index 230422eac827..b3af7a4226e6 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools.go +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools.go @@ -4,7 +4,6 @@ import ( "bufio" "fmt" "os" - "sort" "strconv" "strings" ) @@ -203,8 +202,6 @@ func (i *IdentityMapping) GIDs() []IDMap { func createIDMap(subidRanges ranges) []IDMap { idMap := []IDMap{} - // sort the ranges by lowest ID first - sort.Sort(subidRanges) containerID := 0 for _, idrange := range subidRanges { idMap = append(idMap, IDMap{ diff --git a/vendor/github.com/docker/docker/pkg/mount/mount.go b/vendor/github.com/docker/docker/pkg/mount/mount.go index 4afd63c427b3..be0631c630ca 100644 --- a/vendor/github.com/docker/docker/pkg/mount/mount.go +++ b/vendor/github.com/docker/docker/pkg/mount/mount.go @@ -102,13 +102,13 @@ func Mounted(mountpoint string) (bool, error) { // specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See // flags.go for supported option flags. func Mount(device, target, mType, options string) error { - flag, _ := parseOptions(options) + flag, data := parseOptions(options) if flag&REMOUNT != REMOUNT { if mounted, err := Mounted(target); err != nil || mounted { return err } } - return ForceMount(device, target, mType, options) + return mount(device, target, mType, uintptr(flag), data) } // ForceMount will mount a filesystem according to the specified configuration, diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go index c1dba01fc31c..fe6e3ddba132 100644 --- a/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go +++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go @@ -7,16 +7,21 @@ import ( "os" "strconv" "strings" + + "github.com/pkg/errors" ) func parseInfoFile(r io.Reader, filter FilterFunc) ([]*Info, error) { s := bufio.NewScanner(r) out := []*Info{} + var err error for s.Scan() { - if err := s.Err(); err != nil { + if err = s.Err(); err != nil { return nil, err } /* + See http://man7.org/linux/man-pages/man5/proc.5.html + 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11) @@ -52,8 +57,15 @@ func parseInfoFile(r io.Reader, filter FilterFunc) ([]*Info, error) { p.Major, _ = strconv.Atoi(mm[0]) p.Minor, _ = strconv.Atoi(mm[1]) - p.Root = fields[3] - p.Mountpoint = fields[4] + p.Root, err = strconv.Unquote(`"` + fields[3] + `"`) + if err != nil { + return nil, errors.Wrapf(err, "Parsing '%s' failed: unable to unquote root field", fields[3]) + } + + p.Mountpoint, err = strconv.Unquote(`"` + fields[4] + `"`) + if err != nil { + return nil, errors.Wrapf(err, "Parsing '%s' failed: unable to unquote mount point field", fields[4]) + } p.Opts = fields[5] var skip, stop bool diff --git a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go index 8a100f0bc85a..db3882874aea 100644 --- a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go +++ b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go @@ -3,49 
+3,49 @@ package mount // import "github.com/docker/docker/pkg/mount" // MakeShared ensures a mounted filesystem has the SHARED mount option enabled. // See the supported options in flags.go for further reference. func MakeShared(mountPoint string) error { - return ensureMountedAs(mountPoint, "shared") + return ensureMountedAs(mountPoint, SHARED) } // MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled. // See the supported options in flags.go for further reference. func MakeRShared(mountPoint string) error { - return ensureMountedAs(mountPoint, "rshared") + return ensureMountedAs(mountPoint, RSHARED) } // MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled. // See the supported options in flags.go for further reference. func MakePrivate(mountPoint string) error { - return ensureMountedAs(mountPoint, "private") + return ensureMountedAs(mountPoint, PRIVATE) } // MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option // enabled. See the supported options in flags.go for further reference. func MakeRPrivate(mountPoint string) error { - return ensureMountedAs(mountPoint, "rprivate") + return ensureMountedAs(mountPoint, RPRIVATE) } // MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled. // See the supported options in flags.go for further reference. func MakeSlave(mountPoint string) error { - return ensureMountedAs(mountPoint, "slave") + return ensureMountedAs(mountPoint, SLAVE) } // MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled. // See the supported options in flags.go for further reference. func MakeRSlave(mountPoint string) error { - return ensureMountedAs(mountPoint, "rslave") + return ensureMountedAs(mountPoint, RSLAVE) } // MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option // enabled. See the supported options in flags.go for further reference. func MakeUnbindable(mountPoint string) error { - return ensureMountedAs(mountPoint, "unbindable") + return ensureMountedAs(mountPoint, UNBINDABLE) } // MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount // option enabled. See the supported options in flags.go for further reference. 
func MakeRUnbindable(mountPoint string) error {
-	return ensureMountedAs(mountPoint, "runbindable")
+	return ensureMountedAs(mountPoint, RUNBINDABLE)
 }
 
 // MakeMount ensures that the file or directory given is a mount point,
@@ -59,13 +59,13 @@ func MakeMount(mnt string) error {
 		return nil
 	}
 
-	return Mount(mnt, mnt, "none", "bind")
+	return mount(mnt, mnt, "none", uintptr(BIND), "")
 }
 
-func ensureMountedAs(mountPoint, options string) error {
-	if err := MakeMount(mountPoint); err != nil {
+func ensureMountedAs(mnt string, flags int) error {
+	if err := MakeMount(mnt); err != nil {
 		return err
 	}
 
-	return ForceMount("", mountPoint, "none", options)
+	return mount("", mnt, "none", uintptr(flags), "")
 }
diff --git a/vendor/github.com/docker/docker/pkg/system/init_windows.go b/vendor/github.com/docker/docker/pkg/system/init_windows.go
index 7f675012854b..f303aa906392 100644
--- a/vendor/github.com/docker/docker/pkg/system/init_windows.go
+++ b/vendor/github.com/docker/docker/pkg/system/init_windows.go
@@ -3,6 +3,7 @@ package system // import "github.com/docker/docker/pkg/system"
 import (
 	"os"
 
+	"github.com/Microsoft/hcsshim/osversion"
 	"github.com/sirupsen/logrus"
 )
 
@@ -15,10 +16,10 @@ var (
 	containerdRuntimeSupported = false
 )
 
-// InitLCOW sets whether LCOW is supported or not
+// InitLCOW sets whether LCOW is supported or not. Requires RS5+
 func InitLCOW(experimental bool) {
 	v := GetOSVersion()
-	if experimental && v.Build >= 16299 {
+	if experimental && v.Build >= osversion.RS5 {
 		lcowSupported = true
 	}
 }
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_linux.go b/vendor/github.com/docker/docker/pkg/system/stat_linux.go
index 98c9eb18d18d..17d5d131a3c8 100644
--- a/vendor/github.com/docker/docker/pkg/system/stat_linux.go
+++ b/vendor/github.com/docker/docker/pkg/system/stat_linux.go
@@ -8,7 +8,8 @@ func fromStatT(s *syscall.Stat_t) (*StatT, error) {
 		mode: s.Mode,
 		uid:  s.Uid,
 		gid:  s.Gid,
-		rdev: s.Rdev,
+		// the type is 32bit on mips
+		rdev: uint64(s.Rdev), // nolint: unconvert
 		mtim: s.Mtim}, nil
 }
 
diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go
index 66d4895b27ab..d4f1a57fb06e 100644
--- a/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go
+++ b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go
@@ -6,19 +6,28 @@ import "golang.org/x/sys/unix"
 // and associated with the given path in the file system.
 // It returns a nil slice and a nil error if the xattr is not set.
 func Lgetxattr(path string, attr string) ([]byte, error) {
+	// Start with a 128 length byte array
 	dest := make([]byte, 128)
 	sz, errno := unix.Lgetxattr(path, attr, dest)
-	if errno == unix.ENODATA {
+
+	switch {
+	case errno == unix.ENODATA:
 		return nil, nil
-	}
-	if errno == unix.ERANGE {
+	case errno == unix.ERANGE:
+		// 128 byte array might just not be good enough. A dummy buffer is used
+		// to get the real size of the xattrs on disk
+		sz, errno = unix.Lgetxattr(path, attr, []byte{})
+		if errno != nil {
+			return nil, errno
+		}
 		dest = make([]byte, sz)
 		sz, errno = unix.Lgetxattr(path, attr, dest)
-	}
-	if errno != nil {
+		if errno != nil {
+			return nil, errno
+		}
+	case errno != nil:
 		return nil, errno
 	}
-
 	return dest[:sz], nil
 }
diff --git a/vendor/github.com/docker/go-connections/nat/nat.go b/vendor/github.com/docker/go-connections/nat/nat.go
index 4d5f5ae63afd..bb7e4e336950 100644
--- a/vendor/github.com/docker/go-connections/nat/nat.go
+++ b/vendor/github.com/docker/go-connections/nat/nat.go
@@ -113,7 +113,7 @@ func SplitProtoPort(rawPort string) (string, string) {
 }
 
 func validateProto(proto string) bool {
-	for _, availableProto := range []string{"tcp", "udp"} {
+	for _, availableProto := range []string{"tcp", "udp", "sctp"} {
 		if availableProto == proto {
 			return true
 		}
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go b/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go
index 9ca974539a77..1ff81c333c36 100644
--- a/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go
+++ b/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go
@@ -4,7 +4,6 @@ package tlsconfig
 
 import (
 	"crypto/x509"
-
 )
 
 // SystemCertPool returns a new empty cert pool,
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config.go b/vendor/github.com/docker/go-connections/tlsconfig/config.go
index 1b31bbb8b1b9..0ef3fdcb4690 100644
--- a/vendor/github.com/docker/go-connections/tlsconfig/config.go
+++ b/vendor/github.com/docker/go-connections/tlsconfig/config.go
@@ -46,8 +46,6 @@ var acceptedCBCCiphers = []uint16{
 	tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
 	tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
 	tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
-	tls.TLS_RSA_WITH_AES_256_CBC_SHA,
-	tls.TLS_RSA_WITH_AES_128_CBC_SHA,
 }
 
 // DefaultServerAcceptedCiphers should be used by code which already has a crypto/tls
@@ -65,22 +63,34 @@ var allTLSVersions = map[uint16]struct{}{
 }
 
 // ServerDefault returns a secure-enough TLS configuration for the server TLS configuration.
-func ServerDefault() *tls.Config {
-	return &tls.Config{
-		// Avoid fallback to SSL protocols < TLS1.0
-		MinVersion:               tls.VersionTLS10,
+func ServerDefault(ops ...func(*tls.Config)) *tls.Config {
+	tlsconfig := &tls.Config{
+		// Avoid fallback by default to SSL protocols < TLS1.2
+		MinVersion:               tls.VersionTLS12,
 		PreferServerCipherSuites: true,
 		CipherSuites:             DefaultServerAcceptedCiphers,
 	}
+
+	for _, op := range ops {
+		op(tlsconfig)
+	}
+
+	return tlsconfig
 }
 
 // ClientDefault returns a secure-enough TLS configuration for the client TLS configuration.
-func ClientDefault() *tls.Config {
-	return &tls.Config{
+func ClientDefault(ops ...func(*tls.Config)) *tls.Config {
+	tlsconfig := &tls.Config{
 		// Prefer TLS1.2 as the client minimum
 		MinVersion:   tls.VersionTLS12,
 		CipherSuites: clientCipherSuites,
 	}
+
+	for _, op := range ops {
+		op(tlsconfig)
+	}
+
+	return tlsconfig
 }
 
 // certPool returns an X.509 certificate pool from `caFile`, the certificate file.
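The tlsconfig hunk above converts `ServerDefault` and `ClientDefault` into variadic functional-option constructors and raises the default server minimum to TLS 1.2. Below is a minimal sketch of how a caller could use the new option parameter; the TLS 1.3 override is purely illustrative and is not something this diff itself adds.

```go
package main

import (
	"crypto/tls"
	"fmt"

	"github.com/docker/go-connections/tlsconfig"
)

func main() {
	// With no options, the new default enforces TLS 1.2 as the minimum.
	base := tlsconfig.ServerDefault()
	fmt.Println(base.MinVersion == tls.VersionTLS12) // true

	// Options are applied in order after the defaults, so callers can
	// adjust individual fields without rebuilding the whole config.
	strict := tlsconfig.ServerDefault(func(c *tls.Config) {
		c.MinVersion = tls.VersionTLS13 // hypothetical stricter policy
	})
	fmt.Println(strict.MinVersion == tls.VersionTLS13) // true
}
```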
diff --git a/vendor/github.com/form3tech-oss/jwt-go/.gitignore b/vendor/github.com/form3tech-oss/jwt-go/.gitignore
new file mode 100644
index 000000000000..c0e81a8d9267
--- /dev/null
+++ b/vendor/github.com/form3tech-oss/jwt-go/.gitignore
@@ -0,0 +1,5 @@
+.DS_Store
+bin
+.idea/
+
+
diff --git a/vendor/github.com/form3tech-oss/jwt-go/.travis.yml b/vendor/github.com/form3tech-oss/jwt-go/.travis.yml
new file mode 100644
index 000000000000..3c7fb7e1ae64
--- /dev/null
+++ b/vendor/github.com/form3tech-oss/jwt-go/.travis.yml
@@ -0,0 +1,12 @@
+language: go
+
+script:
+  - go vet ./...
+  - go test -v ./...
+
+go:
+  - 1.12
+  - 1.13
+  - 1.14
+  - 1.15
+  - tip
diff --git a/vendor/github.com/form3tech-oss/jwt-go/LICENSE b/vendor/github.com/form3tech-oss/jwt-go/LICENSE
new file mode 100644
index 000000000000..df83a9c2f019
--- /dev/null
+++ b/vendor/github.com/form3tech-oss/jwt-go/LICENSE
@@ -0,0 +1,8 @@
+Copyright (c) 2012 Dave Grijalva
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
diff --git a/vendor/github.com/form3tech-oss/jwt-go/MIGRATION_GUIDE.md b/vendor/github.com/form3tech-oss/jwt-go/MIGRATION_GUIDE.md
new file mode 100644
index 000000000000..7fc1f793cbc4
--- /dev/null
+++ b/vendor/github.com/form3tech-oss/jwt-go/MIGRATION_GUIDE.md
@@ -0,0 +1,97 @@
+## Migration Guide from v2 -> v3
+
+Version 3 adds several new, frequently requested features. To do so, it introduces a few breaking changes. We've worked to keep these as minimal as possible. This guide explains the breaking changes and how you can quickly update your code.
+
+### `Token.Claims` is now an interface type
+
+The most requested feature from the 2.0 version of this library was the ability to provide a custom type to the JSON parser for claims. This was implemented by introducing a new interface, `Claims`, to replace `map[string]interface{}`. We also included two concrete implementations of `Claims`: `MapClaims` and `StandardClaims`.
+
+`MapClaims` is an alias for `map[string]interface{}` with built in validation behavior. It is the default claims type when using `Parse`. The usage is unchanged except you must type cast the claims property.
+
+The old example for parsing a token looked like this:
+
+```go
+	if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil {
+		fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"])
+	}
+```
+
+is now directly mapped to...
+
+```go
+	if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil {
+		claims := token.Claims.(jwt.MapClaims)
+		fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"])
+	}
+```
+
+`StandardClaims` is designed to be embedded in your custom type. You can supply a custom claims type with the new `ParseWithClaims` function. Here's an example of using a custom claims type.
+
+```go
+	type MyCustomClaims struct {
+		User string
+		*StandardClaims
+	}
+
+	if token, err := jwt.ParseWithClaims(tokenString, &MyCustomClaims{}, keyLookupFunc); err == nil {
+		claims := token.Claims.(*MyCustomClaims)
+		fmt.Printf("Token for user %v expires %v", claims.User, claims.StandardClaims.ExpiresAt)
+	}
+```
+
+### `ParseFromRequest` has been moved
+
+To keep this library focused on the tokens without becoming overburdened with complex request processing logic, `ParseFromRequest` and its new companion `ParseFromRequestWithClaims` have been moved to a subpackage, `request`. The method signatures have also been augmented to receive a new argument: `Extractor`.
+
+`Extractors` do the work of picking the token string out of a request. The interface is simple and composable.
+
+This simple parsing example:
+
+```go
+	if token, err := jwt.ParseFromRequest(req, keyLookupFunc); err == nil {
+		fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"])
+	}
+```
+
+is directly mapped to:
+
+```go
+	if token, err := request.ParseFromRequest(req, request.OAuth2Extractor, keyLookupFunc); err == nil {
+		claims := token.Claims.(jwt.MapClaims)
+		fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"])
+	}
+```
+
+There are several concrete `Extractor` types provided for your convenience:
+
+* `HeaderExtractor` will search a list of headers until one contains content.
+* `ArgumentExtractor` will search a list of keys in request query and form arguments until one contains content.
+* `MultiExtractor` will try a list of `Extractors` in order until one returns content.
+* `AuthorizationHeaderExtractor` will look in the `Authorization` header for a `Bearer` token.
+* `OAuth2Extractor` searches the places an OAuth2 token would be specified (per the spec): the `Authorization` header and the `access_token` argument.
+* `PostExtractionFilter` wraps an `Extractor`, allowing you to process the content before it's parsed. A simple example is stripping the `Bearer ` text from a header.
+
+
+### RSA signing methods no longer accept `[]byte` keys
+
+Due to a [critical vulnerability](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/), we've decided the convenience of accepting `[]byte` instead of `rsa.PublicKey` or `rsa.PrivateKey` isn't worth the risk of misuse.
+
+To replace this behavior, we've added two helper methods: `ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error)` and `ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error)`. These are just simple helpers for unpacking PEM encoded PKCS1 and PKCS8 keys. If your keys are encoded any other way, all you need to do is convert them to the `crypto/rsa` package's types.
+
+```go
+	func keyLookupFunc(token *jwt.Token) (interface{}, error) {
+		// Don't forget to validate the alg is what you expect:
+		if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok {
+			return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"])
+		}
+
+		// Look up key
+		key, err := lookupPublicKey(token.Header["kid"])
+		if err != nil {
+			return nil, err
+		}
+
+		// Unpack key from PEM encoded PKCS8
+		return jwt.ParseRSAPublicKeyFromPEM(key)
+	}
+```
diff --git a/vendor/github.com/form3tech-oss/jwt-go/README.md b/vendor/github.com/form3tech-oss/jwt-go/README.md
new file mode 100644
index 000000000000..d7749077fde6
--- /dev/null
+++ b/vendor/github.com/form3tech-oss/jwt-go/README.md
@@ -0,0 +1,104 @@
+# jwt-go
+
+[![Build Status](https://travis-ci.org/dgrijalva/jwt-go.svg?branch=master)](https://travis-ci.org/dgrijalva/jwt-go)
+[![GoDoc](https://godoc.org/github.com/dgrijalva/jwt-go?status.svg)](https://godoc.org/github.com/dgrijalva/jwt-go)
+
+A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html)
+
+**NEW VERSION COMING:** There have been a lot of improvements suggested since the version 3.0.0 released in 2016. I'm working now on cutting two different releases: 3.2.0 will contain any non-breaking changes or enhancements. 4.0.0 will follow shortly which will include breaking changes. See the 4.0.0 milestone to get an idea of what's coming. If you have other ideas, or would like to participate in 4.0.0, now's the time. If you depend on this library and don't want to be interrupted, I recommend you use your dependency management tool to pin to version 3.
+
+**SECURITY NOTICE:** Some older versions of Go have a security issue in the crypto/elliptic package. The recommendation is to upgrade to at least 1.8.3. See issue #216 for more detail.
+
+**SECURITY NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided.
+
+## What the heck is a JWT?
+
+JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens.
+
+In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in OAuth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded. The last part is the signature, encoded the same way.
+
+The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which encryption method was used for signing and what key was used.
+
+The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [the RFC](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html) for information about reserved keys and the proper way to add your own.
+
+## What's in the box?
+
+This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own.
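The vendored README above describes the sign/verify support but defers all examples to godoc. Here is a self-contained sketch of the HS256 round trip against this package's API; `jwt.NewWithClaims` is assumed from the package's token.go, which is not shown in this diff, and the secret and claim values are illustrative only.

```go
package main

import (
	"fmt"
	"time"

	jwt "github.com/form3tech-oss/jwt-go"
)

func main() {
	secret := []byte("illustrative-secret")

	// Build and sign a token with the HMAC-SHA256 method.
	token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{
		"user": "alice",
		"exp":  time.Now().Add(time.Hour).Unix(),
	})
	signed, err := token.SignedString(secret)
	if err != nil {
		panic(err)
	}

	// Parse it back; the keyfunc must verify the alg before returning the key.
	parsed, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
			return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"])
		}
		return secret, nil
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed.Valid, parsed.Claims.(jwt.MapClaims)["user"])
}
```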
+
+## Examples
+
+See [the project documentation](https://godoc.org/github.com/dgrijalva/jwt-go) for examples of usage:
+
+* [Simple example of parsing and validating a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-Parse--Hmac)
+* [Simple example of building and signing a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-New--Hmac)
+* [Directory of Examples](https://godoc.org/github.com/dgrijalva/jwt-go#pkg-examples)
+
+## Extensions
+
+This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`.
+
+Here's an example of an extension that integrates with multiple Google Cloud Platform signing tools (AppEngine, IAM API, Cloud KMS): https://github.com/someone1/gcp-jwt-go
+
+## Compliance
+
+This library was last reviewed to comply with [RFC 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences:
+
+* In order to protect against accidental use of [Unsecured JWTs](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#UnsecuredJWT), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key.
+
+## Project Status & Versioning
+
+This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason).
+
+This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases).
+
+While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package path instead: `gopkg.in/dgrijalva/jwt-go.v3`. It will do the right thing WRT semantic versioning.
+
+**BREAKING CHANGES:**
+* Version 3.0.0 includes _a lot_ of changes from the 2.x line, including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code.
+
+## Usage Tips
+
+### Signing vs Encryption
+
+A token is simply a JSON object that is signed by its author. This tells you exactly two things about the data:
+
+* The author of the token was in possession of the signing secret
+* The data has not been modified since it was signed
+
+It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library.
+
+### Choosing a Signing Method
+
+There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. The principal design decision is most likely going to be symmetric vs asymmetric.
+
+Symmetric signing methods, such as HMAC, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret.
+They are also slightly faster to compute, though this is rarely enough to matter. Symmetric signing methods work best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation.
+
+Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification.
+
+### Signing Methods and Key Types
+
+Each signing method expects a different object type for its signing keys. See the package documentation for details. Here are the most common ones:
+
+* The [HMAC signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expects `[]byte` values for signing and validation
+* The [RSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expects `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation
+* The [ECDSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expects `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation
+
+### JWT and OAuth
+
+It's worth mentioning that OAuth and JWT are not the same thing. A JWT is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, because JWT is the most common type of bearer token used in OAuth2 authentication.
+
+Without going too far down the rabbit hole, here's a description of the interaction of these technologies:
+
+* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth.
+* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token.
+* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL.
+
+### Troubleshooting
+
+This library uses descriptive error messages whenever possible. If you are not getting the expected result, have a look at the errors. The most common place people get stuck is providing the correct type of key to the parser. See the above section on signing methods and key types.
+
+## More
+
+Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go).
+
+The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation.
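The key-type list above pairs each method family with its expected Go key types. Here is a short sketch of the RS256 path using the PEM helper this fork exposes; `key.pem` is a hypothetical file path, `jwt.NewWithClaims` is assumed from the package's token.go (not shown in this diff), and note that `StandardClaims.Audience` is `[]string` in the form3tech-oss fork, as the vendored claims.go below shows.

```go
package main

import (
	"fmt"
	"io/ioutil"

	jwt "github.com/form3tech-oss/jwt-go"
)

func main() {
	// RS256 signs with *rsa.PrivateKey; the PEM helper unpacks PKCS1/PKCS8 keys.
	pemBytes, err := ioutil.ReadFile("key.pem") // hypothetical key file
	if err != nil {
		panic(err)
	}
	key, err := jwt.ParseRSAPrivateKeyFromPEM(pemBytes)
	if err != nil {
		panic(err)
	}

	token := jwt.NewWithClaims(jwt.SigningMethodRS256, &jwt.StandardClaims{
		Issuer:   "example-issuer",
		Audience: []string{"example-audience"}, // []string in this fork
	})
	signed, err := token.SignedString(key)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(signed) > 0)
}
```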
diff --git a/vendor/github.com/form3tech-oss/jwt-go/VERSION_HISTORY.md b/vendor/github.com/form3tech-oss/jwt-go/VERSION_HISTORY.md
new file mode 100644
index 000000000000..6370298313a6
--- /dev/null
+++ b/vendor/github.com/form3tech-oss/jwt-go/VERSION_HISTORY.md
@@ -0,0 +1,118 @@
+## `jwt-go` Version History
+
+#### 3.2.0
+
+* Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation
+* HMAC signing method returns `ErrInvalidKeyType` instead of `ErrInvalidKey` where appropriate
+* Added options to `request.ParseFromRequest`, which allows for an arbitrary list of modifiers to parsing behavior. The initial set includes `WithClaims` and `WithParser`. Existing usage of this function will continue to work as before.
+* Deprecated `ParseFromRequestWithClaims` to simplify the API in the future.
+
+#### 3.1.0
+
+* Improvements to the `jwt` command line tool
+* Added `SkipClaimsValidation` option to `Parser`
+* Documentation updates
+
+#### 3.0.0
+
+* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code
+  * Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods.
+  * `ParseFromRequest` has been moved to the `request` subpackage and its usage has changed
+  * The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is an alias for `map[string]interface{}`. This makes it possible to use a custom type when decoding claims.
+* Other Additions and Changes
+  * Added `Claims` interface type to allow users to decode the claims into a custom type
+  * Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into.
+  * Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage
+  * Added `ParseFromRequestWithClaims` which is the `FromRequest` equivalent of `ParseWithClaims`
+  * Added new interface type `Extractor`, which is used for extracting JWT strings from http requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`.
+  * Added several new, more specific, validation errors to the error type bitmask
+  * Moved examples from the README to executable example files
+  * Signing method registry is now thread safe
+  * Added a new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or the JSON parser)
+
+#### 2.7.0
+
+This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes.
+
+* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying
+* Error text for expired tokens includes how long it's been expired
+* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM`
+* Documentation updates
+
+#### 2.6.0
+
+* Exposed inner error within ValidationError
+* Fixed validation errors when using UseJSONNumber flag
+* Added several unit tests
+
+#### 2.5.0
+
+* Added support for signing method none. You shouldn't use this. The API tries to make this clear.
+* Updated/fixed some documentation
+* Added a more helpful error message when trying to parse tokens that begin with `BEARER `
+
+#### 2.4.0
+
+* Added new type, Parser, to allow for configuration of various parsing parameters
+  * You can now specify a list of valid signing methods. Anything outside this set will be rejected.
+  * You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON
+* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go)
+* Fixed some bugs with ECDSA parsing
+
+#### 2.3.0
+
+* Added support for ECDSA signing methods
+* Added support for RSA PSS signing methods (requires go v1.4)
+
+#### 2.2.0
+
+* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. The result will now be the parsed token and an error, instead of a panic.
+
+#### 2.1.0
+
+Backwards compatible API change that was missed in 2.0.0.
+
+* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte`
+
+#### 2.0.0
+
+There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change.
+
+The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibility has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`.
+
+It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`.
+
+* **Compatibility Breaking Changes**
+  * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct`
+  * `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct`
+  * `KeyFunc` now returns `interface{}` instead of `[]byte`
+  * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key
+  * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key
+* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type.
+  * Added public package global `SigningMethodHS256`
+  * Added public package global `SigningMethodHS384`
+  * Added public package global `SigningMethodHS512`
+* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type.
+  * Added public package global `SigningMethodRS256`
+  * Added public package global `SigningMethodRS384`
+  * Added public package global `SigningMethodRS512`
+* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged.
+* Refactored the RSA implementation to be easier to read
+* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM`
+
+#### 1.0.2
+
+* Fixed bug in parsing public keys from certificates
+* Added more tests around the parsing of keys for RS256
+* Code refactoring in RS256 implementation.
No functional changes
+
+#### 1.0.1
+
+* Fixed panic if RS256 signing method was passed an invalid key
+
+#### 1.0.0
+
+* First versioned release
+* API stabilized
+* Supports creating, signing, parsing, and validating JWT tokens
+* Supports RS256 and HS256 signing methods
\ No newline at end of file
diff --git a/vendor/github.com/form3tech-oss/jwt-go/claims.go b/vendor/github.com/form3tech-oss/jwt-go/claims.go
new file mode 100644
index 000000000000..624890666c66
--- /dev/null
+++ b/vendor/github.com/form3tech-oss/jwt-go/claims.go
@@ -0,0 +1,136 @@
+package jwt
+
+import (
+	"crypto/subtle"
+	"fmt"
+	"time"
+)
+
+// For a type to be a Claims object, it must just have a Valid method that determines
+// if the token is invalid for any supported reason
+type Claims interface {
+	Valid() error
+}
+
+// Structured version of the Claims section, as referenced at
+// https://tools.ietf.org/html/rfc7519#section-4.1
+// See examples for how to use this with your own claim types
+type StandardClaims struct {
+	Audience  []string `json:"aud,omitempty"`
+	ExpiresAt int64    `json:"exp,omitempty"`
+	Id        string   `json:"jti,omitempty"`
+	IssuedAt  int64    `json:"iat,omitempty"`
+	Issuer    string   `json:"iss,omitempty"`
+	NotBefore int64    `json:"nbf,omitempty"`
+	Subject   string   `json:"sub,omitempty"`
+}
+
+// Validates time-based claims "exp", "iat", and "nbf".
+// There is no accounting for clock skew.
+// As well, if any of the above claims are not in the token, it will still
+// be considered a valid claim.
+func (c StandardClaims) Valid() error {
+	vErr := new(ValidationError)
+	now := TimeFunc().Unix()
+
+	// The claims below are optional, by default, so if they are set to the
+	// default value in Go, let's not fail the verification for them.
+	if !c.VerifyExpiresAt(now, false) {
+		delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0))
+		vErr.Inner = fmt.Errorf("token is expired by %v", delta)
+		vErr.Errors |= ValidationErrorExpired
+	}
+
+	if !c.VerifyIssuedAt(now, false) {
+		vErr.Inner = fmt.Errorf("token used before issued")
+		vErr.Errors |= ValidationErrorIssuedAt
+	}
+
+	if !c.VerifyNotBefore(now, false) {
+		vErr.Inner = fmt.Errorf("token is not valid yet")
+		vErr.Errors |= ValidationErrorNotValidYet
+	}
+
+	if vErr.valid() {
+		return nil
+	}
+
+	return vErr
+}
+
+// Compares the aud claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool {
+	return verifyAud(c.Audience, cmp, req)
+}
+
+// Compares the exp claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool {
+	return verifyExp(c.ExpiresAt, cmp, req)
+}
+
+// Compares the iat claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool {
+	return verifyIat(c.IssuedAt, cmp, req)
+}
+
+// Compares the iss claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool {
+	return verifyIss(c.Issuer, cmp, req)
+}
+
+// Compares the nbf claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool {
+	return verifyNbf(c.NotBefore, cmp, req)
+}
+
+// ----- helpers
+
+func verifyAud(aud []string, cmp string, required bool) bool {
+	if len(aud) == 0 {
+		return !required
+	}
+
+	for _, a := range aud {
+		if subtle.ConstantTimeCompare([]byte(a), []byte(cmp)) != 0 {
+			return true
+		}
+	}
+	return false
+}
+
+func verifyExp(exp int64, now int64, required bool) bool {
+	if exp == 0 {
+		return !required
+	}
+	return now <= exp
+}
+
+func verifyIat(iat int64, now int64, required bool) bool {
+	if iat == 0 {
+		return !required
+	}
+	return now >= iat
+}
+
+func verifyIss(iss string, cmp string, required bool) bool {
+	if iss == "" {
+		return !required
+	}
+	return subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0
+}
+
+func verifyNbf(nbf int64, now int64, required bool) bool {
+	if nbf == 0 {
+		return !required
+	}
+	return now >= nbf
+}
diff --git a/vendor/github.com/form3tech-oss/jwt-go/doc.go b/vendor/github.com/form3tech-oss/jwt-go/doc.go
new file mode 100644
index 000000000000..a86dc1a3b348
--- /dev/null
+++ b/vendor/github.com/form3tech-oss/jwt-go/doc.go
@@ -0,0 +1,4 @@
+// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html
+//
+// See README.md for more info.
+package jwt
diff --git a/vendor/github.com/form3tech-oss/jwt-go/ecdsa.go b/vendor/github.com/form3tech-oss/jwt-go/ecdsa.go
new file mode 100644
index 000000000000..f977381240e3
--- /dev/null
+++ b/vendor/github.com/form3tech-oss/jwt-go/ecdsa.go
@@ -0,0 +1,148 @@
+package jwt
+
+import (
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/rand"
+	"errors"
+	"math/big"
+)
+
+var (
+	// Sadly this is missing from crypto/ecdsa compared to crypto/rsa
+	ErrECDSAVerification = errors.New("crypto/ecdsa: verification error")
+)
+
+// Implements the ECDSA family of signing methods.
+// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification
+type SigningMethodECDSA struct {
+	Name      string
+	Hash      crypto.Hash
+	KeySize   int
+	CurveBits int
+}
+
+// Specific instances for EC256 and company
+var (
+	SigningMethodES256 *SigningMethodECDSA
+	SigningMethodES384 *SigningMethodECDSA
+	SigningMethodES512 *SigningMethodECDSA
+)
+
+func init() {
+	// ES256
+	SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256}
+	RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod {
+		return SigningMethodES256
+	})
+
+	// ES384
+	SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384}
+	RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod {
+		return SigningMethodES384
+	})
+
+	// ES512
+	SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521}
+	RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod {
+		return SigningMethodES512
+	})
+}
+
+func (m *SigningMethodECDSA) Alg() string {
+	return m.Name
+}
+
+// Implements the Verify method from SigningMethod
+// For this verify method, key must be an *ecdsa.PublicKey struct
+func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error {
+	var err error
+
+	// Decode the signature
+	var sig []byte
+	if sig, err = DecodeSegment(signature); err != nil {
+		return err
+	}
+
+	// Get the key
+	var ecdsaKey *ecdsa.PublicKey
+	switch k := key.(type) {
+	case *ecdsa.PublicKey:
+		ecdsaKey = k
+	default:
+		return ErrInvalidKeyType
+	}
+
+	if len(sig) != 2*m.KeySize {
+		return ErrECDSAVerification
+	}
+
+	r := big.NewInt(0).SetBytes(sig[:m.KeySize])
+	s := big.NewInt(0).SetBytes(sig[m.KeySize:])
+
+	// Create hasher
+	if !m.Hash.Available() {
+		return ErrHashUnavailable
+	}
+	hasher := m.Hash.New()
+	hasher.Write([]byte(signingString))
+
+	// Verify the signature
+	if ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s) {
+		return nil
+	}
+	return ErrECDSAVerification
+}
+
+// Implements the Sign method from SigningMethod
+// For this signing method, key must be an *ecdsa.PrivateKey struct
+func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) {
+	// Get the key
+	var ecdsaKey *ecdsa.PrivateKey
+	switch k := key.(type) {
+	case *ecdsa.PrivateKey:
+		ecdsaKey = k
+	default:
+		return "", ErrInvalidKeyType
+	}
+
+	// Create the hasher
+	if !m.Hash.Available() {
+		return "", ErrHashUnavailable
+	}
+
+	hasher := m.Hash.New()
+	hasher.Write([]byte(signingString))
+
+	// Sign the string and return r, s
+	if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil {
+		curveBits := ecdsaKey.Curve.Params().BitSize
+
+		if m.CurveBits != curveBits {
+			return "", ErrInvalidKey
+		}
+
+		keyBytes := curveBits / 8
+		if curveBits%8 > 0 {
+			keyBytes++
+		}
+
+		// We serialize the outputs (r and s) into big-endian byte arrays and pad
+		// them with zeros on the left to make sure the sizes work out. Both arrays
+		// must be keyBytes long, and the output must be 2*keyBytes long.
+		rBytes := r.Bytes()
+		rBytesPadded := make([]byte, keyBytes)
+		copy(rBytesPadded[keyBytes-len(rBytes):], rBytes)
+
+		sBytes := s.Bytes()
+		sBytesPadded := make([]byte, keyBytes)
+		copy(sBytesPadded[keyBytes-len(sBytes):], sBytes)
+
+		out := append(rBytesPadded, sBytesPadded...)
+
+		return EncodeSegment(out), nil
+	} else {
+		return "", err
+	}
+}
diff --git a/vendor/github.com/form3tech-oss/jwt-go/ecdsa_utils.go b/vendor/github.com/form3tech-oss/jwt-go/ecdsa_utils.go
new file mode 100644
index 000000000000..db9f4be7d8ea
--- /dev/null
+++ b/vendor/github.com/form3tech-oss/jwt-go/ecdsa_utils.go
@@ -0,0 +1,69 @@
+package jwt
+
+import (
+	"crypto/ecdsa"
+	"crypto/x509"
+	"encoding/pem"
+	"errors"
+)
+
+var (
+	ErrNotECPublicKey  = errors.New("Key is not a valid ECDSA public key")
+	ErrNotECPrivateKey = errors.New("Key is not a valid ECDSA private key")
+)
+
+// Parse a PEM encoded Elliptic Curve private key (SEC 1 or PKCS8 format)
+func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) {
+	var err error
+
+	// Parse PEM block
+	var block *pem.Block
+	if block, _ = pem.Decode(key); block == nil {
+		return nil, ErrKeyMustBePEMEncoded
+	}
+
+	// Parse the key
+	var parsedKey interface{}
+	if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil {
+		if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
+			return nil, err
+		}
+	}
+
+	var pkey *ecdsa.PrivateKey
+	var ok bool
+	if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok {
+		return nil, ErrNotECPrivateKey
+	}
+
+	return pkey, nil
+}
+
+// Parse a PEM encoded ECDSA public key (PKIX format or an X.509 certificate)
+func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) {
+	var err error
+
+	// Parse PEM block
+	var block *pem.Block
+	if block, _ = pem.Decode(key); block == nil {
+		return nil, ErrKeyMustBePEMEncoded
+	}
+
+	// Parse the key
+	var parsedKey interface{}
+	if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
+		if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
+			parsedKey = cert.PublicKey
+		} else {
+			return nil, err
+		}
+	}
+
+	var pkey *ecdsa.PublicKey
+	var ok bool
+	if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok {
+		return nil, ErrNotECPublicKey
+	}
+
+	return pkey, nil
+}
diff --git a/vendor/github.com/form3tech-oss/jwt-go/errors.go b/vendor/github.com/form3tech-oss/jwt-go/errors.go
new file mode 100644
index 000000000000..1c93024aad2e
--- /dev/null
+++ b/vendor/github.com/form3tech-oss/jwt-go/errors.go
@@ -0,0 +1,59 @@
+package jwt
+
+import (
+	"errors"
+)
+
+// Error constants
+var (
+	ErrInvalidKey      = errors.New("key is invalid")
+	ErrInvalidKeyType  = errors.New("key is of invalid type")
+	ErrHashUnavailable = errors.New("the requested hash function is unavailable")
+)
+
+// The errors that might occur when parsing and validating a token
+const (
+	ValidationErrorMalformed        uint32 = 1 << iota // Token is malformed
+	ValidationErrorUnverifiable                        // Token could not be verified because of signing problems
+	ValidationErrorSignatureInvalid                    // Signature validation failed
+
+	// Standard Claim validation errors
+	ValidationErrorAudience      // AUD validation failed
+	ValidationErrorExpired       // EXP validation failed
+	ValidationErrorIssuedAt      // IAT validation failed
+	ValidationErrorIssuer        // ISS validation failed
+	ValidationErrorNotValidYet   // NBF validation failed
+	ValidationErrorId            // JTI validation failed
+	ValidationErrorClaimsInvalid // Generic claims validation error
+)
+
+// Helper for constructing a ValidationError with a string error message
+func NewValidationError(errorText string, errorFlags uint32) *ValidationError {
+	return &ValidationError{
+		text:   errorText,
+		Errors: errorFlags,
+	}
+}
+
+// The error from Parse if token is not valid
+type ValidationError struct {
+	Inner  error  // stores the error returned by external dependencies, i.e.: KeyFunc
+	Errors uint32 // bitfield. See the ValidationError... constants
+	text   string // errors that do not have a valid error just have text
+}
+
+// ValidationError implements the error interface
+func (e ValidationError) Error() string {
+	if e.Inner != nil {
+		return e.Inner.Error()
+	} else if e.text != "" {
+		return e.text
+	} else {
+		return "token is invalid"
+	}
+}
+
+// No errors
+func (e *ValidationError) valid() bool {
+	return e.Errors == 0
+}
diff --git a/vendor/github.com/form3tech-oss/jwt-go/hmac.go b/vendor/github.com/form3tech-oss/jwt-go/hmac.go
new file mode 100644
index 000000000000..addbe5d40182
--- /dev/null
+++ b/vendor/github.com/form3tech-oss/jwt-go/hmac.go
@@ -0,0 +1,95 @@
+package jwt
+
+import (
+	"crypto"
+	"crypto/hmac"
+	"errors"
+)
+
+// Implements the HMAC-SHA family of signing methods.
+// Expects key type of []byte for both signing and validation
+type SigningMethodHMAC struct {
+	Name string
+	Hash crypto.Hash
+}
+
+// Specific instances for HS256 and company
+var (
+	SigningMethodHS256  *SigningMethodHMAC
+	SigningMethodHS384  *SigningMethodHMAC
+	SigningMethodHS512  *SigningMethodHMAC
+	ErrSignatureInvalid = errors.New("signature is invalid")
+)
+
+func init() {
+	// HS256
+	SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256}
+	RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod {
+		return SigningMethodHS256
+	})
+
+	// HS384
+	SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384}
+	RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod {
+		return SigningMethodHS384
+	})
+
+	// HS512
+	SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512}
+	RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod {
+		return SigningMethodHS512
+	})
+}
+
+func (m *SigningMethodHMAC) Alg() string {
+	return m.Name
+}
+
+// Verify the signature of HSXXX tokens. Returns nil if the signature is valid.
+func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error {
+	// Verify the key is the right type
+	keyBytes, ok := key.([]byte)
+	if !ok {
+		return ErrInvalidKeyType
+	}
+
+	// Decode signature, for comparison
+	sig, err := DecodeSegment(signature)
+	if err != nil {
+		return err
+	}
+
+	// Can we use the specified hashing method?
+	if !m.Hash.Available() {
+		return ErrHashUnavailable
+	}
+
+	// This signing method is symmetric, so we validate the signature
+	// by reproducing the signature from the signing string and key, then
+	// comparing that against the provided signature.
+	hasher := hmac.New(m.Hash.New, keyBytes)
+	hasher.Write([]byte(signingString))
+	if !hmac.Equal(sig, hasher.Sum(nil)) {
+		return ErrSignatureInvalid
+	}
+
+	// No validation errors. Signature is good.
+	return nil
+}
+
+// Implements the Sign method from SigningMethod for this signing method.
+// Key must be []byte
+func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) {
+	if keyBytes, ok := key.([]byte); ok {
+		if !m.Hash.Available() {
+			return "", ErrHashUnavailable
+		}
+
+		hasher := hmac.New(m.Hash.New, keyBytes)
+		hasher.Write([]byte(signingString))
+
+		return EncodeSegment(hasher.Sum(nil)), nil
+	}
+
+	return "", ErrInvalidKeyType
+}
diff --git a/vendor/github.com/form3tech-oss/jwt-go/map_claims.go b/vendor/github.com/form3tech-oss/jwt-go/map_claims.go
new file mode 100644
index 000000000000..90ab6bea350a
--- /dev/null
+++ b/vendor/github.com/form3tech-oss/jwt-go/map_claims.go
@@ -0,0 +1,102 @@
+package jwt
+
+import (
+	"encoding/json"
+	"errors"
+)
+
+// Claims type that uses the map[string]interface{} for JSON decoding
+// This is the default claims type if you don't supply one
+type MapClaims map[string]interface{}
+
+// Compares the aud claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (m MapClaims) VerifyAudience(cmp string, req bool) bool {
+	var aud []string
+	switch v := m["aud"].(type) {
+	case string:
+		aud = append(aud, v)
+	case []string:
+		aud = v
+	case []interface{}:
+		// encoding/json decodes JSON arrays into []interface{}, not []string,
+		// so this is the case an unmarshalled multi-audience token actually hits
+		for _, a := range v {
+			vs, ok := a.(string)
+			if !ok {
+				return false
+			}
+			aud = append(aud, vs)
+		}
+	}
+
+	return verifyAud(aud, cmp, req)
+}
+
+// Compares the exp claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool {
+	switch exp := m["exp"].(type) {
+	case float64:
+		return verifyExp(int64(exp), cmp, req)
+	case json.Number:
+		v, _ := exp.Int64()
+		return verifyExp(v, cmp, req)
+	}
+	return !req
+}
+
+// Compares the iat claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool {
+	switch iat := m["iat"].(type) {
+	case float64:
+		return verifyIat(int64(iat), cmp, req)
+	case json.Number:
+		v, _ := iat.Int64()
+		return verifyIat(v, cmp, req)
+	}
+	return !req
+}
+
+// Compares the iss claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (m MapClaims) VerifyIssuer(cmp string, req bool) bool {
+	iss, _ := m["iss"].(string)
+	return verifyIss(iss, cmp, req)
+}
+
+// Compares the nbf claim against cmp.
+// If required is false, this method will return true if the value matches or is unset
+func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool {
+	switch nbf := m["nbf"].(type) {
+	case float64:
+		return verifyNbf(int64(nbf), cmp, req)
+	case json.Number:
+		v, _ := nbf.Int64()
+		return verifyNbf(v, cmp, req)
+	}
+	return !req
+}
+
+// Validates time-based claims "exp", "iat", and "nbf".
+// There is no accounting for clock skew.
+// As well, if any of the above claims are not in the token, it will still
+// be considered a valid claim.
+func (m MapClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc().Unix() + + if m.VerifyExpiresAt(now, false) == false { + vErr.Inner = errors.New("Token is expired") + vErr.Errors |= ValidationErrorExpired + } + + if m.VerifyIssuedAt(now, false) == false { + vErr.Inner = errors.New("Token used before issued") + vErr.Errors |= ValidationErrorIssuedAt + } + + if m.VerifyNotBefore(now, false) == false { + vErr.Inner = errors.New("Token is not valid yet") + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} diff --git a/vendor/github.com/form3tech-oss/jwt-go/none.go b/vendor/github.com/form3tech-oss/jwt-go/none.go new file mode 100644 index 000000000000..f04d189d067b --- /dev/null +++ b/vendor/github.com/form3tech-oss/jwt-go/none.go @@ -0,0 +1,52 @@ +package jwt + +// Implements the none signing method. This is required by the spec +// but you probably should never use it. +var SigningMethodNone *signingMethodNone + +const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed" + +var NoneSignatureTypeDisallowedError error + +type signingMethodNone struct{} +type unsafeNoneMagicConstant string + +func init() { + SigningMethodNone = &signingMethodNone{} + NoneSignatureTypeDisallowedError = NewValidationError("'none' signature type is not allowed", ValidationErrorSignatureInvalid) + + RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod { + return SigningMethodNone + }) +} + +func (m *signingMethodNone) Alg() string { + return "none" +} + +// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key +func (m *signingMethodNone) Verify(signingString, signature string, key interface{}) (err error) { + // Key must be UnsafeAllowNoneSignatureType to prevent accidentally + // accepting 'none' signing method + if _, ok := key.(unsafeNoneMagicConstant); !ok { + return NoneSignatureTypeDisallowedError + } + // If signing method is none, signature must be an empty string + if signature != "" { + return NewValidationError( + "'none' signing method with non-empty signature", + ValidationErrorSignatureInvalid, + ) + } + + // Accept 'none' signing method. + return nil +} + +// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key +func (m *signingMethodNone) Sign(signingString string, key interface{}) (string, error) { + if _, ok := key.(unsafeNoneMagicConstant); ok { + return "", nil + } + return "", NoneSignatureTypeDisallowedError +} diff --git a/vendor/github.com/form3tech-oss/jwt-go/parser.go b/vendor/github.com/form3tech-oss/jwt-go/parser.go new file mode 100644 index 000000000000..d6901d9adb52 --- /dev/null +++ b/vendor/github.com/form3tech-oss/jwt-go/parser.go @@ -0,0 +1,148 @@ +package jwt + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" +) + +type Parser struct { + ValidMethods []string // If populated, only these methods will be considered valid + UseJSONNumber bool // Use JSON Number format in JSON decoder + SkipClaimsValidation bool // Skip claims validation during token parsing +} + +// Parse, validate, and return a token. +// keyFunc will receive the parsed token and should return the key for validating. 
+// If everything is kosher, err will be nil +func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { + return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc) +} + +func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { + token, parts, err := p.ParseUnverified(tokenString, claims) + if err != nil { + return token, err + } + + // Verify signing method is in the required set + if p.ValidMethods != nil { + var signingMethodValid = false + var alg = token.Method.Alg() + for _, m := range p.ValidMethods { + if m == alg { + signingMethodValid = true + break + } + } + if !signingMethodValid { + // signing method is not in the listed set + return token, NewValidationError(fmt.Sprintf("signing method %v is invalid", alg), ValidationErrorSignatureInvalid) + } + } + + // Lookup key + var key interface{} + if keyFunc == nil { + // keyFunc was not provided. short circuiting validation + return token, NewValidationError("no Keyfunc was provided.", ValidationErrorUnverifiable) + } + if key, err = keyFunc(token); err != nil { + // keyFunc returned an error + if ve, ok := err.(*ValidationError); ok { + return token, ve + } + return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable} + } + + vErr := &ValidationError{} + + // Validate Claims + if !p.SkipClaimsValidation { + if err := token.Claims.Valid(); err != nil { + + // If the Claims Valid returned an error, check if it is a validation error, + // If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set + if e, ok := err.(*ValidationError); !ok { + vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid} + } else { + vErr = e + } + } + } + + // Perform validation + token.Signature = parts[2] + if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil { + vErr.Inner = err + vErr.Errors |= ValidationErrorSignatureInvalid + } + + if vErr.valid() { + token.Valid = true + return token, nil + } + + return token, vErr +} + +// WARNING: Don't use this method unless you know what you're doing +// +// This method parses the token but doesn't validate the signature. It's only +// ever useful in cases where you know the signature is valid (because it has +// been checked previously in the stack) and you want to extract values from +// it. 
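+//
+// Hypothetical sketch: peek at the unverified header, e.g. to pick a
+// verification key by `kid`, before doing a real parse:
+//
+//	tok, _, err := new(Parser).ParseUnverified(tokenString, MapClaims{})
+//	if err == nil {
+//		kid, _ := tok.Header["kid"].(string)
+//		_ = kid // look up the key for kid, then call ParseWithClaims
+//	}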
+func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) {
+	parts = strings.Split(tokenString, ".")
+	if len(parts) != 3 {
+		return nil, parts, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed)
+	}
+
+	token = &Token{Raw: tokenString}
+
+	// parse Header
+	var headerBytes []byte
+	if headerBytes, err = DecodeSegment(parts[0]); err != nil {
+		if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") {
+			return token, parts, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed)
+		}
+		return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+	}
+	if err = json.Unmarshal(headerBytes, &token.Header); err != nil {
+		return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+	}
+
+	// parse Claims
+	var claimBytes []byte
+	token.Claims = claims
+
+	if claimBytes, err = DecodeSegment(parts[1]); err != nil {
+		return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+	}
+	dec := json.NewDecoder(bytes.NewBuffer(claimBytes))
+	if p.UseJSONNumber {
+		dec.UseNumber()
+	}
+	// JSON Decode. Special case for map type to avoid weird pointer behavior
+	if c, ok := token.Claims.(MapClaims); ok {
+		err = dec.Decode(&c)
+	} else {
+		err = dec.Decode(&claims)
+	}
+	// Handle decode error
+	if err != nil {
+		return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+	}
+
+	// Lookup signature method
+	if method, ok := token.Header["alg"].(string); ok {
+		if token.Method = GetSigningMethod(method); token.Method == nil {
+			return token, parts, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable)
+		}
+	} else {
+		return token, parts, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable)
+	}
+
+	return token, parts, nil
+}
diff --git a/vendor/github.com/form3tech-oss/jwt-go/rsa.go b/vendor/github.com/form3tech-oss/jwt-go/rsa.go
new file mode 100644
index 000000000000..e4caf1ca4a11
--- /dev/null
+++ b/vendor/github.com/form3tech-oss/jwt-go/rsa.go
@@ -0,0 +1,101 @@
+package jwt
+
+import (
+	"crypto"
+	"crypto/rand"
+	"crypto/rsa"
+)
+
+// Implements the RSA family of signing methods
+// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation
+type SigningMethodRSA struct {
+	Name string
+	Hash crypto.Hash
+}
+
+// Specific instances for RS256 and company
+var (
+	SigningMethodRS256 *SigningMethodRSA
+	SigningMethodRS384 *SigningMethodRSA
+	SigningMethodRS512 *SigningMethodRSA
+)
+
+func init() {
+	// RS256
+	SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256}
+	RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod {
+		return SigningMethodRS256
+	})
+
+	// RS384
+	SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384}
+	RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod {
+		return SigningMethodRS384
+	})
+
+	// RS512
+	SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512}
+	RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod {
+		return SigningMethodRS512
+	})
+}
+
+func (m *SigningMethodRSA) Alg() string {
+	return m.Name
+}
+
+// Implements the Verify method from SigningMethod
+// For this signing method, the key must be an *rsa.PublicKey structure.
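+//
+// Sketch of assumed usage: supply the *rsa.PublicKey through a Keyfunc so
+// that Parse ends up calling this Verify for RS256/RS384/RS512 tokens
+// (`rsaPublicKey` is a placeholder for a key loaded elsewhere):
+//
+//	token, err := Parse(tokenString, func(t *Token) (interface{}, error) {
+//		if _, ok := t.Method.(*SigningMethodRSA); !ok {
+//			return nil, fmt.Errorf("unexpected alg %v", t.Header["alg"])
+//		}
+//		return rsaPublicKey, nil
+//	})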
+func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error {
+	var err error
+
+	// Decode the signature
+	var sig []byte
+	if sig, err = DecodeSegment(signature); err != nil {
+		return err
+	}
+
+	var rsaKey *rsa.PublicKey
+	var ok bool
+
+	if rsaKey, ok = key.(*rsa.PublicKey); !ok {
+		return ErrInvalidKeyType
+	}
+
+	// Create hasher
+	if !m.Hash.Available() {
+		return ErrHashUnavailable
+	}
+	hasher := m.Hash.New()
+	hasher.Write([]byte(signingString))
+
+	// Verify the signature
+	return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig)
+}
+
+// Implements the Sign method from SigningMethod
+// For this signing method, the key must be an *rsa.PrivateKey structure.
+func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) {
+	var rsaKey *rsa.PrivateKey
+	var ok bool
+
+	// Validate type of key
+	if rsaKey, ok = key.(*rsa.PrivateKey); !ok {
+		return "", ErrInvalidKey
+	}
+
+	// Create the hasher
+	if !m.Hash.Available() {
+		return "", ErrHashUnavailable
+	}
+
+	hasher := m.Hash.New()
+	hasher.Write([]byte(signingString))
+
+	// Sign the string and return the encoded bytes
+	if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil {
+		return EncodeSegment(sigBytes), nil
+	} else {
+		return "", err
+	}
+}
diff --git a/vendor/github.com/form3tech-oss/jwt-go/rsa_pss.go b/vendor/github.com/form3tech-oss/jwt-go/rsa_pss.go
new file mode 100644
index 000000000000..c01470864803
--- /dev/null
+++ b/vendor/github.com/form3tech-oss/jwt-go/rsa_pss.go
@@ -0,0 +1,142 @@
+// +build go1.4
+
+package jwt
+
+import (
+	"crypto"
+	"crypto/rand"
+	"crypto/rsa"
+)
+
+// Implements the RSAPSS family of signing methods
+type SigningMethodRSAPSS struct {
+	*SigningMethodRSA
+	Options *rsa.PSSOptions
+	// VerifyOptions is optional. If set, it overrides Options for rsa.VerifyPSS.
+	// Used to accept tokens signed with rsa.PSSSaltLengthAuto, which doesn't follow
+	// https://tools.ietf.org/html/rfc7518#section-3.5 but was used previously.
+	// See https://github.com/dgrijalva/jwt-go/issues/285#issuecomment-437451244 for details.
+	VerifyOptions *rsa.PSSOptions
+}
+
+// Specific instances for RS/PS and company.
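+//
+// Illustrative sketch of signing with one of them, assuming `rsaPrivateKey`
+// is an *rsa.PrivateKey:
+//
+//	t := NewWithClaims(SigningMethodPS256, MapClaims{"sub": "1234567890"})
+//	signed, err := t.SignedString(rsaPrivateKey)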
+var ( + SigningMethodPS256 *SigningMethodRSAPSS + SigningMethodPS384 *SigningMethodRSAPSS + SigningMethodPS512 *SigningMethodRSAPSS +) + +func init() { + // PS256 + SigningMethodPS256 = &SigningMethodRSAPSS{ + SigningMethodRSA: &SigningMethodRSA{ + Name: "PS256", + Hash: crypto.SHA256, + }, + Options: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthEqualsHash, + }, + VerifyOptions: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + }, + } + RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod { + return SigningMethodPS256 + }) + + // PS384 + SigningMethodPS384 = &SigningMethodRSAPSS{ + SigningMethodRSA: &SigningMethodRSA{ + Name: "PS384", + Hash: crypto.SHA384, + }, + Options: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthEqualsHash, + }, + VerifyOptions: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + }, + } + RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod { + return SigningMethodPS384 + }) + + // PS512 + SigningMethodPS512 = &SigningMethodRSAPSS{ + SigningMethodRSA: &SigningMethodRSA{ + Name: "PS512", + Hash: crypto.SHA512, + }, + Options: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthEqualsHash, + }, + VerifyOptions: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + }, + } + RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod { + return SigningMethodPS512 + }) +} + +// Implements the Verify method from SigningMethod +// For this verify method, key must be an rsa.PublicKey struct +func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + var rsaKey *rsa.PublicKey + switch k := key.(type) { + case *rsa.PublicKey: + rsaKey = k + default: + return ErrInvalidKey + } + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + opts := m.Options + if m.VerifyOptions != nil { + opts = m.VerifyOptions + } + + return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, opts) +} + +// Implements the Sign method from SigningMethod +// For this signing method, key must be an rsa.PrivateKey struct +func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) { + var rsaKey *rsa.PrivateKey + + switch k := key.(type) { + case *rsa.PrivateKey: + rsaKey = k + default: + return "", ErrInvalidKeyType + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return the encoded bytes + if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil { + return EncodeSegment(sigBytes), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/form3tech-oss/jwt-go/rsa_utils.go b/vendor/github.com/form3tech-oss/jwt-go/rsa_utils.go new file mode 100644 index 000000000000..14c78c292a94 --- /dev/null +++ b/vendor/github.com/form3tech-oss/jwt-go/rsa_utils.go @@ -0,0 +1,101 @@ +package jwt + +import ( + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key must be a PEM encoded PKCS1 or PKCS8 key") + ErrNotRSAPrivateKey = errors.New("Key is not a valid RSA private key") + ErrNotRSAPublicKey = errors.New("Key is not a valid RSA public key") +) + +// Parse PEM encoded PKCS1 or PKCS8 private key +func 
ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + var parsedKey interface{} + if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { + return nil, err + } + } + + var pkey *rsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { + return nil, ErrNotRSAPrivateKey + } + + return pkey, nil +} + +// Parse PEM encoded PKCS1 or PKCS8 private key protected with password +func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + var parsedKey interface{} + + var blockDecrypted []byte + if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil { + return nil, err + } + + if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil { + return nil, err + } + } + + var pkey *rsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { + return nil, ErrNotRSAPrivateKey + } + + return pkey, nil +} + +// Parse PEM encoded PKCS1 or PKCS8 public key +func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + var pkey *rsa.PublicKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PublicKey); !ok { + return nil, ErrNotRSAPublicKey + } + + return pkey, nil +} diff --git a/vendor/github.com/form3tech-oss/jwt-go/signing_method.go b/vendor/github.com/form3tech-oss/jwt-go/signing_method.go new file mode 100644 index 000000000000..ed1f212b21e1 --- /dev/null +++ b/vendor/github.com/form3tech-oss/jwt-go/signing_method.go @@ -0,0 +1,35 @@ +package jwt + +import ( + "sync" +) + +var signingMethods = map[string]func() SigningMethod{} +var signingMethodLock = new(sync.RWMutex) + +// Implement SigningMethod to add new methods for signing or verifying tokens. +type SigningMethod interface { + Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid + Sign(signingString string, key interface{}) (string, error) // Returns encoded signature or error + Alg() string // returns the alg identifier for this method (example: 'HS256') +} + +// Register the "alg" name and a factory function for signing method. 
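+// For example (hypothetical sketch, `myMethod` standing in for a custom
+// SigningMethod implementation):
+//
+//	RegisterSigningMethod("XY256", func() SigningMethod { return myMethod })
+//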
+// This is typically done during init() in the method's implementation +func RegisterSigningMethod(alg string, f func() SigningMethod) { + signingMethodLock.Lock() + defer signingMethodLock.Unlock() + + signingMethods[alg] = f +} + +// Get a signing method from an "alg" string +func GetSigningMethod(alg string) (method SigningMethod) { + signingMethodLock.RLock() + defer signingMethodLock.RUnlock() + + if methodF, ok := signingMethods[alg]; ok { + method = methodF() + } + return +} diff --git a/vendor/github.com/form3tech-oss/jwt-go/token.go b/vendor/github.com/form3tech-oss/jwt-go/token.go new file mode 100644 index 000000000000..d637e0867c65 --- /dev/null +++ b/vendor/github.com/form3tech-oss/jwt-go/token.go @@ -0,0 +1,108 @@ +package jwt + +import ( + "encoding/base64" + "encoding/json" + "strings" + "time" +) + +// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time). +// You can override it to use another time value. This is useful for testing or if your +// server uses a different time zone than your tokens. +var TimeFunc = time.Now + +// Parse methods use this callback function to supply +// the key for verification. The function receives the parsed, +// but unverified Token. This allows you to use properties in the +// Header of the token (such as `kid`) to identify which key to use. +type Keyfunc func(*Token) (interface{}, error) + +// A JWT Token. Different fields will be used depending on whether you're +// creating or parsing/verifying a token. +type Token struct { + Raw string // The raw token. Populated when you Parse a token + Method SigningMethod // The signing method used or to be used + Header map[string]interface{} // The first segment of the token + Claims Claims // The second segment of the token + Signature string // The third segment of the token. Populated when you Parse a token + Valid bool // Is the token valid? Populated when you Parse/Verify a token +} + +// Create a new Token. Takes a signing method +func New(method SigningMethod) *Token { + return NewWithClaims(method, MapClaims{}) +} + +func NewWithClaims(method SigningMethod, claims Claims) *Token { + return &Token{ + Header: map[string]interface{}{ + "typ": "JWT", + "alg": method.Alg(), + }, + Claims: claims, + Method: method, + } +} + +// Get the complete, signed token +func (t *Token) SignedString(key interface{}) (string, error) { + var sig, sstr string + var err error + if sstr, err = t.SigningString(); err != nil { + return "", err + } + if sig, err = t.Method.Sign(sstr, key); err != nil { + return "", err + } + return strings.Join([]string{sstr, sig}, "."), nil +} + +// Generate the signing string. This is the +// most expensive part of the whole deal. Unless you +// need this for something special, just go straight for +// the SignedString. +func (t *Token) SigningString() (string, error) { + var err error + parts := make([]string, 2) + for i, _ := range parts { + var jsonValue []byte + if i == 0 { + if jsonValue, err = json.Marshal(t.Header); err != nil { + return "", err + } + } else { + if jsonValue, err = json.Marshal(t.Claims); err != nil { + return "", err + } + } + + parts[i] = EncodeSegment(jsonValue) + } + return strings.Join(parts, "."), nil +} + +// Parse, validate, and return a token. +// keyFunc will receive the parsed token and should return the key for validating. 
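+// For example (illustrative sketch, with `hmacSecret` standing in for a
+// shared []byte secret):
+//
+//	token, err := Parse(tokenString, func(t *Token) (interface{}, error) {
+//		return hmacSecret, nil
+//	})
+//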
+// If everything is kosher, err will be nil +func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { + return new(Parser).Parse(tokenString, keyFunc) +} + +func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { + return new(Parser).ParseWithClaims(tokenString, claims, keyFunc) +} + +// Encode JWT specific base64url encoding with padding stripped +func EncodeSegment(seg []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=") +} + +// Decode JWT specific base64url encoding with padding stripped +func DecodeSegment(seg string) ([]byte, error) { + if l := len(seg) % 4; l > 0 { + seg += strings.Repeat("=", 4-l) + } + + return base64.URLEncoding.DecodeString(seg) +} diff --git a/vendor/github.com/go-openapi/analysis/.codecov.yml b/vendor/github.com/go-openapi/analysis/.codecov.yml deleted file mode 100644 index 841c4281e23d..000000000000 --- a/vendor/github.com/go-openapi/analysis/.codecov.yml +++ /dev/null @@ -1,5 +0,0 @@ -coverage: - status: - patch: - default: - target: 80% diff --git a/vendor/github.com/go-openapi/analysis/.gitignore b/vendor/github.com/go-openapi/analysis/.gitignore deleted file mode 100644 index 87c3bd3e66e0..000000000000 --- a/vendor/github.com/go-openapi/analysis/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -secrets.yml -coverage.out -coverage.txt -*.cov -.idea diff --git a/vendor/github.com/go-openapi/analysis/.golangci.yml b/vendor/github.com/go-openapi/analysis/.golangci.yml deleted file mode 100644 index 76af8ab1c876..000000000000 --- a/vendor/github.com/go-openapi/analysis/.golangci.yml +++ /dev/null @@ -1,27 +0,0 @@ -linters-settings: - govet: - check-shadowing: true - golint: - min-confidence: 0 - gocyclo: - min-complexity: 40 - maligned: - suggest-new: true - dupl: - threshold: 100 - goconst: - min-len: 2 - min-occurrences: 4 - -linters: - enable-all: true - disable: - - maligned - - lll - - gochecknoglobals - - gochecknoinits - # scopelint is useful, but also reports false positives - # that unfortunately can't be disabled. So we disable the - # linter rather than changing code that works. - # see: https://github.com/kyoh86/scopelint/issues/4 - - scopelint diff --git a/vendor/github.com/go-openapi/analysis/.travis.yml b/vendor/github.com/go-openapi/analysis/.travis.yml deleted file mode 100644 index 7ecf865c21c4..000000000000 --- a/vendor/github.com/go-openapi/analysis/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -after_success: -- bash <(curl -s https://codecov.io/bash) -go: -- 1.11.x -- 1.12.x -install: -- GO111MODULE=off go get -u gotest.tools/gotestsum -env: -- GO111MODULE=on -language: go -notifications: - slack: - secure: Sf7kZf7ZGbnwWUMpffHwMu5A0cHkLK2MYY32LNTPj4+/3qC3Ghl7+9v4TSLOqOlCwdRNjOGblAq7s+GDJed6/xgRQl1JtCi1klzZNrYX4q01pgTPvvGcwbBkIYgeMaPeIRcK9OZnud7sRXdttozgTOpytps2U6Js32ip7uj5mHSg2ub0FwoSJwlS6dbezZ8+eDhoha0F/guY99BEwx8Bd+zROrT2TFGsSGOFGN6wFc7moCqTHO/YkWib13a2QNXqOxCCVBy/lt76Wp+JkeFppjHlzs/2lP3EAk13RIUAaesdEUHvIHrzCyNJEd3/+KO2DzsWOYfpktd+KBCvgaYOsoo7ubdT3IROeAegZdCgo/6xgCEsmFc9ZcqCfN5yNx2A+BZ2Vwmpws+bQ1E1+B5HDzzaiLcYfG4X2O210QVGVDLWsv1jqD+uPYeHY2WRfh5ZsIUFvaqgUEnwHwrK44/8REAhQavt1QAj5uJpsRd7CkRVPWRNK+yIky+wgbVUFEchRNmS55E7QWf+W4+4QZkQi7vUTMc9nbTUu2Es9NfvfudOpM2wZbn98fjpb/qq/nRv6Bk+ca+7XD5/IgNLMbWp2ouDdzbiHLCOfDUiHiDJhLfFZx9Bwo7ZwfzeOlbrQX66bx7xRKYmOe4DLrXhNcpbsMa8qbfxlZRCmYbubB/Y8h4= -script: -- gotestsum -f short-verbose -- -race -timeout=20m -coverprofile=coverage.txt -covermode=atomic ./... 
diff --git a/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065e37a..000000000000 --- a/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. 
- -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/analysis/README.md b/vendor/github.com/go-openapi/analysis/README.md deleted file mode 100644 index efafdf8fd328..000000000000 --- a/vendor/github.com/go-openapi/analysis/README.md +++ /dev/null @@ -1,9 +0,0 @@ -# OpenAPI initiative analysis [![Build Status](https://travis-ci.org/go-openapi/analysis.svg?branch=master)](https://travis-ci.org/go-openapi/analysis) [![Build status](https://ci.appveyor.com/api/projects/status/x377t5o9ennm847o/branch/master?svg=true)](https://ci.appveyor.com/project/casualjim/go-openapi/analysis/branch/master) [![codecov](https://codecov.io/gh/go-openapi/analysis/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/analysis) -[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/analysis/master/LICENSE) -[![GoDoc](https://godoc.org/github.com/go-openapi/analysis?status.svg)](http://godoc.org/github.com/go-openapi/analysis) -[![GolangCI](https://golangci.com/badges/github.com/go-openapi/analysis.svg)](https://golangci.com) -[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/analysis)](https://goreportcard.com/report/github.com/go-openapi/analysis) - - -A foundational library to analyze an OAI specification document for easier reasoning about the content. diff --git a/vendor/github.com/go-openapi/analysis/analyzer.go b/vendor/github.com/go-openapi/analysis/analyzer.go deleted file mode 100644 index 4d98718c4e62..000000000000 --- a/vendor/github.com/go-openapi/analysis/analyzer.go +++ /dev/null @@ -1,970 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package analysis - -import ( - "fmt" - slashpath "path" - "strconv" - "strings" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/spec" - "github.com/go-openapi/swag" -) - -type referenceAnalysis struct { - schemas map[string]spec.Ref - responses map[string]spec.Ref - parameters map[string]spec.Ref - items map[string]spec.Ref - headerItems map[string]spec.Ref - parameterItems map[string]spec.Ref - allRefs map[string]spec.Ref - pathItems map[string]spec.Ref -} - -func (r *referenceAnalysis) addRef(key string, ref spec.Ref) { - r.allRefs["#"+key] = ref -} - -func (r *referenceAnalysis) addItemsRef(key string, items *spec.Items, location string) { - r.items["#"+key] = items.Ref - r.addRef(key, items.Ref) - if location == "header" { - // NOTE: in swagger 2.0, headers and parameters (but not body param schemas) are simple schemas - // and $ref are not supported here. However it is possible to analyze this. 
- r.headerItems["#"+key] = items.Ref - } else { - r.parameterItems["#"+key] = items.Ref - } -} - -func (r *referenceAnalysis) addSchemaRef(key string, ref SchemaRef) { - r.schemas["#"+key] = ref.Schema.Ref - r.addRef(key, ref.Schema.Ref) -} - -func (r *referenceAnalysis) addResponseRef(key string, resp *spec.Response) { - r.responses["#"+key] = resp.Ref - r.addRef(key, resp.Ref) -} - -func (r *referenceAnalysis) addParamRef(key string, param *spec.Parameter) { - r.parameters["#"+key] = param.Ref - r.addRef(key, param.Ref) -} - -func (r *referenceAnalysis) addPathItemRef(key string, pathItem *spec.PathItem) { - r.pathItems["#"+key] = pathItem.Ref - r.addRef(key, pathItem.Ref) -} - -type patternAnalysis struct { - parameters map[string]string - headers map[string]string - items map[string]string - schemas map[string]string - allPatterns map[string]string -} - -func (p *patternAnalysis) addPattern(key, pattern string) { - p.allPatterns["#"+key] = pattern -} - -func (p *patternAnalysis) addParameterPattern(key, pattern string) { - p.parameters["#"+key] = pattern - p.addPattern(key, pattern) -} - -func (p *patternAnalysis) addHeaderPattern(key, pattern string) { - p.headers["#"+key] = pattern - p.addPattern(key, pattern) -} - -func (p *patternAnalysis) addItemsPattern(key, pattern string) { - p.items["#"+key] = pattern - p.addPattern(key, pattern) -} - -func (p *patternAnalysis) addSchemaPattern(key, pattern string) { - p.schemas["#"+key] = pattern - p.addPattern(key, pattern) -} - -type enumAnalysis struct { - parameters map[string][]interface{} - headers map[string][]interface{} - items map[string][]interface{} - schemas map[string][]interface{} - allEnums map[string][]interface{} -} - -func (p *enumAnalysis) addEnum(key string, enum []interface{}) { - p.allEnums["#"+key] = enum -} - -func (p *enumAnalysis) addParameterEnum(key string, enum []interface{}) { - p.parameters["#"+key] = enum - p.addEnum(key, enum) -} - -func (p *enumAnalysis) addHeaderEnum(key string, enum []interface{}) { - p.headers["#"+key] = enum - p.addEnum(key, enum) -} - -func (p *enumAnalysis) addItemsEnum(key string, enum []interface{}) { - p.items["#"+key] = enum - p.addEnum(key, enum) -} - -func (p *enumAnalysis) addSchemaEnum(key string, enum []interface{}) { - p.schemas["#"+key] = enum - p.addEnum(key, enum) -} - -// New takes a swagger spec object and returns an analyzed spec document. -// The analyzed document contains a number of indices that make it easier to -// reason about semantics of a swagger specification for use in code generation -// or validation etc. -func New(doc *spec.Swagger) *Spec { - a := &Spec{ - spec: doc, - references: referenceAnalysis{}, - patterns: patternAnalysis{}, - enums: enumAnalysis{}, - } - a.reset() - a.initialize() - return a -} - -// Spec is an analyzed specification object. It takes a swagger spec object and turns it into a registry -// with a bunch of utility methods to act on the information in the spec. 
-type Spec struct { - spec *spec.Swagger - consumes map[string]struct{} - produces map[string]struct{} - authSchemes map[string]struct{} - operations map[string]map[string]*spec.Operation - references referenceAnalysis - patterns patternAnalysis - enums enumAnalysis - allSchemas map[string]SchemaRef - allOfs map[string]SchemaRef -} - -func (s *Spec) reset() { - s.consumes = make(map[string]struct{}, 150) - s.produces = make(map[string]struct{}, 150) - s.authSchemes = make(map[string]struct{}, 150) - s.operations = make(map[string]map[string]*spec.Operation, 150) - s.allSchemas = make(map[string]SchemaRef, 150) - s.allOfs = make(map[string]SchemaRef, 150) - s.references.schemas = make(map[string]spec.Ref, 150) - s.references.pathItems = make(map[string]spec.Ref, 150) - s.references.responses = make(map[string]spec.Ref, 150) - s.references.parameters = make(map[string]spec.Ref, 150) - s.references.items = make(map[string]spec.Ref, 150) - s.references.headerItems = make(map[string]spec.Ref, 150) - s.references.parameterItems = make(map[string]spec.Ref, 150) - s.references.allRefs = make(map[string]spec.Ref, 150) - s.patterns.parameters = make(map[string]string, 150) - s.patterns.headers = make(map[string]string, 150) - s.patterns.items = make(map[string]string, 150) - s.patterns.schemas = make(map[string]string, 150) - s.patterns.allPatterns = make(map[string]string, 150) - s.enums.parameters = make(map[string][]interface{}, 150) - s.enums.headers = make(map[string][]interface{}, 150) - s.enums.items = make(map[string][]interface{}, 150) - s.enums.schemas = make(map[string][]interface{}, 150) - s.enums.allEnums = make(map[string][]interface{}, 150) -} - -func (s *Spec) reload() { - s.reset() - s.initialize() -} - -func (s *Spec) initialize() { - for _, c := range s.spec.Consumes { - s.consumes[c] = struct{}{} - } - for _, c := range s.spec.Produces { - s.produces[c] = struct{}{} - } - for _, ss := range s.spec.Security { - for k := range ss { - s.authSchemes[k] = struct{}{} - } - } - for path, pathItem := range s.AllPaths() { - s.analyzeOperations(path, &pathItem) - } - - for name, parameter := range s.spec.Parameters { - refPref := slashpath.Join("/parameters", jsonpointer.Escape(name)) - if parameter.Items != nil { - s.analyzeItems("items", parameter.Items, refPref, "parameter") - } - if parameter.In == "body" && parameter.Schema != nil { - s.analyzeSchema("schema", *parameter.Schema, refPref) - } - if parameter.Pattern != "" { - s.patterns.addParameterPattern(refPref, parameter.Pattern) - } - if len(parameter.Enum) > 0 { - s.enums.addParameterEnum(refPref, parameter.Enum) - } - } - - for name, response := range s.spec.Responses { - refPref := slashpath.Join("/responses", jsonpointer.Escape(name)) - for k, v := range response.Headers { - hRefPref := slashpath.Join(refPref, "headers", k) - if v.Items != nil { - s.analyzeItems("items", v.Items, hRefPref, "header") - } - if v.Pattern != "" { - s.patterns.addHeaderPattern(hRefPref, v.Pattern) - } - if len(v.Enum) > 0 { - s.enums.addHeaderEnum(hRefPref, v.Enum) - } - } - if response.Schema != nil { - s.analyzeSchema("schema", *response.Schema, refPref) - } - } - - for name, schema := range s.spec.Definitions { - s.analyzeSchema(name, schema, "/definitions") - } - // TODO: after analyzing all things and flattening schemas etc - // resolve all the collected references to their final representations - // best put in a separate method because this could get expensive -} - -func (s *Spec) analyzeOperations(path string, pi *spec.PathItem) { - // TODO: 
resolve refs here? - // Currently, operations declared via pathItem $ref are known only after expansion - op := pi - if pi.Ref.String() != "" { - key := slashpath.Join("/paths", jsonpointer.Escape(path)) - s.references.addPathItemRef(key, pi) - } - s.analyzeOperation("GET", path, op.Get) - s.analyzeOperation("PUT", path, op.Put) - s.analyzeOperation("POST", path, op.Post) - s.analyzeOperation("PATCH", path, op.Patch) - s.analyzeOperation("DELETE", path, op.Delete) - s.analyzeOperation("HEAD", path, op.Head) - s.analyzeOperation("OPTIONS", path, op.Options) - for i, param := range op.Parameters { - refPref := slashpath.Join("/paths", jsonpointer.Escape(path), "parameters", strconv.Itoa(i)) - if param.Ref.String() != "" { - s.references.addParamRef(refPref, ¶m) - } - if param.Pattern != "" { - s.patterns.addParameterPattern(refPref, param.Pattern) - } - if len(param.Enum) > 0 { - s.enums.addParameterEnum(refPref, param.Enum) - } - if param.Items != nil { - s.analyzeItems("items", param.Items, refPref, "parameter") - } - if param.Schema != nil { - s.analyzeSchema("schema", *param.Schema, refPref) - } - } -} - -func (s *Spec) analyzeItems(name string, items *spec.Items, prefix, location string) { - if items == nil { - return - } - refPref := slashpath.Join(prefix, name) - s.analyzeItems(name, items.Items, refPref, location) - if items.Ref.String() != "" { - s.references.addItemsRef(refPref, items, location) - } - if items.Pattern != "" { - s.patterns.addItemsPattern(refPref, items.Pattern) - } - if len(items.Enum) > 0 { - s.enums.addItemsEnum(refPref, items.Enum) - } -} - -func (s *Spec) analyzeOperation(method, path string, op *spec.Operation) { - if op == nil { - return - } - - for _, c := range op.Consumes { - s.consumes[c] = struct{}{} - } - for _, c := range op.Produces { - s.produces[c] = struct{}{} - } - for _, ss := range op.Security { - for k := range ss { - s.authSchemes[k] = struct{}{} - } - } - if _, ok := s.operations[method]; !ok { - s.operations[method] = make(map[string]*spec.Operation) - } - s.operations[method][path] = op - prefix := slashpath.Join("/paths", jsonpointer.Escape(path), strings.ToLower(method)) - for i, param := range op.Parameters { - refPref := slashpath.Join(prefix, "parameters", strconv.Itoa(i)) - if param.Ref.String() != "" { - s.references.addParamRef(refPref, ¶m) - } - if param.Pattern != "" { - s.patterns.addParameterPattern(refPref, param.Pattern) - } - if len(param.Enum) > 0 { - s.enums.addParameterEnum(refPref, param.Enum) - } - s.analyzeItems("items", param.Items, refPref, "parameter") - if param.In == "body" && param.Schema != nil { - s.analyzeSchema("schema", *param.Schema, refPref) - } - } - if op.Responses != nil { - if op.Responses.Default != nil { - refPref := slashpath.Join(prefix, "responses", "default") - if op.Responses.Default.Ref.String() != "" { - s.references.addResponseRef(refPref, op.Responses.Default) - } - for k, v := range op.Responses.Default.Headers { - hRefPref := slashpath.Join(refPref, "headers", k) - s.analyzeItems("items", v.Items, hRefPref, "header") - if v.Pattern != "" { - s.patterns.addHeaderPattern(hRefPref, v.Pattern) - } - } - if op.Responses.Default.Schema != nil { - s.analyzeSchema("schema", *op.Responses.Default.Schema, refPref) - } - } - for k, res := range op.Responses.StatusCodeResponses { - refPref := slashpath.Join(prefix, "responses", strconv.Itoa(k)) - if res.Ref.String() != "" { - s.references.addResponseRef(refPref, &res) - } - for k, v := range res.Headers { - hRefPref := slashpath.Join(refPref, 
"headers", k) - s.analyzeItems("items", v.Items, hRefPref, "header") - if v.Pattern != "" { - s.patterns.addHeaderPattern(hRefPref, v.Pattern) - } - if len(v.Enum) > 0 { - s.enums.addHeaderEnum(hRefPref, v.Enum) - } - } - if res.Schema != nil { - s.analyzeSchema("schema", *res.Schema, refPref) - } - } - } -} - -func (s *Spec) analyzeSchema(name string, schema spec.Schema, prefix string) { - refURI := slashpath.Join(prefix, jsonpointer.Escape(name)) - schRef := SchemaRef{ - Name: name, - Schema: &schema, - Ref: spec.MustCreateRef("#" + refURI), - TopLevel: prefix == "/definitions", - } - - s.allSchemas["#"+refURI] = schRef - - if schema.Ref.String() != "" { - s.references.addSchemaRef(refURI, schRef) - } - if schema.Pattern != "" { - s.patterns.addSchemaPattern(refURI, schema.Pattern) - } - if len(schema.Enum) > 0 { - s.enums.addSchemaEnum(refURI, schema.Enum) - } - - for k, v := range schema.Definitions { - s.analyzeSchema(k, v, slashpath.Join(refURI, "definitions")) - } - for k, v := range schema.Properties { - s.analyzeSchema(k, v, slashpath.Join(refURI, "properties")) - } - for k, v := range schema.PatternProperties { - // NOTE: swagger 2.0 does not support PatternProperties. - // However it is possible to analyze this in a schema - s.analyzeSchema(k, v, slashpath.Join(refURI, "patternProperties")) - } - for i, v := range schema.AllOf { - s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "allOf")) - } - if len(schema.AllOf) > 0 { - s.allOfs["#"+refURI] = schRef - } - for i, v := range schema.AnyOf { - // NOTE: swagger 2.0 does not support anyOf constructs. - // However it is possible to analyze this in a schema - s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "anyOf")) - } - for i, v := range schema.OneOf { - // NOTE: swagger 2.0 does not support oneOf constructs. - // However it is possible to analyze this in a schema - s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "oneOf")) - } - if schema.Not != nil { - // NOTE: swagger 2.0 does not support "not" constructs. - // However it is possible to analyze this in a schema - s.analyzeSchema("not", *schema.Not, refURI) - } - if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil { - s.analyzeSchema("additionalProperties", *schema.AdditionalProperties.Schema, refURI) - } - if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil { - // NOTE: swagger 2.0 does not support AdditionalItems. 
- // However it is possible to analyze this in a schema - s.analyzeSchema("additionalItems", *schema.AdditionalItems.Schema, refURI) - } - if schema.Items != nil { - if schema.Items.Schema != nil { - s.analyzeSchema("items", *schema.Items.Schema, refURI) - } - for i, sch := range schema.Items.Schemas { - s.analyzeSchema(strconv.Itoa(i), sch, slashpath.Join(refURI, "items")) - } - } -} - -// SecurityRequirement is a representation of a security requirement for an operation -type SecurityRequirement struct { - Name string - Scopes []string -} - -// SecurityRequirementsFor gets the security requirements for the operation -func (s *Spec) SecurityRequirementsFor(operation *spec.Operation) [][]SecurityRequirement { - if s.spec.Security == nil && operation.Security == nil { - return nil - } - - schemes := s.spec.Security - if operation.Security != nil { - schemes = operation.Security - } - - result := [][]SecurityRequirement{} - for _, scheme := range schemes { - if len(scheme) == 0 { - // append a zero object for anonymous - result = append(result, []SecurityRequirement{{}}) - continue - } - var reqs []SecurityRequirement - for k, v := range scheme { - if v == nil { - v = []string{} - } - reqs = append(reqs, SecurityRequirement{Name: k, Scopes: v}) - } - result = append(result, reqs) - } - return result -} - -// SecurityDefinitionsForRequirements gets the matching security definitions for a set of requirements -func (s *Spec) SecurityDefinitionsForRequirements(requirements []SecurityRequirement) map[string]spec.SecurityScheme { - result := make(map[string]spec.SecurityScheme) - - for _, v := range requirements { - if definition, ok := s.spec.SecurityDefinitions[v.Name]; ok { - if definition != nil { - result[v.Name] = *definition - } - } - } - return result -} - -// SecurityDefinitionsFor gets the matching security definitions for a set of requirements -func (s *Spec) SecurityDefinitionsFor(operation *spec.Operation) map[string]spec.SecurityScheme { - requirements := s.SecurityRequirementsFor(operation) - if len(requirements) == 0 { - return nil - } - - result := make(map[string]spec.SecurityScheme) - for _, reqs := range requirements { - for _, v := range reqs { - if v.Name == "" { - // optional requirement - continue - } - if _, ok := result[v.Name]; ok { - // duplicate requirement - continue - } - if definition, ok := s.spec.SecurityDefinitions[v.Name]; ok { - if definition != nil { - result[v.Name] = *definition - } - } - } - } - return result -} - -// ConsumesFor gets the mediatypes for the operation -func (s *Spec) ConsumesFor(operation *spec.Operation) []string { - - if len(operation.Consumes) == 0 { - cons := make(map[string]struct{}, len(s.spec.Consumes)) - for _, k := range s.spec.Consumes { - cons[k] = struct{}{} - } - return s.structMapKeys(cons) - } - - cons := make(map[string]struct{}, len(operation.Consumes)) - for _, c := range operation.Consumes { - cons[c] = struct{}{} - } - return s.structMapKeys(cons) -} - -// ProducesFor gets the mediatypes for the operation -func (s *Spec) ProducesFor(operation *spec.Operation) []string { - if len(operation.Produces) == 0 { - prod := make(map[string]struct{}, len(s.spec.Produces)) - for _, k := range s.spec.Produces { - prod[k] = struct{}{} - } - return s.structMapKeys(prod) - } - - prod := make(map[string]struct{}, len(operation.Produces)) - for _, c := range operation.Produces { - prod[c] = struct{}{} - } - return s.structMapKeys(prod) -} - -func mapKeyFromParam(param *spec.Parameter) string { - return fmt.Sprintf("%s#%s", param.In, 
fieldNameFromParam(param)) -} - -func fieldNameFromParam(param *spec.Parameter) string { - // TODO: this should be x-go-name - if nm, ok := param.Extensions.GetString("go-name"); ok { - return nm - } - return swag.ToGoName(param.Name) -} - -// ErrorOnParamFunc is a callback function to be invoked -// whenever an error is encountered while resolving references -// on parameters. -// -// This function takes as input the spec.Parameter which triggered the -// error and the error itself. -// -// If the callback function returns false, the calling function should bail. -// -// If it returns true, the calling function should continue evaluating parameters. -// A nil ErrorOnParamFunc must be evaluated as equivalent to panic(). -type ErrorOnParamFunc func(spec.Parameter, error) bool - -func (s *Spec) paramsAsMap(parameters []spec.Parameter, res map[string]spec.Parameter, callmeOnError ErrorOnParamFunc) { - for _, param := range parameters { - pr := param - if pr.Ref.String() != "" { - obj, _, err := pr.Ref.GetPointer().Get(s.spec) - if err != nil { - if callmeOnError != nil { - if callmeOnError(param, fmt.Errorf("invalid reference: %q", pr.Ref.String())) { - continue - } - break - } else { - panic(fmt.Sprintf("invalid reference: %q", pr.Ref.String())) - } - } - if objAsParam, ok := obj.(spec.Parameter); ok { - pr = objAsParam - } else { - if callmeOnError != nil { - if callmeOnError(param, fmt.Errorf("resolved reference is not a parameter: %q", pr.Ref.String())) { - continue - } - break - } else { - panic(fmt.Sprintf("resolved reference is not a parameter: %q", pr.Ref.String())) - } - } - } - res[mapKeyFromParam(&pr)] = pr - } -} - -// ParametersFor the specified operation id. -// -// Assumes parameters properly resolve references if any and that -// such references actually resolve to a parameter object. -// Otherwise, panics. -func (s *Spec) ParametersFor(operationID string) []spec.Parameter { - return s.SafeParametersFor(operationID, nil) -} - -// SafeParametersFor the specified operation id. -// -// Does not assume parameters properly resolve references or that -// such references actually resolve to a parameter object. -// -// Upon error, invoke a ErrorOnParamFunc callback with the erroneous -// parameters. If the callback is set to nil, panics upon errors. -func (s *Spec) SafeParametersFor(operationID string, callmeOnError ErrorOnParamFunc) []spec.Parameter { - gatherParams := func(pi *spec.PathItem, op *spec.Operation) []spec.Parameter { - bag := make(map[string]spec.Parameter) - s.paramsAsMap(pi.Parameters, bag, callmeOnError) - s.paramsAsMap(op.Parameters, bag, callmeOnError) - - var res []spec.Parameter - for _, v := range bag { - res = append(res, v) - } - return res - } - for _, pi := range s.spec.Paths.Paths { - if pi.Get != nil && pi.Get.ID == operationID { - return gatherParams(&pi, pi.Get) - } - if pi.Head != nil && pi.Head.ID == operationID { - return gatherParams(&pi, pi.Head) - } - if pi.Options != nil && pi.Options.ID == operationID { - return gatherParams(&pi, pi.Options) - } - if pi.Post != nil && pi.Post.ID == operationID { - return gatherParams(&pi, pi.Post) - } - if pi.Patch != nil && pi.Patch.ID == operationID { - return gatherParams(&pi, pi.Patch) - } - if pi.Put != nil && pi.Put.ID == operationID { - return gatherParams(&pi, pi.Put) - } - if pi.Delete != nil && pi.Delete.ID == operationID { - return gatherParams(&pi, pi.Delete) - } - } - return nil -} - -// ParamsFor the specified method and path. 
Aggregates them with the defaults etc, so it's all the params that -// apply for the method and path. -// -// Assumes parameters properly resolve references if any and that -// such references actually resolve to a parameter object. -// Otherwise, panics. -func (s *Spec) ParamsFor(method, path string) map[string]spec.Parameter { - return s.SafeParamsFor(method, path, nil) -} - -// SafeParamsFor the specified method and path. Aggregates them with the defaults etc, so it's all the params that -// apply for the method and path. -// -// Does not assume parameters properly resolve references or that -// such references actually resolve to a parameter object. -// -// Upon error, invoke a ErrorOnParamFunc callback with the erroneous -// parameters. If the callback is set to nil, panics upon errors. -func (s *Spec) SafeParamsFor(method, path string, callmeOnError ErrorOnParamFunc) map[string]spec.Parameter { - res := make(map[string]spec.Parameter) - if pi, ok := s.spec.Paths.Paths[path]; ok { - s.paramsAsMap(pi.Parameters, res, callmeOnError) - s.paramsAsMap(s.operations[strings.ToUpper(method)][path].Parameters, res, callmeOnError) - } - return res -} - -// OperationForName gets the operation for the given id -func (s *Spec) OperationForName(operationID string) (string, string, *spec.Operation, bool) { - for method, pathItem := range s.operations { - for path, op := range pathItem { - if operationID == op.ID { - return method, path, op, true - } - } - } - return "", "", nil, false -} - -// OperationFor the given method and path -func (s *Spec) OperationFor(method, path string) (*spec.Operation, bool) { - if mp, ok := s.operations[strings.ToUpper(method)]; ok { - op, fn := mp[path] - return op, fn - } - return nil, false -} - -// Operations gathers all the operations specified in the spec document -func (s *Spec) Operations() map[string]map[string]*spec.Operation { - return s.operations -} - -func (s *Spec) structMapKeys(mp map[string]struct{}) []string { - if len(mp) == 0 { - return nil - } - - result := make([]string, 0, len(mp)) - for k := range mp { - result = append(result, k) - } - return result -} - -// AllPaths returns all the paths in the swagger spec -func (s *Spec) AllPaths() map[string]spec.PathItem { - if s.spec == nil || s.spec.Paths == nil { - return nil - } - return s.spec.Paths.Paths -} - -// OperationIDs gets all the operation ids based on method an dpath -func (s *Spec) OperationIDs() []string { - if len(s.operations) == 0 { - return nil - } - result := make([]string, 0, len(s.operations)) - for method, v := range s.operations { - for p, o := range v { - if o.ID != "" { - result = append(result, o.ID) - } else { - result = append(result, fmt.Sprintf("%s %s", strings.ToUpper(method), p)) - } - } - } - return result -} - -// OperationMethodPaths gets all the operation ids based on method an dpath -func (s *Spec) OperationMethodPaths() []string { - if len(s.operations) == 0 { - return nil - } - result := make([]string, 0, len(s.operations)) - for method, v := range s.operations { - for p := range v { - result = append(result, fmt.Sprintf("%s %s", strings.ToUpper(method), p)) - } - } - return result -} - -// RequiredConsumes gets all the distinct consumes that are specified in the specification document -func (s *Spec) RequiredConsumes() []string { - return s.structMapKeys(s.consumes) -} - -// RequiredProduces gets all the distinct produces that are specified in the specification document -func (s *Spec) RequiredProduces() []string { - return s.structMapKeys(s.produces) -} - -// 
RequiredSecuritySchemes gets all the distinct security schemes that are specified in the swagger spec -func (s *Spec) RequiredSecuritySchemes() []string { - return s.structMapKeys(s.authSchemes) -} - -// SchemaRef is a reference to a schema -type SchemaRef struct { - Name string - Ref spec.Ref - Schema *spec.Schema - TopLevel bool -} - -// SchemasWithAllOf returns schema references to all schemas that are defined -// with an allOf key -func (s *Spec) SchemasWithAllOf() (result []SchemaRef) { - for _, v := range s.allOfs { - result = append(result, v) - } - return -} - -// AllDefinitions returns schema references for all the definitions that were discovered -func (s *Spec) AllDefinitions() (result []SchemaRef) { - for _, v := range s.allSchemas { - result = append(result, v) - } - return -} - -// AllDefinitionReferences returns json refs for all the discovered schemas -func (s *Spec) AllDefinitionReferences() (result []string) { - for _, v := range s.references.schemas { - result = append(result, v.String()) - } - return -} - -// AllParameterReferences returns json refs for all the discovered parameters -func (s *Spec) AllParameterReferences() (result []string) { - for _, v := range s.references.parameters { - result = append(result, v.String()) - } - return -} - -// AllResponseReferences returns json refs for all the discovered responses -func (s *Spec) AllResponseReferences() (result []string) { - for _, v := range s.references.responses { - result = append(result, v.String()) - } - return -} - -// AllPathItemReferences returns the references for all the items -func (s *Spec) AllPathItemReferences() (result []string) { - for _, v := range s.references.pathItems { - result = append(result, v.String()) - } - return -} - -// AllItemsReferences returns the references for all the items in simple schemas (parameters or headers). -// -// NOTE: since Swagger 2.0 forbids $ref in simple params, this should always yield an empty slice for a valid -// Swagger 2.0 spec. 
-func (s *Spec) AllItemsReferences() (result []string) { - for _, v := range s.references.items { - result = append(result, v.String()) - } - return -} - -// AllReferences returns all the references found in the document, with possible duplicates -func (s *Spec) AllReferences() (result []string) { - for _, v := range s.references.allRefs { - result = append(result, v.String()) - } - return -} - -// AllRefs returns all the unique references found in the document -func (s *Spec) AllRefs() (result []spec.Ref) { - set := make(map[string]struct{}) - for _, v := range s.references.allRefs { - a := v.String() - if a == "" { - continue - } - if _, ok := set[a]; !ok { - set[a] = struct{}{} - result = append(result, v) - } - } - return -} - -func cloneStringMap(source map[string]string) map[string]string { - res := make(map[string]string, len(source)) - for k, v := range source { - res[k] = v - } - return res -} - -func cloneEnumMap(source map[string][]interface{}) map[string][]interface{} { - res := make(map[string][]interface{}, len(source)) - for k, v := range source { - res[k] = v - } - return res -} - -// ParameterPatterns returns all the patterns found in parameters -// the map is cloned to avoid accidental changes -func (s *Spec) ParameterPatterns() map[string]string { - return cloneStringMap(s.patterns.parameters) -} - -// HeaderPatterns returns all the patterns found in response headers -// the map is cloned to avoid accidental changes -func (s *Spec) HeaderPatterns() map[string]string { - return cloneStringMap(s.patterns.headers) -} - -// ItemsPatterns returns all the patterns found in simple array items -// the map is cloned to avoid accidental changes -func (s *Spec) ItemsPatterns() map[string]string { - return cloneStringMap(s.patterns.items) -} - -// SchemaPatterns returns all the patterns found in schemas -// the map is cloned to avoid accidental changes -func (s *Spec) SchemaPatterns() map[string]string { - return cloneStringMap(s.patterns.schemas) -} - -// AllPatterns returns all the patterns found in the spec -// the map is cloned to avoid accidental changes -func (s *Spec) AllPatterns() map[string]string { - return cloneStringMap(s.patterns.allPatterns) -} - -// ParameterEnums returns all the enums found in parameters -// the map is cloned to avoid accidental changes -func (s *Spec) ParameterEnums() map[string][]interface{} { - return cloneEnumMap(s.enums.parameters) -} - -// HeaderEnums returns all the enums found in response headers -// the map is cloned to avoid accidental changes -func (s *Spec) HeaderEnums() map[string][]interface{} { - return cloneEnumMap(s.enums.headers) -} - -// ItemsEnums returns all the enums found in simple array items -// the map is cloned to avoid accidental changes -func (s *Spec) ItemsEnums() map[string][]interface{} { - return cloneEnumMap(s.enums.items) -} - -// SchemaEnums returns all the enums found in schemas -// the map is cloned to avoid accidental changes -func (s *Spec) SchemaEnums() map[string][]interface{} { - return cloneEnumMap(s.enums.schemas) -} - -// AllEnums returns all the enums found in the spec -// the map is cloned to avoid accidental changes -func (s *Spec) AllEnums() map[string][]interface{} { - return cloneEnumMap(s.enums.allEnums) -} diff --git a/vendor/github.com/go-openapi/analysis/appveyor.yml b/vendor/github.com/go-openapi/analysis/appveyor.yml deleted file mode 100644 index 3239d74416a7..000000000000 --- a/vendor/github.com/go-openapi/analysis/appveyor.yml +++ /dev/null @@ -1,33 +0,0 @@ -version: "0.1.{build}" - 
-clone_folder: C:\go-openapi\analysis -shallow_clone: true # for startup speed -pull_requests: - do_not_increment_build_number: true - -#skip_tags: true -#skip_branch_with_pr: true - -# appveyor.yml -build: off - -environment: - GOPATH: c:\gopath - -stack: go 1.12 - -test_script: - - go test -v -timeout 20m ./... -#artifacts: -# - path: '%GOPATH%\bin\*.exe' -deploy: off - -notifications: - - provider: Slack - incoming_webhook: https://hooks.slack.com/services/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 - auth_token: - secure: Sf7kZf7ZGbnwWUMpffHwMu5A0cHkLK2MYY32LNTPj4+/3qC3Ghl7+9v4TSLOqOlCwdRNjOGblAq7s+GDJed6/xgRQl1JtCi1klzZNrYX4q01pgTPvvGcwbBkIYgeMaPeIRcK9OZnud7sRXdttozgTOpytps2U6Js32ip7uj5mHSg2ub0FwoSJwlS6dbezZ8+eDhoha0F/guY99BEwx8Bd+zROrT2TFGsSGOFGN6wFc7moCqTHO/YkWib13a2QNXqOxCCVBy/lt76Wp+JkeFppjHlzs/2lP3EAk13RIUAaesdEUHvIHrzCyNJEd3/+KO2DzsWOYfpktd+KBCvgaYOsoo7ubdT3IROeAegZdCgo/6xgCEsmFc9ZcqCfN5yNx2A+BZ2Vwmpws+bQ1E1+B5HDzzaiLcYfG4X2O210QVGVDLWsv1jqD+uPYeHY2WRfh5ZsIUFvaqgUEnwHwrK44/8REAhQavt1QAj5uJpsRd7CkRVPWRNK+yIky+wgbVUFEchRNmS55E7QWf+W4+4QZkQi7vUTMc9nbTUu2Es9NfvfudOpM2wZbn98fjpb/qq/nRv6Bk+ca+7XD5/IgNLMbWp2ouDdzbiHLCOfDUiHiDJhLfFZx9Bwo7ZwfzeOlbrQX66bx7xRKYmOe4DLrXhNcpbsMa8qbfxlZRCmYbubB/Y8h4= - channel: bots - on_build_success: false - on_build_failure: true - on_build_status_changed: true diff --git a/vendor/github.com/go-openapi/analysis/debug.go b/vendor/github.com/go-openapi/analysis/debug.go deleted file mode 100644 index 84cc4e54cb79..000000000000 --- a/vendor/github.com/go-openapi/analysis/debug.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package analysis - -import ( - "fmt" - "log" - "os" - "path/filepath" - "runtime" -) - -var ( - // Debug is true when the SWAGGER_DEBUG env var is not empty. - // It enables a more verbose logging of the spec analyzer. - Debug = os.Getenv("SWAGGER_DEBUG") != "" - // analysisLogger is a debug logger for this package - analysisLogger *log.Logger -) - -func init() { - debugOptions() -} - -func debugOptions() { - analysisLogger = log.New(os.Stdout, "analysis:", log.LstdFlags) -} - -func debugLog(msg string, args ...interface{}) { - // A private, trivial trace logger, based on go-openapi/spec/expander.go:debugLog() - if Debug { - _, file1, pos1, _ := runtime.Caller(1) - analysisLogger.Printf("%s:%d: %s", filepath.Base(file1), pos1, fmt.Sprintf(msg, args...)) - } -} diff --git a/vendor/github.com/go-openapi/analysis/doc.go b/vendor/github.com/go-openapi/analysis/doc.go deleted file mode 100644 index d5294c0950b6..000000000000 --- a/vendor/github.com/go-openapi/analysis/doc.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Package analysis provides methods to work with a Swagger specification document from
-package go-openapi/spec.
-
-Analyzing a specification
-
-An analyzed specification object (type Spec) provides methods to work with the swagger definition.
-
-Flattening or expanding a specification
-
-Flattening a specification bundles all remote $refs into the main spec document.
-Depending on flattening options, additional preprocessing may take place:
-  - full flattening: replacing all inline complex constructs by a named entry in #/definitions
-  - expand: replace all $ref's in the document by their expanded content
-
-Merging several specifications
-
-Mixin several specifications merges all Swagger constructs, and warns about any conflicts found.
-
-Fixing a specification
-
-Unmarshalling a specification with golang json unmarshalling may lead to
-unwanted results on fields that are present but empty.
-
-Analyzing a Swagger schema
-
-Swagger schemas are analyzed to determine their complexity and qualify their content.
-*/
-package analysis
diff --git a/vendor/github.com/go-openapi/analysis/fixer.go b/vendor/github.com/go-openapi/analysis/fixer.go
deleted file mode 100644
index bfe014ca51a8..000000000000
--- a/vendor/github.com/go-openapi/analysis/fixer.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package analysis
-
-import "github.com/go-openapi/spec"
-
-// FixEmptyResponseDescriptions replaces empty ("") response
-// descriptions in the input with "(empty)" to ensure that the
-// resulting Swagger stays valid. The problem appears to arise
-// from reading in valid specs that have an explicit response
-// description of "" (valid, response.description is required), but
-// due to zero values being omitted upon re-serializing (omitempty) we
-// lose them unless we stick some chars in there.
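For reference, a minimal sketch of driving this fixer, assuming only the vendored go-openapi/analysis and go-openapi/spec packages; the inline spec literal is illustrative:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/analysis"
	"github.com/go-openapi/spec"
)

func main() {
	raw := []byte(`{"swagger":"2.0","info":{"title":"t","version":"1"},` +
		`"paths":{"/ping":{"get":{"responses":{"200":{"description":""}}}}}}`)
	var sw spec.Swagger
	if err := json.Unmarshal(raw, &sw); err != nil {
		panic(err)
	}
	// empty descriptions become "(empty)" so the spec survives re-serialization
	analysis.FixEmptyResponseDescriptions(&sw)
	fmt.Println(sw.Paths.Paths["/ping"].Get.Responses.StatusCodeResponses[200].Description)
}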
-func FixEmptyResponseDescriptions(s *spec.Swagger) { - if s.Paths != nil { - for _, v := range s.Paths.Paths { - if v.Get != nil { - FixEmptyDescs(v.Get.Responses) - } - if v.Put != nil { - FixEmptyDescs(v.Put.Responses) - } - if v.Post != nil { - FixEmptyDescs(v.Post.Responses) - } - if v.Delete != nil { - FixEmptyDescs(v.Delete.Responses) - } - if v.Options != nil { - FixEmptyDescs(v.Options.Responses) - } - if v.Head != nil { - FixEmptyDescs(v.Head.Responses) - } - if v.Patch != nil { - FixEmptyDescs(v.Patch.Responses) - } - } - } - for k, v := range s.Responses { - FixEmptyDesc(&v) - s.Responses[k] = v - } -} - -// FixEmptyDescs adds "(empty)" as the description for any Response in -// the given Responses object that doesn't already have one. -func FixEmptyDescs(rs *spec.Responses) { - FixEmptyDesc(rs.Default) - for k, v := range rs.StatusCodeResponses { - FixEmptyDesc(&v) - rs.StatusCodeResponses[k] = v - } -} - -// FixEmptyDesc adds "(empty)" as the description to the given -// Response object if it doesn't already have one and isn't a -// ref. No-op on nil input. -func FixEmptyDesc(rs *spec.Response) { - if rs == nil || rs.Description != "" || rs.Ref.Ref.GetURL() != nil { - return - } - rs.Description = "(empty)" -} diff --git a/vendor/github.com/go-openapi/analysis/flatten.go b/vendor/github.com/go-openapi/analysis/flatten.go deleted file mode 100644 index ae1eef5d197a..000000000000 --- a/vendor/github.com/go-openapi/analysis/flatten.go +++ /dev/null @@ -1,1732 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package analysis - -import ( - "fmt" - "log" - "net/http" - "net/url" - "os" - slashpath "path" - "path/filepath" - "sort" - "strings" - - "strconv" - - "github.com/go-openapi/analysis/internal" - "github.com/go-openapi/jsonpointer" - swspec "github.com/go-openapi/spec" - "github.com/go-openapi/swag" -) - -// FlattenOpts configuration for flattening a swagger specification. -type FlattenOpts struct { - Spec *Spec // The analyzed spec to work with - flattenContext *context // Internal context to track flattening activity - - BasePath string - - // Flattening options - Expand bool // If Expand is true, we skip flattening the spec and expand it instead - Minimal bool - Verbose bool - RemoveUnused bool - - /* Extra keys */ - _ struct{} // require keys -} - -// ExpandOpts creates a spec.ExpandOptions to configure expanding a specification document. 
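Before the option helpers below, a hedged sketch of wiring FlattenOpts (defined above) end to end; loads.Spec and the field values are illustrative, and Flatten itself normalizes a relative BasePath against the working directory:

package main

import (
	"log"

	"github.com/go-openapi/analysis"
	"github.com/go-openapi/loads"
)

func main() {
	// load and analyze a spec document (path is illustrative)
	doc, err := loads.Spec("./swagger.json")
	if err != nil {
		log.Fatal(err)
	}
	// minimal flattening: imports remote $refs and names JSON pointers,
	// but leaves schema constructs untouched
	err = analysis.Flatten(analysis.FlattenOpts{
		Spec:         analysis.New(doc.Spec()),
		BasePath:     "./swagger.json",
		Minimal:      true,
		Expand:       false,
		RemoveUnused: false,
	})
	if err != nil {
		log.Fatal(err)
	}
}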
-func (f *FlattenOpts) ExpandOpts(skipSchemas bool) *swspec.ExpandOptions {
-	return &swspec.ExpandOptions{RelativeBase: f.BasePath, SkipSchemas: skipSchemas}
-}
-
-// Swagger gets the swagger specification for this flatten operation
-func (f *FlattenOpts) Swagger() *swspec.Swagger {
-	return f.Spec.spec
-}
-
-// newRef stores information about refs created during the flattening process
-type newRef struct {
-	key      string
-	newName  string
-	path     string
-	isOAIGen bool
-	resolved bool
-	schema   *swspec.Schema
-	parents  []string
-}
-
-// context stores intermediary results from flatten
-type context struct {
-	newRefs  map[string]*newRef
-	warnings []string
-	resolved map[string]string
-}
-
-func newContext() *context {
-	return &context{
-		newRefs:  make(map[string]*newRef, 150),
-		warnings: make([]string, 0),
-		resolved: make(map[string]string, 50),
-	}
-}
-
-// Flatten an analyzed spec and produce a self-contained spec bundle.
-//
-// There is a minimal and a full flattening mode.
-//
-// Minimally flattening a spec means:
-//  - Expanding parameters, responses, path items, parameter items and header items (references to schemas are left
-//    unscathed)
-//  - Importing external (http, file) references so they become internal to the document
-//  - Moving every JSON pointer to a $ref to a named definition (i.e. the reworked spec does not contain pointers
-//    like "$ref": "#/definitions/myObject/allOfs/1")
-//
-// A minimally flattened spec thus guarantees the following properties:
-//  - all $refs point to a local definition (i.e. '#/definitions/...')
-//  - definitions are unique
-//
-// NOTE: arbitrary JSON pointers (other than $refs to top level definitions) are rewritten as definitions if they
-// represent a complex schema or express commonality in the spec.
-// Otherwise, they are simply expanded.
-//
-// Minimal flattening is necessary and sufficient for codegen rendering using go-swagger.
-//
-// Fully flattening a spec means:
-//  - Moving every complex inline schema to be a definition with an auto-generated name in a depth-first fashion.
-//
-// By complex, we mean every JSON object with some properties.
-// Arrays, when they do not define a tuple,
-// or empty objects with or without additionalProperties, are not considered complex and remain inline.
-//
-// NOTE: rewritten schemas get a vendor extension x-go-gen-location so we know from which part of the spec definitions
-// have been created.
-//
-// Available flattening options:
-//  - Minimal: stops flattening after minimal $ref processing, leaving schema constructs untouched
-//  - Expand: expand all $ref's in the document (inoperative if Minimal is set to true)
-//  - Verbose: croaks about name conflicts detected
-//  - RemoveUnused: removes unused parameters, responses and definitions after expansion/flattening
-//
-// NOTE: expansion removes all $ref save circular $ref, which remain in place
-//
-// TODO: additional options
-//  - PropagateNameExtensions: ensure that created entries properly follow naming rules when their parent has set a
-//    x-go-name extension
-//  - LiftAllOfs:
-//    - limit the flattening of allOf members when simple objects
-//    - merge allOf with validation only
-//    - merge allOf with extensions only
-//    - ...
-// -func Flatten(opts FlattenOpts) error { - // Make sure opts.BasePath is an absolute path - if !filepath.IsAbs(opts.BasePath) { - cwd, _ := os.Getwd() - opts.BasePath = filepath.Join(cwd, opts.BasePath) - } - // make sure drive letter on windows is normalized to lower case - u, _ := url.Parse(opts.BasePath) - opts.BasePath = u.String() - - opts.flattenContext = newContext() - - // recursively expand responses, parameters, path items and items in simple schemas. - // This simplifies the spec and leaves $ref only into schema objects. - if err := swspec.ExpandSpec(opts.Swagger(), opts.ExpandOpts(!opts.Expand)); err != nil { - return err - } - - // strip current file from $ref's, so we can recognize them as proper definitions - // In particular, this works around for issue go-openapi/spec#76: leading absolute file in $ref is stripped - if err := normalizeRef(&opts); err != nil { - return err - } - - if opts.RemoveUnused { - // optionally removes shared parameters and responses already expanded (now unused) - // default parameters (i.e. under paths) remain. - opts.Swagger().Parameters = nil - opts.Swagger().Responses = nil - } - - opts.Spec.reload() // re-analyze - - // at this point there are no references left but in schemas - - for imported := false; !imported; { - // iteratively import remote references until none left. - // This inlining deals with name conflicts by introducing auto-generated names ("OAIGen") - var err error - if imported, err = importExternalReferences(&opts); err != nil { - return err - } - opts.Spec.reload() // re-analyze - } - - if !opts.Minimal && !opts.Expand { - // full flattening: rewrite inline schemas (schemas that aren't simple types or arrays or maps) - if err := nameInlinedSchemas(&opts); err != nil { - return err - } - - opts.Spec.reload() // re-analyze - } - - // rewrite JSON pointers other than $ref to named definitions - // and attempt to resolve conflicting names whenever possible. - if err := stripPointersAndOAIGen(&opts); err != nil { - return err - } - - if opts.RemoveUnused { - // remove unused definitions - expected := make(map[string]struct{}) - for k := range opts.Swagger().Definitions { - expected[slashpath.Join(definitionsPath, jsonpointer.Escape(k))] = struct{}{} - } - for _, k := range opts.Spec.AllDefinitionReferences() { - delete(expected, k) - } - for k := range expected { - debugLog("removing unused definition %s", slashpath.Base(k)) - if opts.Verbose { - log.Printf("info: removing unused definition: %s", slashpath.Base(k)) - } - delete(opts.Swagger().Definitions, slashpath.Base(k)) - } - opts.Spec.reload() // re-analyze - } - - // TODO: simplify known schema patterns to flat objects with properties - // examples: - // - lift simple allOf object, - // - empty allOf with validation only or extensions only - // - rework allOf arrays - // - rework allOf additionalProperties - - if opts.Verbose { - // issue notifications - croak(&opts) - } - return nil -} - -// isAnalyzedAsComplex determines if an analyzed schema is eligible to flattening (i.e. it is "complex"). 
-//
-// Complex means the schema is none of the following:
-//  - a simple type (primitive)
-//  - an array of something (items are possibly complex ; if this is the case, items will generate a definition)
-//  - a map of something (additionalProperties are possibly complex ; if this is the case, additionalProperties will
-//    generate a definition)
-func isAnalyzedAsComplex(asch *AnalyzedSchema) bool {
-	if !asch.IsSimpleSchema && !asch.IsArray && !asch.IsMap {
-		return true
-	}
-	return false
-}
-
-// nameInlinedSchemas replaces every complex inline construct by a named definition.
-func nameInlinedSchemas(opts *FlattenOpts) error {
-	debugLog("nameInlinedSchemas")
-	namer := &inlineSchemaNamer{
-		Spec:           opts.Swagger(),
-		Operations:     opRefsByRef(gatherOperations(opts.Spec, nil)),
-		flattenContext: opts.flattenContext,
-		opts:           opts,
-	}
-	depthFirst := sortDepthFirst(opts.Spec.allSchemas)
-	for _, key := range depthFirst {
-		sch := opts.Spec.allSchemas[key]
-		if sch.Schema != nil && sch.Schema.Ref.String() == "" && !sch.TopLevel { // inline schema
-			asch, err := Schema(SchemaOpts{Schema: sch.Schema, Root: opts.Swagger(), BasePath: opts.BasePath})
-			if err != nil {
-				return fmt.Errorf("schema analysis [%s]: %v", key, err)
-			}
-
-			if isAnalyzedAsComplex(asch) { // move complex schemas to definitions
-				if err := namer.Name(key, sch.Schema, asch); err != nil {
-					return err
-				}
-			}
-		}
-	}
-	return nil
-}
-
-var depthGroupOrder = []string{
-	"sharedParam", "sharedResponse", "sharedOpParam", "opParam", "codeResponse", "defaultResponse", "definition",
-}
-
-func sortDepthFirst(data map[string]SchemaRef) []string {
-	// group by category (shared params, op param, statuscode response, default response, definitions)
-	// sort groups internally by number of parts in the key and lexical names
-	// flatten groups into a single list of keys
-	sorted := make([]string, 0, len(data))
-	grouped := make(map[string]keys, len(data))
-	for k := range data {
-		split := keyParts(k)
-		var pk string
-		if split.IsSharedOperationParam() {
-			pk = "sharedOpParam"
-		}
-		if split.IsOperationParam() {
-			pk = "opParam"
-		}
-		if split.IsStatusCodeResponse() {
-			pk = "codeResponse"
-		}
-		if split.IsDefaultResponse() {
-			pk = "defaultResponse"
-		}
-		if split.IsDefinition() {
-			pk = "definition"
-		}
-		if split.IsSharedParam() {
-			pk = "sharedParam"
-		}
-		if split.IsSharedResponse() {
-			pk = "sharedResponse"
-		}
-		grouped[pk] = append(grouped[pk], key{Segments: len(split), Key: k})
-	}
-
-	for _, pk := range depthGroupOrder {
-		res := grouped[pk]
-		sort.Sort(res)
-		for _, v := range res {
-			sorted = append(sorted, v.Key)
-		}
-	}
-	return sorted
-}
-
-type key struct {
-	Segments int
-	Key      string
-}
-type keys []key
-
-func (k keys) Len() int      { return len(k) }
-func (k keys) Swap(i, j int) { k[i], k[j] = k[j], k[i] }
-func (k keys) Less(i, j int) bool {
-	return k[i].Segments > k[j].Segments || (k[i].Segments == k[j].Segments && k[i].Key < k[j].Key)
-}
-
-type inlineSchemaNamer struct {
-	Spec           *swspec.Swagger
-	Operations     map[string]opRef
-	flattenContext *context
-	opts           *FlattenOpts
-}
-
-func opRefsByRef(oprefs map[string]opRef) map[string]opRef {
-	result := make(map[string]opRef, len(oprefs))
-	for _, v := range oprefs {
-		result[v.Ref.String()] = v
-	}
-	return result
-}
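The keys comparator above orders deeper pointers first and breaks ties lexically; a hypothetical in-package test (not part of the vendored code) pinning down that order:

package analysis

import (
	"sort"
	"testing"
)

// hypothetical in-package test, for illustration only
func TestKeysDepthFirstOrdering(t *testing.T) {
	ks := keys{
		{Segments: 2, Key: "definitions/b"},
		{Segments: 4, Key: "definitions/a/properties/x"},
		{Segments: 2, Key: "definitions/a"},
	}
	sort.Sort(ks)
	// deepest key first, then ties broken lexically
	if ks[0].Key != "definitions/a/properties/x" || ks[1].Key != "definitions/a" || ks[2].Key != "definitions/b" {
		t.Fatalf("unexpected order: %#v", ks)
	}
}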
-func (isn *inlineSchemaNamer) Name(key string, schema *swspec.Schema, aschema *AnalyzedSchema) error {
-	debugLog("naming inlined schema at %s", key)
-
-	parts := keyParts(key)
-	for _, name := range namesFromKey(parts, aschema, isn.Operations) {
-		if name != "" {
-			// create unique name
-			newName, isOAIGen := uniqifyName(isn.Spec.Definitions, swag.ToJSONName(name))
-
-			// clone schema
-			sch, err := cloneSchema(schema)
-			if err != nil {
-				return err
-			}
-
-			// replace values on schema
-			if err := rewriteSchemaToRef(isn.Spec, key,
-				swspec.MustCreateRef(slashpath.Join(definitionsPath, newName))); err != nil {
-				return fmt.Errorf("error while creating definition %q from inline schema: %v", newName, err)
-			}
-
-			// rewrite any dependent $ref pointing to this place,
-			// when not already pointing to a top-level definition.
-			//
-			// NOTE: this is important if such referers use arbitrary JSON pointers.
-			an := New(isn.Spec)
-			for k, v := range an.references.allRefs {
-				r, _, erd := deepestRef(isn.opts, v)
-				if erd != nil {
-					return fmt.Errorf("at %s, %v", k, erd)
-				}
-				if r.String() == key ||
-					r.String() == slashpath.Join(definitionsPath, newName) &&
-						slashpath.Dir(v.String()) != definitionsPath {
-					debugLog("found a $ref to a rewritten schema: %s points to %s", k, v.String())
-
-					// rewrite $ref to the new target
-					if err := updateRef(isn.Spec, k,
-						swspec.MustCreateRef(slashpath.Join(definitionsPath, newName))); err != nil {
-						return err
-					}
-				}
-			}
-
-			// NOTE: this extension is currently not used by go-swagger (provided for information only)
-			sch.AddExtension("x-go-gen-location", genLocation(parts))
-
-			// save cloned schema to definitions
-			saveSchema(isn.Spec, newName, sch)
-
-			// keep track of created refs
-			if isn.flattenContext != nil {
-				debugLog("track created ref: key=%s, newName=%s, isOAIGen=%t", key, newName, isOAIGen)
-				resolved := false
-				if _, ok := isn.flattenContext.newRefs[key]; ok {
-					resolved = isn.flattenContext.newRefs[key].resolved
-				}
-				isn.flattenContext.newRefs[key] = &newRef{
-					key:      key,
-					newName:  newName,
-					path:     slashpath.Join(definitionsPath, newName),
-					isOAIGen: isOAIGen,
-					resolved: resolved,
-					schema:   sch,
-				}
-			}
-		}
-	}
-	return nil
-}
-
-// genLocation indicates from which section of the specification (models or operations) a definition has been created.
-//
-// This is reflected in the output spec with a "x-go-gen-location" extension. At the moment, this is provided
-// for information only.
-func genLocation(parts splitKey) string { - if parts.IsOperation() { - return "operations" - } - if parts.IsDefinition() { - return "models" - } - return "" -} - -// uniqifyName yields a unique name for a definition -func uniqifyName(definitions swspec.Definitions, name string) (string, bool) { - isOAIGen := false - if name == "" { - name = "oaiGen" - isOAIGen = true - } - if len(definitions) == 0 { - return name, isOAIGen - } - - unq := true - for k := range definitions { - if strings.EqualFold(k, name) { - unq = false - break - } - } - - if unq { - return name, isOAIGen - } - - name += "OAIGen" - isOAIGen = true - var idx int - unique := name - _, known := definitions[unique] - for known { - idx++ - unique = fmt.Sprintf("%s%d", name, idx) - _, known = definitions[unique] - } - return unique, isOAIGen -} - -func namesFromKey(parts splitKey, aschema *AnalyzedSchema, operations map[string]opRef) []string { - var baseNames [][]string - var startIndex int - if parts.IsOperation() { - // params - if parts.IsOperationParam() || parts.IsSharedOperationParam() { - piref := parts.PathItemRef() - if piref.String() != "" && parts.IsOperationParam() { - if op, ok := operations[piref.String()]; ok { - startIndex = 5 - baseNames = append(baseNames, []string{op.ID, "params", "body"}) - } - } else if parts.IsSharedOperationParam() { - pref := parts.PathRef() - for k, v := range operations { - if strings.HasPrefix(k, pref.String()) { - startIndex = 4 - baseNames = append(baseNames, []string{v.ID, "params", "body"}) - } - } - } - } - // responses - if parts.IsOperationResponse() { - piref := parts.PathItemRef() - if piref.String() != "" { - if op, ok := operations[piref.String()]; ok { - startIndex = 6 - baseNames = append(baseNames, []string{op.ID, parts.ResponseName(), "body"}) - } - } - } - } - - // definitions - if parts.IsDefinition() { - nm := parts.DefinitionName() - if nm != "" { - startIndex = 2 - baseNames = append(baseNames, []string{parts.DefinitionName()}) - } - } - - var result []string - for _, segments := range baseNames { - nm := parts.BuildName(segments, startIndex, aschema) - if nm != "" { - result = append(result, nm) - } - } - sort.Strings(result) - return result -} - -const ( - paths = "paths" - responses = "responses" - parameters = "parameters" - definitions = "definitions" - definitionsPath = "#/definitions" -) - -var ( - ignoredKeys map[string]struct{} - validMethods map[string]struct{} -) - -func init() { - ignoredKeys = map[string]struct{}{ - "schema": {}, - "properties": {}, - "not": {}, - "anyOf": {}, - "oneOf": {}, - } - - validMethods = map[string]struct{}{ - "GET": {}, - "HEAD": {}, - "OPTIONS": {}, - "PATCH": {}, - "POST": {}, - "PUT": {}, - "DELETE": {}, - } -} - -type splitKey []string - -func (s splitKey) IsDefinition() bool { - return len(s) > 1 && s[0] == definitions -} - -func (s splitKey) DefinitionName() string { - if !s.IsDefinition() { - return "" - } - return s[1] -} - -func (s splitKey) isKeyName(i int) bool { - if i <= 0 { - return false - } - count := 0 - for idx := i - 1; idx > 0; idx-- { - if s[idx] != "properties" { - break - } - count++ - } - - return count%2 != 0 -} - -func (s splitKey) BuildName(segments []string, startIndex int, aschema *AnalyzedSchema) string { - for i, part := range s[startIndex:] { - if _, ignored := ignoredKeys[part]; !ignored || s.isKeyName(startIndex+i) { - if part == "items" || part == "additionalItems" { - if aschema.IsTuple || aschema.IsTupleWithExtra { - segments = append(segments, "tuple") - } else { - segments = 
append(segments, "items") - } - if part == "additionalItems" { - segments = append(segments, part) - } - continue - } - segments = append(segments, part) - } - } - return strings.Join(segments, " ") -} - -func (s splitKey) IsOperation() bool { - return len(s) > 1 && s[0] == paths -} - -func (s splitKey) IsSharedOperationParam() bool { - return len(s) > 2 && s[0] == paths && s[2] == parameters -} - -func (s splitKey) IsSharedParam() bool { - return len(s) > 1 && s[0] == parameters -} - -func (s splitKey) IsOperationParam() bool { - return len(s) > 3 && s[0] == paths && s[3] == parameters -} - -func (s splitKey) IsOperationResponse() bool { - return len(s) > 3 && s[0] == paths && s[3] == responses -} - -func (s splitKey) IsSharedResponse() bool { - return len(s) > 1 && s[0] == responses -} - -func (s splitKey) IsDefaultResponse() bool { - return len(s) > 4 && s[0] == paths && s[3] == responses && s[4] == "default" -} - -func (s splitKey) IsStatusCodeResponse() bool { - isInt := func() bool { - _, err := strconv.Atoi(s[4]) - return err == nil - } - return len(s) > 4 && s[0] == paths && s[3] == responses && isInt() -} - -func (s splitKey) ResponseName() string { - if s.IsStatusCodeResponse() { - code, _ := strconv.Atoi(s[4]) - return http.StatusText(code) - } - if s.IsDefaultResponse() { - return "Default" - } - return "" -} - -func (s splitKey) PathItemRef() swspec.Ref { - if len(s) < 3 { - return swspec.Ref{} - } - pth, method := s[1], s[2] - if _, isValidMethod := validMethods[strings.ToUpper(method)]; !isValidMethod && !strings.HasPrefix(method, "x-") { - return swspec.Ref{} - } - return swspec.MustCreateRef("#" + slashpath.Join("/", paths, jsonpointer.Escape(pth), strings.ToUpper(method))) -} - -func (s splitKey) PathRef() swspec.Ref { - if !s.IsOperation() { - return swspec.Ref{} - } - return swspec.MustCreateRef("#" + slashpath.Join("/", paths, jsonpointer.Escape(s[1]))) -} - -func keyParts(key string) splitKey { - var res []string - for _, part := range strings.Split(key[1:], "/") { - if part != "" { - res = append(res, jsonpointer.Unescape(part)) - } - } - return res -} - -func rewriteSchemaToRef(spec *swspec.Swagger, key string, ref swspec.Ref) error { - debugLog("rewriting schema to ref for %s with %s", key, ref.String()) - _, value, err := getPointerFromKey(spec, key) - if err != nil { - return err - } - - switch refable := value.(type) { - case *swspec.Schema: - return rewriteParentRef(spec, key, ref) - - case swspec.Schema: - return rewriteParentRef(spec, key, ref) - - case *swspec.SchemaOrArray: - if refable.Schema != nil { - refable.Schema = &swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} - } - - case *swspec.SchemaOrBool: - if refable.Schema != nil { - refable.Schema = &swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} - } - default: - return fmt.Errorf("no schema with ref found at %s for %T", key, value) - } - - return nil -} - -func rewriteParentRef(spec *swspec.Swagger, key string, ref swspec.Ref) error { - parent, entry, pvalue, err := getParentFromKey(spec, key) - if err != nil { - return err - } - - debugLog("rewriting holder for %T", pvalue) - switch container := pvalue.(type) { - case swspec.Response: - if err := rewriteParentRef(spec, "#"+parent, ref); err != nil { - return err - } - - case *swspec.Response: - container.Schema = &swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} - - case *swspec.Responses: - statusCode, err := strconv.Atoi(entry) - if err != nil { - return fmt.Errorf("%s not a number: %v", key[1:], err) - } - resp := 
container.StatusCodeResponses[statusCode] - resp.Schema = &swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} - container.StatusCodeResponses[statusCode] = resp - - case map[string]swspec.Response: - resp := container[entry] - resp.Schema = &swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} - container[entry] = resp - - case swspec.Parameter: - if err := rewriteParentRef(spec, "#"+parent, ref); err != nil { - return err - } - - case map[string]swspec.Parameter: - param := container[entry] - param.Schema = &swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} - container[entry] = param - - case []swspec.Parameter: - idx, err := strconv.Atoi(entry) - if err != nil { - return fmt.Errorf("%s not a number: %v", key[1:], err) - } - param := container[idx] - param.Schema = &swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} - container[idx] = param - - case swspec.Definitions: - container[entry] = swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} - - case map[string]swspec.Schema: - container[entry] = swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} - - case []swspec.Schema: - idx, err := strconv.Atoi(entry) - if err != nil { - return fmt.Errorf("%s not a number: %v", key[1:], err) - } - container[idx] = swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} - - case *swspec.SchemaOrArray: - // NOTE: this is necessarily an array - otherwise, the parent would be *Schema - idx, err := strconv.Atoi(entry) - if err != nil { - return fmt.Errorf("%s not a number: %v", key[1:], err) - } - container.Schemas[idx] = swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}} - - // NOTE: can't have case *swspec.SchemaOrBool = parent in this case is *Schema - - default: - return fmt.Errorf("unhandled parent schema rewrite %s (%T)", key, pvalue) - } - return nil -} - -func cloneSchema(schema *swspec.Schema) (*swspec.Schema, error) { - var sch swspec.Schema - if err := swag.FromDynamicJSON(schema, &sch); err != nil { - return nil, fmt.Errorf("cannot clone schema: %v", err) - } - return &sch, nil -} - -// importExternalReferences iteratively digs remote references and imports them into the main schema. -// -// At every iteration, new remotes may be found when digging deeper: they are rebased to the current schema before being imported. -// -// This returns true when no more remote references can be found. 
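cloneSchema above deep-copies a schema via a JSON round trip through swag.FromDynamicJSON; a standalone sketch of that pattern, assuming only the vendored go-openapi/spec and go-openapi/swag packages:

package main

import (
	"fmt"

	"github.com/go-openapi/spec"
	"github.com/go-openapi/swag"
)

func main() {
	src := spec.StringProperty() // *spec.Schema for {"type": "string"}
	var dst spec.Schema
	// marshal src to JSON and unmarshal into dst: a structural deep copy
	if err := swag.FromDynamicJSON(src, &dst); err != nil {
		panic(err)
	}
	dst.Title = "copy"
	fmt.Println(src.Title == "", dst.Title) // the source schema is unaffected
}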
-func importExternalReferences(opts *FlattenOpts) (bool, error) { - debugLog("importExternalReferences") - - groupedRefs := reverseIndexForSchemaRefs(opts) - sortedRefStr := make([]string, 0, len(groupedRefs)) - if opts.flattenContext == nil { - opts.flattenContext = newContext() - } - - // sort $ref resolution to ensure deterministic name conflict resolution - for refStr := range groupedRefs { - sortedRefStr = append(sortedRefStr, refStr) - } - sort.Strings(sortedRefStr) - - complete := true - - for _, refStr := range sortedRefStr { - entry := groupedRefs[refStr] - if entry.Ref.HasFragmentOnly { - continue - } - complete = false - var isOAIGen bool - - newName := opts.flattenContext.resolved[refStr] - if newName != "" { - // rewrite ref with already resolved external ref (useful for cyclical refs): - // rewrite external refs to local ones - debugLog("resolving known ref [%s] to %s", refStr, newName) - for _, key := range entry.Keys { - if err := updateRef(opts.Swagger(), key, - swspec.MustCreateRef(slashpath.Join(definitionsPath, newName))); err != nil { - return false, err - } - } - } else { - // resolve schemas - debugLog("resolving schema from remote $ref [%s]", refStr) - sch, err := swspec.ResolveRefWithBase(opts.Swagger(), &entry.Ref, opts.ExpandOpts(false)) - if err != nil { - return false, fmt.Errorf("could not resolve schema: %v", err) - } - - // at this stage only $ref analysis matters - partialAnalyzer := &Spec{ - references: referenceAnalysis{}, - patterns: patternAnalysis{}, - enums: enumAnalysis{}, - } - partialAnalyzer.reset() - partialAnalyzer.analyzeSchema("", *sch, "/") - - // now rewrite those refs with rebase - for key, ref := range partialAnalyzer.references.allRefs { - if err := updateRef(sch, key, swspec.MustCreateRef(rebaseRef(entry.Ref.String(), ref.String()))); err != nil { - return false, fmt.Errorf("failed to rewrite ref for key %q at %s: %v", key, entry.Ref.String(), err) - } - } - - // generate a unique name - isOAIGen means that a naming conflict was resolved by changing the name - newName, isOAIGen = uniqifyName(opts.Swagger().Definitions, nameFromRef(entry.Ref)) - debugLog("new name for [%s]: %s - with name conflict:%t", - strings.Join(entry.Keys, ", "), newName, isOAIGen) - - opts.flattenContext.resolved[refStr] = newName - - // rewrite the external refs to local ones - for _, key := range entry.Keys { - if err := updateRef(opts.Swagger(), key, - swspec.MustCreateRef(slashpath.Join(definitionsPath, newName))); err != nil { - return false, err - } - - // keep track of created refs - resolved := false - if _, ok := opts.flattenContext.newRefs[key]; ok { - resolved = opts.flattenContext.newRefs[key].resolved - } - opts.flattenContext.newRefs[key] = &newRef{ - key: key, - newName: newName, - path: slashpath.Join(definitionsPath, newName), - isOAIGen: isOAIGen, - resolved: resolved, - schema: sch, - } - } - - // add the resolved schema to the definitions - saveSchema(opts.Swagger(), newName, sch) - } - } - // maintains ref index entries - for k := range opts.flattenContext.newRefs { - r := opts.flattenContext.newRefs[k] - - // update tracking with resolved schemas - if r.schema.Ref.String() != "" { - ref := swspec.MustCreateRef(r.path) - sch, err := swspec.ResolveRefWithBase(opts.Swagger(), &ref, opts.ExpandOpts(false)) - if err != nil { - return false, fmt.Errorf("could not resolve schema: %v", err) - } - r.schema = sch - } - // update tracking with renamed keys: got a cascade of refs - if r.path != k { - renamed := *r - renamed.key = r.path - 
opts.flattenContext.newRefs[renamed.path] = &renamed
-
-			// indirect ref
-			r.newName = slashpath.Base(k)
-			r.schema = swspec.RefSchema(r.path)
-			r.path = k
-			r.isOAIGen = strings.Contains(k, "OAIGen")
-		}
-	}
-
-	return complete, nil
-}
-
-type refRevIdx struct {
-	Ref  swspec.Ref
-	Keys []string
-}
-
-// rebaseRef rebases a remote ref relative to a base ref.
-//
-// NOTE: does not support JSONschema ID for $ref (we assume we are working with swagger specs here).
-//
-// NOTE(windows):
-// * refs are assumed to have been normalized with drive letter lower cased (from go-openapi/spec)
-// * "/"s in paths may appear as escape sequences
-func rebaseRef(baseRef string, ref string) string {
-	debugLog("rebasing ref: %s onto %s", ref, baseRef)
-	baseRef, _ = url.PathUnescape(baseRef)
-	ref, _ = url.PathUnescape(ref)
-	if baseRef == "" || baseRef == "." || strings.HasPrefix(baseRef, "#") {
-		return ref
-	}
-
-	parts := strings.Split(ref, "#")
-
-	baseParts := strings.Split(baseRef, "#")
-	baseURL, _ := url.Parse(baseParts[0])
-	if strings.HasPrefix(ref, "#") {
-		// a fragment-only ref is rebased onto the base document, with or without a host
-		return strings.Join([]string{baseParts[0], parts[1]}, "#")
-	}
-
-	refURL, _ := url.Parse(parts[0])
-	if refURL.Host != "" || filepath.IsAbs(parts[0]) {
-		// not rebasing an absolute path
-		return ref
-	}
-
-	// there is a relative path
-	var basePath string
-	if baseURL.Host != "" {
-		// when there is a host, standard URI rules apply (with "/")
-		baseURL.Path = slashpath.Dir(baseURL.Path)
-		baseURL.Path = slashpath.Join(baseURL.Path, "/"+parts[0])
-		return baseURL.String()
-	}
-
-	// this is a local relative path
-	// basePart[0] and parts[0] are local filesystem directories/files
-	basePath = filepath.Dir(baseParts[0])
-	relPath := filepath.Join(basePath, string(filepath.Separator)+parts[0])
-	if len(parts) > 1 {
-		return strings.Join([]string{relPath, parts[1]}, "#")
-	}
-	return relPath
-}
-
-// normalizePath renders absolute path on remote file refs
-//
-// NOTE(windows):
-// * refs are assumed to have been normalized with drive letter lower cased (from go-openapi/spec)
-// * "/"s in paths may appear as escape sequences
-func normalizePath(ref swspec.Ref, opts *FlattenOpts) (normalizedPath string) {
-	uri, _ := url.PathUnescape(ref.String())
-	if ref.HasFragmentOnly || filepath.IsAbs(uri) {
-		normalizedPath = uri
-		return
-	}
-
-	refURL, _ := url.Parse(uri)
-	if refURL.Host != "" {
-		normalizedPath = uri
-		return
-	}
-
-	parts := strings.Split(uri, "#")
-	// BasePath, parts[0] are local filesystem directories, guaranteed to be absolute at this stage
-	parts[0] = filepath.Join(filepath.Dir(opts.BasePath), parts[0])
-	normalizedPath = strings.Join(parts, "#")
-	return
-}
-
-func reverseIndexForSchemaRefs(opts *FlattenOpts) map[string]refRevIdx {
-	collected := make(map[string]refRevIdx)
-	for key, schRef := range opts.Spec.references.schemas {
-		// normalize paths before sorting,
-		// so we get together keys in same external file
-		normalizedPath := normalizePath(schRef, opts)
-		if entry, ok := collected[normalizedPath]; ok {
-			entry.Keys = append(entry.Keys, key)
-			collected[normalizedPath] = entry
-		} else {
-			collected[normalizedPath] = refRevIdx{
-				Ref:  schRef,
-				Keys: []string{key},
-			}
-		}
-	}
-	return collected
-}
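A hypothetical in-package test (not part of the vendored code) illustrating rebaseRef above; the file names are illustrative and the expectation assumes a POSIX filesystem:

package analysis

import "testing"

// hypothetical in-package test, for illustration only
func TestRebaseRefRelativeFile(t *testing.T) {
	got := rebaseRef("schemas/base.json#/definitions/record", "other.json#/definitions/address")
	want := "schemas/other.json#/definitions/address"
	if got != want {
		t.Fatalf("got %q, want %q", got, want)
	}
}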
-func nameFromRef(ref swspec.Ref) string {
-	u := ref.GetURL()
-	if u.Fragment != "" {
-		return swag.ToJSONName(slashpath.Base(u.Fragment))
-	}
-	if u.Path != "" {
-		bn := slashpath.Base(u.Path)
-		if bn != "" && bn != "/" {
-			ext := slashpath.Ext(bn)
-			if ext != "" {
-				return swag.ToJSONName(bn[:len(bn)-len(ext)])
-			}
-			return swag.ToJSONName(bn)
-		}
-	}
-	return swag.ToJSONName(strings.Replace(u.Host, ".", " ", -1))
-}
-
-func saveSchema(spec *swspec.Swagger, name string, schema *swspec.Schema) {
-	if schema == nil {
-		return
-	}
-	if spec.Definitions == nil {
-		spec.Definitions = make(map[string]swspec.Schema, 150)
-	}
-	spec.Definitions[name] = *schema
-}
-
-// getPointerFromKey retrieves the content of the JSON pointer "key"
-func getPointerFromKey(spec interface{}, key string) (string, interface{}, error) {
-	switch spec.(type) {
-	case *swspec.Schema:
-	case *swspec.Swagger:
-	default:
-		panic("unexpected type used in getPointerFromKey")
-	}
-	if key == "#/" {
-		return "", spec, nil
-	}
-	// unescape chars in key, e.g. "{}" from path params
-	pth, _ := internal.PathUnescape(key[1:])
-	ptr, err := jsonpointer.New(pth)
-	if err != nil {
-		return "", nil, err
-	}
-
-	value, _, err := ptr.Get(spec)
-	if err != nil {
-		debugLog("error when getting key: %s with path: %s", key, pth)
-		return "", nil, err
-	}
-	return pth, value, nil
-}
-
-// getParentFromKey retrieves the container of the JSON pointer "key"
-func getParentFromKey(spec interface{}, key string) (string, string, interface{}, error) {
-	switch spec.(type) {
-	case *swspec.Schema:
-	case *swspec.Swagger:
-	default:
-		panic("unexpected type used in getParentFromKey")
-	}
-	// unescape chars in key, e.g. "{}" from path params
-	pth, _ := internal.PathUnescape(key[1:])
-
-	parent, entry := slashpath.Dir(pth), slashpath.Base(pth)
-	debugLog("getting schema holder at: %s, with entry: %s", parent, entry)
-
-	pptr, err := jsonpointer.New(parent)
-	if err != nil {
-		return "", "", nil, err
-	}
-	pvalue, _, err := pptr.Get(spec)
-	if err != nil {
-		return "", "", nil, fmt.Errorf("can't get parent for %s: %v", parent, err)
-	}
-	return parent, entry, pvalue, nil
-}
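The two lookups above are thin wrappers over go-openapi/jsonpointer; a standalone sketch of the underlying call, with an illustrative document and pointer:

package main

import (
	"fmt"

	"github.com/go-openapi/jsonpointer"
	"github.com/go-openapi/spec"
)

func main() {
	sw := &spec.Swagger{SwaggerProps: spec.SwaggerProps{
		Definitions: spec.Definitions{"record": *spec.StringProperty()},
	}}
	ptr, err := jsonpointer.New("/definitions/record")
	if err != nil {
		panic(err)
	}
	// Get walks maps and go-openapi types via their JSON names
	value, kind, err := ptr.Get(sw)
	if err != nil {
		panic(err)
	}
	fmt.Println(kind, value.(spec.Schema).Type)
}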
-// updateRef replaces a ref by another one
-func updateRef(spec interface{}, key string, ref swspec.Ref) error {
-	switch spec.(type) {
-	case *swspec.Schema:
-	case *swspec.Swagger:
-	default:
-		panic("unexpected type used in updateRef")
-	}
-	debugLog("updating ref for %s with %s", key, ref.String())
-	pth, value, err := getPointerFromKey(spec, key)
-	if err != nil {
-		return err
-	}
-
-	switch refable := value.(type) {
-	case *swspec.Schema:
-		refable.Ref = ref
-	case *swspec.SchemaOrArray:
-		if refable.Schema != nil {
-			refable.Schema.Ref = ref
-		}
-	case *swspec.SchemaOrBool:
-		if refable.Schema != nil {
-			refable.Schema.Ref = ref
-		}
-	case swspec.Schema:
-		debugLog("rewriting holder for %T", refable)
-		_, entry, pvalue, erp := getParentFromKey(spec, key)
-		if erp != nil {
-			return erp
-		}
-		switch container := pvalue.(type) {
-		case swspec.Definitions:
-			container[entry] = swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}}
-
-		case map[string]swspec.Schema:
-			container[entry] = swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}}
-
-		case []swspec.Schema:
-			idx, err := strconv.Atoi(entry)
-			if err != nil {
-				return fmt.Errorf("%s not a number: %v", pth, err)
-			}
-			container[idx] = swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}}
-
-		case *swspec.SchemaOrArray:
-			// NOTE: this is necessarily an array - otherwise, the parent would be *Schema
-			idx, err := strconv.Atoi(entry)
-			if err != nil {
-				return fmt.Errorf("%s not a number: %v", pth, err)
-			}
-			container.Schemas[idx] = swspec.Schema{SchemaProps: swspec.SchemaProps{Ref: ref}}
-
-		// NOTE: can't have case *swspec.SchemaOrBool - parent in this case is *Schema
-
-		default:
-			return fmt.Errorf("unhandled container type at %s: %T", key, value)
-		}
-
-	default:
-		return fmt.Errorf("no schema with ref found at %s for %T", key, value)
-	}
-
-	return nil
-}
-
-// updateRefWithSchema replaces a ref with a schema (i.e. re-inline schema)
-func updateRefWithSchema(spec *swspec.Swagger, key string, sch *swspec.Schema) error {
-	debugLog("updating ref for %s with schema", key)
-	pth, value, err := getPointerFromKey(spec, key)
-	if err != nil {
-		return err
-	}
-
-	switch refable := value.(type) {
-	case *swspec.Schema:
-		*refable = *sch
-	case swspec.Schema:
-		_, entry, pvalue, erp := getParentFromKey(spec, key)
-		if erp != nil {
-			return erp
-		}
-		switch container := pvalue.(type) {
-		case swspec.Definitions:
-			container[entry] = *sch
-
-		case map[string]swspec.Schema:
-			container[entry] = *sch
-
-		case []swspec.Schema:
-			idx, err := strconv.Atoi(entry)
-			if err != nil {
-				return fmt.Errorf("%s not a number: %v", pth, err)
-			}
-			container[idx] = *sch
-
-		case *swspec.SchemaOrArray:
-			// NOTE: this is necessarily an array - otherwise, the parent would be *Schema
-			idx, err := strconv.Atoi(entry)
-			if err != nil {
-				return fmt.Errorf("%s not a number: %v", pth, err)
-			}
-			container.Schemas[idx] = *sch
-
-		// NOTE: can't have case *swspec.SchemaOrBool - parent in this case is *Schema
-
-		default:
-			return fmt.Errorf("unhandled type for parent of [%s]: %T", key, value)
-		}
-	case *swspec.SchemaOrArray:
-		*refable.Schema = *sch
-	// NOTE: can't have case *swspec.SchemaOrBool - parent in this case is *Schema
-	case *swspec.SchemaOrBool:
-		*refable.Schema = *sch
-	default:
-		return fmt.Errorf("no schema with ref found at %s for %T", key, value)
-	}
-
-	return nil
-}
-
-func containsString(names []string, name string) bool {
-	for _, nm := range names {
-		if nm == name {
-			return true
-		}
-	}
-	return false
-}
-
-type opRef struct {
-	Method string
-	Path   string
-	Key    string
-	ID     string
-	Op     *swspec.Operation
-	Ref    swspec.Ref
-}
-
-type opRefs []opRef
-
-func (o opRefs) Len() int           { return len(o) }
-func (o opRefs) Swap(i, j int)      { o[i], o[j] = o[j], o[i] }
-func (o opRefs) Less(i, j int) bool { return o[i].Key < o[j].Key }
-
-func gatherOperations(specDoc *Spec, operationIDs []string) map[string]opRef {
-	var oprefs opRefs
-
-	for method, pathItem := range specDoc.Operations() {
-		for pth, operation := range pathItem {
-			vv := *operation
-			oprefs = append(oprefs, opRef{
-				Key:    swag.ToGoName(strings.ToLower(method) + " " + pth),
-				Method: method,
-				Path:   pth,
-				ID:     vv.ID,
-				Op:     &vv,
-				Ref:    swspec.MustCreateRef("#" + slashpath.Join("/paths", jsonpointer.Escape(pth), method)),
-			})
-		}
-	}
-
-	sort.Sort(oprefs)
-
-	operations := make(map[string]opRef)
-	for _, opr := range oprefs {
-		nm := opr.ID
-		if nm == "" {
-			nm = opr.Key
-		}
-
-		oo, found := operations[nm]
-		if found && oo.Method != opr.Method && oo.Path != opr.Path {
-			nm = opr.Key
-		}
-		if len(operationIDs) == 0 || containsString(operationIDs, opr.ID) || containsString(operationIDs, nm) {
-			opr.ID = nm
-			opr.Op.ID = nm
-			operations[nm] = opr
-		}
-	}
-	return operations
-}
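gatherOperations above walks the analyzer's public Operations() index; a hedged sketch of the same traversal from client code (the spec path is illustrative):

package main

import (
	"fmt"
	"log"

	"github.com/go-openapi/analysis"
	"github.com/go-openapi/loads"
)

func main() {
	doc, err := loads.Spec("./swagger.json") // path is illustrative
	if err != nil {
		log.Fatal(err)
	}
	an := analysis.New(doc.Spec())
	// operations indexed by HTTP method, then by path
	for method, ops := range an.Operations() {
		for path, op := range ops {
			fmt.Printf("%s %s -> operationId=%q\n", method, path, op.ID)
		}
	}
}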
-// stripPointersAndOAIGen removes anonymous JSON pointers from the spec and chains with the name conflicts handler.
-// This loops until the spec has no such pointer and all name conflicts have been reduced as much as possible.
-func stripPointersAndOAIGen(opts *FlattenOpts) error {
-	// name all JSON pointers to anonymous documents
-	if err := namePointers(opts); err != nil {
-		return err
-	}
-
-	// remove unnecessary OAIGen ref (created when flattening external refs creates name conflicts)
-	hasIntroducedPointerOrInline, ers := stripOAIGen(opts)
-	if ers != nil {
-		return ers
-	}
-
-	// iterate as pointer or OAIGen resolution may introduce inline schemas or pointers
-	for hasIntroducedPointerOrInline {
-		if !opts.Minimal {
-			opts.Spec.reload() // re-analyze
-			if err := nameInlinedSchemas(opts); err != nil {
-				return err
-			}
-		}
-
-		if err := namePointers(opts); err != nil {
-			return err
-		}
-
-		// restrip
-		if hasIntroducedPointerOrInline, ers = stripOAIGen(opts); ers != nil {
-			return ers
-		}
-
-		opts.Spec.reload() // re-analyze
-	}
-	return nil
-}
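The OAIGen suffix handled below originates in uniqifyName (above); a hypothetical in-package test (not part of the vendored code) pinning down the renaming behavior:

package analysis

import (
	"testing"

	"github.com/go-openapi/spec"
)

// hypothetical in-package test, for illustration only
func TestUniqifyNameOnConflict(t *testing.T) {
	defs := spec.Definitions{"record": spec.Schema{}}
	// conflicting name: an OAIGen suffix is appended
	name, renamed := uniqifyName(defs, "record")
	if name != "recordOAIGen" || !renamed {
		t.Fatalf("got %q (renamed=%t)", name, renamed)
	}
	// no conflict: the name passes through unchanged
	name, renamed = uniqifyName(defs, "address")
	if name != "address" || renamed {
		t.Fatalf("got %q (renamed=%t)", name, renamed)
	}
}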
-// stripOAIGen strips the spec from unnecessary OAIGen constructs, initially created to dedupe flattened definitions.
-//
-// A dedupe is deemed unnecessary whenever:
-//  - the only conflict is with its (single) parent: OAIGen is merged into its parent (reinlining)
-//  - there is a conflict with multiple parents: merge OAIGen in first parent, then rewrite other parents to point to
-//    the first parent.
-//
-// This function returns a true bool whenever it re-inlined a complex schema, so the caller may choose to iterate
-// pointer and name resolution again.
-func stripOAIGen(opts *FlattenOpts) (bool, error) {
-	debugLog("stripOAIGen")
-	replacedWithComplex := false
-
-	// figure out referers of OAIGen definitions
-	for _, r := range opts.flattenContext.newRefs {
-		if !r.isOAIGen || r.resolved { // bail on already resolved entries (avoid looping)
-			continue
-		}
-		for k, v := range opts.Spec.references.allRefs {
-			if r.path != v.String() {
-				continue
-			}
-			found := false
-			for _, p := range r.parents {
-				if p == k {
-					found = true
-					break
-				}
-			}
-			if !found {
-				r.parents = append(r.parents, k)
-			}
-		}
-	}
-
-	for k := range opts.flattenContext.newRefs {
-		r := opts.flattenContext.newRefs[k]
-		//debugLog("newRefs[%s]: isOAIGen: %t, resolved: %t, name: %s, path:%s, #parents: %d, parents: %v, ref: %s",
-		//	k, r.isOAIGen, r.resolved, r.newName, r.path, len(r.parents), r.parents, r.schema.Ref.String())
-		if r.isOAIGen && len(r.parents) >= 1 {
-			pr := r.parents
-			sort.Strings(pr)
-
-			// rewrite first parent schema in lexicographical order
-			debugLog("rewrite first parent in lex order %s with schema", pr[0])
-			if err := updateRefWithSchema(opts.Swagger(), pr[0], r.schema); err != nil {
-				return false, err
-			}
-			if pa, ok := opts.flattenContext.newRefs[pr[0]]; ok && pa.isOAIGen {
-				// update parent in ref index entry
-				debugLog("update parent entry: %s", pr[0])
-				pa.schema = r.schema
-				pa.resolved = false
-				replacedWithComplex = true
-			}
-
-			// rewrite other parents to point to first parent
-			if len(pr) > 1 {
-				for _, p := range pr[1:] {
-					replacingRef := swspec.MustCreateRef(pr[0])
-
-					// set complex when replacing ref is an anonymous jsonpointer: further processing may be required
-					replacedWithComplex = replacedWithComplex ||
-						slashpath.Dir(replacingRef.String()) != definitionsPath
-					debugLog("rewrite parent with ref: %s", replacingRef.String())
-
-					// NOTE: it is possible at this stage to introduce json pointers (to non-definitions places).
-					// Those are stripped later on.
-					if err := updateRef(opts.Swagger(), p, replacingRef); err != nil {
-						return false, err
-					}
-
-					if pa, ok := opts.flattenContext.newRefs[p]; ok && pa.isOAIGen {
-						// update parent in ref index
-						debugLog("update parent entry: %s", p)
-						pa.schema = r.schema
-						pa.resolved = false
-						replacedWithComplex = true
-					}
-				}
-			}
-
-			// remove OAIGen definition
-			debugLog("removing definition %s", slashpath.Base(r.path))
-			delete(opts.Swagger().Definitions, slashpath.Base(r.path))
-
-			// propagate changes in ref index for keys which have this one as a parent
-			for kk, value := range opts.flattenContext.newRefs {
-				if kk == k || !value.isOAIGen || value.resolved {
-					continue
-				}
-				found := false
-				newParents := make([]string, 0, len(value.parents))
-				for _, parent := range value.parents {
-					switch {
-					case parent == r.path:
-						found = true
-						parent = pr[0]
-					case strings.HasPrefix(parent, r.path+"/"):
-						found = true
-						parent = slashpath.Join(pr[0], strings.TrimPrefix(parent, r.path))
-					}
-					newParents = append(newParents, parent)
-				}
-				if found {
-					value.parents = newParents
-				}
-			}
-
-			// mark naming conflict as resolved
-			debugLog("marking naming conflict resolved for key: %s", r.key)
-			opts.flattenContext.newRefs[r.key].isOAIGen = false
-			opts.flattenContext.newRefs[r.key].resolved = true
-
-			// determine if the previous substitution did inline a complex schema
-			if r.schema != nil && r.schema.Ref.String() == "" { // inline schema
-				asch, err := Schema(SchemaOpts{Schema: r.schema, Root: opts.Swagger(), BasePath: opts.BasePath})
-				if err != nil {
-					return false, err
-				}
-				debugLog("re-inlined schema: parent: %s, %t", pr[0], isAnalyzedAsComplex(asch))
-				replacedWithComplex = replacedWithComplex ||
-					!(slashpath.Dir(pr[0]) == definitionsPath) && isAnalyzedAsComplex(asch)
-			}
-		}
-	}
-
-	debugLog("replacedWithComplex: %t", replacedWithComplex)
-	opts.Spec.reload() // re-analyze
-	return replacedWithComplex, nil
-}
-
-// croak logs notifications and warnings about valid, but possibly unwanted constructs resulting
-// from flattening a spec
-func croak(opts *FlattenOpts) {
-	reported := make(map[string]bool, len(opts.flattenContext.newRefs))
-	for _, v := range opts.Spec.references.allRefs {
-		// warns about duplicate handling
-		for _, r := range opts.flattenContext.newRefs {
-			if r.isOAIGen && r.path == v.String() {
-				reported[r.newName] = true
-			}
-		}
-	}
-	for k := range reported {
-		log.Printf("warning: duplicate flattened definition name resolved as %s", k)
-	}
-	// warns about possible type mismatches
-	uniqueMsg := make(map[string]bool)
-	for _, msg := range opts.flattenContext.warnings {
-		if _, ok := uniqueMsg[msg]; ok {
-			continue
-		}
-		log.Printf("warning: %s", msg)
-		uniqueMsg[msg] = true
-	}
-}
-
-// namePointers replaces all JSON pointers to anonymous documents by a $ref to a new named definition.
-//
-// This is carried out depth-first. Pointers to $refs which are top level definitions are replaced by the $ref itself.
-// Pointers to simple types are expanded, unless they express commonality (i.e. several such $ref are used).
-func namePointers(opts *FlattenOpts) error {
-	debugLog("name pointers")
-	refsToReplace := make(map[string]SchemaRef, len(opts.Spec.references.schemas))
-	for k, ref := range opts.Spec.references.allRefs {
-		if slashpath.Dir(ref.String()) == definitionsPath {
-			// this is a ref to a top-level definition: ok
-			continue
-		}
-		replacingRef, sch, erd := deepestRef(opts, ref)
-		if erd != nil {
-			return fmt.Errorf("at %s, %v", k, erd)
-		}
-		debugLog("planning pointer to replace at %s: %s, resolved to: %s", k, ref.String(), replacingRef.String())
-		refsToReplace[k] = SchemaRef{
-			Name:     k,            // caller
-			Ref:      replacingRef, // callee
-			Schema:   sch,
-			TopLevel: slashpath.Dir(replacingRef.String()) == definitionsPath,
-		}
-	}
-	depthFirst := sortDepthFirst(refsToReplace)
-	namer := &inlineSchemaNamer{
-		Spec:           opts.Swagger(),
-		Operations:     opRefsByRef(gatherOperations(opts.Spec, nil)),
-		flattenContext: opts.flattenContext,
-		opts:           opts,
-	}
-
-	for _, key := range depthFirst {
-		v := refsToReplace[key]
-		// update current replacement, which may have been updated by previous changes of deeper elements
-		replacingRef, sch, erd := deepestRef(opts, v.Ref)
-		if erd != nil {
-			return fmt.Errorf("at %s, %v", key, erd)
-		}
-		v.Ref = replacingRef
-		v.Schema = sch
-		v.TopLevel = slashpath.Dir(replacingRef.String()) == definitionsPath
-		debugLog("replacing pointer at %s: resolved to: %s", key, v.Ref.String())
-
-		if v.TopLevel {
-			debugLog("replace pointer %s by canonical definition: %s", key, v.Ref.String())
-			// if the schema is a $ref to a top level definition, just rewrite the pointer to this $ref
-			if err := updateRef(opts.Swagger(), key, v.Ref); err != nil {
-				return err
-			}
-		} else {
-			// this is a JSON pointer to an anonymous document (internal or external):
-			// create a definition for this schema when:
-			//  - it is a complex schema
-			//  - or it is pointed by more than one $ref (i.e.
expresses commonality) - // otherwise, expand the pointer (single reference to a simple type) - // - // The named definition for this follows the target's key, not the caller's - debugLog("namePointers at %s for %s", key, v.Ref.String()) - - // qualify the expanded schema - /* - if key == "#/paths/~1some~1where~1{id}/get/parameters/1/items" { - // DEBUG - //func getPointerFromKey(spec interface{}, key string) (string, interface{}, error) { - k, res, err := getPointerFromKey(namer.Spec, key) - debugLog("k = %s, res=%#v, err=%v", k, res, err) - } - */ - asch, ers := Schema(SchemaOpts{Schema: v.Schema, Root: opts.Swagger(), BasePath: opts.BasePath}) - if ers != nil { - return fmt.Errorf("schema analysis [%s]: %v", key, ers) - } - callers := make([]string, 0, 64) - - debugLog("looking for callers") - an := New(opts.Swagger()) - for k, w := range an.references.allRefs { - r, _, erd := deepestRef(opts, w) - if erd != nil { - return fmt.Errorf("at %s, %v", key, erd) - } - if r.String() == v.Ref.String() { - callers = append(callers, k) - } - } - debugLog("callers for %s: %d", v.Ref.String(), len(callers)) - if len(callers) == 0 { - // has already been updated and resolved - continue - } - - parts := keyParts(v.Ref.String()) - debugLog("number of callers for %s: %d", v.Ref.String(), len(callers)) - // identifying edge case when the namer did nothing because we point to a non-schema object - // no definition is created and we expand the $ref for all callers - if (!asch.IsSimpleSchema || len(callers) > 1) && !parts.IsSharedParam() && !parts.IsSharedResponse() { - debugLog("replace JSON pointer at [%s] by definition: %s", key, v.Ref.String()) - if err := namer.Name(v.Ref.String(), v.Schema, asch); err != nil { - return err - } - - // regular case: we named the $ref as a definition, and we move all callers to this new $ref - for _, caller := range callers { - if caller != key { - // move $ref for next to resolve - debugLog("identified caller of %s at [%s]", v.Ref.String(), caller) - c := refsToReplace[caller] - c.Ref = v.Ref - refsToReplace[caller] = c - } - } - } else { - debugLog("expand JSON pointer for key=%s", key) - if err := updateRefWithSchema(opts.Swagger(), key, v.Schema); err != nil { - return err - } - // NOTE: there is no other caller to update - } - } - } - opts.Spec.reload() // re-analyze - return nil -} - -// deepestRef finds the first definition ref, from a cascade of nested refs which are not definitions. -// - if no definition is found, returns the deepest ref. -// - pointers to external files are expanded -// -// NOTE: all external $ref's are assumed to be already expanded at this stage. 
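An end-to-end sketch of what namePointers above does to an anonymous JSON pointer; the spec literal and the generated definition name follow the naming rules described in the comments above, but the exact output name is not guaranteed by this sketch:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/analysis"
	"github.com/go-openapi/spec"
)

func main() {
	raw := []byte(`{
	  "swagger": "2.0",
	  "info": {"title": "t", "version": "1"},
	  "paths": {},
	  "definitions": {
	    "record": {
	      "type": "object",
	      "properties": {"address": {"type": "object", "properties": {"city": {"type": "string"}}}}
	    },
	    "addressAlias": {"$ref": "#/definitions/record/properties/address"}
	  }
	}`)
	var sw spec.Swagger
	if err := json.Unmarshal(raw, &sw); err != nil {
		panic(err)
	}
	if err := analysis.Flatten(analysis.FlattenOpts{
		Spec:     analysis.New(&sw),
		BasePath: "flat.json", // illustrative; made absolute by Flatten
		Minimal:  true,
	}); err != nil {
		panic(err)
	}
	// the pointer target should now live in its own named definition,
	// and addressAlias should $ref that definition instead of a JSON pointer
	out, _ := json.MarshalIndent(sw.Definitions, "", "  ")
	fmt.Println(string(out))
}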
-func deepestRef(opts *FlattenOpts, ref swspec.Ref) (swspec.Ref, *swspec.Schema, error) {
-	if !ref.HasFragmentOnly {
-		// we found an external $ref, which is odd
-		// does nothing on external $refs
-		return ref, nil, nil
-	}
-	currentRef := ref
-	visited := make(map[string]bool, 64)
-DOWNREF:
-	for currentRef.String() != "" {
-		if slashpath.Dir(currentRef.String()) == definitionsPath {
-			// this is a top-level definition: stop here and return this ref
-			return currentRef, nil, nil
-		}
-		if _, beenThere := visited[currentRef.String()]; beenThere {
-			return swspec.Ref{}, nil,
-				fmt.Errorf("cannot resolve cyclic chain of pointers under %s", currentRef.String())
-		}
-		visited[currentRef.String()] = true
-		value, _, err := currentRef.GetPointer().Get(opts.Swagger())
-		if err != nil {
-			return swspec.Ref{}, nil, err
-		}
-		switch refable := value.(type) {
-		case *swspec.Schema:
-			if refable.Ref.String() == "" {
-				break DOWNREF
-			}
-			currentRef = refable.Ref
-
-		case swspec.Schema:
-			if refable.Ref.String() == "" {
-				break DOWNREF
-			}
-			currentRef = refable.Ref
-
-		case *swspec.SchemaOrArray:
-			if refable.Schema == nil || refable.Schema != nil && refable.Schema.Ref.String() == "" {
-				break DOWNREF
-			}
-			currentRef = refable.Schema.Ref
-
-		case *swspec.SchemaOrBool:
-			if refable.Schema == nil || refable.Schema != nil && refable.Schema.Ref.String() == "" {
-				break DOWNREF
-			}
-			currentRef = refable.Schema.Ref
-
-		case swspec.Response:
-			// a pointer points to a schema initially marshalled in responses section...
-			// Attempt to convert this to a schema. If this fails, the spec is invalid
-			asJSON, _ := refable.MarshalJSON()
-			var asSchema swspec.Schema
-			err := asSchema.UnmarshalJSON(asJSON)
-			if err != nil {
-				return swspec.Ref{}, nil,
-					fmt.Errorf("invalid type for resolved JSON pointer %s. Expected a schema, got: %T",
-						currentRef.String(), value)
-			}
-			opts.flattenContext.warnings = append(opts.flattenContext.warnings,
-				fmt.Sprintf("found $ref %q (response) interpreted as schema", currentRef.String()))
-
-			if asSchema.Ref.String() == "" {
-				break DOWNREF
-			}
-			currentRef = asSchema.Ref
-
-		case swspec.Parameter:
-			// a pointer points to a schema initially marshalled in parameters section...
-			// Attempt to convert this to a schema. If this fails, the spec is invalid
-			asJSON, _ := refable.MarshalJSON()
-			var asSchema swspec.Schema
-			err := asSchema.UnmarshalJSON(asJSON)
-			if err != nil {
-				return swspec.Ref{}, nil,
-					fmt.Errorf("invalid type for resolved JSON pointer %s. Expected a schema, got: %T",
-						currentRef.String(), value)
-			}
-			opts.flattenContext.warnings = append(opts.flattenContext.warnings,
-				fmt.Sprintf("found $ref %q (parameter) interpreted as schema", currentRef.String()))
-
-			if asSchema.Ref.String() == "" {
-				break DOWNREF
-			}
-			currentRef = asSchema.Ref
-
-		default:
-			return swspec.Ref{}, nil,
-				fmt.Errorf("unhandled type to resolve JSON pointer %s. Expected a Schema, got: %T",
-					currentRef.String(), value)
-		}
-	}
-	// assess what schema we're ending with
-	sch, erv := swspec.ResolveRefWithBase(opts.Swagger(), &currentRef, opts.ExpandOpts(false))
-	if erv != nil {
-		return swspec.Ref{}, nil, erv
-	}
-	if sch == nil {
-		return swspec.Ref{}, nil, fmt.Errorf("no schema found at %s", currentRef.String())
-	}
-	return currentRef, sch, nil
-}
-
-// normalizeRef strips the current file from any $ref.
This works around issue go-openapi/spec#76: -// leading absolute file in $ref is stripped -func normalizeRef(opts *FlattenOpts) error { - debugLog("normalizeRef") - opts.Spec.reload() // re-analyze - for k, w := range opts.Spec.references.allRefs { - if strings.HasPrefix(w.String(), opts.BasePath+definitionsPath) { // may be a mix of / and \, depending on OS - // strip base path from definition - debugLog("stripping absolute path for: %s", w.String()) - if err := updateRef(opts.Swagger(), k, - swspec.MustCreateRef(slashpath.Join(definitionsPath, slashpath.Base(w.String())))); err != nil { - return err - } - } - } - opts.Spec.reload() // re-analyze - return nil -} diff --git a/vendor/github.com/go-openapi/analysis/go.mod b/vendor/github.com/go-openapi/analysis/go.mod deleted file mode 100644 index 6c8e58577cf5..000000000000 --- a/vendor/github.com/go-openapi/analysis/go.mod +++ /dev/null @@ -1,13 +0,0 @@ -module github.com/go-openapi/analysis - -require ( - github.com/go-openapi/jsonpointer v0.19.3 - github.com/go-openapi/loads v0.19.0 - github.com/go-openapi/spec v0.19.3 - github.com/go-openapi/strfmt v0.19.3 - github.com/go-openapi/swag v0.19.5 - github.com/stretchr/testify v1.3.0 - go.mongodb.org/mongo-driver v1.1.1 // indirect -) - -go 1.13 diff --git a/vendor/github.com/go-openapi/analysis/go.sum b/vendor/github.com/go-openapi/analysis/go.sum deleted file mode 100644 index 8e8b5f9bc474..000000000000 --- a/vendor/github.com/go-openapi/analysis/go.sum +++ /dev/null @@ -1,97 +0,0 @@ -github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= -github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.19.2 h1:a2kIyV3w+OS3S97zxUndRVD46+FhGOUBDFY7nmu4CsY= -github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.19.2 h1:A9+F4Dc/MCNB5jibxf6rRvOvR/iFgQdyNx9eIhnGqq0= -github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= -github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= 
-github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.19.2 h1:o20suLFB4Ri0tuzpWtyHlh7E7HnkqTNLq6aR6WVNS1w= -github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= -github.com/go-openapi/loads v0.19.0 h1:wCOBNscACI8L93tt5tvB2zOMkJ098XCw3fP0BY2ybDA= -github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.19.3 h1:0XRyw8kguri6Yw4SxhsQA/atC88yqrk0+G4YhI2wabc= -github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= -github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.19.2 h1:clPGfBnJohokno0e+d7hs6Yocrzjlgz6EsQSDncCRnE= -github.com/go-openapi/strfmt v0.19.2/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= -github.com/go-openapi/strfmt v0.19.3 h1:eRfyY5SkaNJCAwmmMcADjY31ow9+N7MCLW7oRkbsINA= -github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= -github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.19.2 h1:jvO6bCMBEilGwMfHhrd61zIID4oIFdwb76V17SM88dE= -github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 h1:nTT4s92Dgz2HlrB2NaMgvlfqHH39OgMhA7z3PK7PGD4= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e h1:hB2xlXdHp/pmPZq0y3QnmWAArdw9PqbmotexnWx/FU8= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= -github.com/mitchellh/mapstructure v1.1.2/go.mod 
h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= -github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -go.mongodb.org/mongo-driver v1.0.3 h1:GKoji1ld3tw2aC+GX1wbr/J2fX13yNacEYoJ8Nhr0yU= -go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.1 h1:Sq1fR+0c58RME5EoqKdjkiQAmPjmfHlZOoRI6fTUOcs= -go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k2fySZ1zf2zCjvQCiIM= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2 
h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/go-openapi/analysis/internal/post_go18.go b/vendor/github.com/go-openapi/analysis/internal/post_go18.go
deleted file mode 100644
index f96f55c08733..000000000000
--- a/vendor/github.com/go-openapi/analysis/internal/post_go18.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// +build go1.8
-
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal
-
-import "net/url"
-
-// PathUnescape provides url.PathUnescape(), with seamless
-// go version support for pre-go1.8
-//
-// TODO: this function is currently defined in go-openapi/swag,
-// but unexported. We might choose to export it, or simply phase
-// out pre-go1.8 support.
-func PathUnescape(path string) (string, error) {
-	return url.PathUnescape(path)
-}
diff --git a/vendor/github.com/go-openapi/analysis/internal/pre_go18.go b/vendor/github.com/go-openapi/analysis/internal/pre_go18.go
deleted file mode 100644
index 4cc644182209..000000000000
--- a/vendor/github.com/go-openapi/analysis/internal/pre_go18.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// +build !go1.8
-
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal
-
-import "net/url"
-
-// PathUnescape provides url.PathUnescape(), with seamless
-// go version support for pre-go1.8
-//
-// TODO: this function is currently defined in go-openapi/swag,
-// but unexported. We might choose to export it, or simply phase
-// out pre-go1.8 support.
-func PathUnescape(path string) (string, error) {
-	return url.QueryUnescape(path)
-}
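// For context, the only substantive difference between the two build-tagged
// files above is the standard-library fallback: url.QueryUnescape also turns
// '+' into a space, which url.PathUnescape deliberately does not. A small,
// self-contained illustration:
package main

import (
	"fmt"
	"net/url"
)

func main() {
	p, _ := url.PathUnescape("a+b%2Fc")  // "a+b/c": '+' is kept verbatim in paths
	q, _ := url.QueryUnescape("a+b%2Fc") // "a b/c": '+' decodes to a space
	fmt.Println(p, q)
}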
diff --git a/vendor/github.com/go-openapi/analysis/mixin.go b/vendor/github.com/go-openapi/analysis/mixin.go
deleted file mode 100644
index 625c46f8f9fb..000000000000
--- a/vendor/github.com/go-openapi/analysis/mixin.go
+++ /dev/null
@@ -1,425 +0,0 @@
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package analysis
-
-import (
-	"fmt"
-	"reflect"
-
-	"github.com/go-openapi/spec"
-)
-
-// Mixin modifies the primary swagger spec by adding the paths and
-// definitions from the mixin specs. Top level parameters and
-// responses from the mixins are also carried over. Operation id
-// collisions are avoided by appending "Mixin" but only if
-// needed.
-//
-// The following parts of primary are subject to merge, filling empty details
-//  - Info
-//  - BasePath
-//  - Host
-//  - ExternalDocs
-//
-// Consider calling FixEmptyResponseDescriptions() on the modified primary
-// if you read them from storage and they are valid to start with.
-//
-// Entries in "paths", "definitions", "parameters" and "responses" are
-// added to the primary in the order of the given mixins. If the entry
-// already exists in primary it is skipped with a warning message.
-//
-// The count of skipped entries (from collisions) is returned so any
-// deviation from the number expected can flag a warning in your build
-// scripts. Carefully review the collisions before accepting them;
-// consider renaming things if possible.
-//
-// No key normalization takes place (paths, type defs,
-// etc). Ensure they are canonical if your downstream tools do
-// key normalization of any form.
-//
-// Merging schemes (http, https), and consumers/producers do not account for
-// collisions.
-func Mixin(primary *spec.Swagger, mixins ...*spec.Swagger) []string {
-	skipped := make([]string, 0, len(mixins))
-	opIds := getOpIds(primary)
-	initPrimary(primary)
-
-	for i, m := range mixins {
-		skipped = append(skipped, mergeSwaggerProps(primary, m)...)
-
-		skipped = append(skipped, mergeConsumes(primary, m)...)
-
-		skipped = append(skipped, mergeProduces(primary, m)...)
-
-		skipped = append(skipped, mergeTags(primary, m)...)
-
-		skipped = append(skipped, mergeSchemes(primary, m)...)
-
-		skipped = append(skipped, mergeSecurityDefinitions(primary, m)...)
-
-		skipped = append(skipped, mergeSecurityRequirements(primary, m)...)
-
-		skipped = append(skipped, mergeDefinitions(primary, m)...)
-
-		// merging paths requires a map of operationIDs to work with
-		skipped = append(skipped, mergePaths(primary, m, opIds, i)...)
-
-		skipped = append(skipped, mergeParameters(primary, m)...)
-
-		skipped = append(skipped, mergeResponses(primary, m)...)
-	}
-	return skipped
-}
-
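// A short usage sketch: Mixin is the exported entry point declared above; the
// returned slice holds one human-readable warning per skipped (colliding)
// entry, which callers may want to surface in build output. The wrapper
// function name below is hypothetical.
package mixinexample

import (
	"fmt"

	"github.com/go-openapi/analysis"
	"github.com/go-openapi/spec"
)

func mergeSpecs(primary *spec.Swagger, extras ...*spec.Swagger) int {
	skipped := analysis.Mixin(primary, extras...)
	for _, warning := range skipped {
		fmt.Print(warning)
	}
	return len(skipped) // a non-zero count can be used to fail or flag a build
}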
-// getOpIds extracts all the paths.<method>.operationIds from the given
-// spec and returns them as the keys in a map with 'true' values.
-func getOpIds(s *spec.Swagger) map[string]bool {
-	rv := make(map[string]bool)
-	if s.Paths == nil {
-		return rv
-	}
-	for _, v := range s.Paths.Paths {
-		piops := pathItemOps(v)
-		for _, op := range piops {
-			rv[op.ID] = true
-		}
-	}
-	return rv
-}
-
-func pathItemOps(p spec.PathItem) []*spec.Operation {
-	var rv []*spec.Operation
-	rv = appendOp(rv, p.Get)
-	rv = appendOp(rv, p.Put)
-	rv = appendOp(rv, p.Post)
-	rv = appendOp(rv, p.Delete)
-	rv = appendOp(rv, p.Head)
-	rv = appendOp(rv, p.Patch)
-	return rv
-}
-
-func appendOp(ops []*spec.Operation, op *spec.Operation) []*spec.Operation {
-	if op == nil {
-		return ops
-	}
-	return append(ops, op)
-}
-
-func mergeSecurityDefinitions(primary *spec.Swagger, m *spec.Swagger) (skipped []string) {
-	for k, v := range m.SecurityDefinitions {
-		if _, exists := primary.SecurityDefinitions[k]; exists {
-			warn := fmt.Sprintf(
-				"SecurityDefinitions entry '%v' already exists in primary or higher priority mixin, skipping\n", k)
-			skipped = append(skipped, warn)
-			continue
-		}
-		primary.SecurityDefinitions[k] = v
-	}
-	return
-}
-
-func mergeSecurityRequirements(primary *spec.Swagger, m *spec.Swagger) (skipped []string) {
-	for _, v := range m.Security {
-		found := false
-		for _, vv := range primary.Security {
-			if reflect.DeepEqual(v, vv) {
-				found = true
-				break
-			}
-		}
-		if found {
-			warn := fmt.Sprintf(
-				"Security requirement: '%v' already exists in primary or higher priority mixin, skipping\n", v)
-			skipped = append(skipped, warn)
-			continue
-		}
-		primary.Security = append(primary.Security, v)
-	}
-	return
-}
-
-func mergeDefinitions(primary *spec.Swagger, m *spec.Swagger) (skipped []string) {
-	for k, v := range m.Definitions {
-		// assume name collisions represent IDENTICAL type. careful.
-		if _, exists := primary.Definitions[k]; exists {
-			warn := fmt.Sprintf(
-				"definitions entry '%v' already exists in primary or higher priority mixin, skipping\n", k)
-			skipped = append(skipped, warn)
-			continue
-		}
-		primary.Definitions[k] = v
-	}
-	return
-}
-
-func mergePaths(primary *spec.Swagger, m *spec.Swagger, opIds map[string]bool, mixIndex int) (skipped []string) {
-	if m.Paths != nil {
-		for k, v := range m.Paths.Paths {
-			if _, exists := primary.Paths.Paths[k]; exists {
-				warn := fmt.Sprintf(
-					"paths entry '%v' already exists in primary or higher priority mixin, skipping\n", k)
-				skipped = append(skipped, warn)
-				continue
-			}
-
-			// Swagger requires that operationIds be
-			// unique within a spec. If we find a
-			// collision we append "Mixin0" to the
-			// operationId we are adding, where 0 is mixin
-			// index. We assume that operationIds within
-			// all the provided specs are already unique.
-			piops := pathItemOps(v)
-			for _, piop := range piops {
-				if opIds[piop.ID] {
-					piop.ID = fmt.Sprintf("%v%v%v", piop.ID, "Mixin", mixIndex)
-				}
-				opIds[piop.ID] = true
-			}
-			primary.Paths.Paths[k] = v
-		}
-	}
-	return
-}
-
-func mergeParameters(primary *spec.Swagger, m *spec.Swagger) (skipped []string) {
-	for k, v := range m.Parameters {
-		// could try to rename on conflict but would
-		// have to fix $refs in the mixin.
Complain - // for now - if _, exists := primary.Parameters[k]; exists { - warn := fmt.Sprintf( - "top level parameters entry '%v' already exists in primary or higher priority mixin, skipping\n", k) - skipped = append(skipped, warn) - continue - } - primary.Parameters[k] = v - } - return -} - -func mergeResponses(primary *spec.Swagger, m *spec.Swagger) (skipped []string) { - for k, v := range m.Responses { - // could try to rename on conflict but would - // have to fix $refs in the mixin. Complain - // for now - if _, exists := primary.Responses[k]; exists { - warn := fmt.Sprintf( - "top level responses entry '%v' already exists in primary or higher priority mixin, skipping\n", k) - skipped = append(skipped, warn) - continue - } - primary.Responses[k] = v - } - return skipped -} - -func mergeConsumes(primary *spec.Swagger, m *spec.Swagger) []string { - for _, v := range m.Consumes { - found := false - for _, vv := range primary.Consumes { - if v == vv { - found = true - break - } - } - if found { - // no warning here: we just skip it - continue - } - primary.Consumes = append(primary.Consumes, v) - } - return []string{} -} - -func mergeProduces(primary *spec.Swagger, m *spec.Swagger) []string { - for _, v := range m.Produces { - found := false - for _, vv := range primary.Produces { - if v == vv { - found = true - break - } - } - if found { - // no warning here: we just skip it - continue - } - primary.Produces = append(primary.Produces, v) - } - return []string{} -} - -func mergeTags(primary *spec.Swagger, m *spec.Swagger) (skipped []string) { - for _, v := range m.Tags { - found := false - for _, vv := range primary.Tags { - if v.Name == vv.Name { - found = true - break - } - } - if found { - warn := fmt.Sprintf( - "top level tags entry with name '%v' already exists in primary or higher priority mixin, skipping\n", v.Name) - skipped = append(skipped, warn) - continue - } - primary.Tags = append(primary.Tags, v) - } - return -} - -func mergeSchemes(primary *spec.Swagger, m *spec.Swagger) []string { - for _, v := range m.Schemes { - found := false - for _, vv := range primary.Schemes { - if v == vv { - found = true - break - } - } - if found { - // no warning here: we just skip it - continue - } - primary.Schemes = append(primary.Schemes, v) - } - return []string{} -} - -func mergeSwaggerProps(primary *spec.Swagger, m *spec.Swagger) []string { - var skipped []string - primary.Extensions, skipped = mergeExtensions(primary.Extensions, m.Extensions) - - // merging details in swagger top properties - if primary.Host == "" { - primary.Host = m.Host - } - if primary.BasePath == "" { - primary.BasePath = m.BasePath - } - if primary.Info == nil { - primary.Info = m.Info - } else if m.Info != nil { - var sk []string - primary.Info.Extensions, sk = mergeExtensions(primary.Info.Extensions, m.Info.Extensions) - skipped = append(skipped, sk...) 
-		if primary.Info.Description == "" {
-			primary.Info.Description = m.Info.Description
-		}
-		if primary.Info.Title == "" {
-			primary.Info.Title = m.Info.Title
-		}
-		if primary.Info.TermsOfService == "" {
-			primary.Info.TermsOfService = m.Info.TermsOfService
-		}
-		if primary.Info.Version == "" {
-			primary.Info.Version = m.Info.Version
-		}
-
-		if primary.Info.Contact == nil {
-			primary.Info.Contact = m.Info.Contact
-		} else if m.Info.Contact != nil {
-			if primary.Info.Contact.Name == "" {
-				primary.Info.Contact.Name = m.Info.Contact.Name
-			}
-			if primary.Info.Contact.URL == "" {
-				primary.Info.Contact.URL = m.Info.Contact.URL
-			}
-			if primary.Info.Contact.Email == "" {
-				primary.Info.Contact.Email = m.Info.Contact.Email
-			}
-		}
-
-		if primary.Info.License == nil {
-			primary.Info.License = m.Info.License
-		} else if m.Info.License != nil {
-			if primary.Info.License.Name == "" {
-				primary.Info.License.Name = m.Info.License.Name
-			}
-			if primary.Info.License.URL == "" {
-				primary.Info.License.URL = m.Info.License.URL
-			}
-		}
-
-	}
-	if primary.ExternalDocs == nil {
-		primary.ExternalDocs = m.ExternalDocs
-	} else if m.ExternalDocs != nil {
-		if primary.ExternalDocs.Description == "" {
-			primary.ExternalDocs.Description = m.ExternalDocs.Description
-		}
-		if primary.ExternalDocs.URL == "" {
-			primary.ExternalDocs.URL = m.ExternalDocs.URL
-		}
-	}
-	return skipped
-}
-
-func mergeExtensions(primary spec.Extensions, m spec.Extensions) (result spec.Extensions, skipped []string) {
-	if primary == nil {
-		result = m
-		return
-	}
-	if m == nil {
-		result = primary
-		return
-	}
-	result = primary
-	for k, v := range m {
-		if _, found := primary[k]; found {
-			skipped = append(skipped, k)
-			continue
-		}
-		primary[k] = v
-	}
-	return
-}
-
-func initPrimary(primary *spec.Swagger) {
-	if primary.SecurityDefinitions == nil {
-		primary.SecurityDefinitions = make(map[string]*spec.SecurityScheme)
-	}
-	if primary.Security == nil {
-		primary.Security = make([]map[string][]string, 0, 10)
-	}
-	if primary.Produces == nil {
-		primary.Produces = make([]string, 0, 10)
-	}
-	if primary.Consumes == nil {
-		primary.Consumes = make([]string, 0, 10)
-	}
-	if primary.Tags == nil {
-		primary.Tags = make([]spec.Tag, 0, 10)
-	}
-	if primary.Schemes == nil {
-		primary.Schemes = make([]string, 0, 10)
-	}
-	if primary.Paths == nil {
-		primary.Paths = &spec.Paths{Paths: make(map[string]spec.PathItem)}
-	}
-	if primary.Paths.Paths == nil {
-		primary.Paths.Paths = make(map[string]spec.PathItem)
-	}
-	if primary.Definitions == nil {
-		primary.Definitions = make(spec.Definitions)
-	}
-	if primary.Parameters == nil {
-		primary.Parameters = make(map[string]spec.Parameter)
-	}
-	if primary.Responses == nil {
-		primary.Responses = make(map[string]spec.Response)
-	}
-}
diff --git a/vendor/github.com/go-openapi/analysis/schema.go b/vendor/github.com/go-openapi/analysis/schema.go
deleted file mode 100644
index 398c7806394d..000000000000
--- a/vendor/github.com/go-openapi/analysis/schema.go
+++ /dev/null
@@ -1,234 +0,0 @@
-package analysis
-
-import (
-	"fmt"
-
-	"github.com/go-openapi/spec"
-	"github.com/go-openapi/strfmt"
-)
-
-// SchemaOpts configures the schema analyzer
-type SchemaOpts struct {
-	Schema   *spec.Schema
-	Root     interface{}
-	BasePath string
-	_        struct{}
-}
-
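// A short usage sketch for the analyzer declared below: Schema classifies a
// single swagger schema according to the exported API of this file. Root is
// whatever document the schema's $refs resolve against; the base path here is
// a hypothetical placeholder.
package schemaexample

import (
	"fmt"

	"github.com/go-openapi/analysis"
	"github.com/go-openapi/spec"
)

func classify(sch *spec.Schema, root *spec.Swagger) error {
	a, err := analysis.Schema(analysis.SchemaOpts{
		Schema:   sch,
		Root:     root,
		BasePath: "swagger.json", // hypothetical base path for relative $refs
	})
	if err != nil {
		return err
	}
	fmt.Println("known:", a.IsKnownType, "map:", a.IsMap, "tuple:", a.IsTuple)
	return nil
}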
-// Schema analysis, will classify the schema according to known
-// patterns.
-func Schema(opts SchemaOpts) (*AnalyzedSchema, error) {
-	if opts.Schema == nil {
-		return nil, fmt.Errorf("no schema to analyze")
-	}
-
-	a := &AnalyzedSchema{
-		schema:   opts.Schema,
-		root:     opts.Root,
-		basePath: opts.BasePath,
-	}
-
-	a.initializeFlags()
-	a.inferKnownType()
-	a.inferEnum()
-	a.inferBaseType()
-
-	if err := a.inferMap(); err != nil {
-		return nil, err
-	}
-	if err := a.inferArray(); err != nil {
-		return nil, err
-	}
-
-	a.inferTuple()
-
-	if err := a.inferFromRef(); err != nil {
-		return nil, err
-	}
-
-	a.inferSimpleSchema()
-	return a, nil
-}
-
-// AnalyzedSchema indicates what the schema represents
-type AnalyzedSchema struct {
-	schema   *spec.Schema
-	root     interface{}
-	basePath string
-
-	hasProps           bool
-	hasAllOf           bool
-	hasItems           bool
-	hasAdditionalProps bool
-	hasAdditionalItems bool
-	hasRef             bool
-
-	IsKnownType      bool
-	IsSimpleSchema   bool
-	IsArray          bool
-	IsSimpleArray    bool
-	IsMap            bool
-	IsSimpleMap      bool
-	IsExtendedObject bool
-	IsTuple          bool
-	IsTupleWithExtra bool
-	IsBaseType       bool
-	IsEnum           bool
-}
-
-// inherits copies value fields from other onto this schema
-func (a *AnalyzedSchema) inherits(other *AnalyzedSchema) {
-	if other == nil {
-		return
-	}
-	a.hasProps = other.hasProps
-	a.hasAllOf = other.hasAllOf
-	a.hasItems = other.hasItems
-	a.hasAdditionalItems = other.hasAdditionalItems
-	a.hasAdditionalProps = other.hasAdditionalProps
-	a.hasRef = other.hasRef
-
-	a.IsKnownType = other.IsKnownType
-	a.IsSimpleSchema = other.IsSimpleSchema
-	a.IsArray = other.IsArray
-	a.IsSimpleArray = other.IsSimpleArray
-	a.IsMap = other.IsMap
-	a.IsSimpleMap = other.IsSimpleMap
-	a.IsExtendedObject = other.IsExtendedObject
-	a.IsTuple = other.IsTuple
-	a.IsTupleWithExtra = other.IsTupleWithExtra
-	a.IsBaseType = other.IsBaseType
-	a.IsEnum = other.IsEnum
-}
-
-func (a *AnalyzedSchema) inferFromRef() error {
-	if a.hasRef {
-		sch := new(spec.Schema)
-		sch.Ref = a.schema.Ref
-		err := spec.ExpandSchema(sch, a.root, nil)
-		if err != nil {
-			return err
-		}
-		rsch, err := Schema(SchemaOpts{
-			Schema:   sch,
-			Root:     a.root,
-			BasePath: a.basePath,
-		})
-		if err != nil {
-			// NOTE(fredbi): currently the only cause for errors is
-			// unresolved ref. Since spec.ExpandSchema() expands the
-			// schema recursively, there is no chance to get there,
-			// until we add more causes for error in this schema analysis.
-			return err
-		}
-		a.inherits(rsch)
-	}
-	return nil
-}
-
-func (a *AnalyzedSchema) inferSimpleSchema() {
-	a.IsSimpleSchema = a.IsKnownType || a.IsSimpleArray || a.IsSimpleMap
-}
-
-func (a *AnalyzedSchema) inferKnownType() {
-	tpe := a.schema.Type
-	format := a.schema.Format
-	a.IsKnownType = tpe.Contains("boolean") ||
-		tpe.Contains("integer") ||
-		tpe.Contains("number") ||
-		tpe.Contains("string") ||
-		(format != "" && strfmt.Default.ContainsName(format)) ||
-		(a.isObjectType() && !a.hasProps && !a.hasAllOf && !a.hasAdditionalProps && !a.hasAdditionalItems)
-}
-
-func (a *AnalyzedSchema) inferMap() error {
-	if a.isObjectType() {
-		hasExtra := a.hasProps || a.hasAllOf
-		a.IsMap = a.hasAdditionalProps && !hasExtra
-		a.IsExtendedObject = a.hasAdditionalProps && hasExtra
-		if a.IsMap {
-			if a.schema.AdditionalProperties.Schema != nil {
-				msch, err := Schema(SchemaOpts{
-					Schema:   a.schema.AdditionalProperties.Schema,
-					Root:     a.root,
-					BasePath: a.basePath,
-				})
-				if err != nil {
-					return err
-				}
-				a.IsSimpleMap = msch.IsSimpleSchema
-			} else if a.schema.AdditionalProperties.Allows {
-				a.IsSimpleMap = true
-			}
-		}
-	}
-	return nil
-}
-
-func (a *AnalyzedSchema) inferArray() error {
-	// an array has Items defined as an object schema, otherwise we qualify this JSON array as a tuple
-	// (yes, even if the Items array contains only one element).
-	// arrays in JSON schema may be unrestricted (i.e no Items specified).
-	// Note that arrays in Swagger MUST have Items. Nonetheless, we analyze unrestricted arrays.
-	//
-	// NOTE: the spec package misses the distinction between:
-	// items: [] and items: {}, so we consider both arrays here.
-	a.IsArray = a.isArrayType() && (a.schema.Items == nil || a.schema.Items.Schemas == nil)
-	if a.IsArray && a.hasItems {
-		if a.schema.Items.Schema != nil {
-			itsch, err := Schema(SchemaOpts{
-				Schema:   a.schema.Items.Schema,
-				Root:     a.root,
-				BasePath: a.basePath,
-			})
-			if err != nil {
-				return err
-			}
-			a.IsSimpleArray = itsch.IsSimpleSchema
-		}
-	}
-	if a.IsArray && !a.hasItems {
-		a.IsSimpleArray = true
-	}
-	return nil
-}
-
-func (a *AnalyzedSchema) inferTuple() {
-	tuple := a.hasItems && a.schema.Items.Schemas != nil
-	a.IsTuple = tuple && !a.hasAdditionalItems
-	a.IsTupleWithExtra = tuple && a.hasAdditionalItems
-}
-
-func (a *AnalyzedSchema) inferBaseType() {
-	if a.isObjectType() {
-		a.IsBaseType = a.schema.Discriminator != ""
-	}
-}
-
-func (a *AnalyzedSchema) inferEnum() {
-	a.IsEnum = len(a.schema.Enum) > 0
-}
-
-func (a *AnalyzedSchema) initializeFlags() {
-	a.hasProps = len(a.schema.Properties) > 0
-	a.hasAllOf = len(a.schema.AllOf) > 0
-	a.hasRef = a.schema.Ref.String() != ""
-
-	a.hasItems = a.schema.Items != nil &&
-		(a.schema.Items.Schema != nil || len(a.schema.Items.Schemas) > 0)
-
-	a.hasAdditionalProps = a.schema.AdditionalProperties != nil &&
-		(a.schema.AdditionalProperties.Schema != nil || a.schema.AdditionalProperties.Allows)
-
-	a.hasAdditionalItems = a.schema.AdditionalItems != nil &&
-		(a.schema.AdditionalItems.Schema != nil || a.schema.AdditionalItems.Allows)
-
-}
-
-func (a *AnalyzedSchema) isObjectType() bool {
-	return !a.hasRef && (a.schema.Type == nil || a.schema.Type.Contains("") || a.schema.Type.Contains("object"))
-}
-
-func (a *AnalyzedSchema) isArrayType() bool {
-	return !a.hasRef && (a.schema.Type != nil && a.schema.Type.Contains("array"))
-}
diff --git a/vendor/github.com/go-openapi/errors/.golangci.yml b/vendor/github.com/go-openapi/errors/.golangci.yml
deleted file mode 100644
index 6badaf1549f8..000000000000
--- 
a/vendor/github.com/go-openapi/errors/.golangci.yml +++ /dev/null @@ -1,20 +0,0 @@ -linters-settings: - govet: - check-shadowing: true - golint: - min-confidence: 0 - gocyclo: - min-complexity: 30 - maligned: - suggest-new: true - dupl: - threshold: 100 - goconst: - min-len: 2 - min-occurrences: 4 -linters: - enable-all: true - disable: - - maligned - - lll - - gochecknoglobals diff --git a/vendor/github.com/go-openapi/errors/.travis.yml b/vendor/github.com/go-openapi/errors/.travis.yml deleted file mode 100644 index ba8a6d5918fc..000000000000 --- a/vendor/github.com/go-openapi/errors/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -after_success: -- bash <(curl -s https://codecov.io/bash) -go: -- 1.11.x -- 1.12.x -install: -- GO111MODULE=off go get -u gotest.tools/gotestsum -env: -- GO111MODULE=on -language: go -notifications: - slack: - secure: gZGp9NaHxi7zawlXJXKY92BGeDR1x0tbIcTyU5nMKLq0fhIaiEBJEeALwZ4VgqsSv3DytSSF5mLH8fevAM3ixE6hxjKQ+lQuf7V/w3btCN1CSWgoua5LOh1kTnqZQtJuRvO4pzoJcT3bJWBsVZ07VGNVzzJEy/zAKCHFqBUCXShw7QemlLBcYWFNqveTlvDIfCzvouoLnPoXwxEpkjxe9uz/ZKZgAnup/fXjC8RFctmgCnkCyvJTk0Y/fZCsufixJrJhshBWTnlrFCzRmgNkz2d+i1Ls3+MJ5EJJ2Tx/A5S63dL49J1f9Kr0AKHADmulSy8JNzIckKwbyFMYUecrsW+Lsu9DhnVMy1jj5pKsJDLRi2iIU3fXTMWbcyQbXjbbnBO2mPdP3Tzme75y4D9fc8hUPeyqVv2BU26NEbQ7EF2pKJ93OXvci7HlwRBgdJa8j6mP2LEDClcPQW00g7N/OZe0cTOMa8L5AwiBlbArwqt9wv6YLJoTG0wpDhzWsFvbCg5bJxe28Yn3fIDD0Lk1I7iSnBbp/5gzF19jmxqvcT8tHRkDL4xfjbENFTZjA5uB4Z4pj4WSyWQILLV/Jwhe3fi9uQwdviFHfj5pnVrmNUiGSOQL672K5wl2c3E9mGwejvsu2dfEz28n7Y/FUnOpY3/cBS0n27JJaerS0zMKNLE= -script: -- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065e37a..000000000000 --- a/vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. 
- -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/errors/README.md b/vendor/github.com/go-openapi/errors/README.md deleted file mode 100644 index 0ce50b23b2e9..000000000000 --- a/vendor/github.com/go-openapi/errors/README.md +++ /dev/null @@ -1,8 +0,0 @@ -# OpenAPI errors [![Build Status](https://travis-ci.org/go-openapi/errors.svg?branch=master)](https://travis-ci.org/go-openapi/errors) [![codecov](https://codecov.io/gh/go-openapi/errors/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/errors) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) - -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/errors/master/LICENSE) -[![GoDoc](https://godoc.org/github.com/go-openapi/errors?status.svg)](http://godoc.org/github.com/go-openapi/errors) -[![GolangCI](https://golangci.com/badges/github.com/go-openapi/errors.svg)](https://golangci.com) -[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/errors)](https://goreportcard.com/report/github.com/go-openapi/errors) - -Shared errors and error interface used throughout the various libraries found in the go-openapi toolkit. diff --git a/vendor/github.com/go-openapi/errors/api.go b/vendor/github.com/go-openapi/errors/api.go deleted file mode 100644 index 7667cee76c9c..000000000000 --- a/vendor/github.com/go-openapi/errors/api.go +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package errors
-
-import (
-	"encoding/json"
-	"fmt"
-	"net/http"
-	"reflect"
-	"strings"
-)
-
-// DefaultHTTPCode is used when the error Code cannot be used as an HTTP code.
-var DefaultHTTPCode = http.StatusUnprocessableEntity
-
-// Error represents an error interface that all swagger framework errors implement
-type Error interface {
-	error
-	Code() int32
-}
-
-type apiError struct {
-	code    int32
-	message string
-}
-
-func (a *apiError) Error() string {
-	return a.message
-}
-
-func (a *apiError) Code() int32 {
-	return a.code
-}
-
-// New creates a new API error with a code and a message
-func New(code int32, message string, args ...interface{}) Error {
-	if len(args) > 0 {
-		return &apiError{code, fmt.Sprintf(message, args...)}
-	}
-	return &apiError{code, message}
-}
-
-// NotFound creates a new not found error
-func NotFound(message string, args ...interface{}) Error {
-	if message == "" {
-		message = "Not found"
-	}
-	return New(http.StatusNotFound, fmt.Sprintf(message, args...))
-}
-
-// NotImplemented creates a new not implemented error
-func NotImplemented(message string) Error {
-	return New(http.StatusNotImplemented, message)
-}
-
-// MethodNotAllowedError represents an error for when the path matches but the method doesn't
-type MethodNotAllowedError struct {
-	code    int32
-	Allowed []string
-	message string
-}
-
-func (m *MethodNotAllowedError) Error() string {
-	return m.message
-}
-
-// Code the error code
-func (m *MethodNotAllowedError) Code() int32 {
-	return m.code
-}
-
-func errorAsJSON(err Error) []byte {
-	b, _ := json.Marshal(struct {
-		Code    int32  `json:"code"`
-		Message string `json:"message"`
-	}{err.Code(), err.Error()})
-	return b
-}
-
-func flattenComposite(errs *CompositeError) *CompositeError {
-	var res []error
-	for _, er := range errs.Errors {
-		switch e := er.(type) {
-		case *CompositeError:
-			if len(e.Errors) > 0 {
-				flat := flattenComposite(e)
-				if len(flat.Errors) > 0 {
-					res = append(res, flat.Errors...)
-				}
-			}
-		default:
-			if e != nil {
-				res = append(res, e)
-			}
-		}
-	}
-	return CompositeValidationError(res...)
-}
-
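// A short usage sketch: New and ServeError are the exported entry points of
// this file. ServeError derives the HTTP status from Code() for values that
// implement the Error interface above, and falls back to a 500 for plain
// errors. The route and message below are hypothetical.
package main

import (
	"net/http"

	"github.com/go-openapi/errors"
)

func main() {
	http.HandleFunc("/demo", func(rw http.ResponseWriter, r *http.Request) {
		err := errors.New(http.StatusConflict, "resource %q already exists", "demo")
		errors.ServeError(rw, r, err) // writes {"code":409,"message":"..."}
	})
	_ = http.ListenAndServe(":8080", nil)
}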
-// MethodNotAllowed creates a new method not allowed error
-func MethodNotAllowed(requested string, allow []string) Error {
-	msg := fmt.Sprintf("method %s is not allowed, but [%s] are", requested, strings.Join(allow, ","))
-	return &MethodNotAllowedError{code: http.StatusMethodNotAllowed, Allowed: allow, message: msg}
-}
-
-// ServeError the error handler interface implementation
-func ServeError(rw http.ResponseWriter, r *http.Request, err error) {
-	rw.Header().Set("Content-Type", "application/json")
-	switch e := err.(type) {
-	case *CompositeError:
-		er := flattenComposite(e)
-		// strips composite errors to first element only
-		if len(er.Errors) > 0 {
-			ServeError(rw, r, er.Errors[0])
-		} else {
-			// guard against empty CompositeError (invalid construct)
-			ServeError(rw, r, nil)
-		}
-	case *MethodNotAllowedError:
-		rw.Header().Add("Allow", strings.Join(err.(*MethodNotAllowedError).Allowed, ","))
-		rw.WriteHeader(asHTTPCode(int(e.Code())))
-		if r == nil || r.Method != http.MethodHead {
-			_, _ = rw.Write(errorAsJSON(e))
-		}
-	case Error:
-		value := reflect.ValueOf(e)
-		if value.Kind() == reflect.Ptr && value.IsNil() {
-			rw.WriteHeader(http.StatusInternalServerError)
-			_, _ = rw.Write(errorAsJSON(New(http.StatusInternalServerError, "Unknown error")))
-			return
-		}
-		rw.WriteHeader(asHTTPCode(int(e.Code())))
-		if r == nil || r.Method != http.MethodHead {
-			_, _ = rw.Write(errorAsJSON(e))
-		}
-	case nil:
-		rw.WriteHeader(http.StatusInternalServerError)
-		_, _ = rw.Write(errorAsJSON(New(http.StatusInternalServerError, "Unknown error")))
-	default:
-		rw.WriteHeader(http.StatusInternalServerError)
-		if r == nil || r.Method != http.MethodHead {
-			_, _ = rw.Write(errorAsJSON(New(http.StatusInternalServerError, err.Error())))
-		}
-	}
-}
-
-func asHTTPCode(input int) int {
-	if input >= 600 {
-		return DefaultHTTPCode
-	}
-	return input
-}
diff --git a/vendor/github.com/go-openapi/errors/auth.go b/vendor/github.com/go-openapi/errors/auth.go
deleted file mode 100644
index 0545b501bd7c..000000000000
--- a/vendor/github.com/go-openapi/errors/auth.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
- -package errors - -import "net/http" - -// Unauthenticated returns an unauthenticated error -func Unauthenticated(scheme string) Error { - return New(http.StatusUnauthorized, "unauthenticated for %s", scheme) -} diff --git a/vendor/github.com/go-openapi/errors/go.mod b/vendor/github.com/go-openapi/errors/go.mod deleted file mode 100644 index 084143001f02..000000000000 --- a/vendor/github.com/go-openapi/errors/go.mod +++ /dev/null @@ -1,6 +0,0 @@ -module github.com/go-openapi/errors - -require ( - github.com/stretchr/objx v0.2.0 // indirect - github.com/stretchr/testify v1.3.0 -) diff --git a/vendor/github.com/go-openapi/errors/go.sum b/vendor/github.com/go-openapi/errors/go.sum deleted file mode 100644 index e7314e279fb5..000000000000 --- a/vendor/github.com/go-openapi/errors/go.sum +++ /dev/null @@ -1,9 +0,0 @@ -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= diff --git a/vendor/github.com/go-openapi/errors/headers.go b/vendor/github.com/go-openapi/errors/headers.go deleted file mode 100644 index 0360c094ea31..000000000000 --- a/vendor/github.com/go-openapi/errors/headers.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package errors - -import ( - "fmt" - "net/http" -) - -// Validation represents a failure of a precondition -type Validation struct { - code int32 - Name string - In string - Value interface{} - message string - Values []interface{} -} - -func (e *Validation) Error() string { - return e.message -} - -// Code the error code -func (e *Validation) Code() int32 { - return e.code -} - -// ValidateName produces an error message name for an aliased property -func (e *Validation) ValidateName(name string) *Validation { - if e.Name == "" && name != "" { - e.Name = name - e.message = name + e.message - } - return e -} - -const ( - contentTypeFail = `unsupported media type %q, only %v are allowed` - responseFormatFail = `unsupported media type requested, only %v are available` -) - -// InvalidContentType error for an invalid content type -func InvalidContentType(value string, allowed []string) *Validation { - values := make([]interface{}, 0, len(allowed)) - for _, v := range allowed { - values = append(values, v) - } - return &Validation{ - code: http.StatusUnsupportedMediaType, - Name: "Content-Type", - In: "header", - Value: value, - Values: values, - message: fmt.Sprintf(contentTypeFail, value, allowed), - } -} - -// InvalidResponseFormat error for an unacceptable response format request -func InvalidResponseFormat(value string, allowed []string) *Validation { - values := make([]interface{}, 0, len(allowed)) - for _, v := range allowed { - values = append(values, v) - } - return &Validation{ - code: http.StatusNotAcceptable, - Name: "Accept", - In: "header", - Value: value, - Values: values, - message: fmt.Sprintf(responseFormatFail, allowed), - } -} diff --git a/vendor/github.com/go-openapi/errors/middleware.go b/vendor/github.com/go-openapi/errors/middleware.go deleted file mode 100644 index 6390d4636aa3..000000000000 --- a/vendor/github.com/go-openapi/errors/middleware.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-
-package errors
-
-import (
-	"bytes"
-	"fmt"
-	"strings"
-)
-
-// APIVerificationFailed is an error that contains all the missing info for a mismatched section
-// between the api registrations and the api spec
-type APIVerificationFailed struct {
-	Section              string
-	MissingSpecification []string
-	MissingRegistration  []string
-}
-
-// Error renders the missing registrations and specifications as one message
-func (v *APIVerificationFailed) Error() string {
-	buf := bytes.NewBuffer(nil)
-
-	hasRegMissing := len(v.MissingRegistration) > 0
-	hasSpecMissing := len(v.MissingSpecification) > 0
-
-	if hasRegMissing {
-		buf.WriteString(fmt.Sprintf("missing [%s] %s registrations", strings.Join(v.MissingRegistration, ", "), v.Section))
-	}
-
-	if hasRegMissing && hasSpecMissing {
-		buf.WriteString("\n")
-	}
-
-	if hasSpecMissing {
-		buf.WriteString(fmt.Sprintf("missing from spec file [%s] %s", strings.Join(v.MissingSpecification, ", "), v.Section))
-	}
-
-	return buf.String()
-}
diff --git a/vendor/github.com/go-openapi/errors/parsing.go b/vendor/github.com/go-openapi/errors/parsing.go
deleted file mode 100644
index 1bae87302a6b..000000000000
--- a/vendor/github.com/go-openapi/errors/parsing.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package errors
-
-import "fmt"
-
-// ParseError represents a parsing error
-type ParseError struct {
-	code    int32
-	Name    string
-	In      string
-	Value   string
-	Reason  error
-	message string
-}
-
-func (e *ParseError) Error() string {
-	return e.message
-}
-
-// Code returns the http status code for this error
-func (e *ParseError) Code() int32 {
-	return e.code
-}
-
-const (
-	parseErrorTemplContent     = `parsing %s %s from %q failed, because %s`
-	parseErrorTemplContentNoIn = `parsing %s from %q failed, because %s`
-)
-
-// NewParseError creates a new parse error
-func NewParseError(name, in, value string, reason error) *ParseError {
-	var msg string
-	if in == "" {
-		msg = fmt.Sprintf(parseErrorTemplContentNoIn, name, value, reason)
-	} else {
-		msg = fmt.Sprintf(parseErrorTemplContent, name, in, value, reason)
-	}
-	return &ParseError{
-		code:    400,
-		Name:    name,
-		In:      in,
-		Value:   value,
-		Reason:  reason,
-		message: msg,
-	}
-}
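// A short usage sketch: NewParseError above builds a 400-coded error whose
// message embeds the parameter name, its location and the underlying cause.
// The parameter values below are hypothetical.
package main

import (
	"fmt"
	"strconv"

	"github.com/go-openapi/errors"
)

func main() {
	_, cause := strconv.Atoi("abc")
	err := errors.NewParseError("limit", "query", "abc", cause)
	fmt.Println(err.Code(), err.Error())
	// 400 parsing limit query from "abc" failed, because strconv.Atoi: ...
}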
diff --git a/vendor/github.com/go-openapi/errors/schema.go b/vendor/github.com/go-openapi/errors/schema.go
deleted file mode 100644
index 14fb2c5f116d..000000000000
--- a/vendor/github.com/go-openapi/errors/schema.go
+++ /dev/null
@@ -1,562 +0,0 @@
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package errors
-
-import (
-	"fmt"
-	"strings"
-)
-
-const (
-	invalidType               = "%s is an invalid type name"
-	typeFail                  = "%s in %s must be of type %s"
-	typeFailWithData          = "%s in %s must be of type %s: %q"
-	typeFailWithError         = "%s in %s must be of type %s, because: %s"
-	requiredFail              = "%s in %s is required"
-	tooLongMessage            = "%s in %s should be at most %d chars long"
-	tooShortMessage           = "%s in %s should be at least %d chars long"
-	patternFail               = "%s in %s should match '%s'"
-	enumFail                  = "%s in %s should be one of %v"
-	multipleOfFail            = "%s in %s should be a multiple of %v"
-	maxIncFail                = "%s in %s should be less than or equal to %v"
-	maxExcFail                = "%s in %s should be less than %v"
-	minIncFail                = "%s in %s should be greater than or equal to %v"
-	minExcFail                = "%s in %s should be greater than %v"
-	uniqueFail                = "%s in %s shouldn't contain duplicates"
-	maxItemsFail              = "%s in %s should have at most %d items"
-	minItemsFail              = "%s in %s should have at least %d items"
-	typeFailNoIn              = "%s must be of type %s"
-	typeFailWithDataNoIn      = "%s must be of type %s: %q"
-	typeFailWithErrorNoIn     = "%s must be of type %s, because: %s"
-	requiredFailNoIn          = "%s is required"
-	tooLongMessageNoIn        = "%s should be at most %d chars long"
-	tooShortMessageNoIn       = "%s should be at least %d chars long"
-	patternFailNoIn           = "%s should match '%s'"
-	enumFailNoIn              = "%s should be one of %v"
-	multipleOfFailNoIn        = "%s should be a multiple of %v"
-	maxIncFailNoIn            = "%s should be less than or equal to %v"
-	maxExcFailNoIn            = "%s should be less than %v"
-	minIncFailNoIn            = "%s should be greater than or equal to %v"
-	minExcFailNoIn            = "%s should be greater than %v"
-	uniqueFailNoIn            = "%s shouldn't contain duplicates"
-	maxItemsFailNoIn          = "%s should have at most %d items"
-	minItemsFailNoIn          = "%s should have at least %d items"
-	noAdditionalItems         = "%s in %s can't have additional items"
-	noAdditionalItemsNoIn     = "%s can't have additional items"
-	tooFewProperties          = "%s in %s should have at least %d properties"
-	tooFewPropertiesNoIn      = "%s should have at least %d properties"
-	tooManyProperties         = "%s in %s should have at most %d properties"
-	tooManyPropertiesNoIn     = "%s should have at most %d properties"
-	unallowedProperty         = "%s.%s in %s is a forbidden property"
-	unallowedPropertyNoIn     = "%s.%s is a forbidden property"
-	failedAllPatternProps     = "%s.%s in %s failed all pattern properties"
-	failedAllPatternPropsNoIn = "%s.%s failed all pattern properties"
-	multipleOfMustBePositive  = "factor MultipleOf declared for %s must be positive: %v"
-)
-
-// All code responses can be used to differentiate errors for different handling
-// by the consuming program
-const (
-	// CompositeErrorCode remains 422 for backwards-compatibility
-	// and to separate it from validation errors with cause
-	CompositeErrorCode = 422
-	// InvalidTypeCode is used for any subclass of invalid types
-	InvalidTypeCode = 600 + iota
-	RequiredFailCode
-	TooLongFailCode
-	TooShortFailCode
-	PatternFailCode
-	EnumFailCode
-	MultipleOfFailCode
-	MaxFailCode
-	MinFailCode
-	UniqueFailCode
-	MaxItemsFailCode
-	MinItemsFailCode
-	NoAdditionalItemsCode
-	TooFewPropertiesCode
-	TooManyPropertiesCode
-	UnallowedPropertyCode
-	FailedAllPatternPropsCode
-	MultipleOfMustBePositiveCode
-)
-
-// CompositeError is an error that groups several errors together
-type CompositeError struct {
-	Errors  []error
-	code    int32
-	message string
-}
-
-// Code for this error
-func (c *CompositeError) Code() int32 {
-	return c.code
-}
-
-func (c *CompositeError) Error() string {
-	if len(c.Errors) > 0 {
-		msgs := []string{c.message + ":"}
-		for _, e := range c.Errors {
-			msgs = append(msgs, e.Error())
-		}
-		return strings.Join(msgs, "\n")
-	}
-	return c.message
-}
-
-// CompositeValidationError an error to wrap a bunch of other errors
-func CompositeValidationError(errors ...error) *CompositeError {
-	return &CompositeError{
-		code:    CompositeErrorCode,
-		Errors:  append([]error{}, errors...),
-		message: "validation failure list",
-	}
-}
-
-// FailedAllPatternProperties an error for when the property doesn't match a pattern
-func FailedAllPatternProperties(name, in, key string) *Validation {
-	msg := fmt.Sprintf(failedAllPatternProps, name, key, in)
-	if in == "" {
-		msg = fmt.Sprintf(failedAllPatternPropsNoIn, name, key)
-	}
-	return &Validation{
-		code:    FailedAllPatternPropsCode,
-		Name:    name,
-		In:      in,
-		Value:   key,
-		message: msg,
-	}
-}
-
-// PropertyNotAllowed an error for a property that is not allowed
-func PropertyNotAllowed(name, in, key string) *Validation {
-	msg := fmt.Sprintf(unallowedProperty, name, key, in)
-	if in == "" {
-		msg = fmt.Sprintf(unallowedPropertyNoIn, name, key)
-	}
-	return &Validation{
-		code:    UnallowedPropertyCode,
-		Name:    name,
-		In:      in,
-		Value:   key,
-		message: msg,
-	}
-}
-
-// TooFewProperties an error for an object with too few properties
-func TooFewProperties(name, in string, n int64) *Validation {
-	msg := fmt.Sprintf(tooFewProperties, name, in, n)
-	if in == "" {
-		msg = fmt.Sprintf(tooFewPropertiesNoIn, name, n)
-	}
-	return &Validation{
-		code:    TooFewPropertiesCode,
-		Name:    name,
-		In:      in,
-		Value:   n,
-		message: msg,
-	}
-}
-
-// TooManyProperties an error for an object with too many properties
-func TooManyProperties(name, in string, n int64) *Validation {
-	msg := fmt.Sprintf(tooManyProperties, name, in, n)
-	if in == "" {
-		msg = fmt.Sprintf(tooManyPropertiesNoIn, name, n)
-	}
-	return &Validation{
-		code:    TooManyPropertiesCode,
-		Name:    name,
-		In:      in,
-		Value:   n,
-		message: msg,
-	}
-}
-
-// AdditionalItemsNotAllowed an error for invalid additional items
-func AdditionalItemsNotAllowed(name, in string) *Validation {
-	msg := fmt.Sprintf(noAdditionalItems, name, in)
-	if in == "" {
-		msg = fmt.Sprintf(noAdditionalItemsNoIn, name)
-	}
-	return &Validation{
-		code:    NoAdditionalItemsCode,
-		Name:    name,
-		In:      in,
-		message: msg,
-	}
-}
-
-// InvalidCollectionFormat another flavor of invalid type error
-func InvalidCollectionFormat(name, in, format string) *Validation {
-	return &Validation{
-		code:    InvalidTypeCode,
-		Name:    name,
-		In:      in,
-		Value:   format,
-		message: fmt.Sprintf("the collection format %q is not supported for the %s param %q", format, in, name),
-	}
-}
-
-// InvalidTypeName an error for when the type is invalid
-func InvalidTypeName(typeName string) *Validation {
-	return &Validation{
-		code:    InvalidTypeCode,
-		Value:   typeName,
-		message: fmt.Sprintf(invalidType, typeName),
-	}
-}
-
-// InvalidType creates an error for when the type is invalid
-func InvalidType(name, in, typeName string, value interface{}) *Validation {
-	var message string
-
-	if in != "" {
-		switch value.(type) {
-		case string:
-			message = fmt.Sprintf(typeFailWithData, name, in, typeName, value)
-		case error:
-			message = fmt.Sprintf(typeFailWithError, name, in, typeName, value)
-		default:
-			message = fmt.Sprintf(typeFail, name, in, typeName)
-		}
-	} else {
-		switch value.(type) {
-		case string:
-			message = fmt.Sprintf(typeFailWithDataNoIn, name, typeName, value)
-		case error:
-			message = fmt.Sprintf(typeFailWithErrorNoIn, name, typeName, value)
-		default:
-			message = fmt.Sprintf(typeFailNoIn, name, typeName)
-		}
-	}
-
-	return &Validation{
-		code:    InvalidTypeCode,
-		Name:    name,
-		In:      in,
-		Value:   value,
-		message: message,
-	}
-
-}
-
-// DuplicateItems error for when an array contains duplicates
-func DuplicateItems(name, in string) *Validation {
-	msg := fmt.Sprintf(uniqueFail, name, in)
-	if in == "" {
-		msg = fmt.Sprintf(uniqueFailNoIn, name)
-	}
-	return &Validation{
-		code:    UniqueFailCode,
-		Name:    name,
-		In:      in,
-		message: msg,
-	}
-}
-
-// TooManyItems error for when an array contains too many items
-func TooManyItems(name, in string, max int64) *Validation {
-	msg := fmt.Sprintf(maxItemsFail, name, in, max)
-	if in == "" {
-		msg = fmt.Sprintf(maxItemsFailNoIn, name, max)
-	}
-
-	return &Validation{
-		code:    MaxItemsFailCode,
-		Name:    name,
-		In:      in,
-		message: msg,
-	}
-}
-
-// TooFewItems error for when an array contains too few items
-func TooFewItems(name, in string, min int64) *Validation {
-	msg := fmt.Sprintf(minItemsFail, name, in, min)
-	if in == "" {
-		msg = fmt.Sprintf(minItemsFailNoIn, name, min)
-	}
-	return &Validation{
-		code:    MinItemsFailCode,
-		Name:    name,
-		In:      in,
-		message: msg,
-	}
-}
-
-// ExceedsMaximumInt error for when maximum validation fails
-func ExceedsMaximumInt(name, in string, max int64, exclusive bool) *Validation {
-	var message string
-	if in == "" {
-		m := maxIncFailNoIn
-		if exclusive {
-			m = maxExcFailNoIn
-		}
-		message = fmt.Sprintf(m, name, max)
-	} else {
-		m := maxIncFail
-		if exclusive {
-			m = maxExcFail
-		}
-		message = fmt.Sprintf(m, name, in, max)
-	}
-	return &Validation{
-		code:    MaxFailCode,
-		Name:    name,
-		In:      in,
-		Value:   max,
-		message: message,
-	}
-}
-
-// ExceedsMaximumUint error for when maximum validation fails
-func ExceedsMaximumUint(name, in string, max uint64, exclusive bool) *Validation {
-	var message string
-	if in == "" {
-		m := maxIncFailNoIn
-		if exclusive {
-			m = maxExcFailNoIn
-		}
-		message = fmt.Sprintf(m, name, max)
-	} else {
-		m := maxIncFail
-		if exclusive {
-			m = maxExcFail
-		}
-		message = fmt.Sprintf(m, name, in, max)
-	}
-	return &Validation{
-		code:    MaxFailCode,
-		Name:    name,
-		In:      in,
-		Value:   max,
-		message: message,
-	}
-}
-
-// ExceedsMaximum error for when maximum validation fails
-func ExceedsMaximum(name, in string, max float64, exclusive bool) *Validation {
-	var message string
-	if in == "" {
-		m := maxIncFailNoIn
-		if exclusive {
-			m = maxExcFailNoIn
-		}
-		message = fmt.Sprintf(m, name, max)
-	} else {
-		m := maxIncFail
-		if exclusive {
-			m = maxExcFail
-		}
-		message = fmt.Sprintf(m, name, in, max)
-	}
-	return &Validation{
-		code:    MaxFailCode,
-		Name:    name,
-		In:      in,
-		Value:   max,
-		message: message,
-	}
-}
-
-// ExceedsMinimumInt error for when minimum validation fails
-func ExceedsMinimumInt(name, in string, min int64, exclusive bool) *Validation {
-	var message string
-	if in == "" {
-		m := minIncFailNoIn
-		if exclusive {
-			m = minExcFailNoIn
-		}
-		message = fmt.Sprintf(m, name, min)
-	} else {
-		m := minIncFail
-		if exclusive {
-			m = minExcFail
-		}
-		message = fmt.Sprintf(m, name, in, min)
-	}
-	return &Validation{
-		code:    MinFailCode,
-		Name:    name,
-		In:      in,
-		Value:   min,
-		message: message,
-	}
-}
-
minIncFailNoIn - if exclusive { - m = minExcFailNoIn - } - message = fmt.Sprintf(m, name, min) - } else { - m := minIncFail - if exclusive { - m = minExcFail - } - message = fmt.Sprintf(m, name, in, min) - } - return &Validation{ - code: MinFailCode, - Name: name, - In: in, - Value: min, - message: message, - } -} - -// ExceedsMinimum error for when maxinum validation fails -func ExceedsMinimum(name, in string, min float64, exclusive bool) *Validation { - var message string - if in == "" { - m := minIncFailNoIn - if exclusive { - m = minExcFailNoIn - } - message = fmt.Sprintf(m, name, min) - } else { - m := minIncFail - if exclusive { - m = minExcFail - } - message = fmt.Sprintf(m, name, in, min) - } - return &Validation{ - code: MinFailCode, - Name: name, - In: in, - Value: min, - message: message, - } -} - -// NotMultipleOf error for when multiple of validation fails -func NotMultipleOf(name, in string, multiple interface{}) *Validation { - var msg string - if in == "" { - msg = fmt.Sprintf(multipleOfFailNoIn, name, multiple) - } else { - msg = fmt.Sprintf(multipleOfFail, name, in, multiple) - } - return &Validation{ - code: MultipleOfFailCode, - Name: name, - In: in, - Value: multiple, - message: msg, - } -} - -// EnumFail error for when an enum validation fails -func EnumFail(name, in string, value interface{}, values []interface{}) *Validation { - var msg string - if in == "" { - msg = fmt.Sprintf(enumFailNoIn, name, values) - } else { - msg = fmt.Sprintf(enumFail, name, in, values) - } - - return &Validation{ - code: EnumFailCode, - Name: name, - In: in, - Value: value, - Values: values, - message: msg, - } -} - -// Required error for when a value is missing -func Required(name, in string) *Validation { - var msg string - if in == "" { - msg = fmt.Sprintf(requiredFailNoIn, name) - } else { - msg = fmt.Sprintf(requiredFail, name, in) - } - return &Validation{ - code: RequiredFailCode, - Name: name, - In: in, - message: msg, - } -} - -// TooLong error for when a string is too long -func TooLong(name, in string, max int64) *Validation { - var msg string - if in == "" { - msg = fmt.Sprintf(tooLongMessageNoIn, name, max) - } else { - msg = fmt.Sprintf(tooLongMessage, name, in, max) - } - return &Validation{ - code: TooLongFailCode, - Name: name, - In: in, - message: msg, - } -} - -// TooShort error for when a string is too short -func TooShort(name, in string, min int64) *Validation { - var msg string - if in == "" { - msg = fmt.Sprintf(tooShortMessageNoIn, name, min) - } else { - msg = fmt.Sprintf(tooShortMessage, name, in, min) - } - - return &Validation{ - code: TooShortFailCode, - Name: name, - In: in, - message: msg, - } -} - -// FailedPattern error for when a string fails a regex pattern match -// the pattern that is returned is the ECMA syntax version of the pattern not the golang version. 
-func FailedPattern(name, in, pattern string) *Validation { - var msg string - if in == "" { - msg = fmt.Sprintf(patternFailNoIn, name, pattern) - } else { - msg = fmt.Sprintf(patternFail, name, in, pattern) - } - - return &Validation{ - code: PatternFailCode, - Name: name, - In: in, - message: msg, - } -} - -// MultipleOfMustBePositive error for when a -// multipleOf factor is negative -func MultipleOfMustBePositive(name, in string, factor interface{}) *Validation { - return &Validation{ - code: MultipleOfMustBePositiveCode, - Name: name, - In: in, - Value: factor, - message: fmt.Sprintf(multipleOfMustBePositive, name, factor), - } -} diff --git a/vendor/github.com/go-openapi/loads/.editorconfig b/vendor/github.com/go-openapi/loads/.editorconfig deleted file mode 100644 index 3152da69a5d7..000000000000 --- a/vendor/github.com/go-openapi/loads/.editorconfig +++ /dev/null @@ -1,26 +0,0 @@ -# top-most EditorConfig file -root = true - -# Unix-style newlines with a newline ending every file -[*] -end_of_line = lf -insert_final_newline = true -indent_style = space -indent_size = 2 -trim_trailing_whitespace = true - -# Set default charset -[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] -charset = utf-8 - -# Tab indentation (no size specified) -[*.go] -indent_style = tab - -[*.md] -trim_trailing_whitespace = false - -# Matches the exact files either package.json or .travis.yml -[{package.json,.travis.yml}] -indent_style = space -indent_size = 2 diff --git a/vendor/github.com/go-openapi/loads/.gitignore b/vendor/github.com/go-openapi/loads/.gitignore deleted file mode 100644 index e4f15f17bfc2..000000000000 --- a/vendor/github.com/go-openapi/loads/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -secrets.yml -coverage.out -profile.cov -profile.out diff --git a/vendor/github.com/go-openapi/loads/.golangci.yml b/vendor/github.com/go-openapi/loads/.golangci.yml deleted file mode 100644 index 1932914e6d1a..000000000000 --- a/vendor/github.com/go-openapi/loads/.golangci.yml +++ /dev/null @@ -1,22 +0,0 @@ -linters-settings: - govet: - check-shadowing: true - golint: - min-confidence: 0 - gocyclo: - min-complexity: 30 - maligned: - suggest-new: true - dupl: - threshold: 100 - goconst: - min-len: 2 - min-occurrences: 4 - -linters: - enable-all: true - disable: - - maligned - - lll - - gochecknoglobals - - gochecknoinits diff --git a/vendor/github.com/go-openapi/loads/.travis.yml b/vendor/github.com/go-openapi/loads/.travis.yml deleted file mode 100644 index 8a7e05d911c6..000000000000 --- a/vendor/github.com/go-openapi/loads/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -after_success: -- bash <(curl -s https://codecov.io/bash) -go: -- 1.11.x -- 1.12.x -install: -- GO111MODULE=off go get -u gotest.tools/gotestsum -env: -- GO111MODULE=on -language: go -notifications: - slack: - secure: OxkPwVp35qBTUilgWC8xykSj+sGMcj0h8IIOKD+Rflx2schZVlFfdYdyVBM+s9OqeOfvtuvnR9v1Ye2rPKAvcjWdC4LpRGUsgmItZaI6Um8Aj6+K9udCw5qrtZVfOVmRu8LieH//XznWWKdOultUuniW0MLqw5+II87Gd00RWbCGi0hk0PykHe7uK+PDA2BEbqyZ2WKKYCvfB3j+0nrFOHScXqnh0V05l2E83J4+Sgy1fsPy+1WdX58ZlNBG333ibaC1FS79XvKSmTgKRkx3+YBo97u6ZtUmJa5WZjf2OdLG3KIckGWAv6R5xgxeU31N0Ng8L332w/Edpp2O/M2bZwdnKJ8hJQikXIAQbICbr+lTDzsoNzMdEIYcHpJ5hjPbiUl3Bmd+Jnsjf5McgAZDiWIfpCKZ29tPCEkVwRsOCqkyPRMNMzHHmoja495P5jR+ODS7+J8RFg5xgcnOgpP9D4Wlhztlf5WyZMpkLxTUD+bZq2SRf50HfHFXTkfq22zPl3d1eq0yrLwh/Z/fWKkfb6SyysROL8y6s8u3dpFX1YHSg0BR6i913h4aoZw9B2BG27cafLLTwKYsp2dFo1PWl4O6u9giFJIeqwloZHLKKrwh0cBFhB7RH0I58asxkZpCH6uWjJierahmHe7iS+E6i+9oCHkOZ59hmCYNimIs3hM= -script: -- gotestsum -f short-verbose -- -race 
-timeout=20m -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065e37a..000000000000 --- a/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. 
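
For reviewers tracing the go-openapi/errors constructors removed above: a minimal sketch of how they compose in practice, using the pre-removal signatures shown in that hunk (the two-argument Required, the four-argument ExceedsMaximumInt, and the variadic CompositeValidationError). This is an illustrative caller, not code from the diff.

package main

import (
	"fmt"

	"github.com/go-openapi/errors"
)

func main() {
	// Each constructor returns a *Validation carrying an error code, the
	// parameter's name and location ("query", "body", ...), and a message
	// preformatted from the package's templates.
	missing := errors.Required("name", "query")
	tooBig := errors.ExceedsMaximumInt("age", "query", 120, false)

	// CompositeValidationError aggregates them under CompositeErrorCode;
	// Error() joins the "validation failure list" header with one line per
	// wrapped error.
	composite := errors.CompositeValidationError(missing, tooBig)
	fmt.Println(composite.Error())
}
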
- -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/loads/README.md b/vendor/github.com/go-openapi/loads/README.md deleted file mode 100644 index 071cf69ab97c..000000000000 --- a/vendor/github.com/go-openapi/loads/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# Loads OAI specs [![Build Status](https://travis-ci.org/go-openapi/loads.svg?branch=master)](https://travis-ci.org/go-openapi/loads) [![codecov](https://codecov.io/gh/go-openapi/loads/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/loads) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) - -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/loads/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/loads?status.svg)](http://godoc.org/github.com/go-openapi/loads) -[![GolangCI](https://golangci.com/badges/github.com/go-openapi/loads.svg)](https://golangci.com) -[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/loads)](https://goreportcard.com/report/github.com/go-openapi/loads) - -Loading of OAI specification documents from local or remote locations. Supports JSON and YAML documents. diff --git a/vendor/github.com/go-openapi/loads/doc.go b/vendor/github.com/go-openapi/loads/doc.go deleted file mode 100644 index 3046da4cef39..000000000000 --- a/vendor/github.com/go-openapi/loads/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package loads provides document loading methods for swagger (OAI) specifications. - -It is used by other go-openapi packages to load and run analysis on local or remote spec documents. 
- -*/ -package loads diff --git a/vendor/github.com/go-openapi/loads/go.mod b/vendor/github.com/go-openapi/loads/go.mod deleted file mode 100644 index 8cf62326f6ce..000000000000 --- a/vendor/github.com/go-openapi/loads/go.mod +++ /dev/null @@ -1,11 +0,0 @@ -module github.com/go-openapi/loads - -require ( - github.com/go-openapi/analysis v0.19.5 - github.com/go-openapi/spec v0.19.3 - github.com/go-openapi/swag v0.19.5 - github.com/stretchr/testify v1.3.0 - gopkg.in/yaml.v2 v2.2.4 -) - -go 1.13 diff --git a/vendor/github.com/go-openapi/loads/go.sum b/vendor/github.com/go-openapi/loads/go.sum deleted file mode 100644 index 6eebff99d9ce..000000000000 --- a/vendor/github.com/go-openapi/loads/go.sum +++ /dev/null @@ -1,98 +0,0 @@ -github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= -github.com/go-openapi/analysis v0.19.5 h1:8b2ZgKfKIUTVQpTb77MoRDIMEIwvDVw40o3aOXdfYzI= -github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= -github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.19.2 h1:a2kIyV3w+OS3S97zxUndRVD46+FhGOUBDFY7nmu4CsY= -github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.19.2 h1:A9+F4Dc/MCNB5jibxf6rRvOvR/iFgQdyNx9eIhnGqq0= -github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= -github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.19.2 h1:o20suLFB4Ri0tuzpWtyHlh7E7HnkqTNLq6aR6WVNS1w= -github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= -github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.19.3 
h1:0XRyw8kguri6Yw4SxhsQA/atC88yqrk0+G4YhI2wabc= -github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= -github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.19.3 h1:eRfyY5SkaNJCAwmmMcADjY31ow9+N7MCLW7oRkbsINA= -github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= -github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.19.2 h1:jvO6bCMBEilGwMfHhrd61zIID4oIFdwb76V17SM88dE= -github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 h1:nTT4s92Dgz2HlrB2NaMgvlfqHH39OgMhA7z3PK7PGD4= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e h1:hB2xlXdHp/pmPZq0y3QnmWAArdw9PqbmotexnWx/FU8= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= -github.com/tidwall/pretty v1.0.0/go.mod 
h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -go.mongodb.org/mongo-driver v1.0.3 h1:GKoji1ld3tw2aC+GX1wbr/J2fX13yNacEYoJ8Nhr0yU= -go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.1 h1:Sq1fR+0c58RME5EoqKdjkiQAmPjmfHlZOoRI6fTUOcs= -go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k2fySZ1zf2zCjvQCiIM= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/go-openapi/loads/spec.go b/vendor/github.com/go-openapi/loads/spec.go deleted file mode 100644 index e4b4a3cf7638..000000000000 --- a/vendor/github.com/go-openapi/loads/spec.go +++ /dev/null @@ -1,298 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package loads - -import ( - "bytes" - "encoding/gob" - "encoding/json" - "fmt" - "net/url" - - "github.com/go-openapi/analysis" - "github.com/go-openapi/spec" - "github.com/go-openapi/swag" -) - -// JSONDoc loads a json document from either a file or a remote url -func JSONDoc(path string) (json.RawMessage, error) { - data, err := swag.LoadFromFileOrHTTP(path) - if err != nil { - return nil, err - } - return json.RawMessage(data), nil -} - -// DocLoader represents a doc loader type -type DocLoader func(string) (json.RawMessage, error) - -// DocMatcher represents a predicate to check if a loader matches -type DocMatcher func(string) bool - -var ( - loaders *loader - defaultLoader *loader -) - -func init() { - defaultLoader = &loader{Match: func(_ string) bool { return true }, Fn: JSONDoc} - loaders = defaultLoader - spec.PathLoader = loaders.Fn - AddLoader(swag.YAMLMatcher, swag.YAMLDoc) - - gob.Register(map[string]interface{}{}) - gob.Register([]interface{}{}) - //gob.Register(spec.Refable{}) -} - -// AddLoader for a document -func AddLoader(predicate DocMatcher, load DocLoader) { - prev := loaders - loaders = &loader{ - Match: predicate, - Fn: load, - Next: prev, - } - spec.PathLoader = loaders.Fn -} - -type loader struct { - Fn DocLoader - Match DocMatcher - Next *loader -} - -// JSONSpec loads a spec from a json document -func JSONSpec(path string) (*Document, error) { - data, err := JSONDoc(path) - if err != nil { - return nil, err - } - // convert to json - return Analyzed(data, "") -} - -// Document represents a swagger spec document -type Document struct { - // specAnalyzer - Analyzer *analysis.Spec - spec *spec.Swagger - specFilePath string - origSpec *spec.Swagger - schema *spec.Schema - raw json.RawMessage -} - -// Embedded returns a Document based on embedded specs. 
No analysis is required -func Embedded(orig, flat json.RawMessage) (*Document, error) { - var origSpec, flatSpec spec.Swagger - if err := json.Unmarshal(orig, &origSpec); err != nil { - return nil, err - } - if err := json.Unmarshal(flat, &flatSpec); err != nil { - return nil, err - } - return &Document{ - raw: orig, - origSpec: &origSpec, - spec: &flatSpec, - }, nil -} - -// Spec loads a new spec document -func Spec(path string) (*Document, error) { - specURL, err := url.Parse(path) - if err != nil { - return nil, err - } - var lastErr error - for l := loaders.Next; l != nil; l = l.Next { - if loaders.Match(specURL.Path) { - b, err2 := loaders.Fn(path) - if err2 != nil { - lastErr = err2 - continue - } - doc, err3 := Analyzed(b, "") - if err3 != nil { - return nil, err3 - } - if doc != nil { - doc.specFilePath = path - } - return doc, nil - } - } - if lastErr != nil { - return nil, lastErr - } - b, err := defaultLoader.Fn(path) - if err != nil { - return nil, err - } - - document, err := Analyzed(b, "") - if document != nil { - document.specFilePath = path - } - - return document, err -} - -// Analyzed creates a new analyzed spec document -func Analyzed(data json.RawMessage, version string) (*Document, error) { - if version == "" { - version = "2.0" - } - if version != "2.0" { - return nil, fmt.Errorf("spec version %q is not supported", version) - } - - raw := data - trimmed := bytes.TrimSpace(data) - if len(trimmed) > 0 { - if trimmed[0] != '{' && trimmed[0] != '[' { - yml, err := swag.BytesToYAMLDoc(trimmed) - if err != nil { - return nil, fmt.Errorf("analyzed: %v", err) - } - d, err := swag.YAMLToJSON(yml) - if err != nil { - return nil, fmt.Errorf("analyzed: %v", err) - } - raw = d - } - } - - swspec := new(spec.Swagger) - if err := json.Unmarshal(raw, swspec); err != nil { - return nil, err - } - - origsqspec, err := cloneSpec(swspec) - if err != nil { - return nil, err - } - - d := &Document{ - Analyzer: analysis.New(swspec), - schema: spec.MustLoadSwagger20Schema(), - spec: swspec, - raw: raw, - origSpec: origsqspec, - } - return d, nil -} - -// Expanded expands the ref fields in the spec document and returns a new spec document -func (d *Document) Expanded(options ...*spec.ExpandOptions) (*Document, error) { - swspec := new(spec.Swagger) - if err := json.Unmarshal(d.raw, swspec); err != nil { - return nil, err - } - - var expandOptions *spec.ExpandOptions - if len(options) > 0 { - expandOptions = options[0] - } else { - expandOptions = &spec.ExpandOptions{ - RelativeBase: d.specFilePath, - } - } - - if err := spec.ExpandSpec(swspec, expandOptions); err != nil { - return nil, err - } - - dd := &Document{ - Analyzer: analysis.New(swspec), - spec: swspec, - specFilePath: d.specFilePath, - schema: spec.MustLoadSwagger20Schema(), - raw: d.raw, - origSpec: d.origSpec, - } - return dd, nil -} - -// BasePath the base path for this spec -func (d *Document) BasePath() string { - return d.spec.BasePath -} - -// Version returns the version of this spec -func (d *Document) Version() string { - return d.spec.Swagger -} - -// Schema returns the swagger 2.0 schema -func (d *Document) Schema() *spec.Schema { - return d.schema -} - -// Spec returns the swagger spec object model -func (d *Document) Spec() *spec.Swagger { - return d.spec -} - -// Host returns the host for the API -func (d *Document) Host() string { - return d.spec.Host -} - -// Raw returns the raw swagger spec as json bytes -func (d *Document) Raw() json.RawMessage { - return d.raw -} - -// OrigSpec yields the original spec -func (d 
*Document) OrigSpec() *spec.Swagger { - return d.origSpec -} - -// ResetDefinitions gives a shallow copy with the models reset -func (d *Document) ResetDefinitions() *Document { - defs := make(map[string]spec.Schema, len(d.origSpec.Definitions)) - for k, v := range d.origSpec.Definitions { - defs[k] = v - } - - d.spec.Definitions = defs - return d -} - -// Pristine creates a new pristine document instance based on the input data -func (d *Document) Pristine() *Document { - dd, _ := Analyzed(d.Raw(), d.Version()) - return dd -} - -// SpecFilePath returns the file path of the spec if one is defined -func (d *Document) SpecFilePath() string { - return d.specFilePath -} - -func cloneSpec(src *spec.Swagger) (*spec.Swagger, error) { - var b bytes.Buffer - if err := gob.NewEncoder(&b).Encode(src); err != nil { - return nil, err - } - - var dst spec.Swagger - if err := gob.NewDecoder(&b).Decode(&dst); err != nil { - return nil, err - } - return &dst, nil -} diff --git a/vendor/github.com/go-openapi/runtime/.editorconfig b/vendor/github.com/go-openapi/runtime/.editorconfig deleted file mode 100644 index 3152da69a5d7..000000000000 --- a/vendor/github.com/go-openapi/runtime/.editorconfig +++ /dev/null @@ -1,26 +0,0 @@ -# top-most EditorConfig file -root = true - -# Unix-style newlines with a newline ending every file -[*] -end_of_line = lf -insert_final_newline = true -indent_style = space -indent_size = 2 -trim_trailing_whitespace = true - -# Set default charset -[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] -charset = utf-8 - -# Tab indentation (no size specified) -[*.go] -indent_style = tab - -[*.md] -trim_trailing_whitespace = false - -# Matches the exact files either package.json or .travis.yml -[{package.json,.travis.yml}] -indent_style = space -indent_size = 2 diff --git a/vendor/github.com/go-openapi/runtime/.travis.yml b/vendor/github.com/go-openapi/runtime/.travis.yml deleted file mode 100644 index 2fc7b58ff20b..000000000000 --- a/vendor/github.com/go-openapi/runtime/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -after_success: -- bash <(curl -s https://codecov.io/bash) -go: -- 1.11.x -- 1.12.x -install: -- GO111MODULE=off go get -u gotest.tools/gotestsum -env: -- GO111MODULE=on -language: go -notifications: - slack: - secure: EmObnQuM9Mw8J9vpFaKKHqSMN4Wsr/A9+v7ewAD5cEhA0T1P4m7MbJMiJOhxUhj/X+BFh2DamW+P2lT8mybj5wg8wnkQ2BteKA8Tawi6f9PRw2NRheO8tAi8o/npLnlmet0kc93mn+oLuqHw36w4+j5mkOl2FghkfGiUVhwrhkCP7KXQN+3TU87e+/HzQumlJ3nsE+6terVxkH3PmaUTsS5ONaODZfuxFpfb7RsoEl3skHf6d+tr+1nViLxxly7558Nc33C+W1mr0qiEvMLZ+kJ/CpGWBJ6CUJM3jm6hNe2eMuIPwEK2hxZob8c7n22VPap4K6a0bBRoydoDXaba+2sD7Ym6ivDO/DVyL44VeBBLyIiIBylDGQdZH+6SoWm90Qe/i7tnY/T5Ao5igT8f3cfQY1c3EsTfqmlDfrhmACBmwSlgkdVBLTprHL63JMY24LWmh4jhxsmMRZhCL4dze8su1w6pLN/pD1pGHtKYCEVbdTmaM3PblNRFf12XB7qosmQsgUndH4Vq3bTbU0s1pKjeDhRyLvFzvR0TBbo0pDLEoF1A/i5GVFWa7yLZNUDudQERRh7qv/xBl2excIaQ1sV4DSVm7bAE9l6Kp+yeHQJW2uN6Y3X8wu9gB9nv9l5HBze7wh8KE6PyWAOLYYqZg9/sAtsv/2GcQqXcKFF1zcA= -script: -- gotestsum -f short-verbose -- -race -timeout=20m -coverprofile=coverage.txt -covermode=atomic ./... 
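
Taken together, the removed loads API is a load → analyze → expand pipeline: Spec walks the registered loader chain (JSONDoc by default, with a YAML loader registered via AddLoader in init), Analyzed unmarshals the raw bytes into a Document, and Expanded resolves $ref pointers into a fresh Document while the receiver keeps its original spec. A minimal usage sketch, assuming a local ./swagger.yaml (hypothetical path):

package main

import (
	"fmt"
	"log"

	"github.com/go-openapi/loads"
)

func main() {
	// Spec matches the path against the loader chain and returns an
	// analyzed *Document with its specFilePath recorded.
	doc, err := loads.Spec("./swagger.yaml")
	if err != nil {
		log.Fatal(err)
	}

	// Expanded resolves $refs relative to the recorded spec file path and
	// returns a new Document; the original stays unexpanded.
	expanded, err := doc.Expanded()
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(expanded.Version(), expanded.BasePath(), expanded.Host())
}
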
diff --git a/vendor/github.com/go-openapi/runtime/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/runtime/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065e37a..000000000000 --- a/vendor/github.com/go-openapi/runtime/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. 
- -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/runtime/README.md b/vendor/github.com/go-openapi/runtime/README.md deleted file mode 100644 index 5b1ec6494541..000000000000 --- a/vendor/github.com/go-openapi/runtime/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# runtime [![Build Status](https://travis-ci.org/go-openapi/runtime.svg?branch=client-context)](https://travis-ci.org/go-openapi/runtime) [![codecov](https://codecov.io/gh/go-openapi/runtime/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/runtime) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) - -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/runtime/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/runtime?status.svg)](http://godoc.org/github.com/go-openapi/runtime) - -# golang Open-API toolkit - runtime - -The runtime component for use in codegeneration or as untyped usage. diff --git a/vendor/github.com/go-openapi/runtime/bytestream.go b/vendor/github.com/go-openapi/runtime/bytestream.go deleted file mode 100644 index 4459025b9279..000000000000 --- a/vendor/github.com/go-openapi/runtime/bytestream.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package runtime - -import ( - "bytes" - "encoding" - "errors" - "fmt" - "io" - "reflect" - - "github.com/go-openapi/swag" -) - -func defaultCloser() error { return nil } - -type byteStreamOpt func(opts *byteStreamOpts) - -// ClosesStream when the bytestream consumer or producer is finished -func ClosesStream(opts *byteStreamOpts) { - opts.Close = true -} - -type byteStreamOpts struct { - Close bool -} - -// ByteStreamConsumer creates a consmer for byte streams, -// takes a Writer/BinaryUnmarshaler interface or binary slice by reference, -// and reads from the provided reader -func ByteStreamConsumer(opts ...byteStreamOpt) Consumer { - var vals byteStreamOpts - for _, opt := range opts { - opt(&vals) - } - - return ConsumerFunc(func(reader io.Reader, data interface{}) error { - if reader == nil { - return errors.New("ByteStreamConsumer requires a reader") // early exit - } - - close := defaultCloser - if vals.Close { - if cl, ok := reader.(io.Closer); ok { - close = cl.Close - } - } - defer close() - - if wrtr, ok := data.(io.Writer); ok { - _, err := io.Copy(wrtr, reader) - return err - } - - buf := new(bytes.Buffer) - _, err := buf.ReadFrom(reader) - if err != nil { - return err - } - b := buf.Bytes() - - if bu, ok := data.(encoding.BinaryUnmarshaler); ok { - return bu.UnmarshalBinary(b) - } - - if t := reflect.TypeOf(data); data != nil && t.Kind() == reflect.Ptr { - v := reflect.Indirect(reflect.ValueOf(data)) - if t = v.Type(); t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8 { - v.SetBytes(b) - return nil - } - } - - return fmt.Errorf("%v (%T) is not supported by the ByteStreamConsumer, %s", - data, data, "can be resolved by supporting Writer/BinaryUnmarshaler interface") - }) -} - -// ByteStreamProducer creates a producer for byte streams, -// takes a Reader/BinaryMarshaler interface or binary slice, -// and writes to a writer (essentially a pipe) -func ByteStreamProducer(opts ...byteStreamOpt) Producer { - var vals byteStreamOpts - for _, opt := range opts { - opt(&vals) - } - return ProducerFunc(func(writer io.Writer, data interface{}) error { - if writer == nil { - return errors.New("ByteStreamProducer requires a writer") // early exit - } - close := defaultCloser - if vals.Close { - if cl, ok := writer.(io.Closer); ok { - close = cl.Close - } - } - defer close() - - if rc, ok := data.(io.ReadCloser); ok { - defer rc.Close() - } - - if rdr, ok := data.(io.Reader); ok { - _, err := io.Copy(writer, rdr) - return err - } - - if bm, ok := data.(encoding.BinaryMarshaler); ok { - bytes, err := bm.MarshalBinary() - if err != nil { - return err - } - - _, err = writer.Write(bytes) - return err - } - - if data != nil { - if e, ok := data.(error); ok { - _, err := writer.Write([]byte(e.Error())) - return err - } - - v := reflect.Indirect(reflect.ValueOf(data)) - if t := v.Type(); t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8 { - _, err := writer.Write(v.Bytes()) - return err - } - if t := v.Type(); t.Kind() == reflect.Struct || t.Kind() == reflect.Slice { - b, err := swag.WriteJSON(data) - if err != nil { - return err - } - _, err = writer.Write(b) - return err - } - } - - return fmt.Errorf("%v (%T) is not supported by the ByteStreamProducer, %s", - data, data, "can be resolved by supporting Reader/BinaryMarshaler interface") - }) -} diff --git a/vendor/github.com/go-openapi/runtime/client_auth_info.go b/vendor/github.com/go-openapi/runtime/client_auth_info.go deleted file mode 100644 index c6c97d9a7c34..000000000000 --- 
a/vendor/github.com/go-openapi/runtime/client_auth_info.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -import "github.com/go-openapi/strfmt" - -// A ClientAuthInfoWriterFunc converts a function to a request writer interface -type ClientAuthInfoWriterFunc func(ClientRequest, strfmt.Registry) error - -// AuthenticateRequest adds authentication data to the request -func (fn ClientAuthInfoWriterFunc) AuthenticateRequest(req ClientRequest, reg strfmt.Registry) error { - return fn(req, reg) -} - -// A ClientAuthInfoWriter implementor knows how to write authentication info to a request -type ClientAuthInfoWriter interface { - AuthenticateRequest(ClientRequest, strfmt.Registry) error -} diff --git a/vendor/github.com/go-openapi/runtime/client_operation.go b/vendor/github.com/go-openapi/runtime/client_operation.go deleted file mode 100644 index fa21eacf3301..000000000000 --- a/vendor/github.com/go-openapi/runtime/client_operation.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -import ( - "context" - "net/http" -) - -// ClientOperation represents the context for a swagger operation to be submitted to the transport -type ClientOperation struct { - ID string - Method string - PathPattern string - ProducesMediaTypes []string - ConsumesMediaTypes []string - Schemes []string - AuthInfo ClientAuthInfoWriter - Params ClientRequestWriter - Reader ClientResponseReader - Context context.Context - Client *http.Client -} - -// A ClientTransport implementor knows how to submit Request objects to some destination -type ClientTransport interface { - //Submit(string, RequestWriter, ResponseReader, AuthInfoWriter) (interface{}, error) - Submit(*ClientOperation) (interface{}, error) -} diff --git a/vendor/github.com/go-openapi/runtime/client_request.go b/vendor/github.com/go-openapi/runtime/client_request.go deleted file mode 100644 index 904196ae3e6e..000000000000 --- a/vendor/github.com/go-openapi/runtime/client_request.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
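
The bytestream.go removal above drops a Consumer/Producer pair that shuttles raw payloads: the consumer copies into an io.Writer, encoding.BinaryUnmarshaler, or pointer-to-byte-slice target, while the producer accepts io.Reader, encoding.BinaryMarshaler, []byte, error, or JSON-serializable values. A hedged sketch of the consumer side, assuming the package's Consumer interface exposes Consume(io.Reader, interface{}) error as its ConsumerFunc adapters imply:

package main

import (
	"bytes"
	"fmt"
	"log"
	"strings"

	"github.com/go-openapi/runtime"
)

func main() {
	// A *bytes.Buffer satisfies io.Writer, so ByteStreamConsumer streams
	// the payload straight into it via io.Copy.
	var sink bytes.Buffer
	if err := runtime.ByteStreamConsumer().Consume(strings.NewReader("raw payload"), &sink); err != nil {
		log.Fatal(err)
	}
	fmt.Println(sink.String())

	// Passing ClosesStream makes the consumer also close the source reader
	// (when it is an io.Closer) once the copy finishes.
	_ = runtime.ByteStreamConsumer(runtime.ClosesStream)
}
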
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -import ( - "io" - "io/ioutil" - "net/http" - "net/url" - "time" - - "github.com/go-openapi/strfmt" -) - -// ClientRequestWriterFunc converts a function to a request writer interface -type ClientRequestWriterFunc func(ClientRequest, strfmt.Registry) error - -// WriteToRequest adds data to the request -func (fn ClientRequestWriterFunc) WriteToRequest(req ClientRequest, reg strfmt.Registry) error { - return fn(req, reg) -} - -// ClientRequestWriter is an interface for things that know how to write to a request -type ClientRequestWriter interface { - WriteToRequest(ClientRequest, strfmt.Registry) error -} - -// ClientRequest is an interface for things that know how to -// add information to a swagger client request -type ClientRequest interface { - SetHeaderParam(string, ...string) error - - GetHeaderParams() http.Header - - SetQueryParam(string, ...string) error - - SetFormParam(string, ...string) error - - SetPathParam(string, string) error - - GetQueryParams() url.Values - - SetFileParam(string, ...NamedReadCloser) error - - SetBodyParam(interface{}) error - - SetTimeout(time.Duration) error - - GetMethod() string - - GetPath() string - - GetBody() []byte - - GetBodyParam() interface{} - - GetFileParam() map[string][]NamedReadCloser -} - -// NamedReadCloser represents a named ReadCloser interface -type NamedReadCloser interface { - io.ReadCloser - Name() string -} - -// NamedReader creates a NamedReadCloser for use as file upload -func NamedReader(name string, rdr io.Reader) NamedReadCloser { - rc, ok := rdr.(io.ReadCloser) - if !ok { - rc = ioutil.NopCloser(rdr) - } - return &namedReadCloser{ - name: name, - cr: rc, - } -} - -type namedReadCloser struct { - name string - cr io.ReadCloser -} - -func (n *namedReadCloser) Close() error { - return n.cr.Close() -} -func (n *namedReadCloser) Read(p []byte) (int, error) { - return n.cr.Read(p) -} -func (n *namedReadCloser) Name() string { - return n.name -} diff --git a/vendor/github.com/go-openapi/runtime/client_response.go b/vendor/github.com/go-openapi/runtime/client_response.go deleted file mode 100644 index 729e18b2283a..000000000000 --- a/vendor/github.com/go-openapi/runtime/client_response.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
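
client_request.go above defines the request-side building blocks: ClientRequest is the mutable request abstraction, ClientRequestWriterFunc adapts closures to ClientRequestWriter, and NamedReader wraps a plain io.Reader into the NamedReadCloser that SetFileParam expects (adding a no-op closer and a name). A sketch combining them with the auth adapter from client_auth_info.go; the header name and parameter names are illustrative only:

package main

import (
	"strings"

	"github.com/go-openapi/runtime"
	"github.com/go-openapi/strfmt"
)

// apiKeyAuth builds a ClientAuthInfoWriter from a closure; the Func
// adapter's AuthenticateRequest method simply invokes the closure.
func apiKeyAuth(key string) runtime.ClientAuthInfoWriter {
	return runtime.ClientAuthInfoWriterFunc(func(req runtime.ClientRequest, _ strfmt.Registry) error {
		return req.SetHeaderParam("X-API-Key", key)
	})
}

// uploadParams writes a file parameter onto the request being built.
var uploadParams = runtime.ClientRequestWriterFunc(func(req runtime.ClientRequest, _ strfmt.Registry) error {
	return req.SetFileParam("report", runtime.NamedReader("report.csv", strings.NewReader("a,b\n1,2\n")))
})

func main() {
	// In real use these are wired into ClientOperation.AuthInfo and
	// ClientOperation.Params before Submit is called on a transport.
	_, _ = apiKeyAuth("secret"), uploadParams
}
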
- -package runtime - -import ( - "fmt" - "io" -) - -// A ClientResponse represents a client response -// This bridges between responses obtained from different transports -type ClientResponse interface { - Code() int - Message() string - GetHeader(string) string - Body() io.ReadCloser -} - -// A ClientResponseReaderFunc turns a function into a ClientResponseReader interface implementation -type ClientResponseReaderFunc func(ClientResponse, Consumer) (interface{}, error) - -// ReadResponse reads the response -func (read ClientResponseReaderFunc) ReadResponse(resp ClientResponse, consumer Consumer) (interface{}, error) { - return read(resp, consumer) -} - -// A ClientResponseReader is an interface for things want to read a response. -// An application of this is to create structs from response values -type ClientResponseReader interface { - ReadResponse(ClientResponse, Consumer) (interface{}, error) -} - -// NewAPIError creates a new API error -func NewAPIError(opName string, payload interface{}, code int) *APIError { - return &APIError{ - OperationName: opName, - Response: payload, - Code: code, - } -} - -// APIError wraps an error model and captures the status code -type APIError struct { - OperationName string - Response interface{} - Code int -} - -func (a *APIError) Error() string { - return fmt.Sprintf("%s (status %d): %+v ", a.OperationName, a.Code, a.Response) -} diff --git a/vendor/github.com/go-openapi/runtime/constants.go b/vendor/github.com/go-openapi/runtime/constants.go deleted file mode 100644 index a4de897adcdf..000000000000 --- a/vendor/github.com/go-openapi/runtime/constants.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -const ( - // HeaderContentType represents a http content-type header, it's value is supposed to be a mime type - HeaderContentType = "Content-Type" - - // HeaderTransferEncoding represents a http transfer-encoding header. 
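
On the response side, ClientResponseReaderFunc is the same func-to-interface adapter pattern: generated clients supply a closure that turns a ClientResponse plus the negotiated Consumer into a typed result, reserving NewAPIError for unexpected status codes. A sketch, again assuming Consumer exposes Consume(io.Reader, interface{}) error:

package main

import (
	"github.com/go-openapi/runtime"
)

// readPayload decodes a 200 response through the consumer and wraps every
// other status in the *APIError defined above.
func readPayload(opName string) runtime.ClientResponseReader {
	return runtime.ClientResponseReaderFunc(func(resp runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
		if resp.Code() == 200 {
			var out map[string]interface{}
			if err := consumer.Consume(resp.Body(), &out); err != nil {
				return nil, err
			}
			return out, nil
		}
		return nil, runtime.NewAPIError(opName, resp.Message(), resp.Code())
	})
}

func main() {
	_ = readPayload("getUser") // hypothetical operation name
}
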
- HeaderTransferEncoding = "Transfer-Encoding" - - // HeaderAccept the Accept header - HeaderAccept = "Accept" - - charsetKey = "charset" - - // DefaultMime the default fallback mime type - DefaultMime = "application/octet-stream" - // JSONMime the json mime type - JSONMime = "application/json" - // YAMLMime the yaml mime type - YAMLMime = "application/x-yaml" - // XMLMime the xml mime type - XMLMime = "application/xml" - // TextMime the text mime type - TextMime = "text/plain" - // HTMLMime the html mime type - HTMLMime = "text/html" - // CSVMime the csv mime type - CSVMime = "text/csv" - // MultipartFormMime the multipart form mime type - MultipartFormMime = "multipart/form-data" - // URLencodedFormMime the url encoded form mime type - URLencodedFormMime = "application/x-www-form-urlencoded" -) diff --git a/vendor/github.com/go-openapi/runtime/csv.go b/vendor/github.com/go-openapi/runtime/csv.go deleted file mode 100644 index d807bd915b44..000000000000 --- a/vendor/github.com/go-openapi/runtime/csv.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -import ( - "bytes" - "encoding/csv" - "errors" - "io" -) - -// CSVConsumer creates a new CSV consumer -func CSVConsumer() Consumer { - return ConsumerFunc(func(reader io.Reader, data interface{}) error { - if reader == nil { - return errors.New("CSVConsumer requires a reader") - } - - csvReader := csv.NewReader(reader) - writer, ok := data.(io.Writer) - if !ok { - return errors.New("data type must be io.Writer") - } - csvWriter := csv.NewWriter(writer) - records, err := csvReader.ReadAll() - if err != nil { - return err - } - for _, r := range records { - if err := csvWriter.Write(r); err != nil { - return err - } - } - csvWriter.Flush() - return nil - }) -} - -// CSVProducer creates a new CSV producer -func CSVProducer() Producer { - return ProducerFunc(func(writer io.Writer, data interface{}) error { - if writer == nil { - return errors.New("CSVProducer requires a writer") - } - - dataBytes, ok := data.([]byte) - if !ok { - return errors.New("data type must be byte array") - } - - csvReader := csv.NewReader(bytes.NewBuffer(dataBytes)) - records, err := csvReader.ReadAll() - if err != nil { - return err - } - csvWriter := csv.NewWriter(writer) - for _, r := range records { - if err := csvWriter.Write(r); err != nil { - return err - } - } - csvWriter.Flush() - return nil - }) -} diff --git a/vendor/github.com/go-openapi/runtime/discard.go b/vendor/github.com/go-openapi/runtime/discard.go deleted file mode 100644 index 0d390cfd64c0..000000000000 --- a/vendor/github.com/go-openapi/runtime/discard.go +++ /dev/null @@ -1,9 +0,0 @@ -package runtime - -import "io" - -// DiscardConsumer does absolutely nothing, it's a black hole. -var DiscardConsumer = ConsumerFunc(func(_ io.Reader, _ interface{}) error { return nil }) - -// DiscardProducer does absolutely nothing, it's a black hole. 
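
The csv.go consumer and producer removed above both normalize through encoding/csv rather than copying bytes verbatim: the consumer requires an io.Writer target and the producer requires a []byte source, each re-parsing the records and re-emitting them. A producer-side sketch:

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/go-openapi/runtime"
)

func main() {
	// CSVProducer rejects anything but []byte; the input is parsed with a
	// csv.Reader and re-written record by record through a csv.Writer.
	var buf bytes.Buffer
	if err := runtime.CSVProducer().Produce(&buf, []byte("a,b\n1,2\n")); err != nil {
		log.Fatal(err)
	}
	fmt.Print(buf.String())
}
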
-var DiscardProducer = ProducerFunc(func(_ io.Writer, _ interface{}) error { return nil }) diff --git a/vendor/github.com/go-openapi/runtime/file.go b/vendor/github.com/go-openapi/runtime/file.go deleted file mode 100644 index 85971c18c4b5..000000000000 --- a/vendor/github.com/go-openapi/runtime/file.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -import "mime/multipart" - -// File represents an uploaded file. -type File struct { - Data multipart.File - Header *multipart.FileHeader -} - -// Read bytes from the file -func (f *File) Read(p []byte) (n int, err error) { - return f.Data.Read(p) -} - -// Close the file -func (f *File) Close() error { - return f.Data.Close() -} diff --git a/vendor/github.com/go-openapi/runtime/go.mod b/vendor/github.com/go-openapi/runtime/go.mod deleted file mode 100644 index ddf6f1613af5..000000000000 --- a/vendor/github.com/go-openapi/runtime/go.mod +++ /dev/null @@ -1,16 +0,0 @@ -module github.com/go-openapi/runtime - -require ( - github.com/docker/go-units v0.4.0 - github.com/go-openapi/analysis v0.19.2 - github.com/go-openapi/errors v0.19.2 - github.com/go-openapi/loads v0.19.2 - github.com/go-openapi/spec v0.19.2 - github.com/go-openapi/strfmt v0.19.0 - github.com/go-openapi/swag v0.19.2 - github.com/go-openapi/validate v0.19.2 - github.com/stretchr/testify v1.3.0 - golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56 // indirect - golang.org/x/tools v0.0.0-20190617190820-da514acc4774 // indirect - gopkg.in/yaml.v2 v2.2.2 -) diff --git a/vendor/github.com/go-openapi/runtime/go.sum b/vendor/github.com/go-openapi/runtime/go.sum deleted file mode 100644 index bdb97c3a8a7a..000000000000 --- a/vendor/github.com/go-openapi/runtime/go.sum +++ /dev/null @@ -1,133 +0,0 @@ -github.com/PuerkitoBio/purell v1.1.0 h1:rmGxhojJlM0tuKtfdvliR84CFHljx9ag64t2xmVkjK4= -github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf h1:eg0MeVzsP1G42dRafH3vf+al2vQIJU0YHX+1Tw87oco= -github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 
h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk= -github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= -github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8 h1:DujepqpGd1hyOd7aW59XpK7Qymp8iy83xq74fLr21is= -github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= -github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.17.2 h1:eYp14J1o8TTSCzndHBtsNuckikV1PfZOSnx4BcBeu0c= -github.com/go-openapi/analysis v0.17.2/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.19.2 h1:ophLETFestFZHk3ji7niPEL4d466QjW+0Tdg5VyDq7E= -github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= -github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.17.2 h1:azEQ8Fnx0jmtFF2fxsnmd6I0x6rsweUF63qqSO1NmKk= -github.com/go-openapi/errors v0.17.2/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.19.2 h1:a2kIyV3w+OS3S97zxUndRVD46+FhGOUBDFY7nmu4CsY= -github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.17.2 h1:3ekBy41gar/iJi2KSh/au/PrC2vpLr85upF/UZmm3W0= -github.com/go-openapi/jsonpointer v0.17.2/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.19.2 h1:A9+F4Dc/MCNB5jibxf6rRvOvR/iFgQdyNx9eIhnGqq0= -github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= -github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.17.2 h1:lF3z7AH8dd0IKXc1zEBi1dj0B4XgVb5cVjn39dCK3Ls= -github.com/go-openapi/jsonreference v0.17.2/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.19.2 h1:o20suLFB4Ri0tuzpWtyHlh7E7HnkqTNLq6aR6WVNS1w= -github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= -github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.17.2 h1:tEXYu6Xc0pevpzzQx5ghrMN9F7IVpN/+u4iD3rkYE5o= -github.com/go-openapi/loads v0.17.2/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.18.0/go.mod 
h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.2 h1:rf5ArTHmIJxyV5Oiks+Su0mUens1+AjpkPoWr5xFRcI= -github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= -github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= -github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= -github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.17.2 h1:eb2NbuCnoe8cWAxhtK6CfMWUYmiFEZJ9Hx3Z2WRwJ5M= -github.com/go-openapi/spec v0.17.2/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.19.2 h1:SStNd1jRcYtfKCN7R0laGNs80WYYvn5CbBjM2sOmCrE= -github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= -github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.17.2 h1:2KDns36DMHXG9/iYkOjiX+/8fKK9GCU5ELZ+J6qcRVA= -github.com/go-openapi/strfmt v0.17.2/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.19.0 h1:0Dn9qy1G9+UJfRU7TR8bmdGxb4uifB7HNrJjOnV0yPk= -github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= -github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.17.2 h1:K/ycE/XTUDFltNHSO32cGRUhrVGJD64o8WgAIZNyc3k= -github.com/go-openapi/swag v0.17.2/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.19.2 h1:jvO6bCMBEilGwMfHhrd61zIID4oIFdwb76V17SM88dE= -github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/validate v0.17.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= -github.com/go-openapi/validate v0.17.2 h1:lwFfiS4sv5DvOrsYDsYq4N7UU8ghXiYtPJ+VcQnC3Xg= -github.com/go-openapi/validate v0.17.2/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= -github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= -github.com/go-openapi/validate v0.19.2 h1:ky5l57HjyVRrsJfd2+Ro5Z9PjGuKbsmftwyMtk8H7js= -github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 h1:2gxZ0XQIU/5z3Z3bUBu+FXuk2pFbkN6tcwi/pjyaDic= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson 
v0.0.0-20190312143242-1de009706dbe h1:W/GaMY0y69G4cFlmsC6B9sbuo2fP8OFP1ABjt4kPz+w= -github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 h1:nTT4s92Dgz2HlrB2NaMgvlfqHH39OgMhA7z3PK7PGD4= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc h1:a3CU5tJYVj92DY2LaA1kUkrsqD5/3mLDhx2NcNqyW+0= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53 h1:kcXqo9vE6fsZY5X5Rd7R1l7fTgnWaDCVmln65REefiE= -golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/go-openapi/runtime/headers.go b/vendor/github.com/go-openapi/runtime/headers.go deleted file mode 100644 index 4d111db4fec0..000000000000 --- a/vendor/github.com/go-openapi/runtime/headers.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -import ( - "mime" - "net/http" - - "github.com/go-openapi/errors" -) - -// ContentType parses a content type header -func ContentType(headers http.Header) (string, string, error) { - ct := headers.Get(HeaderContentType) - orig := ct - if ct == "" { - ct = DefaultMime - } - if ct == "" { - return "", "", nil - } - - mt, opts, err := mime.ParseMediaType(ct) - if err != nil { - return "", "", errors.NewParseError(HeaderContentType, "header", orig, err) - } - - if cs, ok := opts[charsetKey]; ok { - return mt, cs, nil - } - - return mt, "", nil -} diff --git a/vendor/github.com/go-openapi/runtime/interfaces.go b/vendor/github.com/go-openapi/runtime/interfaces.go deleted file mode 100644 index 65de0aa44b96..000000000000 --- a/vendor/github.com/go-openapi/runtime/interfaces.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
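For reference while reviewing the removal: the ContentType helper deleted above splits a Content-Type header into media type and charset. A minimal sketch of calling it directly, using a hand-built http.Header as a stand-in for a real incoming request:

package main

import (
	"fmt"
	"net/http"

	"github.com/go-openapi/runtime"
)

func main() {
	// Hand-built headers stand in for a real incoming request.
	h := http.Header{}
	h.Set("Content-Type", "application/json; charset=utf-8")

	mt, charset, err := runtime.ContentType(h)
	if err != nil {
		// A malformed header surfaces as a parse error on the Content-Type header.
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(mt, charset) // application/json utf-8
}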
- -package runtime - -import ( - "io" - "net/http" - - "github.com/go-openapi/strfmt" -) - -// OperationHandlerFunc an adapter for a function to the OperationHandler interface -type OperationHandlerFunc func(interface{}) (interface{}, error) - -// Handle implements the operation handler interface -func (s OperationHandlerFunc) Handle(data interface{}) (interface{}, error) { - return s(data) -} - -// OperationHandler a handler for a swagger operation -type OperationHandler interface { - Handle(interface{}) (interface{}, error) -} - -// ConsumerFunc represents a function that can be used as a consumer -type ConsumerFunc func(io.Reader, interface{}) error - -// Consume consumes the reader into the data parameter -func (fn ConsumerFunc) Consume(reader io.Reader, data interface{}) error { - return fn(reader, data) -} - -// Consumer implementations know how to bind the values on the provided interface to -// data provided by the request body -type Consumer interface { - // Consume performs the binding of request values - Consume(io.Reader, interface{}) error -} - -// ProducerFunc represents a function that can be used as a producer -type ProducerFunc func(io.Writer, interface{}) error - -// Produce produces the response for the provided data -func (f ProducerFunc) Produce(writer io.Writer, data interface{}) error { - return f(writer, data) -} - -// Producer implementations know how to turn the provided interface into a valid -// HTTP response -type Producer interface { - // Produce writes to the http response - Produce(io.Writer, interface{}) error -} - -// AuthenticatorFunc turns a function into an authenticator -type AuthenticatorFunc func(interface{}) (bool, interface{}, error) - -// Authenticate authenticates the request with the provided data -func (f AuthenticatorFunc) Authenticate(params interface{}) (bool, interface{}, error) { - return f(params) -} - -// Authenticator represents an authentication strategy -// implementations of Authenticator know how to authenticate the -// request data and translate that into a valid principal object or an error -type Authenticator interface { - Authenticate(interface{}) (bool, interface{}, error) -} - -// AuthorizerFunc turns a function into an authorizer -type AuthorizerFunc func(*http.Request, interface{}) error - -// Authorize authorizes the processing of the request for the principal -func (f AuthorizerFunc) Authorize(r *http.Request, principal interface{}) error { - return f(r, principal) -} - -// Authorizer represents an authorization strategy -// implementations of Authorizer know how to authorize the principal object -// using the request data and returns error if unauthorized -type Authorizer interface { - Authorize(*http.Request, interface{}) error -} - -// Validatable types implementing this interface allow customizing their validation -// this will be used instead of the reflective validation based on the spec document. 
-// the implementations are assumed to have been generated by the swagger tool so they should -// contain all the validations obtained from the spec -type Validatable interface { - Validate(strfmt.Registry) error -} diff --git a/vendor/github.com/go-openapi/runtime/json.go b/vendor/github.com/go-openapi/runtime/json.go deleted file mode 100644 index 5a690559cc59..000000000000 --- a/vendor/github.com/go-openapi/runtime/json.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -import ( - "encoding/json" - "io" -) - -// JSONConsumer creates a new JSON consumer -func JSONConsumer() Consumer { - return ConsumerFunc(func(reader io.Reader, data interface{}) error { - dec := json.NewDecoder(reader) - dec.UseNumber() // preserve number formats - return dec.Decode(data) - }) -} - -// JSONProducer creates a new JSON producer -func JSONProducer() Producer { - return ProducerFunc(func(writer io.Writer, data interface{}) error { - enc := json.NewEncoder(writer) - enc.SetEscapeHTML(false) - return enc.Encode(data) - }) -} diff --git a/vendor/github.com/go-openapi/runtime/request.go b/vendor/github.com/go-openapi/runtime/request.go deleted file mode 100644 index 9e51b42b59d0..000000000000 --- a/vendor/github.com/go-openapi/runtime/request.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
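The Consumer/Producer interfaces and the JSON implementations deleted above form the serialization seam of this package. A minimal round-trip sketch, assuming only what the removed interfaces.go and json.go define:

package main

import (
	"bytes"
	"fmt"
	"strings"

	"github.com/go-openapi/runtime"
)

func main() {
	// Consume binds a request body onto a Go value via the Consumer interface.
	var payload map[string]interface{}
	if err := runtime.JSONConsumer().Consume(strings.NewReader(`{"name":"demo"}`), &payload); err != nil {
		panic(err)
	}

	// Produce writes the value back out via the matching Producer.
	var buf bytes.Buffer
	if err := runtime.JSONProducer().Produce(&buf, payload); err != nil {
		panic(err)
	}
	fmt.Print(buf.String()) // {"name":"demo"}
}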
- -package runtime - -import ( - "bufio" - "io" - "net/http" - "strings" - - "github.com/go-openapi/swag" -) - -// CanHaveBody returns true if this method can have a body -func CanHaveBody(method string) bool { - mn := strings.ToUpper(method) - return mn == "POST" || mn == "PUT" || mn == "PATCH" || mn == "DELETE" -} - -// IsSafe returns true if this is a request with a safe method -func IsSafe(r *http.Request) bool { - mn := strings.ToUpper(r.Method) - return mn == "GET" || mn == "HEAD" -} - -// AllowsBody returns true if the request allows for a body -func AllowsBody(r *http.Request) bool { - mn := strings.ToUpper(r.Method) - return mn != "HEAD" -} - -// HasBody returns true if this method needs a content-type -func HasBody(r *http.Request) bool { - // happy case: we have a content length set - if r.ContentLength > 0 { - return true - } - - if r.Header.Get(http.CanonicalHeaderKey("content-length")) != "" { - // in this case, no Transfer-Encoding should be present - // we have a header set but it was explicitly set to 0, so we assume no body - return false - } - - rdr := newPeekingReader(r.Body) - r.Body = rdr - return rdr.HasContent() -} - -func newPeekingReader(r io.ReadCloser) *peekingReader { - if r == nil { - return nil - } - return &peekingReader{ - underlying: bufio.NewReader(r), - orig: r, - } -} - -type peekingReader struct { - underlying interface { - Buffered() int - Peek(int) ([]byte, error) - Read([]byte) (int, error) - } - orig io.ReadCloser -} - -func (p *peekingReader) HasContent() bool { - if p == nil { - return false - } - if p.underlying.Buffered() > 0 { - return true - } - b, err := p.underlying.Peek(1) - if err != nil { - return false - } - return len(b) > 0 -} - -func (p *peekingReader) Read(d []byte) (int, error) { - if p == nil { - return 0, io.EOF - } - return p.underlying.Read(d) -} - -func (p *peekingReader) Close() error { - p.underlying = nil - if p.orig != nil { - return p.orig.Close() - } - return nil -} - -// JSONRequest creates a new http request with json headers set -func JSONRequest(method, urlStr string, body io.Reader) (*http.Request, error) { - req, err := http.NewRequest(method, urlStr, body) - if err != nil { - return nil, err - } - req.Header.Add(HeaderContentType, JSONMime) - req.Header.Add(HeaderAccept, JSONMime) - return req, nil -} - -// Gettable for things with a method GetOK(string) (data string, hasKey bool, hasValue bool) -type Gettable interface { - GetOK(string) ([]string, bool, bool) -} - -// ReadSingleValue reads a single value from the source -func ReadSingleValue(values Gettable, name string) string { - vv, _, hv := values.GetOK(name) - if hv { - return vv[len(vv)-1] - } - return "" -} - -// ReadCollectionValue reads a collection value from a string data source -func ReadCollectionValue(values Gettable, name, collectionFormat string) []string { - v := ReadSingleValue(values, name) - return swag.SplitByFormat(v, collectionFormat) -} diff --git a/vendor/github.com/go-openapi/runtime/statuses.go b/vendor/github.com/go-openapi/runtime/statuses.go deleted file mode 100644 index 3b011a0bff19..000000000000 --- a/vendor/github.com/go-openapi/runtime/statuses.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
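request.go above bundles the request-side helpers; JSONRequest in particular pre-populates the JSON MIME headers, and HasBody short-circuits on a positive ContentLength. A short sketch (the URL is a placeholder, and the header value assumes the package's JSONMime constant is the usual application/json):

package main

import (
	"fmt"
	"strings"

	"github.com/go-openapi/runtime"
)

func main() {
	// JSONRequest sets both Content-Type and Accept to the JSON MIME type.
	req, err := runtime.JSONRequest("POST", "http://example.invalid/things", strings.NewReader(`{}`))
	if err != nil {
		panic(err)
	}
	fmt.Println(req.Header.Get("Content-Type")) // application/json

	// With a non-empty body, ContentLength > 0 makes HasBody return true
	// without touching the peeking reader.
	fmt.Println(runtime.HasBody(req)) // true
}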
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -// Statuses lists the most common HTTP status codes to default message -// taken from https://httpstatuses.com/ -var Statuses = map[int]string{ - 100: "Continue", - 101: "Switching Protocols", - 102: "Processing", - 103: "Checkpoint", - 122: "URI too long", - 200: "OK", - 201: "Created", - 202: "Accepted", - 203: "Request Processed", - 204: "No Content", - 205: "Reset Content", - 206: "Partial Content", - 207: "Multi-Status", - 208: "Already Reported", - 226: "IM Used", - 300: "Multiple Choices", - 301: "Moved Permanently", - 302: "Found", - 303: "See Other", - 304: "Not Modified", - 305: "Use Proxy", - 306: "Switch Proxy", - 307: "Temporary Redirect", - 308: "Permanent Redirect", - 400: "Bad Request", - 401: "Unauthorized", - 402: "Payment Required", - 403: "Forbidden", - 404: "Not Found", - 405: "Method Not Allowed", - 406: "Not Acceptable", - 407: "Proxy Authentication Required", - 408: "Request Timeout", - 409: "Conflict", - 410: "Gone", - 411: "Length Required", - 412: "Precondition Failed", - 413: "Request Entity Too Large", - 414: "Request-URI Too Long", - 415: "Unsupported Media Type", - 416: "Request Range Not Satisfiable", - 417: "Expectation Failed", - 418: "I'm a teapot", - 420: "Enhance Your Calm", - 422: "Unprocessable Entity", - 423: "Locked", - 424: "Failed Dependency", - 426: "Upgrade Required", - 428: "Precondition Required", - 429: "Too Many Requests", - 431: "Request Header Fields Too Large", - 444: "No Response", - 449: "Retry With", - 450: "Blocked by Windows Parental Controls", - 451: "Wrong Exchange Server", - 499: "Client Closed Request", - 500: "Internal Server Error", - 501: "Not Implemented", - 502: "Bad Gateway", - 503: "Service Unavailable", - 504: "Gateway Timeout", - 505: "HTTP Version Not Supported", - 506: "Variant Also Negotiates", - 507: "Insufficient Storage", - 508: "Loop Detected", - 509: "Bandwidth Limit Exceeded", - 510: "Not Extended", - 511: "Network Authentication Required", - 598: "Network read timeout error", - 599: "Network connect timeout error", -} diff --git a/vendor/github.com/go-openapi/runtime/text.go b/vendor/github.com/go-openapi/runtime/text.go deleted file mode 100644 index c7fd04c3c5c1..000000000000 --- a/vendor/github.com/go-openapi/runtime/text.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
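The Statuses table above extends net/http's canonical reason phrases with a number of non-standard codes. One plausible lookup pattern, falling back to the standard library for codes the table omits:

package main

import (
	"fmt"
	"net/http"

	"github.com/go-openapi/runtime"
)

func main() {
	code := 422
	msg, ok := runtime.Statuses[code]
	if !ok {
		// Fall back to the standard library for anything unlisted.
		msg = http.StatusText(code)
	}
	fmt.Println(code, msg) // 422 Unprocessable Entity
}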
- -package runtime - -import ( - "bytes" - "encoding" - "errors" - "fmt" - "io" - "reflect" - - "github.com/go-openapi/swag" -) - -// TextConsumer creates a new text consumer -func TextConsumer() Consumer { - return ConsumerFunc(func(reader io.Reader, data interface{}) error { - if reader == nil { - return errors.New("TextConsumer requires a reader") // early exit - } - - buf := new(bytes.Buffer) - _, err := buf.ReadFrom(reader) - if err != nil { - return err - } - b := buf.Bytes() - - // If the buffer is empty, no need to unmarshal it, which causes a panic. - if len(b) == 0 { - data = "" - return nil - } - - if tu, ok := data.(encoding.TextUnmarshaler); ok { - err := tu.UnmarshalText(b) - if err != nil { - return fmt.Errorf("text consumer: %v", err) - } - - return nil - } - - t := reflect.TypeOf(data) - if data != nil && t.Kind() == reflect.Ptr { - v := reflect.Indirect(reflect.ValueOf(data)) - if t.Elem().Kind() == reflect.String { - v.SetString(string(b)) - return nil - } - } - - return fmt.Errorf("%v (%T) is not supported by the TextConsumer, %s", - data, data, "can be resolved by supporting TextUnmarshaler interface") - }) -} - -// TextProducer creates a new text producer -func TextProducer() Producer { - return ProducerFunc(func(writer io.Writer, data interface{}) error { - if writer == nil { - return errors.New("TextProducer requires a writer") // early exit - } - - if data == nil { - return errors.New("no data given to produce text from") - } - - if tm, ok := data.(encoding.TextMarshaler); ok { - txt, err := tm.MarshalText() - if err != nil { - return fmt.Errorf("text producer: %v", err) - } - _, err = writer.Write(txt) - return err - } - - if str, ok := data.(error); ok { - _, err := writer.Write([]byte(str.Error())) - return err - } - - if str, ok := data.(fmt.Stringer); ok { - _, err := writer.Write([]byte(str.String())) - return err - } - - v := reflect.Indirect(reflect.ValueOf(data)) - if t := v.Type(); t.Kind() == reflect.Struct || t.Kind() == reflect.Slice { - b, err := swag.WriteJSON(data) - if err != nil { - return err - } - _, err = writer.Write(b) - return err - } - if v.Kind() != reflect.String { - return fmt.Errorf("%T is not a supported type by the TextProducer", data) - } - - _, err := writer.Write([]byte(v.String())) - return err - }) -} diff --git a/vendor/github.com/go-openapi/runtime/values.go b/vendor/github.com/go-openapi/runtime/values.go deleted file mode 100644 index 11f5732af4e3..000000000000 --- a/vendor/github.com/go-openapi/runtime/values.go +++ /dev/null @@ -1,19 +0,0 @@ -package runtime - -// Values typically represent parameters on a http request. -type Values map[string][]string - -// GetOK returns the values collection for the given key. -// When the key is present in the map it will return true for hasKey. -// When the value is not empty it will return true for hasValue. -func (v Values) GetOK(key string) (value []string, hasKey bool, hasValue bool) { - value, hasKey = v[key] - if !hasKey { - return - } - if len(value) == 0 { - return - } - hasValue = true - return -} diff --git a/vendor/github.com/go-openapi/runtime/xml.go b/vendor/github.com/go-openapi/runtime/xml.go deleted file mode 100644 index 821c7393dfbb..000000000000 --- a/vendor/github.com/go-openapi/runtime/xml.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
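values.go above is small but carries the three-way semantics used when binding parameters: key absent, key present but empty, and key present with values. A minimal sketch against the removed API:

package main

import (
	"fmt"

	"github.com/go-openapi/runtime"
)

func main() {
	vals := runtime.Values{
		"tags": {"a", "b"},
		"name": {},
	}

	// GetOK distinguishes a missing key from a present-but-empty one.
	_, hasKey, hasValue := vals.GetOK("name")
	fmt.Println(hasKey, hasValue) // true false

	// ReadSingleValue resolves repeated parameters to their last value.
	fmt.Println(runtime.ReadSingleValue(vals, "tags")) // b
}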
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -import ( - "encoding/xml" - "io" -) - -// XMLConsumer creates a new XML consumer -func XMLConsumer() Consumer { - return ConsumerFunc(func(reader io.Reader, data interface{}) error { - dec := xml.NewDecoder(reader) - return dec.Decode(data) - }) -} - -// XMLProducer creates a new XML producer -func XMLProducer() Producer { - return ProducerFunc(func(writer io.Writer, data interface{}) error { - enc := xml.NewEncoder(writer) - return enc.Encode(data) - }) -} diff --git a/vendor/github.com/go-openapi/strfmt/.editorconfig b/vendor/github.com/go-openapi/strfmt/.editorconfig deleted file mode 100644 index 3152da69a5d7..000000000000 --- a/vendor/github.com/go-openapi/strfmt/.editorconfig +++ /dev/null @@ -1,26 +0,0 @@ -# top-most EditorConfig file -root = true - -# Unix-style newlines with a newline ending every file -[*] -end_of_line = lf -insert_final_newline = true -indent_style = space -indent_size = 2 -trim_trailing_whitespace = true - -# Set default charset -[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] -charset = utf-8 - -# Tab indentation (no size specified) -[*.go] -indent_style = tab - -[*.md] -trim_trailing_whitespace = false - -# Matches the exact files either package.json or .travis.yml -[{package.json,.travis.yml}] -indent_style = space -indent_size = 2 diff --git a/vendor/github.com/go-openapi/strfmt/.golangci.yml b/vendor/github.com/go-openapi/strfmt/.golangci.yml deleted file mode 100644 index f260ce7e5cd1..000000000000 --- a/vendor/github.com/go-openapi/strfmt/.golangci.yml +++ /dev/null @@ -1,30 +0,0 @@ -linters-settings: - govet: - check-shadowing: true - golint: - min-confidence: 0 - gocyclo: - min-complexity: 31 - maligned: - suggest-new: true - dupl: - threshold: 100 - goconst: - min-len: 2 - min-occurrences: 4 - -linters: - enable-all: true - disable: - - maligned - - lll - - gochecknoinits - - gochecknoglobals - -issues: - exclude-rules: - - path: bson.go - text: "should be .*ObjectID" - linters: - - golint - diff --git a/vendor/github.com/go-openapi/strfmt/.travis.yml b/vendor/github.com/go-openapi/strfmt/.travis.yml deleted file mode 100644 index eb962aebcda8..000000000000 --- a/vendor/github.com/go-openapi/strfmt/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -after_success: -- bash <(curl -s https://codecov.io/bash) -go: -- 1.11.x -- 1.12.x -install: -- GO111MODULE=off go get -u gotest.tools/gotestsum -language: go -env: -- GO111MODULE=on -notifications: - slack: - secure: 
zE5AtIYTpYfQPnTzP+EaQPN7JKtfFAGv6PrJqoIZLOXa8B6zGb6+J1JRNNxWi7faWbyJOxa4FSSsuPsKZMycUK6wlLFIdhDxwqeo7Ew8r6rdZKdfUHQggfNS9wO79ARoNYUDHtmnaBUS+eWSM1YqSc4i99QxyyfuURLOeAaA/q14YbdlTlaw3lrZ0qT92ot1FnVGNOx064zuHtFeUf+jAVRMZ6Q3rvqllwIlPszE6rmHGXBt2VoJxRaBetdwd7FgkcYw9FPXKHhadwC7/75ZAdmxIukhxNMw4Tr5NuPcqNcnbYLenDP7B3lssGVIrP4BRSqekS1d/tqvdvnnFWHMwrNCkSnSc065G5+qWTlXKAemIclgiXXqE2furBNLm05MDdG8fn5epS0UNarkjD+zX336RiqwBlOX4KbF+vPyqcO98CsN0lnd+H6loc9reiTHs37orFFpQ+309av9be2GGsHUsRB9ssIyrewmhAccOmkRtr2dVTZJNFQwa5Kph5TNJuTjnZEwG/xUkEX2YSfwShOsb062JWiflV6PJdnl80pc9Tn7D5sO5Bf9DbijGRJwwP+YiiJtwtr+vsvS+n4sM0b5eqm4UoRo+JJO8ffoJtHS7ItuyRbVQCwEPJ4221WLcf5PquEEDdAPwR+K4Gj8qTXqTDdxOiES1xFUKVgmzhI= -script: -- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065e37a..000000000000 --- a/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. 
- -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/strfmt/README.md b/vendor/github.com/go-openapi/strfmt/README.md deleted file mode 100644 index 87357cd024c7..000000000000 --- a/vendor/github.com/go-openapi/strfmt/README.md +++ /dev/null @@ -1,73 +0,0 @@ -# Strfmt [![Build Status](https://travis-ci.org/go-openapi/strfmt.svg?branch=master)](https://travis-ci.org/go-openapi/strfmt) [![codecov](https://codecov.io/gh/go-openapi/strfmt/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/strfmt) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) - -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/strfmt/master/LICENSE) -[![GoDoc](https://godoc.org/github.com/go-openapi/strfmt?status.svg)](http://godoc.org/github.com/go-openapi/strfmt) -[![GolangCI](https://golangci.com/badges/github.com/go-openapi/strfmt.svg)](https://golangci.com) -[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/strfmt)](https://goreportcard.com/report/github.com/go-openapi/strfmt) - -This package exposes a registry of data types to support string formats in the go-openapi toolkit. - -strfmt represents a well known string format such as credit card or email. The go toolkit for OpenAPI specifications knows how to deal with those. - -## Supported data formats -go-openapi/strfmt follows the swagger 2.0 specification with the following formats -defined [here](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types). - -It also provides convenient extensions to go-openapi users. - -- [x] JSON-schema draft 4 formats - - date-time - - email - - hostname - - ipv4 - - ipv6 - - uri -- [x] swagger 2.0 format extensions - - binary - - byte (e.g. base64 encoded string) - - date (e.g. "1970-01-01") - - password -- [x] go-openapi custom format extensions - - bsonobjectid (BSON objectID) - - creditcard - - duration (e.g. "3 weeks", "1ms") - - hexcolor (e.g. "#FFFFFF") - - isbn, isbn10, isbn13 - - mac (e.g "01:02:03:04:05:06") - - rgbcolor (e.g. "rgb(100,100,100)") - - ssn - - uuid, uuid3, uuid4, uuid5 - - cidr (e.g. "192.0.2.1/24", "2001:db8:a0b:12f0::1/32") - -> NOTE: as the name stands for, this package is intended to support string formatting only. -> It does not provide validation for numerical values with swagger format extension for JSON types "number" or -> "integer" (e.g. float, double, int32...). - -## Format types -Types defined in strfmt expose marshaling and validation capabilities. 
- -List of defined types: -- Base64 -- CreditCard -- Date -- DateTime -- Duration -- Email -- HexColor -- Hostname -- IPv4 -- IPv6 -- CIDR -- ISBN -- ISBN10 -- ISBN13 -- MAC -- ObjectId -- Password -- RGBColor -- SSN -- URI -- UUID -- UUID3 -- UUID4 -- UUID5 diff --git a/vendor/github.com/go-openapi/strfmt/bson.go b/vendor/github.com/go-openapi/strfmt/bson.go deleted file mode 100644 index c14961221825..000000000000 --- a/vendor/github.com/go-openapi/strfmt/bson.go +++ /dev/null @@ -1,165 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package strfmt - -import ( - "database/sql/driver" - "fmt" - - "go.mongodb.org/mongo-driver/bson" - - "go.mongodb.org/mongo-driver/bson/bsontype" - bsonprim "go.mongodb.org/mongo-driver/bson/primitive" -) - -func init() { - var id ObjectId - // register this format in the default registry - Default.Add("bsonobjectid", &id, IsBSONObjectID) -} - -// IsBSONObjectID returns true when the string is a valid BSON.ObjectId -func IsBSONObjectID(str string) bool { - _, err := bsonprim.ObjectIDFromHex(str) - return err == nil -} - -// ObjectId represents a BSON object ID (alias to go.mongodb.org/mongo-driver/bson/primitive.ObjectID) -// -// swagger:strfmt bsonobjectid -type ObjectId bsonprim.ObjectID - -// NewObjectId creates a ObjectId from a Hex String -func NewObjectId(hex string) ObjectId { - oid, err := bsonprim.ObjectIDFromHex(hex) - if err != nil { - panic(err) - } - return ObjectId(oid) -} - -// MarshalText turns this instance into text -func (id ObjectId) MarshalText() ([]byte, error) { - oid := bsonprim.ObjectID(id) - if oid == bsonprim.NilObjectID { - return nil, nil - } - return []byte(oid.Hex()), nil -} - -// UnmarshalText hydrates this instance from text -func (id *ObjectId) UnmarshalText(data []byte) error { // validation is performed later on - if len(data) == 0 { - *id = ObjectId(bsonprim.NilObjectID) - return nil - } - oidstr := string(data) - oid, err := bsonprim.ObjectIDFromHex(oidstr) - if err != nil { - return err - } - *id = ObjectId(oid) - return nil -} - -// Scan read a value from a database driver -func (id *ObjectId) Scan(raw interface{}) error { - var data []byte - switch v := raw.(type) { - case []byte: - data = v - case string: - data = []byte(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.URI from: %#v", v) - } - - return id.UnmarshalText(data) -} - -// Value converts a value to a database driver value -func (id ObjectId) Value() (driver.Value, error) { - return driver.Value(bsonprim.ObjectID(id).Hex()), nil -} - -func (id ObjectId) String() string { - return bsonprim.ObjectID(id).String() -} - -// MarshalJSON returns the ObjectId as JSON -func (id ObjectId) MarshalJSON() ([]byte, error) { - return bsonprim.ObjectID(id).MarshalJSON() -} - -// UnmarshalJSON sets the ObjectId from JSON -func (id *ObjectId) UnmarshalJSON(data []byte) error { - var obj bsonprim.ObjectID - if err := obj.UnmarshalJSON(data); err != nil { - return err - } - *id = ObjectId(obj) - return nil 
-} - -// MarshalBSON renders the object id as a BSON document -func (id ObjectId) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": bsonprim.ObjectID(id)}) -} - -// UnmarshalBSON reads the objectId from a BSON document -func (id *ObjectId) UnmarshalBSON(data []byte) error { - var obj struct { - Data bsonprim.ObjectID - } - if err := bson.Unmarshal(data, &obj); err != nil { - return err - } - *id = ObjectId(obj.Data) - return nil -} - -// MarshalBSONValue is an interface implemented by types that can marshal themselves -// into a BSON document represented as bytes. The bytes returned must be a valid -// BSON document if the error is nil. -func (id ObjectId) MarshalBSONValue() (bsontype.Type, []byte, error) { - oid := bsonprim.ObjectID(id) - return bsontype.ObjectID, oid[:], nil -} - -// UnmarshalBSONValue is an interface implemented by types that can unmarshal a -// BSON value representation of themselves. The BSON bytes and type can be -// assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it -// wishes to retain the data after returning. -func (id *ObjectId) UnmarshalBSONValue(tpe bsontype.Type, data []byte) error { - var oid bsonprim.ObjectID - copy(oid[:], data) - *id = ObjectId(oid) - return nil -} - -// DeepCopyInto copies the receiver and writes its value into out. -func (id *ObjectId) DeepCopyInto(out *ObjectId) { - *out = *id -} - -// DeepCopy copies the receiver into a new ObjectId. -func (id *ObjectId) DeepCopy() *ObjectId { - if id == nil { - return nil - } - out := new(ObjectId) - id.DeepCopyInto(out) - return out -} diff --git a/vendor/github.com/go-openapi/strfmt/date.go b/vendor/github.com/go-openapi/strfmt/date.go deleted file mode 100644 index 2959e6573038..000000000000 --- a/vendor/github.com/go-openapi/strfmt/date.go +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
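The ObjectId wrapper deleted above layers text, JSON, and BSON marshaling over the mongo-driver primitive. A small sketch; the hex literal is an arbitrary, syntactically valid id:

package main

import (
	"fmt"

	"github.com/go-openapi/strfmt"
)

func main() {
	hex := "5e8359ba7462306fbb2d88f8" // arbitrary 24-character hex string

	// IsBSONObjectID is the validator registered for "bsonobjectid".
	fmt.Println(strfmt.IsBSONObjectID(hex)) // true

	// NewObjectId panics on malformed input, so validate first.
	id := strfmt.NewObjectId(hex)
	text, _ := id.MarshalText()
	fmt.Println(string(text)) // 5e8359ba7462306fbb2d88f8
}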
- -package strfmt - -import ( - "database/sql/driver" - "encoding/json" - "errors" - "fmt" - "time" - - "go.mongodb.org/mongo-driver/bson" -) - -func init() { - d := Date{} - // register this format in the default registry - Default.Add("date", &d, IsDate) -} - -// IsDate returns true when the string is a valid date -func IsDate(str string) bool { - _, err := time.Parse(RFC3339FullDate, str) - return err == nil -} - -const ( - // RFC3339FullDate represents a full-date as specified by RFC3339 - // See: http://goo.gl/xXOvVd - RFC3339FullDate = "2006-01-02" -) - -// Date represents a date from the API -// -// swagger:strfmt date -type Date time.Time - -// String converts this date into a string -func (d Date) String() string { - return time.Time(d).Format(RFC3339FullDate) -} - -// UnmarshalText parses a text representation into a date type -func (d *Date) UnmarshalText(text []byte) error { - if len(text) == 0 { - return nil - } - dd, err := time.Parse(RFC3339FullDate, string(text)) - if err != nil { - return err - } - *d = Date(dd) - return nil -} - -// MarshalText serializes this date type to string -func (d Date) MarshalText() ([]byte, error) { - return []byte(d.String()), nil -} - -// Scan scans a Date value from database driver type. -func (d *Date) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - return d.UnmarshalText(v) - case string: - return d.UnmarshalText([]byte(v)) - case time.Time: - *d = Date(v) - return nil - case nil: - *d = Date{} - return nil - default: - return fmt.Errorf("cannot sql.Scan() strfmt.Date from: %#v", v) - } -} - -// Value converts Date to a primitive value ready to written to a database. -func (d Date) Value() (driver.Value, error) { - return driver.Value(d.String()), nil -} - -// MarshalJSON returns the Date as JSON -func (d Date) MarshalJSON() ([]byte, error) { - return json.Marshal(time.Time(d).Format(RFC3339FullDate)) -} - -// UnmarshalJSON sets the Date from JSON -func (d *Date) UnmarshalJSON(data []byte) error { - if string(data) == jsonNull { - return nil - } - var strdate string - if err := json.Unmarshal(data, &strdate); err != nil { - return err - } - tt, err := time.Parse(RFC3339FullDate, strdate) - if err != nil { - return err - } - *d = Date(tt) - return nil -} - -func (d Date) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": d.String()}) -} - -func (d *Date) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { - return err - } - - if data, ok := m["data"].(string); ok { - rd, err := time.Parse(RFC3339FullDate, data) - if err != nil { - return err - } - *d = Date(rd) - return nil - } - - return errors.New("couldn't unmarshal bson bytes value as Date") -} - -// DeepCopyInto copies the receiver and writes its value into out. -func (d *Date) DeepCopyInto(out *Date) { - *out = *d -} - -// DeepCopy copies the receiver into a new Date. -func (d *Date) DeepCopy() *Date { - if d == nil { - return nil - } - out := new(Date) - d.DeepCopyInto(out) - return out -} diff --git a/vendor/github.com/go-openapi/strfmt/default.go b/vendor/github.com/go-openapi/strfmt/default.go deleted file mode 100644 index a89a4de3f38d..000000000000 --- a/vendor/github.com/go-openapi/strfmt/default.go +++ /dev/null @@ -1,2035 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
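date.go above ties the "date" format to the RFC 3339 full-date layout. A round-trip sketch using only the removed API:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/strfmt"
)

func main() {
	// IsDate is the validator registered for the "date" format.
	fmt.Println(strfmt.IsDate("2020-12-14")) // true

	var d strfmt.Date
	if err := d.UnmarshalText([]byte("2020-12-14")); err != nil {
		panic(err)
	}

	// MarshalJSON re-emits the RFC3339FullDate layout ("2006-01-02").
	out, err := json.Marshal(d)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // "2020-12-14"
}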
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package strfmt - -import ( - "database/sql/driver" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "net/mail" - "regexp" - "strings" - - "github.com/asaskevich/govalidator" - "go.mongodb.org/mongo-driver/bson" -) - -const ( - // HostnamePattern http://json-schema.org/latest/json-schema-validation.html#anchor114 - // A string instance is valid against this attribute if it is a valid - // representation for an Internet host name, as defined by RFC 1034, section 3.1 [RFC1034]. - // http://tools.ietf.org/html/rfc1034#section-3.5 - // <digit> ::= any one of the ten digits 0 through 9 - // var digit = /[0-9]/; - // <letter> ::= any one of the 52 alphabetic characters A through Z in upper case and a through z in lower case - // var letter = /[a-zA-Z]/; - // <let-dig> ::= <letter> | <digit> - // var letDig = /[0-9a-zA-Z]/; - // <let-dig-hyp> ::= <let-dig> | "-" - // var letDigHyp = /[-0-9a-zA-Z]/; - // <ldh-str> ::= <let-dig-hyp> | <let-dig-hyp> <ldh-str> - // var ldhStr = /[-0-9a-zA-Z]+/; - //