diff --git a/go.mod b/go.mod index 8594a91405..5b7a5ddb64 100644 --- a/go.mod +++ b/go.mod @@ -22,7 +22,8 @@ require ( github.com/openshift/api v3.9.1-0.20190924102528-32369d4db2ad+incompatible github.com/openshift/client-go v0.0.0-20230120202327-72f107311084 github.com/openshift/library-go v0.0.0-20230503125631-c31a6e7b87d4 - github.com/operator-framework/api v0.3.7-0.20200528122852-759ca0d84007 + github.com/operator-framework/api v0.15.0 + github.com/operator-framework/operator-lib v0.11.0 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.14.0 github.com/prometheus/common v0.37.0 @@ -58,7 +59,6 @@ require ( github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1 // indirect github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/blang/semver v3.5.1+incompatible // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/emicklei/go-restful/v3 v3.9.0 // indirect diff --git a/go.sum b/go.sum index ead1b47780..d98ab87a76 100644 --- a/go.sum +++ b/go.sum @@ -182,7 +182,6 @@ github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngE github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= @@ -939,8 +938,8 @@ github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+ github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU= github.com/onsi/ginkgo/v2 v2.1.6/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk= @@ -996,8 +995,11 @@ github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFSt github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/operator-framework/api v0.1.1/go.mod h1:yzNYR7qyJqRGOOp+bT6Z/iYSbSPNxeh3Si93Gx/3OBY= github.com/operator-framework/api v0.3.4/go.mod h1:TmRmw+8XOUaDPq6SP9gA8cIexNf/Pq8LMFY7YaKQFTs= -github.com/operator-framework/api v0.3.7-0.20200528122852-759ca0d84007 h1:fbpQelcJmwRofhaRrMOUynmBcT6SBhOZOjSlgaXVyyk= github.com/operator-framework/api v0.3.7-0.20200528122852-759ca0d84007/go.mod h1:Xbje9x0SHmh0nihE21kpesB38vk3cyxnE6JdDS8Jo1Q= +github.com/operator-framework/api v0.15.0 h1:4f9i0drtqHj7ykLoHxv92GR43S7MmQHhmFQkfm5YaGI= +github.com/operator-framework/api 
v0.15.0/go.mod h1:scnY9xqSeCsOdtJtNoHIXd7OtHZ14gj1hkDA4+DlgLY= +github.com/operator-framework/operator-lib v0.11.0 h1:eYzqpiOfq9WBI4Trddisiq/X9BwCisZd3rIzmHRC9Z8= +github.com/operator-framework/operator-lib v0.11.0/go.mod h1:RpyKhFAoG6DmKTDIwMuO6pI3LRc8IE9rxEYWy476o6g= github.com/operator-framework/operator-registry v1.5.3/go.mod h1:agrQlkWOo1q8U1SAaLSS2WQ+Z9vswNT2M2HFib9iuLY= github.com/operator-framework/operator-registry v1.12.1/go.mod h1:rf4b/h77GUv1+geiej2KzGRQr8iBLF4dXNwr5AuGkrQ= github.com/operator-framework/operator-registry v1.12.4/go.mod h1:JChIivJVLE1wRbgIhDFzYQYT9yosa2wd6qiTyMuG5mg= diff --git a/manifests/02-deployment-ibm-cloud-managed.yaml b/manifests/02-deployment-ibm-cloud-managed.yaml index 844e0d0dd0..6d5bb8e21d 100644 --- a/manifests/02-deployment-ibm-cloud-managed.yaml +++ b/manifests/02-deployment-ibm-cloud-managed.yaml @@ -41,7 +41,7 @@ spec: fieldRef: fieldPath: metadata.namespace - name: IMAGE - value: openshift/origin-haproxy-router:v4.0 + value: quay.io/rfredette/openshift-router:haproxy-router-c0f5c8ef - name: CANARY_IMAGE value: openshift/origin-cluster-ingress-operator:latest image: openshift/origin-cluster-ingress-operator:latest diff --git a/manifests/02-deployment.yaml b/manifests/02-deployment.yaml index 4f8d3a4083..68dbda0b71 100644 --- a/manifests/02-deployment.yaml +++ b/manifests/02-deployment.yaml @@ -70,7 +70,7 @@ spec: fieldRef: fieldPath: metadata.namespace - name: IMAGE - value: openshift/origin-haproxy-router:v4.0 + value: quay.io/rfredette/openshift-router:haproxy-router-c0f5c8ef - name: CANARY_IMAGE value: openshift/origin-cluster-ingress-operator:latest resources: diff --git a/pkg/manifests/bindata.go b/pkg/manifests/bindata.go index 0aa2368ab9..c735d3f065 100644 --- a/pkg/manifests/bindata.go +++ b/pkg/manifests/bindata.go @@ -34,8 +34,8 @@ // manifests/01-service-account.yaml (405B) // manifests/01-service.yaml (538B) // manifests/01-trusted-ca-configmap.yaml (517B) -// manifests/02-deployment-ibm-cloud-managed.yaml (3.856kB) -// manifests/02-deployment.yaml (4.288kB) +// manifests/02-deployment-ibm-cloud-managed.yaml (3.878kB) +// manifests/02-deployment.yaml (4.31kB) // manifests/03-cluster-operator.yaml (1.047kB) // manifests/image-references (435B) @@ -786,7 +786,7 @@ func manifests01TrustedCaConfigmapYaml() (*asset, error) { return a, nil } -var _manifests02DeploymentIbmCloudManagedYaml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\x57\xdd\x6f\xdb\x36\x10\x7f\xf7\x5f\x41\x64\x03\xba\x01\xa5\xe5\xa4\x1f\x5b\x05\xe4\xc1\x73\xdc\x26\x40\xe2\x18\x76\xd0\x62\x4f\xc6\x99\x3a\xdb\x9c\x29\x52\x3d\x1e\xdd\x08\xc3\xfe\xf7\x81\x92\xeb\x48\xb6\xf3\x01\x74\x0f\xd3\x93\x44\xde\xfd\x8e\xf7\xbb\x0f\x9e\x7e\x12\x5f\xfa\x93\xd1\xd5\xe8\x53\x2a\x2e\x6e\xc5\xe8\xf6\x4e\x0c\x2f\xae\xee\x5e\x8b\x25\x5a\x24\x60\xcc\xc4\xbc\x14\x2b\x50\xeb\x24\x14\x19\x30\xca\x82\xdc\x42\x1b\x94\x39\x58\xbd\x40\xcf\xbe\xeb\x57\x1d\x28\xf4\x67\x24\xaf\x9d\x4d\x05\x14\x85\x4f\x36\xa7\x9d\xb5\xb6\x59\x2a\x2e\xb0\x30\xae\xcc\xd1\x72\x27\x47\x86\x0c\x18\xd2\x8e\x10\x60\xad\x63\x60\xed\xac\x8f\x9f\x42\x28\x67\x17\x7a\xd9\x75\x05\x5a\xbf\xd2\x0b\xee\x6a\x97\x68\xfb\x17\x2a\x8e\x16\xef\xcb\x54\x68\xbb\x24\xf4\x5e\xba\x22\x1e\xcc\x51\xa5\xa6\xad\x32\x21\xc3\x2e\xa1\x41\xf0\xb8\xa7\x3f\xcf\xa5\x32\x2e\x64\xf1\xb0\xb0\xc4\x2c\x15\x27\x4c\x01\x4f\x3a\x42\x58\xc8\xf1\x28\x66\xdc\xf0\x05\x28\x4c\xc5\x0e\x4c\x1e\xc8\xf9\x02\x55\x3c\x38\x61\x61\xb4\x02\x9f\x8a\xd3\x8e\x10\x1e\x0d\x2a\x76\x54\xbb\x94\x03\xab\xd5\x35\xcc\xd1\x6c\x7d\x7c\xc2\xaa\xe7\x48\xf6\xb2\xac\x05\xb9\x2c\x30\x15\x13\x54\x84\xc0\xd8\x11\x82\x31\x2f\x0c\x30\x6e\x81\x1b\x44\xc6\xe7\x80\xcc\x0a\x03\x68\x89\xdc\xfd\xe6\x68\x6d\x1c\x64\x6d\x66\x6a\x3e\x62\x54\x52\xf1\xea\xef\x13\x5c\x2c\x50\xf1\x49\x2a\x4e\xc6\x84\x0b\x24\xc2\xec\x22\x90\xb6\xcb\xa9\x5a\x61\x16\x8c\xb6\xcb\x93\x7f\x5e\x6d\xa1\x4d\xcb\xa3\x27\x7c\x12\xe2\x3b\x4b\xdb\x00\x33\x68\x8b\xb4\x53\x95\x42\xb9\x3c\x07\x9b\x3d\x60\xc9\xe3\x38\xf5\x96\x67\x20\x6e\x7c\x4b\xb9\x8b\x55\x63\xf5\xe7\x5f\xbe\xf4\xef\x06\x97\xb3\x51\xff\x66\x38\x1d\xf7\x07\xc3\x5f\x5b\x2a\x3a\x87\x65\x5b\xfc\xea\xa6\xff\x69\x4f\x48\x81\x05\x2a\x8f\xc8\x0e\xfa\xa3\xfe\xe4\xcf\xd9\x11\x95\x6d\x02\xca\x4d\x5d\x06\x2d\xad\xc9\xf0\x7a\xd8\x9f\x0e\x67\x9f\x87\x93\xe9\xd5\xed\xe8\x41\x11\xed\xa6\xe9\x7b\xcd\xe4\x9e\xf4\x6e\x5f\x88\x0d\x98\x80\xa9\xe8\x75\x7b\xdd\x53\xe9\x2d\x14\x7e\xe5\xf8\x40\x7f\xcf\xfd\x7d\xfd\x8f\xe4\xf2\xb4\xb1\x28\xc4\x42\xa3\xc9\x26\xb8\x68\xaf\x6e\xd7\xc7\xc0\xab\x74\x97\x71\xdd\x63\x8c\xd7\x66\x2b\x4e\x0e\x0f\xbb\x4b\xbb\xc4\x91\x5e\x6a\x2b\x57\x50\xd5\xb3\x24\x17\x18\x29\xdd\xbc\xed\xf6\x0e\xb0\x9a\x34\xbf\x00\x52\x99\xe0\x19\xe9\xa0\x4a\xd3\x58\x32\xfe\x81\xa1\x2a\x9e\x3f\xaa\x3f\x0e\xc6\x8c\x9d\xd1\xaa\x4c\xc5\xd5\x62\xe4\x78\x4c\xe8\x63\x7b\xfb\x2e\xf5\x44\x3d\xc4\x87\xd0\xbb\x40\x0a\x7d\x93\x6e\xc2\xaf\x21\xf6\xd2\x76\x08\x54\x11\x52\x71\xda\xcb\x5b\x8b\x39\xe6\x8e\xca\x54\xbc\x7b\x7f\xa3\x77\x1b\x1e\x55\x20\xcd\xe5\xc0\x59\xc6\x7b\x6e\xc2\x80\x31\xee\xdb\x98\xf4\x46\x1b\x5c\xe2\xd0\x2b\x30\x55\xab\x48\xc5\x02\x8c\xc7\x86\xa4\x82\x02\xe6\xda\x68\xd6\xb8\x77\x90\x8c\x5c\xd1\x5e\x91\xa2\x7f\x7d\xfd\xd0\x6b\x90\x72\x6d\x2b\xd8\x1b\xf4\x3e\xb2\xb4\x65\xe8\x23\x18\x33\x07\xb5\xbe\x73\xd7\x6e\xe9\x6f\xed\x90\xa8\xc1\xc5\xc6\x99\x90\xe3\x8d\x0b\xb6\xe9\xba\x14\x79\x5c\xa9\x33\x2f\x41\x56\x49\xb1\xd6\x89\x02\xc9\x14\x3c\x27\x78\xcf\x04\x8a\x31\x4b\x0a\x6c\x52\x53\xf3\x5e\xc9\x60\x26\x15\xb4\xe8\x85\xec\xd6\x9a\xb2\xda\xc6\x47\x0c\x6d\x80\x12\x0a\x36\xf1\xb1\xed\xb2\x4f\x1e\xd2\xc4\x23\x6d\xb4\x42\x50\x2a\x8a\x1f\x98\x9c\xbb\x60\x33\xe9\x41\xb2\x5b\xa3\x7d\xce\xac\x14\x40\xcb\x96\xb7\x52\x1a\xb7\x64\xe7\x39\x43\xa2\xd6\x7a\x15\x56\x94\x46\x7b\x46\x2b\x21\xcb\x62\x4a\x9d\xa7\x1f\xde\x7c\x78\xd3\x92\x63\xe3\xa5\xd2\xc5\x0a\x49\xfa\xa0\x19\xfd\xf9\xdd\xf5\x74\x36\x1c\x5c\x5c\x0e\x67\x93\x69\x7f\xf6\xe5\xea\xee\x72\xd6\x1f\x4e\x67\xa7\x67\xbf\xcf\x3e\x0d\x6e\x66\xd3\xcb\xfe\xd9\xbb\xf7\xaf\x1f\xa4\x86\x83\x8b\x67\xe
4\x0e\x70\x06\x7f\x0c\x5e\x84\x73\x54\xee\x09\xb4\x96\x67\xa1\xf0\x4c\x08\xf9\xf9\x8a\xb9\x48\x93\xe4\xf4\xec\xb7\x6e\xd5\xff\xd2\xf7\xbd\x5e\xaf\x97\x1c\xd2\x80\xc4\x32\x0e\x28\xe7\x55\xe6\xb0\xf1\x49\x41\x7a\x03\x8c\xf1\xbd\xab\xf6\x6e\x90\xa8\xb2\xdd\x97\x6b\x2c\x9f\xd0\x5c\x63\xb9\xdf\x48\xbe\x06\x28\xe3\x6d\x7a\xd0\x50\xd6\x61\x8e\x92\xe6\xa0\xb6\xb3\xcb\x5e\x1f\xa9\xd3\x66\x4f\x68\xb7\x5b\x38\x6a\x57\xc3\xee\xe6\x1c\x3b\xe2\x54\xb4\xa2\xff\x1d\x2c\x47\x26\xad\xfc\x7f\xde\x65\xde\xf6\xfe\x3f\x5d\xe6\xa5\xdd\xa2\x11\xb9\xc7\x78\x8a\x71\x7f\xae\x50\xad\xcb\x70\xda\x1a\xe8\xe2\x13\xa3\x46\x16\x19\x7d\x15\x79\x9f\x0a\xa3\x6d\xb8\xdf\xee\x17\xa4\x5d\xc5\x90\x01\xef\x47\x95\x45\x5f\x7a\xc6\x7c\x77\xc7\x28\xd2\xac\x15\x98\xce\x33\x94\x52\xb0\x7d\x3f\x72\x76\xe2\x1c\xef\xb5\x2d\x8f\x4a\xb9\xbc\x18\xd7\x83\x78\x93\xb1\xed\xdc\x18\x2c\xeb\x1c\x2f\x70\x01\xc1\xf0\xce\x50\xd5\xc3\xfa\x75\x0f\x1b\x3d\x75\x47\xb1\x33\xf1\xbb\x39\x4e\x4a\x51\x8f\x88\xa9\x18\xb9\xed\x4c\xf8\x70\x9e\x35\x96\x69\xc5\x96\x24\x67\xb0\xdb\x66\x28\x87\xe8\xf6\x4e\x76\x77\xb5\x8a\xe1\xbd\xf6\xec\x8f\xe0\x0f\xef\x51\x05\x3e\x02\xbf\x87\x1c\x2c\x21\xa8\x15\xcc\x1b\x47\x79\x04\xbe\xe9\xd3\x14\x95\xb3\x59\x1c\xd8\xcf\x7a\x3f\x60\xdd\x3a\x96\x31\x6d\xca\x1f\xb4\x5d\x27\x75\x83\xe8\xc7\xf3\xb4\xbe\x98\x9a\xf1\xae\x57\x46\x8f\x68\xc8\xed\x4f\xd5\x0d\xb4\xca\x4a\x33\xe6\xad\xca\x93\xb5\x93\x0a\xe4\x3c\xd8\xcc\x60\xab\x49\x56\x59\x5d\xd5\x56\xd5\x5d\x77\x32\x2f\xbc\x7b\x1f\xd9\x90\x4f\x5f\x9d\x05\xb9\xf8\xe7\x87\x59\xcb\xd9\xc3\x86\x26\xf7\xd2\xfa\x2e\xa2\xec\x8f\xb1\x10\x32\x8d\xb6\xf5\x3f\xb7\x27\xb1\xf5\xaf\x3a\xc2\xbf\x01\x00\x00\xff\xff\x89\x10\xa4\x60\x10\x0f\x00\x00") +var _manifests02DeploymentIbmCloudManagedYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\x57\x5f\x6f\xdb\x38\x12\x7f\xf7\xa7\x20\x72\x07\xf4\x0e\x28\x2d\x27\xbd\xf6\x5a\x01\x79\xf0\x39\x6a\x13\x20\x71\x0c\x3b\x68\x71\x4f\xc6\x98\x1a\xd9\x5c\x53\xa4\x4a\x0e\xdd\x08\x8b\xfd\xee\x0b\x4a\x8a\x2d\xc9\x4e\x52\xa0\xfb\xb0\x7e\x32\xc9\x99\xdf\x70\x7e\xf3\x87\xa3\x7f\xb0\x6f\xe3\xf9\xf4\x66\xfa\x25\x66\x57\xf7\x6c\x7a\xff\xc0\x92\xab\x9b\x87\xb7\x6c\x8d\x1a\x2d\x10\xa6\x6c\x55\xb2\x0d\x88\x6d\xe4\x8b\x14\x08\x79\x61\x4d\x26\x15\xf2\x1c\xb4\xcc\xd0\x91\x1b\xba\xcd\x00\x0a\xf9\x15\xad\x93\x46\xc7\x0c\x8a\xc2\x45\xbb\xf3\xc1\x56\xea\x34\x66\x57\x58\x28\x53\xe6\xa8\x69\x90\x23\x41\x0a\x04\xf1\x80\x31\xd0\xda\x10\x90\x34\xda\x85\x25\x63\xc2\xe8\x4c\xae\x87\xa6\x40\xed\x36\x32\xa3\xa1\x34\x91\xd4\xbf\xa1\xa0\x60\xf1\xb1\x8c\x99\xd4\x6b\x8b\xce\x71\x53\x84\x8b\x19\x5b\xa9\x49\x2d\x94\x4f\x71\x68\x51\x21\x38\xec\xe9\xaf\x72\x2e\x94\xf1\x69\xb8\x2c\xac\x31\x8d\xd9\x19\x59\x8f\x67\x03\xc6\x34\xe4\x78\x12\x33\x1c\xb8\x02\x04\xc6\x6c\x0f\xc6\x8f\xe4\x5c\x81\x22\x5c\xdc\x62\xa1\xa4\x00\x17\xb3\xf3\x01\x63\x0e\x15\x0a\x32\xb6\x76\x29\x07\x12\x9b\x5b\x58\xa1\x6a\x7c\x7c\xc1\xaa\xa3\x40\xf6\xba\xac\x05\xa9\x2c\x30\x66\x73\x14\x16\x81\x70\xc0\x18\x61\x5e\x28\x20\x6c\x80\x5b\x44\x86\xdf\x11\x99\x15\x06\xd8\x35\xd2\xf0\x87\xb1\x5b\x65\x20\xed\x32\x53\xf3\x11\xa2\x12\xb3\x37\xbf\x9f\x61\x96\xa1\xa0\xb3\x98\x9d\xcd\x2c\x66\x68\x2d\xa6\x57\xde\x4a\xbd\x5e\x88\x0d\xa6\x5e\x49\xbd\x3e\xfb\xe3\x4d\x03\xad\x3a\x1e\xbd\xe0\x13\x63\x4f\x2c\x35\x01\x26\x90\x1a\xed\x5e\x95\x33\x61\xf2\x1c\x74\x7a\xc0\xe2\xa7\x71\xea\x23\x47\x60\xa9\xb5\xe6\x7c\x1f\xab\xd6\xee\x3f\xff\xf5\x6d\xfc\x30\xb9\x5e\x4e\xc7\x77\xc9\x62\x36\x9e\x24\xff\xee\xa8\xc8\x1c\xd6\x5d\xf1\x9b\xbb\xf1\x97\x9e\x90\x00\x0d\xb6\x3c\x21\x3b\x19\x4f\xc7\xf3\xff\x2f\x4f\xa8\x34\x09\xc8\x77\x75\x19\x74\xb4\xe6\xc9\x6d\x32\x5e\x24\x
cb\xaf\xc9\x7c\x71\x73\x3f\x3d\x28\xa2\xde\xb5\x7d\xaf\x99\xec\x49\xef\xcf\x19\xdb\x81\xf2\x18\xb3\xd1\x70\x34\x3c\xe7\x4e\x43\xe1\x36\x86\x8e\xf4\x7b\xee\xf7\xf5\x3f\x5b\x93\xc7\xad\x4d\xc6\x32\x89\x2a\x9d\x63\xd6\xdd\x6d\xf6\x67\x40\x9b\x78\x9f\x71\xc3\x53\x8c\xd7\x66\x2b\x4e\x8e\x2f\xfb\xdd\x43\x19\x32\xce\x66\x16\x53\x24\xc2\xe8\x50\x55\xd6\x78\x42\x1b\x6f\xa0\xaa\xf0\x66\xc9\xc5\x28\x7b\x2f\x3e\x62\x76\x64\xa1\x4d\xfe\xb1\xa1\x3d\x6c\x64\xac\x5c\x4b\xcd\x85\xf2\x2e\xe0\xf5\x33\x2a\x0e\x85\xe4\x0e\xbc\x55\x51\xfe\x55\xfd\x99\x57\x6a\x66\x94\x14\x65\xcc\x6e\xb2\xa9\xa1\x99\x45\x17\x9a\xde\x93\xd4\x0b\x55\x12\x7e\x16\x9d\xf1\x56\xa0\x6b\x07\xc1\xe2\x77\x1f\x3a\x6c\x37\x30\xa2\xf0\x31\x3b\x1f\xe5\x9d\xcd\x1c\x73\x63\xcb\x98\xbd\xff\x70\x27\xf7\x07\x0e\x85\xb7\x92\xca\x89\xd1\x84\x8f\xd4\x86\x01\xa5\xcc\x8f\x99\x95\x3b\xa9\x70\x8d\x89\x13\xa0\xaa\x06\x12\xb3\x0c\x94\xc3\x96\xa4\x80\x02\x56\x52\x49\x92\xd8\xbb\x48\x6a\x4d\xd1\xdd\xe1\x6c\x7c\x7b\x7b\xe8\x40\x68\x73\xa9\x2b\xd8\x3b\x74\x2e\xb0\xd4\x30\xf4\x19\x94\x5a\x81\xd8\x3e\x98\x5b\xb3\x76\xf7\x3a\xb1\xb6\xc5\xc5\xce\x28\x9f\xe3\x9d\xf1\xba\xed\x3a\x67\x79\xd8\xa9\xf3\x31\x42\x12\x51\xb1\x95\x91\x00\x4e\xd6\x3b\x8a\xf0\x91\x2c\x08\xc2\x34\x2a\xb0\x4d\x4d\xcd\x7b\x25\x83\x29\x17\xd0\xa1\x17\xd2\x7b\xad\xca\xea\x18\x9f\x31\xb4\x03\x1b\x59\xaf\x23\x17\x9a\x31\xb9\x43\xf6\x46\x0e\xed\x4e\x0a\x04\x21\x82\xf8\x91\xc9\x95\xf1\x3a\xe5\x0e\x38\x99\x2d\xea\xd7\xcc\x72\x06\x76\xdd\xf1\x96\x73\x65\xd6\x64\x1c\xa5\x68\x6d\x67\xbf\x0a\x2b\x72\x25\x1d\xa1\xe6\x90\xa6\x21\xa5\x2e\xe3\x4f\xef\x3e\xbd\xeb\xc8\x91\x72\x5c\xc8\x62\x83\x96\x3b\x2f\x09\xdd\xe5\xc3\xed\x62\x99\x4c\xae\xae\x93\xe5\x7c\x31\x5e\x7e\xbb\x79\xb8\x5e\x8e\x93\xc5\xf2\xfc\xe2\xe3\xf2\xcb\xe4\x6e\xb9\xb8\x1e\x5f\xbc\xff\xf0\xf6\x20\x95\x4c\xae\x5e\x91\x3b\xc2\x99\xfc\x6f\xf2\x53\x38\x27\xe5\x5e\x40\xeb\x78\xe6\x0b\x47\x16\x21\xbf\xdc\x10\x15\x71\x14\x9d\x5f\xfc\x77\x58\x75\xc5\xf8\xc3\x68\x34\x1a\x45\xc7\x34\xa0\x25\x1e\xc6\x96\xcb\x2a\x73\x48\xb9\xa8\xb0\x72\x07\x84\xe1\xff\x50\xf4\xde\x95\xa0\xd2\x9c\xf3\x2d\x96\x2f\x68\x6e\xb1\xec\x37\x92\xa7\x8e\x77\xd4\x50\xb6\x7e\x85\xdc\xae\x40\x34\x13\x4d\xaf\x8f\xd4\x69\xd3\x13\xda\x9f\x16\xc6\x76\xab\x61\xff\x9e\xce\x8c\xa5\x98\x75\xa2\xff\x04\x96\x23\x59\x29\xdc\x5f\xde\x65\xfe\x33\xfa\xfb\x74\x99\x9f\xed\x16\xad\xc8\x3d\xc7\x53\x88\xfb\x6b\x85\xaa\x4d\x8a\x8b\xce\x98\x17\x7e\x21\x6a\x56\x23\xa1\xab\x22\xef\x62\xa6\xa4\xf6\x8f\xcd\x79\x61\xa5\xa9\x18\x52\xe0\xdc\xb4\xb2\xe8\x4a\x47\x98\xef\xdf\x18\x61\x25\x49\x01\x6a\xf0\x0a\xa5\xd6\xeb\xb1\x9b\x1a\x3d\x37\x86\x7a\x6d\xcb\xa1\x10\x26\x2f\x66\xf5\x78\xde\x66\xac\x99\x26\xbd\x26\x99\xe3\x15\x66\xe0\x15\xed\x0d\x55\x3d\x6c\x5c\xf7\xb0\xe9\x4b\x6f\x14\x19\x15\xd6\xed\x21\x93\xb3\x7a\x70\x8c\xd9\xd4\x34\x93\xe2\xe1\x3e\x5b\x2c\xe3\x8a\x2d\x6e\x8d\xc2\x61\x97\xa1\x1c\x82\xdb\x7b\xd9\xfd\xd3\xca\x92\x47\xe9\xc8\x9d\xc0\x4f\x1e\x51\x78\x3a\x01\xdf\x43\xf6\xda\x22\x88\x0d\xac\x5a\x57\x79\x06\xbe\xed\xd3\x02\x85\xd1\x69\x18\xe3\x2f\x46\xbf\x60\x5d\x1b\xe2\x21\x6d\xca\x5f\xb4\x5d\x27\x75\x8b\xe8\xe7\xf3\xb4\x7e\x98\xda\xf1\xae\x77\xa6\xcf\x68\xf0\xe6\x53\xeb\x0e\x3a\x65\x25\x09\xf3\x4e\xe5\xf1\xda\x49\x01\x7c\xe5\x75\xaa\xb0\xd3\x24\xab\xac\xae\x6a\xab\xea\xae\x7b\x99\x9f\x7c\x7b\x9f\x39\xe0\x2f\x3f\x9d\x85\x35\xe1\x7b\x10\xd3\x8e\xb3\xc7\x0d\x8d\xf7\xd2\xfa\x21\xa0\xf4\x87\x5b\xf0\xa9\x44\xdd\xf9\xca\xeb\x49\x34\xfe\x55\x57\xf8\x33\x00\x00\xff\xff\xb0\xb4\xf0\xe1\x26\x0f\x00\x00") func manifests02DeploymentIbmCloudManagedYamlBytes() ([]byte, error) { return bindataRead( @@ -801,12 +801,12 @@ func 
manifests02DeploymentIbmCloudManagedYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "manifests/02-deployment-ibm-cloud-managed.yaml", size: 3856, mode: os.FileMode(420), modTime: time.Unix(1, 0)} - a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x3d, 0x41, 0x2d, 0xa2, 0x67, 0xcd, 0x8b, 0x3d, 0x43, 0x65, 0x5c, 0xda, 0xa4, 0x97, 0x60, 0xf0, 0x7b, 0xf7, 0x82, 0xbb, 0xe6, 0x14, 0x7, 0xd3, 0xe4, 0x28, 0xd9, 0xb6, 0xe0, 0x28, 0x16, 0x24}} + info := bindataFileInfo{name: "manifests/02-deployment-ibm-cloud-managed.yaml", size: 3878, mode: os.FileMode(420), modTime: time.Unix(1, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xac, 0x21, 0xa5, 0x47, 0x68, 0x14, 0x1f, 0x64, 0x24, 0x1d, 0x12, 0xf1, 0x3e, 0xfe, 0xc3, 0xb8, 0x77, 0x87, 0x18, 0x2a, 0xf0, 0x61, 0xda, 0xd1, 0x5f, 0x91, 0x21, 0xd1, 0x79, 0x91, 0xde, 0x98}} return a, nil } -var _manifests02DeploymentYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\x58\x6f\x6f\xf3\xb8\x0d\x7f\xdf\x4f\x21\x78\x03\x6e\x03\x4e\x49\xda\xbb\x7b\xb6\x33\xd0\x17\x59\x9a\xe7\x5a\xa0\x69\x83\xa4\xb8\x62\x18\x86\x80\x91\x99\x58\x8b\x2c\xf9\x28\x2a\x4f\x83\x61\xdf\x7d\x90\xe3\xfc\xb1\x93\xa6\x7d\x73\xc0\xf9\x55\x6a\x91\x3f\x92\x3f\x52\x24\x5d\x28\xf5\xaf\x48\x5e\x3b\x9b\x0a\x28\x4b\xdf\x5d\x5f\x5f\xad\xb4\xcd\x52\x71\x87\xa5\x71\x9b\x02\x2d\x5f\x15\xc8\x90\x01\x43\x7a\x25\x84\x85\x02\x53\xa1\xed\x92\xd0\x7b\xe9\x4a\x24\x60\x47\xf5\x81\x2f\x41\x61\x2a\x5c\x89\xd6\xe7\x7a\xc1\xf2\x8c\x1c\x58\xeb\x18\x58\x3b\xeb\x23\x9e\x10\xca\xd9\x85\x5e\x76\xf6\x4a\x1d\xed\xba\xda\xfe\x07\x15\xcb\x92\xdc\xdb\xe6\xac\x35\x21\xb4\x55\x26\x64\xd8\x21\x34\x08\x1e\x9b\xfa\x1e\xcd\x42\x16\x60\x61\x89\x99\xcc\xf5\x32\x97\xb0\x06\x6d\x60\xae\x8d\xe6\x4d\x2a\x12\xa6\x80\xc9\x27\x70\xb4\x5d\x1a\x94\xd6\x65\x28\x33\x5c\xa3\x89\x2e\xec\xd5\x7d\x89\x2a\xc6\x40\x58\x1a\xad\xc0\xa7\xe2\xfa\x4a\x08\xcf\x04\x8c\xcb\xcd\x36\x3a\xde\x94\x98\x8a\x09\x2a\x42\x60\x8c\xc7\x68\x50\xb1\xa3\xed\x71\x01\xac\xf2\x47\x98\xa3\xa9\xd9\xb8\xc0\x30\x63\x51\x1a\x60\xac\x35\x8f\x92\x12\x9f\x13\x5e\x2b\xeb\x40\x4b\xe4\xce\x37\x47\x2b\xe3\x20\x6b\x06\xb7\xe5\x27\x66\x38\x15\xdf\xfd\x37\xc1\xc5\x02\x15\x27\xa9\x48\xc6\x84\x0b\x24\xc2\xec\x2e\x90\xb6\xcb\xa9\xca\x31\x0b\x46\xdb\x65\xf2\xbf\xef\x6a\x68\xd3\x70\xf9\x82\xd3\x42\xec\x58\xaa\x7e\xa3\x0a\xa4\x79\x33\x70\x96\xf1\x8d\x0f\xfa\x14\x6c\xdf\x3f\x39\x3b\x71\x8e\x53\x11\xe9\xdd\x1f\x79\x54\xca\x15\xe5\x98\xdc\x42\x1b\x3c\xa8\xec\xb9\x0d\x96\x75\x81\x77\xb8\x80\x60\x78\x47\xa2\xcb\x70\xda\x60\x3a\x3e\xab\x30\x47\xb2\xc8\xe8\x63\xfc\xce\xa7\xc2\x68\x1b\xde\x0e\x51\xc4\x3c\x93\x33\xd8\x69\x4a\x16\xe0\xb9\xca\x7b\x52\x8b\xb2\x33\x31\xc2\x63\xb2\xa5\x58\x61\xac\xac\xcb\x18\xc9\xde\xd6\x8e\xa3\x54\x24\xc3\x37\xed\xd9\x1f\x8e\xb6\x99\x48\x45\xf2\xe4\x6a\xee\x31\x39\x63\xa5\x65\x20\x58\x42\x50\x39\xcc\x0f\xd2\x9f\xb5\x32\x7c\x43\x15\xf8\x48\xed\x10\xdf\x14\x95\xb3\x59\x2c\xed\x9b\xde\xc7\x3e\x58\xc7\x92\x10\xb2\xcd\xef\xeb\x81\x47\x5a\x6b\x85\x7d\xa5\x5c\xb0\xfc\xf4\x7e\xed\x09\x51\x92\x76\x55\xc5\x19\xf0\x7e\x2b\xe9\x37\x9e\xb1\x90\xca\x84\x98\x11\xa9\x48\xb3\x56\x60\x6a\x05\xe5\x2c\x83\xb6\x48\x47\xd5\x2d\x2f\xd5\xf7\x07\x95\x1d\x1f\x30\xc6\x7d\x1b\x93\x5e\x6b\x83\x4b\x1c\x7a\x05\xa6\x0a\x2d\x15\x0b\x30\x1e\x1b\xb2\x0a\xca\x6d\x9b\xd2\xe8\x9b\x28\x42\x64\xe4\xca\x54\xfc\x2b\xe9\x3f\x3e\x26\xff\x3e\xbe\x08\x48\x85\xb6\x15\xe4\x08\xbd\x87\x25\x8e\x9d\xd1\x6a\x93\x8a\xaf\x60\xcc\x1c\xd4\xea\xc5\x3d\xba\xa5\x7f\xb6\x43\xa2\x86\xdb\xba\x88\xc2\xc1\x98\x9d\xc2\xc3\xe2\xc9\xf1\x98\xd0\xc7\xc6\xdf\x92\x3b\xea\xec\x5d\x47\x7a\
xa9\xed\x9e\xc4\x36\x33\x69\x6c\x54\xfe\x18\x41\xb9\xa2\x00\x9b\x1d\x87\x24\x2f\x11\x2a\x85\x67\x20\x6e\xbc\x91\x72\x3f\x65\x1a\xef\x93\x3f\xff\xe5\xb5\xff\x32\xb8\x9f\x3d\xf5\x47\xc3\xe9\xb8\x3f\x18\xfe\x35\x69\x29\x56\x01\xb4\x95\x1e\x46\xfd\x5f\x4e\x45\x15\x58\xa0\xcd\x79\x8d\x41\xff\xa9\x3f\xf9\xe7\xec\xbc\x62\x3d\x42\xe4\x7a\x3b\x51\xdb\xba\x93\xe1\xe3\xb0\x3f\x1d\xce\x7e\x1d\x4e\xa6\x0f\xcf\x4f\x0d\x75\xb4\xeb\x66\xb2\x77\x25\xd7\x52\x6a\x15\xc4\x1a\x4c\xc0\x54\x24\xbd\x4e\xaf\x73\x2d\xbd\x85\xd2\xe7\x8e\x93\xb3\x48\x2d\x86\xce\x21\x7d\x25\x57\xb4\x6b\x4e\x88\x85\x46\x93\x4d\x70\x71\x7a\x52\x9f\x8d\x81\xf3\x74\x3f\x90\x3a\xe7\x72\x74\x70\xa3\xa2\xee\x7c\x18\x27\xd5\x95\x43\xb5\x01\x48\x72\x21\xf6\xdf\xf5\x8f\x9d\xde\x59\xcc\xe3\xac\x7c\x12\xfa\xf3\x85\x4b\xe8\x5d\x20\xd5\xbe\x8d\x84\xbf\x05\xf4\x7c\x72\x47\x55\x19\x52\x71\xdd\x2b\x5a\xaf\x0b\x2c\x1c\x6d\x52\xf1\xd3\x97\x91\x3e\x3a\x5a\x3b\x13\x0a\x1c\xc5\x36\xe6\x9b\x57\x63\x1b\x19\x53\x74\x33\x93\x0a\x1a\x78\x45\x54\xd8\xd2\xde\x45\x56\xdd\x72\xa5\xbb\x0a\x64\x25\xdd\xc5\x37\x26\x50\x8c\x59\xb7\xc4\xa2\xe5\x33\x64\xcf\xd6\x6c\x5a\x13\xf6\x60\x6e\xee\x82\xcd\xa4\x07\xc9\x6e\x85\xf6\x5d\x93\x6b\xa0\x2e\x05\xdb\xf5\x71\xad\x61\xdf\x3d\xb0\x5b\x77\x66\xd8\x76\xe6\xcf\x18\xdf\x99\x8e\x83\x44\xd2\x1c\xd4\x76\xeb\xfb\xa3\xb4\xd6\xba\xf1\xfd\x16\x60\x53\xed\x0c\xed\x3a\x6a\xb9\x7d\x5a\x3e\x40\xcb\x56\x66\xa5\x34\x6e\xc9\xce\x73\x86\x44\xad\x93\x2a\x56\x94\x46\x7b\x46\x2b\x21\xcb\x62\x71\xde\xa6\x3f\xff\xf0\xf3\x0f\x2d\x49\x36\x5e\x2a\x5d\xe6\x48\xd2\x07\xcd\xe8\x6f\x5f\x1e\xa7\xb3\xe1\xe0\xee\x7e\x38\x9b\x4c\xfb\xb3\xd7\x87\x97\xfb\x59\x7f\x38\x9d\x5d\xdf\xfc\x7d\xf6\xcb\x60\x34\x9b\xde\xf7\x6f\x7e\xfa\xf2\xfd\x41\x6a\x38\xb8\xfb\x40\xee\x04\x67\xf0\x8f\xc1\xa7\x70\xce\xca\x5d\x40\x6b\xc5\x16\x4a\xcf\x84\x50\xdc\xe6\xcc\x65\xda\xed\x5e\xdf\xfc\xad\x53\xb5\xb8\xf4\x4b\xaf\xd7\xeb\x75\xcf\x51\x81\xc4\x32\x2e\x88\xb7\xd5\x85\x60\xe3\xbb\x25\xe9\x35\x30\xc6\xdf\x1d\x75\x32\x49\xa2\x52\x2d\x21\x57\xb8\xb9\xa0\xbb\xc2\xe3\x6a\x2c\x1d\xb5\xaf\xea\x7e\x65\x18\x3b\xe2\x54\xb4\x92\xb5\x5b\x8e\x0b\x64\xd2\xca\xff\x6e\x9d\xe5\xc7\xde\x27\x3b\x4b\xbb\x79\x1c\xc5\xfb\xbe\xdb\x91\xaf\x8f\xaf\xf3\xd6\xe8\xd1\x4e\xfc\x3e\xc6\xb6\x77\xa4\xcd\x7b\x4e\x58\x6f\x73\xa7\x1a\x17\x3a\xe2\xf6\x0b\x72\x04\xe5\x31\xda\x85\xfe\xa9\x19\x0b\xdf\x1e\xb7\xd5\x4e\xab\x40\xce\x83\xcd\x0c\xb6\x0a\x26\x3e\x65\xc5\x58\x55\x6b\x7b\xa9\x43\x83\xfd\x93\x78\xc9\xb5\xdf\x6d\xa6\xa2\x6e\x80\xa2\xea\xa3\x42\x81\x15\x73\x14\xc1\x63\x26\xd8\x89\x92\xdc\x5a\x67\x28\x74\x86\x96\x35\x6f\x84\x0b\xec\xe3\x0b\xce\x51\xd4\x83\xa9\xb3\xc7\xfd\xea\x48\xe0\x1b\x14\xa5\xc1\xef\x05\x47\x23\xa7\xa0\xdf\x34\xe7\xa2\xef\x7d\x28\x70\xe2\x0c\xbe\x6a\xce\x5f\x71\xfe\xb0\xc3\x67\x27\x20\x70\x1e\xff\x52\xc0\x58\x8b\xbf\x4e\x45\x88\x1f\xb8\xe2\xa1\x3f\x12\xcf\x0f\x77\x83\x9d\x63\x24\xc0\x66\x62\xfa\x32\xed\xb4\xb8\x7f\x67\x3c\x94\xe4\xe2\xe7\x3a\x36\x96\xbb\x33\xa5\x2d\x5b\x8b\xfb\x4b\x44\x69\x57\x79\x4d\xf3\xc9\xfc\x11\x02\x42\xa6\xd1\x36\xfe\xc1\x70\xf5\xff\x00\x00\x00\xff\xff\x73\xdf\xd0\x5e\xc0\x10\x00\x00") +var _manifests02DeploymentYaml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\x58\x6d\x6f\xe3\xb8\x11\xfe\x9e\x5f\x41\xa8\x05\xae\x05\x8e\x96\xb3\xd7\xdd\xde\x09\xd8\x0f\xae\xe3\xbd\x0d\x10\x67\x0d\x3b\xb8\xa0\x28\x0a\x63\x4c\x8d\x2d\xd6\x14\xa9\x1b\x0e\xbd\x31\x8a\xfe\xf7\x82\xb2\xfc\x22\xd9\xf1\x06\x28\x16\xa8\x3e\xc5\xe2\xbc\x3e\x33\x7c\x66\x14\xa8\xf4\x6f\x48\x5e\x3b\x9b\x09\xa8\x2a\x9f\x6e\x6e\x6f\xd6\xda\xe6\x99\xb8\xc3\xca\xb8\x6d\x89\x96\x6f\x4a\x64\xc8\x81\x21\xbb\x11\xc2\x42\x89\x99\xd0\x76\x45\xe8\xbd\x74\x15\x12\xb0\xa3\xe6\xc0\x57\xa0\x30\x13\xae\x42\xeb\x0b\xbd\x64\x79\x41\x0e\xac\x75\x0c\xac\x9d\xf5\xd1\x9e\x10\xca\xd9\xa5\x5e\xf5\x0e\x4a\x3d\xed\x52\x6d\xff\x85\x8a\x65\x45\xee\x65\x7b\xd1\x9b\x10\xda\x2a\x13\x72\xec\x11\x1a\x04\x8f\x6d\x7d\x8f\x66\x29\x4b\xb0\xb0\xc2\x5c\x16\x7a\x55\x48\xd8\x80\x36\xb0\xd0\x46\xf3\x36\x13\x09\x53\xc0\xe4\x0d\x76\xb4\x5d\x19\x94\xd6\xe5\x28\x73\xdc\xa0\x89\x21\x1c\xd4\x7d\x85\x2a\xe6\x40\x58\x19\xad\xc0\x67\xe2\xf6\x46\x08\xcf\x04\x8c\xab\xed\x2e\x3b\xde\x56\x98\x89\x29\x2a\x42\x60\x8c\xc7\x68\x50\xb1\xa3\xdd\x71\x09\xac\x8a\x07\x58\xa0\x69\xd0\xb8\x82\x30\x63\x59\x19\x60\x6c\x34\x4f\x8a\x12\x9f\x33\x5c\x6b\xef\x40\x2b\xe4\xde\x57\x47\x6b\xe3\x20\x6f\x27\xb7\xc3\x27\x56\x38\x13\x3f\xfc\x3b\xc1\xe5\x12\x15\x27\x99\x48\x26\x84\x4b\x24\xc2\xfc\x2e\x90\xb6\xab\x99\x2a\x30\x0f\x46\xdb\x55\xf2\x9f\x1f\x1a\xd3\xa6\x15\xf2\x95\xa0\x85\xd8\xa3\x54\xff\x8d\x2a\x90\xe6\xed\xd0\x59\xc6\x17\x3e\xea\x53\xb0\x03\xff\xe8\xec\xd4\x39\xce\x44\x84\xf7\x70\xe4\x51\x29\x57\x56\x13\x72\x4b\x6d\xf0\xa8\x72\xc0\x36\x58\xd6\x25\xde\xe1\x12\x82\xe1\x3d\x88\x2e\xc7\x59\x0b\xe9\xf8\xac\xc3\x02\xc9\x22\xa3\x8f\xf9\x3b\x9f\x09\xa3\x6d\x78\x39\x66\x11\xeb\x4c\xce\x60\xaf\x2d\x59\x82\xe7\xba\xee\x49\x23\xca\xce\xc4\x0c\x4f\xc1\x96\x62\x8d\xb1\xb3\xae\xdb\x48\x0e\xbe\xf6\x18\x65\x22\x19\xbd\x68\xcf\xfe\x78\xb4\xab\x44\x26\x92\x47\xd7\x60\x8f\xc9\x05\x2f\x1d\x07\xc1\x12\x82\x2a\x60\x71\x94\x7e\xab\x97\xd1\x0b\xaa\xc0\x27\x6a\xc7\xfc\x66\xa8\x9c\xcd\x63\x6b\xbf\xeb\x7f\x3b\x06\xeb\x58\x12\x42\xbe\xfd\xbe\x11\x78\xa4\x8d\x56\x38\x50\xca\x05\xcb\x8f\xaf\xf7\x9e\x10\x15\x69\x57\x77\x9c\x01\xef\x77\x92\x7e\xeb\x19\x4b\xa9\x4c\x88\x15\x91\x8a\x34\x6b\x05\xa6\x51\x50\xce\x32\x68\x8b\x74\xd2\xdd\xf2\x5a\x7f\x7f\xa3\xb3\xe3\x03\xc6\xb8\xaf\x13\xd2\x1b\x6d\x70\x85\x23\xaf\xc0\xd4\xa9\x65\x62\x09\xc6\x63\x4b\x56\x41\xb5\xa3\x29\x8d\xbe\x6d\x45\x88\x9c\x5c\x95\x89\x7f\x24\x83\x87\x87\xe4\x9f\xa7\x17\x01\xa9\xd4\xb6\x36\x39\x46\xef\x61\x85\x13\x67\xb4\xda\x66\xe2\x13\x18\xb3\x00\xb5\x7e\x72\x0f\x6e\xe5\xbf\xd8\x11\x51\x2b\x6c\x5d\x46\xe1\x60\xcc\x5e\xe1\x7e\xf9\xe8\x78\x42\xe8\x23\xf1\x77\xe4\x4e\x98\x3d\x75\xa4\x57\xda\x1e\x40\xec\x22\x93\x45\xa2\xf2\xa7\x16\x94\x2b\x4b\xb0\xf9\x69\x4a\xf2\x1a\xa0\x52\x78\x06\xe2\xd6\x1b\x29\x0f\x53\xa6\xf5\x3e\xf9\xe3\x9f\x9e\x07\x4f\xc3\xcf\xf3\xc7\xc1\x78\x34\x9b\x0c\x86\xa3\x3f\x27\x1d\xc5\x3a\x81\xae\xd2\xfd\x78\xf0\xeb\xb9\xa8\x02\x0b\xb4\xbd\xac\x31\x1c\x3c\x0e\xa6\x7f\x9f\x5f\x56\x6c\x46\x88\xdc\xec\x26\x6a\x57\x77\x3a\x7a\x18\x0d\x66\xa3\xf9\x6f\xa3\xe9\xec\xfe\xcb\x63\x4b\x1d\xed\xa6\x5d\xec\x7d\xcb\x75\x94\x3a\x0d\xb1\x01\x13\x30\x13\x49\xbf\xd7\xef\xdd\x4a\x6f\xa1\xf2\x85\xe3\xe4\xa2\xa5\x0e\x42\x97\x2c\x7d\x22\x57\x76\x7b\x4e\x88\xa5\x46\x93\x4f\x71\x79\x7e\xd2\x9c\x4d\x80\x8b\xec\x30\x90\x7a\x97\x6a\x74\x0c\xa3\x86\xee\x72\x1a\xbf\x07\xd8\x46\x16\xa1\x25\x61\x8e\xcc\x98\x1e\x37\x09\x72\x21\x52\x70\x01\xf5\x4e\xd0\xfc\x94\xaa\xbf\x7c\xaf\x7e\xc6\xe5\x45\x4f\xa7\xb5\xba\xec\xf0\x7f\x68\x67\x42\xef\x02\xa9\xee\x1d\x25\xfc\x3d\xa0\xe7\xb3\x9b\xab\xaa\x90\x89\xdb\x7e\xd
9\x79\x5d\x62\xe9\x68\x9b\x89\xf7\x1f\xc6\xfa\xe4\x68\xe3\x4c\x28\x71\x1c\xc9\xcd\xb7\x2f\xcc\x2e\x33\xa6\x18\x66\x2e\x15\xb4\xec\x95\x51\x61\x57\x8c\x14\x59\xa5\xd5\x5a\xa7\x0a\x64\x2d\x9d\xe2\x0b\x13\x28\xc6\x3c\xad\xb0\xec\xc4\x0c\xf9\x17\x6b\xb6\x9d\xb9\x7b\x74\xb7\x70\xc1\xe6\xd2\x83\x64\xb7\x46\xfb\xaa\xcb\x0d\x50\x4a\xc1\xa6\x3e\x2e\x3b\xec\x8f\xc5\x4b\x1b\xbe\x86\x1d\x5f\xbf\xc5\xf9\xde\x75\x1c\x2f\x92\x16\xa0\x76\xbb\xe0\xff\x0b\xe1\x36\x74\xb8\x6f\xd8\xb3\x3e\xea\x84\x7d\xde\x3e\x40\xab\x4e\x65\xa5\x34\x6e\xc5\xce\x73\x8e\x44\x9d\x93\x3a\x57\x94\x46\x7b\x46\x2b\x21\xcf\x63\x73\x7e\xcc\x7e\xf9\xe9\x97\x9f\x3a\x92\x6c\xbc\x54\xba\x2a\x90\xa4\x0f\x9a\xd1\x7f\x7c\x7a\x98\xcd\x47\xc3\xbb\xcf\xa3\xf9\x74\x36\x98\x3f\xdf\x3f\x7d\x9e\x0f\x46\xb3\xf9\xed\xbb\x9f\xe7\xbf\x0e\xc7\xf3\xd9\xe7\xc1\xbb\xf7\x1f\x7e\x3c\x4a\x8d\x86\x77\xdf\x90\x3b\xb3\x33\xfc\xdb\xf0\x4d\x76\x2e\xca\x5d\xb1\xd6\xc9\x2d\x54\x9e\x09\xa1\xfc\x58\x30\x57\x59\x9a\xde\xbe\xfb\x6b\xaf\x26\xbe\xec\x43\xbf\xdf\xef\xa7\x97\xa0\x40\x62\x19\xd7\xc6\x8f\xf5\x85\x60\xe3\xd3\x8a\xf4\x06\x18\xe3\xdf\x3d\x75\x36\x5f\xa2\x52\x23\x21\xd7\xb8\xbd\xa2\xbb\xc6\xd3\x6e\xac\x1c\x75\xaf\xea\x61\x91\x98\x38\xe2\x4c\x74\x8a\xb5\x5f\x99\x4b\x64\xd2\xca\x7f\x37\x66\xf9\x4b\xff\x8d\xcc\xd2\x25\x8f\x93\x7c\x5f\x0f\x3b\xe2\xf5\xed\xeb\xbc\x73\x7a\xb2\x29\xbf\x6e\x63\xc7\x1d\x59\xfb\x9e\x13\x36\x3b\xde\xb9\xc6\x15\x46\xdc\x7d\x57\x8e\xa1\x3a\xb5\x76\x85\x3f\x35\x63\xe9\xbb\x43\xb8\xde\x74\x15\xc8\x45\xb0\xb9\xc1\x4e\xc3\xc4\xa7\xaa\x11\xab\x7b\xed\x20\x75\x24\xd8\x3f\x88\xa7\x42\xfb\xfd\xbe\x2a\x1a\x02\x14\x35\x8f\x0a\x05\x56\x2c\x50\x04\x8f\xb9\x60\x27\x2a\x72\x1b\x9d\xa3\xd0\x39\x5a\xd6\xbc\x15\x2e\xb0\x8f\x2f\xb8\x40\xd1\x0c\xa6\xde\xc1\xee\x27\x47\x02\x5f\xa0\xac\x0c\xfe\x28\x38\x3a\x39\x37\xfa\x55\x73\x21\x06\xde\x87\x12\xa7\xce\xe0\xb3\xe6\xe2\x19\x17\xf7\x7b\xfb\xec\x04\x04\x2e\xe2\x2f\x05\x8c\x8d\xf8\xf3\x4c\x84\xf8\xd9\x2b\xee\x07\x63\xf1\xe5\xfe\x6e\xb8\x0f\x8c\x04\xd8\x5c\xcc\x9e\x66\xbd\x0e\xf6\xaf\x8c\x87\x8a\x5c\xfc\x88\xc7\xd6\xca\x77\xa1\xb5\x65\x67\x9d\x7f\x8a\x56\xba\x5d\xde\xc0\x7c\x36\x7f\x84\x80\x90\x6b\xb4\xad\x7f\x3b\xdc\xfc\x37\x00\x00\xff\xff\x9b\x75\x0a\x08\xd6\x10\x00\x00") func manifests02DeploymentYamlBytes() ([]byte, error) { return bindataRead( @@ -821,8 +821,8 @@ func manifests02DeploymentYaml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "manifests/02-deployment.yaml", size: 4288, mode: os.FileMode(420), modTime: time.Unix(1, 0)} - a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe2, 0xdc, 0xf4, 0x85, 0x69, 0xac, 0xa8, 0x77, 0x8e, 0x57, 0xdc, 0xeb, 0xaf, 0x9a, 0x38, 0xee, 0xf2, 0x2, 0x89, 0x8, 0x16, 0x97, 0x9, 0xb1, 0xda, 0x1e, 0xe8, 0x3a, 0x13, 0x8, 0xfe, 0x41}} + info := bindataFileInfo{name: "manifests/02-deployment.yaml", size: 4310, mode: os.FileMode(420), modTime: time.Unix(1, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x41, 0x94, 0x51, 0x4e, 0xa, 0x63, 0xa9, 0xdd, 0x57, 0x2f, 0x8c, 0x5b, 0x16, 0xd0, 0x8f, 0xd6, 0xbf, 0xc5, 0x78, 0xe8, 0x4c, 0xef, 0x24, 0x3a, 0xd, 0x7c, 0x26, 0xe7, 0x20, 0xba, 0xb3, 0x34}} return a, nil } diff --git a/pkg/operator/controller/crl/crl_configmap.go b/pkg/operator/controller/crl/crl_configmap.go index 9c74166269..77f6aec264 100644 --- a/pkg/operator/controller/crl/crl_configmap.go +++ b/pkg/operator/controller/crl/crl_configmap.go @@ -1,19 +1,8 @@ package crl import ( - "bytes" "context" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "encoding/hex" - "encoding/pem" "fmt" - "io/ioutil" - "net/http" - "reflect" - 
"strings" - "time" operatorv1 "github.com/openshift/api/operator/v1" "github.com/openshift/cluster-ingress-operator/pkg/operator/controller" @@ -21,18 +10,8 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - kerrors "k8s.io/apimachinery/pkg/util/errors" ) -// authorityKeyIdentifier is a certificate's authority key identifier. -type authorityKeyIdentifier struct { - KeyIdentifier []byte `asn1:"optional,tag:0"` -} - -// authorityKeyIdentifierOID is the ASN.1 object identifier for the authority -// key identifier extension. -var authorityKeyIdentifierOID = asn1.ObjectIdentifier{2, 5, 29, 35} - // ensureCRLConfigmap ensures the client CA certificate revocation list // configmap exists for a given ingresscontroller if the ingresscontroller // specifies a client CA certificate bundle in which any certificates specify @@ -44,32 +23,9 @@ func (r *reconciler) ensureCRLConfigmap(ctx context.Context, ic *operatorv1.Ingr return false, nil, ctx, err } - var oldCRLs map[string]*pkix.CertificateList - if haveCM { - if data, ok := current.Data["crl.pem"]; ok { - if crls, err := buildCRLMap([]byte(data)); err != nil { - log.Error(err, "failed to parse current client CA configmap", "namespace", current.Namespace, "name", current.Name) - } else { - oldCRLs = crls - } - } - - } - - var clientCAData []byte - if haveClientCA { - clientCABundleFilename := "ca-bundle.pem" - if data, ok := clientCAConfigmap.Data[clientCABundleFilename]; !ok { - return haveCM, current, ctx, fmt.Errorf("client CA configmap %s/%s is missing %q", clientCAConfigmap.Namespace, clientCAConfigmap.Name, clientCABundleFilename) - } else { - clientCAData = []byte(data) - } - } - - wantCM, desired, ctx, err := desiredCRLConfigMap(ctx, ic, ownerRef, clientCAData, oldCRLs) - if err != nil { - return false, nil, ctx, fmt.Errorf("failed to build configmap: %w", err) - } + // The CRL management code has been moved into the router, so the CRL configmap is no longer necessary. + // TODO: Remove this whole controller after 4.13 + wantCM := false switch { case !wantCM && !haveCM: @@ -83,191 +39,8 @@ func (r *reconciler) ensureCRLConfigmap(ctx context.Context, ic *operatorv1.Ingr log.Info("deleted configmap", "namespace", current.Namespace, "name", current.Name) } return false, nil, ctx, nil - case wantCM && !haveCM: - if err := r.client.Create(ctx, desired); err != nil { - return false, nil, ctx, fmt.Errorf("failed to create configmap: %w", err) - } - log.Info("created configmap", "namespace", desired.Namespace, "name", desired.Name) - exists, current, err := r.currentCRLConfigMap(ctx, ic) - return exists, current, ctx, err - case wantCM && haveCM: - if updated, err := r.updateCRLConfigMap(ctx, current, desired); err != nil { - return true, current, ctx, fmt.Errorf("failed to update configmap: %w", err) - } else if updated { - log.Info("updated configmap", "namespace", desired.Namespace, "name", desired.Name) - exists, current, err := r.currentCRLConfigMap(ctx, ic) - return exists, current, ctx, err - } - } - - return true, current, ctx, nil -} - -// buildCRLMap builds a map of key identifier to certificate list using the -// provided PEM-encoded certificate revocation list. 
-func buildCRLMap(crlData []byte) (map[string]*pkix.CertificateList, error) { - crlForKeyId := make(map[string]*pkix.CertificateList) - for len(crlData) > 0 { - block, data := pem.Decode(crlData) - if block == nil { - break - } - crl, err := x509.ParseCRL(block.Bytes) - if err != nil { - return crlForKeyId, err - } - for _, ext := range crl.TBSCertList.Extensions { - if ext.Id.Equal(authorityKeyIdentifierOID) { - var authKeyId authorityKeyIdentifier - if _, err := asn1.Unmarshal(ext.Value, &authKeyId); err != nil { - return crlForKeyId, err - } - subjectKeyId := hex.EncodeToString(authKeyId.KeyIdentifier) - crlForKeyId[subjectKeyId] = crl - } - } - crlData = data - } - return crlForKeyId, nil -} - -// desiredCRLConfigMap returns the desired CRL configmap. Returns a Boolean -// indicating whether a configmap is desired, the configmap if one is desired, -// the context (containing the next CRL update time as "nextCRLUpdate"), and an -// error if one occurred -func desiredCRLConfigMap(ctx context.Context, ic *operatorv1.IngressController, ownerRef metav1.OwnerReference, clientCAData []byte, crls map[string]*pkix.CertificateList) (bool, *corev1.ConfigMap, context.Context, error) { - if len(ic.Spec.ClientTLS.ClientCertificatePolicy) == 0 || len(ic.Spec.ClientTLS.ClientCA.Name) == 0 { - return false, nil, ctx, nil - } - - if crls == nil { - crls = make(map[string]*pkix.CertificateList) - } - - var subjectKeyIds []string - var nextCRLUpdate time.Time - now := time.Now() - for len(clientCAData) > 0 { - block, data := pem.Decode(clientCAData) - if block == nil { - break - } - clientCAData = data - cert, err := x509.ParseCertificate(block.Bytes) - if err != nil { - return false, nil, ctx, fmt.Errorf("client CA configmap has an invalid certificate: %w", err) - } - subjectKeyId := hex.EncodeToString(cert.SubjectKeyId) - if len(cert.CRLDistributionPoints) == 0 { - continue - } - if crl, ok := crls[subjectKeyId]; ok { - if crl.HasExpired(now) { - log.Info("certificate revocation list has expired", "subject key identifier", subjectKeyId) - } else { - subjectKeyIds = append(subjectKeyIds, subjectKeyId) - if (nextCRLUpdate.IsZero() || crl.TBSCertList.NextUpdate.Before(nextCRLUpdate)) && crl.TBSCertList.NextUpdate.After(now) { - nextCRLUpdate = crl.TBSCertList.NextUpdate - } - continue - } - } - log.Info("retrieving certificate revocation list", "subject key identifier", subjectKeyId) - if crl, err := getCRL(cert.CRLDistributionPoints); err != nil { - // Creating or updating the configmap with incomplete - // data would compromise security by potentially - // permitting revoked certificates. 
- return false, nil, ctx, fmt.Errorf("failed to get certificate revocation list for certificate key %s: %w", subjectKeyId, err) - } else { - crls[subjectKeyId] = crl - subjectKeyIds = append(subjectKeyIds, subjectKeyId) - log.Info("new certificate revocation list", "subject key identifier", subjectKeyId, "next update", crl.TBSCertList.NextUpdate.String()) - if (nextCRLUpdate.IsZero() || crl.TBSCertList.NextUpdate.Before(nextCRLUpdate)) && crl.TBSCertList.NextUpdate.After(now) { - nextCRLUpdate = crl.TBSCertList.NextUpdate - } - } - } - - if len(subjectKeyIds) == 0 { - return false, nil, ctx, nil - } - - buf := &bytes.Buffer{} - for _, subjectKeyId := range subjectKeyIds { - asn1Data, err := asn1.Marshal(*crls[subjectKeyId]) - if err != nil { - return false, nil, ctx, fmt.Errorf("failed to encode ASN.1 for CRL for certificate key %s: %w", subjectKeyId, err) - } - block := &pem.Block{ - Type: "X509 CRL", - Bytes: asn1Data, - } - if err := pem.Encode(buf, block); err != nil { - return false, nil, ctx, fmt.Errorf("failed to encode PEM for CRL for certificate key %s: %w", subjectKeyId, err) - } - } - crlData := buf.String() - - crlConfigmapName := controller.CRLConfigMapName(ic) - crlConfigmap := corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: crlConfigmapName.Name, - Namespace: crlConfigmapName.Namespace, - }, - Data: map[string]string{ - "crl.pem": crlData, - }, - } - crlConfigmap.SetOwnerReferences([]metav1.OwnerReference{ownerRef}) - - return true, &crlConfigmap, context.WithValue(ctx, "nextCRLUpdate", nextCRLUpdate), nil -} - -// getCRL gets a certificate revocation list using the provided distribution -// points and returns the certificate list. -func getCRL(distributionPoints []string) (*pkix.CertificateList, error) { - var errs []error - for _, distributionPoint := range distributionPoints { - // The distribution point is typically a URL with the "http" - // scheme. "https" is generally not used because the - // certificate list is signed, and because using TLS to get the - // certificate list could introduce a circular dependency - // (cannot use TLS without the revocation list, and cannot get - // the revocation list without using TLS). - // - // TODO Support ldap. - switch { - case strings.HasPrefix(distributionPoint, "http:"): - log.Info("retrieving CRL distribution point", "distribution point", distributionPoint) - crl, err := getHTTPCRL(distributionPoint) - if err != nil { - errs = append(errs, fmt.Errorf("error getting %q: %w", distributionPoint, err)) - continue - } - return crl, nil - default: - errs = append(errs, fmt.Errorf("unsupported distribution point type: %s", distributionPoint)) - } - } - return nil, kerrors.NewAggregate(errs) -} - -// getHTTPCRL gets a certificate revocation list using the provided HTTP URL. -func getHTTPCRL(url string) (*pkix.CertificateList, error) { - resp, err := http.Get(url) - if err != nil { - return nil, fmt.Errorf("http.Get failed: %w", err) - } - defer resp.Body.Close() - bytes, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("error reading response: %w", err) - } - crl, err := x509.ParseCRL(bytes) - if err != nil { - return nil, fmt.Errorf("error parsing response: %w", err) } - return crl, nil + return false, nil, ctx, nil } // currentCRLConfigMap returns the current CRL configmap. Returns a Boolean @@ -283,30 +56,3 @@ func (r *reconciler) currentCRLConfigMap(ctx context.Context, ic *operatorv1.Ing } return true, cm, nil } - -// updateCRLConfigMap updates a configmap. 
Returns a Boolean indicating whether -// the configmap was updated, and an error value. -func (r *reconciler) updateCRLConfigMap(ctx context.Context, current, desired *corev1.ConfigMap) (bool, error) { - if crlConfigmapsEqual(current, desired) { - return false, nil - } - updated := current.DeepCopy() - updated.Data = desired.Data - if err := r.client.Update(ctx, updated); err != nil { - if errors.IsAlreadyExists(err) { - return false, nil - } - return false, err - } - return true, nil -} - -// crlConfigmapsEqual compares two CRL configmaps. Returns true if the -// configmaps should be considered equal for the purpose of determining whether -// an update is necessary, false otherwise -func crlConfigmapsEqual(a, b *corev1.ConfigMap) bool { - if !reflect.DeepEqual(a.Data, b.Data) { - return false - } - return true -} diff --git a/pkg/operator/controller/ingress/deployment.go b/pkg/operator/controller/ingress/deployment.go index a7b7e22840..e3bb3a42f1 100644 --- a/pkg/operator/controller/ingress/deployment.go +++ b/pkg/operator/controller/ingress/deployment.go @@ -2,9 +2,7 @@ package ingress import ( "context" - "crypto/x509" "encoding/json" - "encoding/pem" "fmt" "hash" "hash/fnv" @@ -19,6 +17,7 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" + "github.com/operator-framework/operator-lib/proxy" operatorv1 "github.com/openshift/api/operator/v1" @@ -87,7 +86,6 @@ const ( RouterClientAuthPolicy = "ROUTER_MUTUAL_TLS_AUTH" RouterClientAuthCA = "ROUTER_MUTUAL_TLS_AUTH_CA" - RouterClientAuthCRL = "ROUTER_MUTUAL_TLS_AUTH_CRL" RouterClientAuthFilter = "ROUTER_MUTUAL_TLS_AUTH_FILTER" RouterEnableCompression = "ROUTER_ENABLE_COMPRESSION" @@ -1024,68 +1022,6 @@ func desiredRouterDeployment(ci *operatorv1.IngressController, ingressController clientAuthCAPath := filepath.Join(clientCAVolumeMount.MountPath, clientCABundleFilename) env = append(env, corev1.EnvVar{Name: RouterClientAuthCA, Value: clientAuthCAPath}) - if haveClientCAConfigmap { - // If any certificates in the client CA bundle - // specify any CRL distribution points, then we - // need to configure a configmap volume. The - // crl controller is responsible for managing - // the configmap. 
- var clientCAData []byte - if v, ok := clientCAConfigmap.Data[clientCABundleFilename]; !ok { - return nil, fmt.Errorf("client CA configmap %s/%s is missing %q", clientCAConfigmap.Namespace, clientCAConfigmap.Name, clientCABundleFilename) - } else { - clientCAData = []byte(v) - } - var someClientCAHasCRL bool - for len(clientCAData) > 0 { - block, data := pem.Decode(clientCAData) - if block == nil { - break - } - clientCAData = data - cert, err := x509.ParseCertificate(block.Bytes) - if err != nil { - return nil, fmt.Errorf("client CA configmap %s/%s has an invalid certificate: %w", clientCAConfigmap.Namespace, clientCAConfigmap.Name, err) - } - if len(cert.CRLDistributionPoints) != 0 { - someClientCAHasCRL = true - break - } - } - if someClientCAHasCRL { - clientCACRLSecretName := controller.CRLConfigMapName(ci) - clientCACRLVolumeName := "client-ca-crl" - clientCACRLVolumeMountPath := "/etc/pki/tls/client-ca-crl" - clientCACRLFilename := "crl.pem" - clientCACRLVolume := corev1.Volume{ - Name: clientCACRLVolumeName, - VolumeSource: corev1.VolumeSource{ - ConfigMap: &corev1.ConfigMapVolumeSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: clientCACRLSecretName.Name, - }, - Items: []corev1.KeyToPath{ - { - Key: clientCACRLFilename, - Path: clientCACRLFilename, - }, - }, - }, - }, - } - clientCACRLVolumeMount := corev1.VolumeMount{ - Name: clientCACRLVolumeName, - MountPath: clientCACRLVolumeMountPath, - ReadOnly: true, - } - volumes = append(volumes, clientCACRLVolume) - routerVolumeMounts = append(routerVolumeMounts, clientCACRLVolumeMount) - - clientAuthCRLPath := filepath.Join(clientCACRLVolumeMount.MountPath, clientCACRLFilename) - env = append(env, corev1.EnvVar{Name: RouterClientAuthCRL, Value: clientAuthCRLPath}) - } - } - if len(ci.Spec.ClientTLS.AllowedSubjectPatterns) != 0 { pattern := "(?:" + strings.Join(ci.Spec.ClientTLS.AllowedSubjectPatterns, "|") + ")" env = append(env, corev1.EnvVar{Name: RouterClientAuthFilter, Value: pattern}) @@ -1105,6 +1041,11 @@ func desiredRouterDeployment(ci *operatorv1.IngressController, ingressController env = append(env, corev1.EnvVar{Name: RouterCompressionMIMETypes, Value: strings.Join(mimes, " ")}) } + proxyVars := proxy.ReadProxyVarsFromEnv() + if len(proxyVars) != 0 { + env = append(env, proxyVars...) + } + // Add the environment variables to the container deployment.Spec.Template.Spec.Containers[0].Env = append(deployment.Spec.Template.Spec.Containers[0].Env, env...) 
diff --git a/test/e2e/all_test.go b/test/e2e/all_test.go index 2f70de3d7e..4fa1e87bf8 100644 --- a/test/e2e/all_test.go +++ b/test/e2e/all_test.go @@ -22,6 +22,7 @@ func TestAll(t *testing.T) { t.Run("parallel", func(t *testing.T) { t.Run("TestAWSELBConnectionIdleTimeout", TestAWSELBConnectionIdleTimeout) t.Run("TestClientTLS", TestClientTLS) + t.Run("TestMTLSWithCRLs", TestMTLSWithCRLs) t.Run("TestContainerLogging", TestContainerLogging) t.Run("TestCustomErrorpages", TestCustomErrorpages) t.Run("TestCustomIngressClass", TestCustomIngressClass) diff --git a/test/e2e/client_tls_test.go b/test/e2e/client_tls_test.go index 12766fa586..f000a01539 100644 --- a/test/e2e/client_tls_test.go +++ b/test/e2e/client_tls_test.go @@ -13,6 +13,8 @@ import ( "encoding/pem" "fmt" "math/big" + "path/filepath" + "strconv" "strings" "testing" "time" @@ -20,22 +22,19 @@ import ( configv1 "github.com/openshift/api/config/v1" operatorv1 "github.com/openshift/api/operator/v1" routev1 "github.com/openshift/api/route/v1" + "sigs.k8s.io/controller-runtime/pkg/client" "github.com/openshift/cluster-ingress-operator/pkg/operator/controller" - "sigs.k8s.io/controller-runtime/pkg/client/config" - appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + "k8s.io/apiserver/pkg/storage/names" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/wait" - - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/tools/remotecommand" ) // TestClientTLS generates a CA certificate, uses it to sign some client @@ -164,11 +163,11 @@ func TestClientTLS(t *testing.T) { Namespace: "openshift-ingress", }, Data: map[string]string{ - "valid-matching.crt": encodeCert(validMatchingCert), + "valid-matching.pem": encodeCert(validMatchingCert), "valid-matching.key": encodeKey(validMatchingKey), - "valid-mismatching.crt": encodeCert(validMismatchingCert), + "valid-mismatching.pem": encodeCert(validMismatchingCert), "valid-mismatching.key": encodeKey(validMismatchingKey), - "invalid-matching.crt": encodeCert(invalidMatchingCert), + "invalid-matching.pem": encodeCert(invalidMatchingCert), "invalid-matching.key": encodeKey(invalidMatchingKey), }, } @@ -237,73 +236,6 @@ func TestClientTLS(t *testing.T) { t.Fatalf("timed out waiting for pod %q to become ready: %v", clientPodName, err) } - // We need a client-go client in order to execute commands in the client - // pod. - kubeConfig, err := config.GetConfig() - if err != nil { - t.Fatalf("failed to get kube config: %v", err) - } - cl, err := kubernetes.NewForConfig(kubeConfig) - if err != nil { - t.Fatalf("failed to create kube client: %v", err) - } - - // curl execs a Curl command in the test client pod and returns an error - // value. The Curl command uses the specified certificate from the - // client certificates configmap, sends a request for the canary route - // via the router's internal service, and returns an error if the Curl - // command fails or the HTTP response status code indicates an error. - curl := func(cert string) error { - req := cl.CoreV1().RESTClient().Post().Resource("pods"). - Namespace(clientPod.Namespace).Name(clientPod.Name). - SubResource("exec"). 
- Param("container", clientPod.Spec.Containers[0].Name) - cmd := []string{ - "/bin/curl", "-k", "-v", - "-w", "%{http_code}", - "--retry", "10", "--retry-delay", "1", - } - if len(cert) != 0 { - cmd = append(cmd, - "--cert", fmt.Sprintf("/tmp/tls/%s.crt", cert), - "--key", fmt.Sprintf("/tmp/tls/%s.key", cert), - ) - } - cmd = append(cmd, "--resolve", - fmt.Sprintf("%s:443:%s", route.Spec.Host, - service.Spec.ClusterIP), - fmt.Sprintf("https://%s", route.Spec.Host), - ) - req.VersionedParams(&corev1.PodExecOptions{ - Container: "curl", - Command: cmd, - Stdout: true, - Stderr: true, - }, scheme.ParameterCodec) - exec, err := remotecommand.NewSPDYExecutor(kubeConfig, "POST", req.URL()) - if err != nil { - return err - } - var stdout, stderr bytes.Buffer - err = exec.Stream(remotecommand.StreamOptions{ - Stdout: &stdout, - Stderr: &stderr, - }) - stdoutStr := stdout.String() - t.Logf("command: %s\nstdout:\n%s\n\nstderr:\n%s\n", - strings.Join(cmd, " "), stdoutStr, stderr.String()) - if err != nil { - return err - } - httpStatusCode := stdoutStr[len(stdoutStr)-3:] - switch string(httpStatusCode[0]) { - case "0", "4", "5": - return fmt.Errorf("got HTTP %s status code", httpStatusCode) - default: - return nil - } - } - optionalPolicyTestCases := []struct { description string cert string @@ -328,7 +260,7 @@ func TestClientTLS(t *testing.T) { expectAllowed: false, }} for _, tc := range optionalPolicyTestCases { - err := curl(tc.cert) + _, err := curlGetStatusCode(t, clientPod, tc.cert, route.Spec.Host, service.Spec.ClusterIP, true) if err == nil && !tc.expectAllowed { t.Errorf("%q: expected error, got success", tc.description) } @@ -374,7 +306,7 @@ func TestClientTLS(t *testing.T) { expectAllowed: false, }} for _, tc := range requiredPolicyTestCases { - err := curl(tc.cert) + _, err := curlGetStatusCode(t, clientPod, tc.cert, route.Spec.Host, service.Spec.ClusterIP, true) if err == nil && !tc.expectAllowed { t.Errorf("%q: expected error, got success", tc.description) } @@ -384,6 +316,712 @@ func TestClientTLS(t *testing.T) { } } +// TestMTLSWithCRLsCerts includes all the certificates needed for a particular test case of TestMTLSWithCRLs. +type TestMTLSWithCRLsCerts struct { + // CABundle is the complete PEM-encoded list of certificates that will be used both by HAProxy for client + // validation, and by openshift-router for CRL distribution points. + CABundle []string + // CRLs is a map of the PEM-encoded CRLs, indexed by the filename that will be used in the CRL host pod's configmap. + CRLs map[string]string + // ClientCerts contains maps of the client certificates used to verify that mTLS is working as intended. + ClientCerts struct { + // Accepted is a map containing the client keys and certificates that should be able to connect to backends + // successfully, indexed by a unique name. + Accepted map[string]KeyCert + // Rejected is a map containing the client keys and certificates that should NOT be able to connect to backends, + // indexed by a unique name. + Rejected map[string]KeyCert + } +} + +// TestMTLSWithCRLs verifies that mTLS works when the client auth chain includes certificate revocation lists (CRLs). 
+func TestMTLSWithCRLs(t *testing.T) { + t.Parallel() + namespaceName := names.SimpleNameGenerator.GenerateName("mtls-with-crls") + crlHostName := types.NamespacedName{ + Name: "crl-host", + Namespace: namespaceName, + } + // When generating certificates, the CRL distribution points need to be specified by URL + crlHostServiceName := "crl-host-service" + crlHostURL := crlHostServiceName + "." + crlHostName.Namespace + ".svc" + testCases := []struct { + // Name is the name of the test case. + Name string + // CreateCerts generates the certificates for the test case. Certificates and CRLs must not have expired at the + // time of the run, so they must be generated at runtime. + CreateCerts func() TestMTLSWithCRLsCerts + }{ + { + // This test case has CA certificates including a CRL distribution point (CDP) for the CRL that they + // generate and sign. This is the default way to distribute CRLs according to RFC-5280 + // + // CA Bundle: + // - Intermediate CA + // - Includes CRL distribution point for intermediate-ca.crl + // - Signed by Root CA. + // - Root CA + // - Includes CRL distribution point for root-ca.crl + // - Self signed. + // + // Client Certificates: + // - signed-by-root + // - Signed by Root CA. + // - Should successfully connect. + // - signed-by-intermediate + // - Signed by Intermediate CA. + // - Should successfully connect. + // - revoked-by-root + // - Signed by Root CA. + // - Has been revoked. + // - Should be rejected due to revocation. + // - revoked-by-intermediate + // - Signed by Intermediate CA + // - Has been revoked + // - Should be rejected due to revocation. + // - self-signed + // - Self signed + // - Should be rejected because it's not signed by any trusted CA. + Name: "certificate-distributes-its-own-crl", + CreateCerts: func() TestMTLSWithCRLsCerts { + rootCDP := "http://" + crlHostURL + "/root/root.crl" + intermediateCDP := "http://" + crlHostURL + "/intermediate/intermediate.crl" + + rootCA := MustCreateTLSKeyCert("testing root CA", time.Now(), time.Now().Add(24*time.Hour), true, []string{rootCDP}, nil) + intermediateCA := MustCreateTLSKeyCert("testing intermediate CA", time.Now(), time.Now().Add(24*time.Hour), true, []string{intermediateCDP}, &rootCA) + + signedByRoot := MustCreateTLSKeyCert("client signed by root", time.Now(), time.Now().Add(24*time.Hour), false, nil, &rootCA) + signedByIntermediate := MustCreateTLSKeyCert("client signed by intermediate", time.Now(), time.Now().Add(24*time.Hour), false, nil, &intermediateCA) + revokedByRoot := MustCreateTLSKeyCert("client revoked by root", time.Now(), time.Now().Add(24*time.Hour), false, nil, &rootCA) + revokedByIntermediate := MustCreateTLSKeyCert("client revoked by intermediate", time.Now(), time.Now().Add(24*time.Hour), false, nil, &intermediateCA) + selfSigned := MustCreateTLSKeyCert("self signed cert", time.Now(), time.Now().Add(24*time.Hour), false, nil, nil) + + _, rootCRLPem := MustCreateCRL(nil, rootCA, time.Now(), time.Now().Add(1*time.Hour), RevokeCertificates(time.Now(), revokedByRoot)) + _, intermediateCRLPem := MustCreateCRL(nil, intermediateCA, time.Now(), time.Now().Add(1*time.Hour), RevokeCertificates(time.Now(), revokedByIntermediate)) + + return TestMTLSWithCRLsCerts{ + CABundle: []string{ + intermediateCA.CertPem, + rootCA.CertPem, + }, + CRLs: map[string]string{ + "root": rootCRLPem, + "intermediate": intermediateCRLPem, + }, + ClientCerts: struct { + Accepted map[string]KeyCert + Rejected map[string]KeyCert + }{ + Accepted: map[string]KeyCert{ + "signed-by-root": signedByRoot, + 
"signed-by-intermediate": signedByIntermediate, + }, + Rejected: map[string]KeyCert{ + "revoked-by-root": revokedByRoot, + "revoked-by-intermediate": revokedByIntermediate, + "self-signed": selfSigned, + }, + }, + } + }, + }, + { + // This test case has certificates including the CRL distribution point of their signer (i.e. intermediate + // CA is signed by root CA, and includes the URL for root's CRL). In this case, neither of the certificates + // in the CA bundle include the intermediate CRL, so connections that rely on it will be rejected. + // + // CA Bundle: + // - Intermediate CA + // - Includes CRL distribution point for root-ca.crl + // - Signed by Root CA. + // - Root CA + // - No CRL distribution point. + // - Self signed. + // + // Note that intermediate-ca.crl is not present in the CA bundle. + // + // Client Certificates: + // - signed-by-root + // - Includes CRL distribution point for root-ca.crl + // - Signed by Root CA. + // - Should successfully connect. + // - signed-by-intermediate + // - Includes CRL distribution point for intermediate-ca.crl + // - Signed by Intermediate CA. + // - Should be rejected because HAProxy doesn't have intermediate-ca.crl (SSL error "unknown ca"). + // - revoked-by-root + // - Includes CRL distribution point for root-ca.crl + // - Signed by Root CA. + // - Has been revoked. + // - Should be rejected due to revocation. + // - revoked-by-intermediate + // - Includes CRL distribution point for intermediate-ca.crl + // - Signed by Intermediate CA + // - Has been revoked + // - Should be rejected because HAProxy doesn't have intermediate-ca.crl (SSL error "unknown ca"). + // - self-signed + // - Self signed + // - Should be rejected because it's not signed by any trusted CA (SSL error "unknown ca"). + Name: "certificate-distributes-its-signers-crl", + CreateCerts: func() TestMTLSWithCRLsCerts { + rootCDP := "http://" + crlHostURL + "/root/root.crl" + intermediateCDP := "http://" + crlHostURL + "/intermediate/intermediate.crl" + + rootCA := MustCreateTLSKeyCert("testing root CA", time.Now(), time.Now().Add(24*time.Hour), true, nil, nil) + intermediateCA := MustCreateTLSKeyCert("testing intermediate CA", time.Now(), time.Now().Add(24*time.Hour), true, []string{rootCDP}, &rootCA) + + signedByRoot := MustCreateTLSKeyCert("client signed by root", time.Now(), time.Now().Add(24*time.Hour), true, []string{rootCDP}, &rootCA) + signedByIntermediate := MustCreateTLSKeyCert("client signed by intermediate", time.Now(), time.Now().Add(24*time.Hour), true, []string{intermediateCDP}, &intermediateCA) + revokedByRoot := MustCreateTLSKeyCert("client revoked by root", time.Now(), time.Now().Add(24*time.Hour), true, []string{rootCDP}, &rootCA) + revokedByIntermediate := MustCreateTLSKeyCert("client revoked by intermediate", time.Now(), time.Now().Add(24*time.Hour), true, []string{intermediateCDP}, &intermediateCA) + selfSigned := MustCreateTLSKeyCert("self signed cert", time.Now(), time.Now().Add(24*time.Hour), false, nil, nil) + + _, rootCRLPem := MustCreateCRL(nil, rootCA, time.Now(), time.Now().Add(1*time.Hour), RevokeCertificates(time.Now(), revokedByRoot)) + _, intermediateCRLPem := MustCreateCRL(nil, intermediateCA, time.Now(), time.Now().Add(1*time.Hour), RevokeCertificates(time.Now(), revokedByIntermediate)) + + return TestMTLSWithCRLsCerts{ + CABundle: []string{ + intermediateCA.CertPem, + rootCA.CertPem, + }, + CRLs: map[string]string{ + "root": rootCRLPem, + "intermediate": intermediateCRLPem, + }, + ClientCerts: struct { + Accepted map[string]KeyCert + 
Rejected map[string]KeyCert + }{ + Accepted: map[string]KeyCert{ + "signed-by-root": signedByRoot, + }, + Rejected: map[string]KeyCert{ + "signed-by-intermediate": signedByIntermediate, + "revoked-by-root": revokedByRoot, + "revoked-by-intermediate": revokedByIntermediate, + "self-signed": selfSigned, + }, + }, + } + }, + }, + { + // This test case has certificates including the CRL distribution point of their signer. In this case, a + // leaf (client) certificate is included in the CA bundle so that openshift-router is aware of the + // intermediate CRL's distribution point, so certificates signed by intermediate will work. + // TODO: update this test case when RFE-3605 or a similar fix is implemented + // + // CA Bundle: + // - revoked-by-intermediate + // - Includes CRL distribution point for intermediate-ca.crl + // - Signed by Intermediate CA + // - Intermediate CA + // - Includes CRL distribution point for root-ca.crl + // - Signed by Root CA. + // - Root CA + // - No CRL distribution point. + // - Self signed. + // + // Including revoked-by-intermediate in the CA bundle is the "workaround" in the test name. It makes sure + // intermediate-ca.crl is listed in the CA bundle, so openshift-router knows to download it. + // + // Client Certificates: + // - signed-by-root + // - Includes CRL distribution point for root-ca.crl + // - Signed by Root CA. + // - Should successfully connect. + // - signed-by-intermediate + // - Includes CRL distribution point for intermediate-ca.crl + // - Signed by Intermediate CA. + // - Should successfully connect. + // - revoked-by-root + // - Includes CRL distribution point for root-ca.crl + // - Signed by Root CA. + // - Has been revoked. + // - Should be rejected due to revocation. + // - revoked-by-intermediate + // - Includes CRL distribution point for intermediate-ca.crl + // - Signed by Intermediate CA + // - Has been revoked + // - Should be rejected due to revocation. + // - self-signed + // - Self signed + // - Should be rejected because it's not signed by any trusted CA (SSL error "unknown ca"). 
+ Name: "certificate-distributes-its-signers-crl-with-workaround", + CreateCerts: func() TestMTLSWithCRLsCerts { + rootCDP := "http://" + crlHostURL + "/root/root.crl" + intermediateCDP := "http://" + crlHostURL + "/intermediate/intermediate.crl" + + rootCA := MustCreateTLSKeyCert("testing root CA", time.Now(), time.Now().Add(24*time.Hour), true, nil, nil) + intermediateCA := MustCreateTLSKeyCert("testing intermediate CA", time.Now(), time.Now().Add(24*time.Hour), true, []string{rootCDP}, &rootCA) + + signedByRoot := MustCreateTLSKeyCert("client signed by root", time.Now(), time.Now().Add(24*time.Hour), true, []string{rootCDP}, &rootCA) + signedByIntermediate := MustCreateTLSKeyCert("client signed by intermediate", time.Now(), time.Now().Add(24*time.Hour), true, []string{intermediateCDP}, &intermediateCA) + revokedByRoot := MustCreateTLSKeyCert("client revoked by root", time.Now(), time.Now().Add(24*time.Hour), true, []string{rootCDP}, &rootCA) + revokedByIntermediate := MustCreateTLSKeyCert("client revoked by intermediate", time.Now(), time.Now().Add(24*time.Hour), true, []string{intermediateCDP}, &intermediateCA) + selfSigned := MustCreateTLSKeyCert("self signed cert", time.Now(), time.Now().Add(24*time.Hour), false, nil, nil) + + _, rootCRLPem := MustCreateCRL(nil, rootCA, time.Now(), time.Now().Add(1*time.Hour), RevokeCertificates(time.Now(), revokedByRoot)) + _, intermediateCRLPem := MustCreateCRL(nil, intermediateCA, time.Now(), time.Now().Add(1*time.Hour), RevokeCertificates(time.Now(), revokedByIntermediate)) + + return TestMTLSWithCRLsCerts{ + CABundle: []string{ + revokedByIntermediate.CertPem, + intermediateCA.CertPem, + rootCA.CertPem, + }, + CRLs: map[string]string{ + "root": rootCRLPem, + "intermediate": intermediateCRLPem, + }, + ClientCerts: struct { + Accepted map[string]KeyCert + Rejected map[string]KeyCert + }{ + Accepted: map[string]KeyCert{ + "signed-by-root": signedByRoot, + "signed-by-intermediate": signedByIntermediate, + }, + Rejected: map[string]KeyCert{ + "revoked-by-root": revokedByRoot, + "revoked-by-intermediate": revokedByIntermediate, + "self-signed": selfSigned, + }, + }, + } + }, + }, + { + // large-crl verifies that CRLs larger than 1MB can be used. 
This tests the fix for OCPBUGS-6661.
+			Name: "large-crl",
+			CreateCerts: func() TestMTLSWithCRLsCerts {
+				maxDummyRevokedSerialNumber := 25000
+				rootCDP := "http://" + crlHostURL + "/root/root.crl"
+				intermediateCDP := "http://" + crlHostURL + "/intermediate/intermediate.crl"
+
+				rootCA := MustCreateTLSKeyCert("testing root CA", time.Now(), time.Now().Add(24*time.Hour), true, []string{rootCDP}, nil)
+				intermediateCA := MustCreateTLSKeyCert("testing intermediate CA", time.Now(), time.Now().Add(24*time.Hour), true, []string{intermediateCDP}, &rootCA)
+
+				signedByRoot := MustCreateTLSKeyCert("client signed by root", time.Now(), time.Now().Add(24*time.Hour), false, nil, &rootCA)
+				signedByIntermediate := MustCreateTLSKeyCert("client signed by intermediate", time.Now(), time.Now().Add(24*time.Hour), false, nil, &intermediateCA)
+				revokedByRoot := MustCreateTLSKeyCert("client revoked by root", time.Now(), time.Now().Add(24*time.Hour), false, nil, &rootCA)
+				revokedByIntermediate := MustCreateTLSKeyCert("client revoked by intermediate", time.Now(), time.Now().Add(24*time.Hour), false, nil, &intermediateCA)
+				selfSigned := MustCreateTLSKeyCert("self signed cert", time.Now(), time.Now().Add(24*time.Hour), false, nil, nil)
+
+				// Generate a set of CRL files that are larger than 1MB by revoking a large number of certificates. The
+				// revocation list only includes the serial number of the certificate and the time of revocation, so we
+				// don't actually need to generate real certificates, just some serial numbers. We can also repeat the
+				// same serial numbers in each CRL, cutting the number we need to generate in half.
+				revokedCerts := []pkix.RevokedCertificate{}
+				for i := int64(1); i <= int64(maxDummyRevokedSerialNumber); i++ {
+					serialNumber := big.NewInt(i)
+					// It's highly unlikely that any of the certs we explicitly generated have serial numbers less than
+					// maxDummyRevokedSerialNumber, since their serial numbers are random 128-bit integers, but it is
+					// possible. In the unlikely event that there's some overlap, don't include those serial numbers in
+					// the initial list of "revoked" certs.
+ switch { + case signedByRoot.Cert.SerialNumber.Cmp(serialNumber) == 0: + continue + case revokedByRoot.Cert.SerialNumber.Cmp(serialNumber) == 0: + continue + case signedByIntermediate.Cert.SerialNumber.Cmp(serialNumber) == 0: + continue + case revokedByIntermediate.Cert.SerialNumber.Cmp(serialNumber) == 0: + continue + } + revokedCerts = append(revokedCerts, pkix.RevokedCertificate{ + SerialNumber: serialNumber, + RevocationTime: time.Now(), + }) + } + + rootRevokedCerts := make([]pkix.RevokedCertificate, len(revokedCerts)) + copy(rootRevokedCerts, revokedCerts) + rootRevokedCerts = append(rootRevokedCerts, pkix.RevokedCertificate{ + SerialNumber: revokedByRoot.Cert.SerialNumber, + RevocationTime: time.Now(), + }) + _, rootCRLPem := MustCreateCRL(nil, rootCA, time.Now(), time.Now().Add(1*time.Hour), rootRevokedCerts) + + intermediateRevokedCerts := make([]pkix.RevokedCertificate, len(revokedCerts)) + copy(intermediateRevokedCerts, revokedCerts) + intermediateRevokedCerts = append(intermediateRevokedCerts, pkix.RevokedCertificate{ + SerialNumber: revokedByIntermediate.Cert.SerialNumber, + RevocationTime: time.Now(), + }) + _, intermediateCRLPem := MustCreateCRL(nil, intermediateCA, time.Now(), time.Now().Add(1*time.Hour), intermediateRevokedCerts) + t.Logf("Root CRL Size: %dKB\nIntermediate CRL Size: %dKB\nTotal Size: %dKB", len(rootCRLPem)/1024, len(intermediateCRLPem)/1024, (len(rootCRLPem)+len(intermediateCRLPem))/1024) + + return TestMTLSWithCRLsCerts{ + CABundle: []string{ + intermediateCA.CertPem, + rootCA.CertPem, + }, + CRLs: map[string]string{ + "root": rootCRLPem, + "intermediate": intermediateCRLPem, + }, + ClientCerts: struct { + Accepted map[string]KeyCert + Rejected map[string]KeyCert + }{ + Accepted: map[string]KeyCert{ + "signed-by-root": signedByRoot, + "signed-by-intermediate": signedByIntermediate, + }, + Rejected: map[string]KeyCert{ + "revoked-by-root": revokedByRoot, + "revoked-by-intermediate": revokedByIntermediate, + "self-signed": selfSigned, + }, + }, + } + }, + }, + { + // multiple-intermediate-ca tests that more than 2 CAs can be used. Each CA lists its own CRL's distribution point. + Name: "multiple-intermediate-ca", + CreateCerts: func() TestMTLSWithCRLsCerts { + CANames := []string{ + "root", + "foo", + "bar", + "baz", + "quux", + } + caCerts := map[string]KeyCert{} + acceptedClientCerts := map[string]KeyCert{} + rejectedClientCerts := map[string]KeyCert{} + crls := map[string]string{} + caBundle := []string{} + + for i, name := range CANames { + crlDistributionPoint := "http://" + crlHostURL + "/" + name + "/" + name + ".crl" + caCert := KeyCert{} + if i == 0 { + // i = 0 is the root certificate, so it's self signed. + caCert = MustCreateTLSKeyCert(name, time.Now(), time.Now().Add(24*time.Hour), true, []string{crlDistributionPoint}, nil) + } else { + // Non-root certificates are signed by the previous CA in the list. + signer := caCerts[CANames[i-1]] + caCert = MustCreateTLSKeyCert(name, time.Now(), time.Now().Add(24*time.Hour), true, []string{crlDistributionPoint}, &signer) + } + caCerts[name] = caCert + caBundle = append(caBundle, caCerts[name].CertPem) + + // For each CA, generate 1 cert that will be accepted, and 1 that will be revoked (and therefore rejected). 
+ acceptedCert := MustCreateTLSKeyCert("client signed by "+name, time.Now(), time.Now().Add(24*time.Hour), false, nil, &caCert) + revokedCert := MustCreateTLSKeyCert("client revoked by "+name, time.Now(), time.Now().Add(24*time.Hour), false, nil, &caCert) + _, crls[name] = MustCreateCRL(nil, caCerts[name], time.Now(), time.Now().Add(1*time.Hour), RevokeCertificates(time.Now(), revokedCert)) + acceptedClientCerts["signed-by-"+name] = acceptedCert + rejectedClientCerts["revoked-by-"+name] = revokedCert + } + + // In addition to the certificates for each CA, include a self-signed certificate to make sure it's rejected. + rejectedClientCerts["self-signed"] = MustCreateTLSKeyCert("self signed cert", time.Now(), time.Now().Add(24*time.Hour), false, nil, nil) + + return TestMTLSWithCRLsCerts{ + CABundle: caBundle, + CRLs: crls, + ClientCerts: struct { + Accepted map[string]KeyCert + Rejected map[string]KeyCert + }{ + Accepted: acceptedClientCerts, + Rejected: rejectedClientCerts, + }, + } + }, + }, + } + + namespace := corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespaceName, + }, + } + if err := kclient.Create(context.TODO(), &namespace); err != nil { + t.Fatalf("Failed to create namespace %q: %v", namespace.Name, err) + } + defer assertDeletedWaitForCleanup(t, kclient, &namespace) + for _, tc := range testCases { + t.Run(tc.Name, func(t *testing.T) { + tcCerts := tc.CreateCerts() + // Create a pod which will host the CRLs. + crlHostPod := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: crlHostName.Name, + Namespace: namespace.Name, + Labels: map[string]string{"app": crlHostName.Name}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "httpd", + Image: "quay.io/centos7/httpd-24-centos7", + Ports: []corev1.ContainerPort{{ + ContainerPort: 8080, + Name: "http-svc", + }}, + SecurityContext: generateUnprivilegedSecurityContext(), + }}, + }, + } + for name, crl := range tcCerts.CRLs { + crlConfigMapName := name + "-crl" + crlConfigMap := corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: crlConfigMapName, + Namespace: namespace.Name, + }, + Data: map[string]string{ + name + ".crl": crl, + }, + } + crlHostPod.Spec.Volumes = append(crlHostPod.Spec.Volumes, corev1.Volume{ + Name: name, + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: crlConfigMap.Name, + }, + }, + }, + }) + crlHostPod.Spec.Containers[0].VolumeMounts = append(crlHostPod.Spec.Containers[0].VolumeMounts, corev1.VolumeMount{ + Name: name, + MountPath: filepath.Join("/var/www/html", name), + ReadOnly: true, + }) + + if err := kclient.Create(context.TODO(), &crlConfigMap); err != nil { + t.Fatalf("Failed to create configmap %q: %v", crlConfigMap.Name, err) + } + defer assertDeleted(t, kclient, &crlConfigMap) + } + + if err := kclient.Create(context.TODO(), &crlHostPod); err != nil { + t.Fatalf("Failed to create pod %q: %v", crlHostPod.Name, err) + } + // the crlHostPod is one of the first resources to be created, and one of the last to be deleted thanks to + // defer stack ordering. calling assertDeletedWaitForCleanup here makes sure that the test case doesn't + // finish until it's fully cleaned up, so when the next test case creates its own version of crlHostPod, it + // won't be clashing. 
As of this writing, the other resources are normally cleaned up before the next test + // case comes through and creates a new one, but if that stops being true in the future, their assertDeleted + // calls may need to be replaced by the slower assertDeletedWaitForCleanup option. + defer assertDeletedWaitForCleanup(t, kclient, &crlHostPod) + crlHostService := corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: crlHostServiceName, + Namespace: namespace.Name, + }, + Spec: corev1.ServiceSpec{ + Selector: map[string]string{"app": crlHostName.Name}, + Ports: []corev1.ServicePort{{ + Name: "http", + Port: 80, + Protocol: corev1.ProtocolTCP, + TargetPort: intstr.FromString("http-svc"), + }}, + }, + } + if err := kclient.Create(context.TODO(), &crlHostService); err != nil { + t.Fatalf("Failed to create service %q: %v", crlHostService.Name, err) + } + defer assertDeleted(t, kclient, &crlHostService) + // Wait for CRL host to be ready + err := wait.PollImmediate(2*time.Second, 3*time.Minute, func() (bool, error) { + if err := kclient.Get(context.TODO(), crlHostName, &crlHostPod); err != nil { + t.Logf("error getting pod %s/%s: %v", crlHostName.Namespace, crlHostName.Name, err) + return false, nil + } + for _, condition := range crlHostPod.Status.Conditions { + if condition.Type == corev1.PodReady && condition.Status == corev1.ConditionTrue { + return true, nil + } + } + return false, nil + }) + // Create CA cert bundle + clientCAConfigmapName := "client-ca-cm-" + namespace.Name + clientCAConfigmap := corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: clientCAConfigmapName, + Namespace: "openshift-config", + }, + Data: map[string]string{ + "ca-bundle.pem": strings.Join(tcCerts.CABundle, "\n"), + }, + } + if err := kclient.Create(context.TODO(), &clientCAConfigmap); err != nil { + t.Fatalf("Failed to create CA cert configmap: %v", err) + } + defer assertDeleted(t, kclient, &clientCAConfigmap) + icName := types.NamespacedName{ + Name: "mtls-with-crls", + Namespace: operatorNamespace, + } + icDomain := icName.Name + "." + dnsConfig.Spec.BaseDomain + ic := newPrivateController(icName, icDomain) + ic.Spec.ClientTLS = operatorv1.ClientTLS{ + ClientCA: configv1.ConfigMapNameReference{ + Name: clientCAConfigmapName, + }, + ClientCertificatePolicy: operatorv1.ClientCertificatePolicyRequired, + } + if err := kclient.Create(context.TODO(), ic); err != nil { + t.Fatalf("failed to create ingresscontroller %s: %v", icName, err) + } + defer assertIngressControllerDeleted(t, kclient, ic) + + if err := waitForIngressControllerCondition(t, kclient, 5*time.Minute, icName, availableConditionsForPrivateIngressController...); err != nil { + t.Fatalf("failed to observe expected conditions: %v", err) + } + + // The client pod will need the client certificates we generated, so create a configmap with all the client + // certificates and keys, and mount that to the client pod. 
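+			// Each entry in the configmap becomes a file under the pod's /tmp/tls/ mount, e.g.
+			// /tmp/tls/signed-by-root.pem and /tmp/tls/signed-by-root.key, which curlGetStatusCode later passes to
+			// curl via --cert/--key.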
+ clientCertsConfigmap := corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "client-certificates", + Namespace: namespace.Name, + }, + Data: map[string]string{}, + } + for name, keyCert := range tcCerts.ClientCerts.Accepted { + clientCertsConfigmap.Data[name+".key"] = encodeKey(keyCert.Key) + clientCertsConfigmap.Data[name+".pem"] = keyCert.CertFullChain + } + for name, keyCert := range tcCerts.ClientCerts.Rejected { + clientCertsConfigmap.Data[name+".key"] = encodeKey(keyCert.Key) + clientCertsConfigmap.Data[name+".pem"] = keyCert.CertFullChain + } + if err := kclient.Create(context.TODO(), &clientCertsConfigmap); err != nil { + t.Fatalf("failed to create configmap %q: %v", clientCertsConfigmap.Name, err) + } + defer assertDeleted(t, kclient, &clientCertsConfigmap) + + // Use the router image for the exec pod since it has curl. + routerDeployment := &appsv1.Deployment{} + routerDeploymentName := controller.RouterDeploymentName(ic) + if err := kclient.Get(context.TODO(), routerDeploymentName, routerDeployment); err != nil { + t.Fatalf("failed to get routerDeployment %q: %v", routerDeploymentName, err) + } + + podName := "mtls-with-crls-client" + image := routerDeployment.Spec.Template.Spec.Containers[0].Image + clientPod := buildExecPod(podName, namespace.Name, image) + clientPod.Spec.Volumes = []corev1.Volume{{ + Name: "client-certificates", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: clientCertsConfigmap.Name, + }, + }, + }, + }} + clientPod.Spec.Containers[0].VolumeMounts = []corev1.VolumeMount{{ + Name: "client-certificates", + MountPath: "/tmp/tls/", + ReadOnly: true, + }} + clientPodName := types.NamespacedName{ + Name: clientPod.Name, + Namespace: clientPod.Namespace, + } + if err := kclient.Create(context.TODO(), clientPod); err != nil { + t.Fatalf("failed to create pod %q: %v", clientPodName, err) + } + defer assertDeleted(t, kclient, clientPod) + + err = wait.PollImmediate(2*time.Second, 3*time.Minute, func() (bool, error) { + if err := kclient.Get(context.TODO(), clientPodName, clientPod); err != nil { + t.Logf("failed to get client pod %q: %v", clientPodName, err) + return false, nil + } + for _, cond := range clientPod.Status.Conditions { + if cond.Type == corev1.PodReady { + return cond.Status == corev1.ConditionTrue, nil + } + } + return false, nil + }) + if err != nil { + t.Fatalf("timed out waiting for pod %q to become ready: %v", clientPodName, err) + } + + // Wait until the CRLs are downloaded + podList := &corev1.PodList{} + labels := map[string]string{ + controller.ControllerDeploymentLabel: icName.Name, + } + if err := kclient.List(context.TODO(), podList, client.InNamespace("openshift-ingress"), client.MatchingLabels(labels)); err != nil { + t.Logf("failed to list pods for ingress controllers %s: %v", ic.Name, err) + } + if len(podList.Items) == 0 { + t.Fatalf("no router pods found for ingresscontroller %s: %v", ic.Name, err) + } + routerPod := podList.Items[0] + err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { + // Get the current CRL file from the router container + cmd := []string{"cat", "/var/lib/haproxy/mtls/latest/crls.pem"} + stdout := bytes.Buffer{} + stderr := bytes.Buffer{} + if err := podExec(t, routerPod, &stdout, &stderr, cmd); err != nil { + t.Logf("exec %q failed. error: %v\nstdout:\n%s\nstderr:\n%s", cmd, err, stdout.String(), stderr.String()) + return false, err + } + // Parse the first CRL. 
If CRLs haven't been downloaded yet, it will be the placeholder CRL. + block, _ := pem.Decode(stdout.Bytes()) + crl, err := x509.ParseRevocationList(block.Bytes) + if err != nil { + return false, fmt.Errorf("invalid CRL: %v", err) + } + return crl.Issuer.CommonName != "Placeholder CA", nil + }) + + // Get the canary route to use as the target for curl. + route := &routev1.Route{} + routeName := controller.CanaryRouteName() + err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { + if err := kclient.Get(context.TODO(), routeName, route); err != nil { + t.Logf("failed to get route %q: %v", routeName, err) + return false, nil + } + return true, nil + }) + if err != nil { + t.Fatalf("failed to observe route %q: %v", routeName, err) + } + + // If the canary route is used, normally the default ingress controller will handle the request, but by + // using curl's --resolve flag, we can send an HTTP request intended for the canary pod directly to our + // ingress controller instead. In order to do that, we need the ingress controller's service IP. + service := &corev1.Service{} + serviceName := controller.InternalIngressControllerServiceName(ic) + if err := kclient.Get(context.TODO(), serviceName, service); err != nil { + t.Fatalf("failed to get service %q: %v", serviceName, err) + } + + for certName := range tcCerts.ClientCerts.Accepted { + if _, err := curlGetStatusCode(t, clientPod, certName, route.Spec.Host, service.Spec.ClusterIP, false); err != nil { + t.Errorf("Failed to curl route with cert %q: %v", certName, err) + } + } + for certName := range tcCerts.ClientCerts.Rejected { + if httpStatusCode, err := curlGetStatusCode(t, clientPod, certName, route.Spec.Host, service.Spec.ClusterIP, false); err != nil { + if httpStatusCode == 0 { + // TLS/SSL verification failures result in a 0 http status code (no connection is made to the backend, so no http status code is returned). + continue + } + t.Errorf("Unexpected error from curl for cert %q: %v", certName, err) + } else { + t.Errorf("Expected curl route with cert %q to fail but succeeded", certName) + } + } + }) + } +} + // generateClientCA generates and returns a CA certificate and key. func generateClientCA() (*x509.Certificate, *rsa.PrivateKey, error) { key, err := rsa.GenerateKey(rand.Reader, 2048) @@ -473,3 +1111,61 @@ func encodeKey(key *rsa.PrivateKey) string { Bytes: x509.MarshalPKCS1PrivateKey(key), })) } + +// curlGetStatusCode execs a Curl command in the test client pod and returns an error value. The Curl command uses the +// specified certificate from the client certificates configmap, sends a request for the canary route via the router's +// internal service. Returns the HTTP status code returned from curl, and an error either if there is an HTTP error, or +// if there's another error in running the command. If the error was not an HTTP error, the HTTP status code returned +// will be -1. +func curlGetStatusCode(t *testing.T, clientPod *corev1.Pod, certName, endpoint, ingressControllerIP string, verbose bool) (int64, error) { + t.Helper() + cmd := []string{ + "/bin/curl", + "--silent", + // Allow self-signed certs. + "-k", + // Output the http status code (i.e. 200 (OK) or 404 (Not found)) to stdout. + "-w", "%{http_code}", + // Retry on timeouts, 4xx errors, or 500/502/503/504 errors. + "--retry", "10", + // Sleep 1 second between retries. + "--retry-delay", "1", + // Use --resolve to guarantee that the request is sent through this test's ingress controller. 
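+		// For example (hypothetical values), the flag expands to something like
+		//   --resolve canary-openshift-ingress-canary.apps.example.com:443:172.30.0.10
+		// so curl dials the ingress controller's internal service IP while still using the route's hostname for
+		// SNI and the Host header.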
+ "--resolve", fmt.Sprintf("%s:443:%s", endpoint, ingressControllerIP), + fmt.Sprintf("https://%s", endpoint), + } + if verbose { + cmd = append(cmd, "-v") + } + if len(certName) != 0 { + cmd = append(cmd, + "--cert", fmt.Sprintf("/tmp/tls/%s.pem", certName), + "--key", fmt.Sprintf("/tmp/tls/%s.key", certName), + ) + } + stdout := bytes.Buffer{} + stderr := bytes.Buffer{} + curlErr := podExec(t, *clientPod, &stdout, &stderr, cmd) + stdoutStr := stdout.String() + t.Logf("command: %s\nstdout:\n%s\n\nstderr:\n%s\n", + strings.Join(cmd, " "), stdoutStr, stderr.String()) + // Try to parse the http status code even if curl returns an error; it may still be relevant. + httpStatusCode := stdoutStr[len(stdoutStr)-3:] + httpStatusCodeInt, err := strconv.ParseInt(httpStatusCode, 10, 64) + if err != nil { + // If parsing the status code returns an error but curl also returned an error, just send the curl one. + if curlErr != nil { + return -1, curlErr + } + return -1, err + } + if curlErr != nil { + return httpStatusCodeInt, curlErr + } + switch httpStatusCode[0] { + case '0', '4', '5': + return httpStatusCodeInt, fmt.Errorf("got HTTP %s status code", httpStatusCode) + default: + return httpStatusCodeInt, nil + } +} diff --git a/test/e2e/util_certgen.go b/test/e2e/util_certgen.go new file mode 100644 index 0000000000..70c7d75ef8 --- /dev/null +++ b/test/e2e/util_certgen.go @@ -0,0 +1,185 @@ +//go:build e2e +// +build e2e + +package e2e + +import ( + "bytes" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/pem" + "fmt" + "math/big" + "strings" + "time" +) + +// KeyCert bundles a certificate with its associated private key. +type KeyCert struct { + // The private key + Key *rsa.PrivateKey + // The certificate in logical form + Cert *x509.Certificate + // The signed certificate in binary form + CertPem string + // The entire certificate chain in binary form + CertFullChain string +} + +// CreateTLSKeyCert creates a key and certificate with CN commonName, valid between notBefore and notAfter, and with CRL +// Distribution Points crlDistributionPoints (if any). If isCA is true, the certificate is marked as being a CA +// certificate. If issuer is non-nil, the pem-encoded certificate is signed by issuer, otherwise it is self signed. +// +// Returns a KeyCert containing the key and certificate in logical form, as well as a pem-encoded version of the +// certificate. 
+func CreateTLSKeyCert(commonName string, notBefore, notAfter time.Time, isCA bool, crlDistributionPoints []string, issuer *KeyCert) (KeyCert, error) { + serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) + serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) + if err != nil { + return KeyCert{}, fmt.Errorf("failed to generate serial number: %w", err) + } + + privKey, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return KeyCert{}, fmt.Errorf("failed to generate key: %w", err) + } + + subjectKeyId, err := generateSubjectKeyID(privKey) + if err != nil { + return KeyCert{}, fmt.Errorf("failed to generate subject key ID: %w", err) + } + + certificate := &x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{ + Organization: []string{"Openshift E2E Testing"}, + OrganizationalUnit: []string{"Engineering"}, + CommonName: commonName, + }, + NotBefore: notBefore, + NotAfter: notAfter, + IsCA: isCA, + BasicConstraintsValid: true, + CRLDistributionPoints: crlDistributionPoints, + SubjectKeyId: subjectKeyId, + } + + if isCA { + certificate.KeyUsage = x509.KeyUsageCertSign | x509.KeyUsageCRLSign | x509.KeyUsageDigitalSignature + //certificate.MaxPathLenZero = true + } else { + certificate.KeyUsage = x509.KeyUsageDigitalSignature + } + + // If no issuer is specified, self-sign + if issuer == nil { + issuer = &KeyCert{ + Key: privKey, + Cert: certificate, + } + } + + certBytes, err := x509.CreateCertificate(rand.Reader, certificate, issuer.Cert, &privKey.PublicKey, issuer.Key) + if err != nil { + return KeyCert{}, fmt.Errorf("failed to create certificate %s: %w", commonName, err) + } + + pemBuffer := new(bytes.Buffer) + if err := pem.Encode(pemBuffer, &pem.Block{ + Type: "CERTIFICATE", + Bytes: certBytes, + }); err != nil { + return KeyCert{}, fmt.Errorf("failed to pem encode certificate %s: %w", commonName, err) + } + certPem := pemBuffer.String() + certFullChain := strings.Join([]string{certPem, issuer.CertFullChain}, "") + + return KeyCert{Key: privKey, Cert: certificate, CertPem: certPem, CertFullChain: certFullChain}, nil +} + +// MustCreateTLSKeyCert calls CreateTLSKeyCert, but instead of returning an error, it panics if an error occurs +func MustCreateTLSKeyCert(commonName string, notBefore, notAfter time.Time, isCA bool, crlDistributionPoints []string, issuer *KeyCert) KeyCert { + keyCert, err := CreateTLSKeyCert(commonName, notBefore, notAfter, isCA, crlDistributionPoints, issuer) + if err != nil { + panic(err) + } + return keyCert +} + +// CreateCRL generates a pem-encoded CRL for issuer, valid between thisUpdate and nextUpdate, that lists revokedCerts as +// revoked. Returns the logical form of the CRL, as well as a pem-encoded version. 
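+//
+// Illustrative usage (values are only examples): create the first CRL for a CA, revoking one certificate:
+//
+//	crl, crlPem, err := CreateCRL(nil, ca, time.Now(), time.Now().Add(1*time.Hour), RevokeCertificates(time.Now(), revoked))
+//
+// Passing the returned *x509.RevocationList back in on a later call reuses it and increments its Number.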
+func CreateCRL(revocationList *x509.RevocationList, issuer KeyCert, thisUpdate, nextUpdate time.Time, revokedCerts []pkix.RevokedCertificate) (*x509.RevocationList, string, error) { + if revocationList == nil { + revocationList = &x509.RevocationList{ + Issuer: issuer.Cert.Subject, + Number: big.NewInt(1), + } + } else { + revocationList.Number.Add(revocationList.Number, big.NewInt(1)) + } + revocationList.ThisUpdate = thisUpdate + revocationList.NextUpdate = nextUpdate + revocationList.RevokedCertificates = revokedCerts + + crlBytes, err := x509.CreateRevocationList(rand.Reader, revocationList, issuer.Cert, issuer.Key) + if err != nil { + return nil, "", fmt.Errorf("failed to create CRL for issuer %s: %w", issuer.Cert.Subject.CommonName, err) + } + + crlBuffer := new(bytes.Buffer) + if err := pem.Encode(crlBuffer, &pem.Block{ + Type: "X509 CRL", + Bytes: crlBytes, + }); err != nil { + return nil, "", fmt.Errorf("failed to pem encode CRL for issuer %s: %w", issuer.Cert.Subject.CommonName, err) + } + + return revocationList, crlBuffer.String(), nil +} + +// MustCreateCRL calls CreateCRL, but instead of returning an error, it panics if an error occurs +func MustCreateCRL(revocationList *x509.RevocationList, issuer KeyCert, thisUpdate, nextUpdate time.Time, revokedCerts []pkix.RevokedCertificate) (*x509.RevocationList, string) { + crl, crlPem, err := CreateCRL(revocationList, issuer, thisUpdate, nextUpdate, revokedCerts) + if err != nil { + panic(err) + } + return crl, crlPem +} + +// RevokeCertificates revokes the certificates in keyCerts at revocationTime, and returns the list of revoked +// certificates, which can be appended to an existing list of revoked certificates and passed to CreateCRL(). +func RevokeCertificates(revocationTime time.Time, keyCerts ...KeyCert) []pkix.RevokedCertificate { + revokedCerts := []pkix.RevokedCertificate{} + for _, keyCert := range keyCerts { + revokedCert := pkix.RevokedCertificate{ + RevocationTime: revocationTime, + SerialNumber: keyCert.Cert.SerialNumber, + } + revokedCerts = append(revokedCerts, revokedCert) + } + return revokedCerts +} + +// pkcs1PublicKey reflects the ASN.1 structure of a PKCS #1 public key. +type pkcs1PublicKey struct { + N *big.Int + E int +} + +// generateSubjectKeyID generates a subject key by hashing the ASN.1-encoded public key bit string, as proposed in +// section 4.2.1.2 of RFC-5280. 
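+// SHA-1 is used here only to derive a stable identifier for the key, not as a cryptographic integrity mechanism.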
+func generateSubjectKeyID(key *rsa.PrivateKey) ([]byte, error) { + publicKeyBytes, err := asn1.Marshal(pkcs1PublicKey{ + N: key.PublicKey.N, + E: key.PublicKey.E, + }) + if err != nil { + return nil, err + } + subjectKeyId := sha1.Sum(publicKeyBytes) + return subjectKeyId[:], nil +} diff --git a/test/e2e/util_test.go b/test/e2e/util_test.go index 2bd4b0fd9c..145ca0584f 100644 --- a/test/e2e/util_test.go +++ b/test/e2e/util_test.go @@ -79,6 +79,21 @@ func buildEchoPod(name, namespace string) *corev1.Pod { } } +// generateUnprivilegedSecurityContext returns a SecurityContext with the minimum possible privileges that satisfy +// restricted pod security requirements +func generateUnprivilegedSecurityContext() *corev1.SecurityContext { + return &corev1.SecurityContext{ + AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + RunAsNonRoot: pointer.Bool(true), + SeccompProfile: &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + }, + } +} + func waitForHTTPClientCondition(t *testing.T, httpClient *http.Client, req *http.Request, interval, timeout time.Duration, compareFunc func(*http.Response) bool) error { t.Helper() return wait.PollImmediate(interval, timeout, func() (done bool, err error) { @@ -638,3 +653,38 @@ func verifyInternalIngressController(t *testing.T, name types.NamespacedName, ho t.Fatalf("failed to verify connectivity with workload with address: %s using internal curl client. Curl Pod Logs:\n%s", address, curlPodLogs) } } + +// assertDeleted tries to delete a cluster resource, and causes test failure if the delete fails. +func assertDeleted(t *testing.T, cl client.Client, thing client.Object) { + t.Helper() + if err := cl.Delete(context.TODO(), thing); err != nil { + if errors.IsNotFound(err) { + return + } + t.Fatalf("Failed to delete %s: %v", thing.GetName(), err) + } else { + t.Logf("Deleted %s", thing.GetName()) + } +} + +// assertDeletedWaitForCleanup tries to delete a cluster resource, and waits for it to actually be cleaned up before +// returning. It causes test failure if the delete fails or if the cleanup times out. +func assertDeletedWaitForCleanup(t *testing.T, cl client.Client, thing client.Object) { + t.Helper() + thingName := types.NamespacedName{ + Name: thing.GetName(), + Namespace: thing.GetNamespace(), + } + assertDeleted(t, cl, thing) + if err := wait.PollImmediate(5*time.Second, 2*time.Minute, func() (bool, error) { + if err := cl.Get(context.TODO(), thingName, thing); err != nil { + if errors.IsNotFound(err) { + return true, nil + } + return false, err + } + return false, nil + }); err != nil { + t.Fatalf("Timed out waiting for %s to be cleaned up: %v", thing.GetName(), err) + } +} diff --git a/vendor/github.com/blang/semver/.travis.yml b/vendor/github.com/blang/semver/.travis.yml deleted file mode 100644 index 102fb9a691..0000000000 --- a/vendor/github.com/blang/semver/.travis.yml +++ /dev/null @@ -1,21 +0,0 @@ -language: go -matrix: - include: - - go: 1.4.3 - - go: 1.5.4 - - go: 1.6.3 - - go: 1.7 - - go: tip - allow_failures: - - go: tip -install: -- go get golang.org/x/tools/cmd/cover -- go get github.com/mattn/goveralls -script: -- echo "Test and track coverage" ; $HOME/gopath/bin/goveralls -package "." -service=travis-ci - -repotoken $COVERALLS_TOKEN -- echo "Build examples" ; cd examples && go build -- echo "Check if gofmt'd" ; diff -u <(echo -n) <(gofmt -d -s .) 
-env: - global: - secure: HroGEAUQpVq9zX1b1VIkraLiywhGbzvNnTZq2TMxgK7JHP8xqNplAeF1izrR2i4QLL9nsY+9WtYss4QuPvEtZcVHUobw6XnL6radF7jS1LgfYZ9Y7oF+zogZ2I5QUMRLGA7rcxQ05s7mKq3XZQfeqaNts4bms/eZRefWuaFZbkw= diff --git a/vendor/github.com/blang/semver/LICENSE b/vendor/github.com/blang/semver/LICENSE deleted file mode 100644 index 5ba5c86fcb..0000000000 --- a/vendor/github.com/blang/semver/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License - -Copyright (c) 2014 Benedikt Lang - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - diff --git a/vendor/github.com/blang/semver/README.md b/vendor/github.com/blang/semver/README.md deleted file mode 100644 index 08b2e4a3d7..0000000000 --- a/vendor/github.com/blang/semver/README.md +++ /dev/null @@ -1,194 +0,0 @@ -semver for golang [![Build Status](https://travis-ci.org/blang/semver.svg?branch=master)](https://travis-ci.org/blang/semver) [![GoDoc](https://godoc.org/github.com/blang/semver?status.png)](https://godoc.org/github.com/blang/semver) [![Coverage Status](https://img.shields.io/coveralls/blang/semver.svg)](https://coveralls.io/r/blang/semver?branch=master) -====== - -semver is a [Semantic Versioning](http://semver.org/) library written in golang. It fully covers spec version `2.0.0`. - -Usage ------ -```bash -$ go get github.com/blang/semver -``` -Note: Always vendor your dependencies or fix on a specific version tag. - -```go -import github.com/blang/semver -v1, err := semver.Make("1.0.0-beta") -v2, err := semver.Make("2.0.0-beta") -v1.Compare(v2) -``` - -Also check the [GoDocs](http://godoc.org/github.com/blang/semver). - -Why should I use this lib? ------ - -- Fully spec compatible -- No reflection -- No regex -- Fully tested (Coverage >99%) -- Readable parsing/validation errors -- Fast (See [Benchmarks](#benchmarks)) -- Only Stdlib -- Uses values instead of pointers -- Many features, see below - - -Features ------ - -- Parsing and validation at all levels -- Comparator-like comparisons -- Compare Helper Methods -- InPlace manipulation -- Ranges `>=1.0.0 <2.0.0 || >=3.0.0 !3.0.1-beta.1` -- Wildcards `>=1.x`, `<=2.5.x` -- Sortable (implements sort.Interface) -- database/sql compatible (sql.Scanner/Valuer) -- encoding/json compatible (json.Marshaler/Unmarshaler) - -Ranges ------- - -A `Range` is a set of conditions which specify which versions satisfy the range. - -A condition is composed of an operator and a version. 
The supported operators are: - -- `<1.0.0` Less than `1.0.0` -- `<=1.0.0` Less than or equal to `1.0.0` -- `>1.0.0` Greater than `1.0.0` -- `>=1.0.0` Greater than or equal to `1.0.0` -- `1.0.0`, `=1.0.0`, `==1.0.0` Equal to `1.0.0` -- `!1.0.0`, `!=1.0.0` Not equal to `1.0.0`. Excludes version `1.0.0`. - -Note that spaces between the operator and the version will be gracefully tolerated. - -A `Range` can link multiple `Ranges` separated by space: - -Ranges can be linked by logical AND: - - - `>1.0.0 <2.0.0` would match between both ranges, so `1.1.1` and `1.8.7` but not `1.0.0` or `2.0.0` - - `>1.0.0 <3.0.0 !2.0.3-beta.2` would match every version between `1.0.0` and `3.0.0` except `2.0.3-beta.2` - -Ranges can also be linked by logical OR: - - - `<2.0.0 || >=3.0.0` would match `1.x.x` and `3.x.x` but not `2.x.x` - -AND has a higher precedence than OR. It's not possible to use brackets. - -Ranges can be combined by both AND and OR - - - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1` - -Range usage: - -``` -v, err := semver.Parse("1.2.3") -range, err := semver.ParseRange(">1.0.0 <2.0.0 || >=3.0.0") -if range(v) { - //valid -} - -``` - -Example ------ - -Have a look at full examples in [examples/main.go](examples/main.go) - -```go -import github.com/blang/semver - -v, err := semver.Make("0.0.1-alpha.preview+123.github") -fmt.Printf("Major: %d\n", v.Major) -fmt.Printf("Minor: %d\n", v.Minor) -fmt.Printf("Patch: %d\n", v.Patch) -fmt.Printf("Pre: %s\n", v.Pre) -fmt.Printf("Build: %s\n", v.Build) - -// Prerelease versions array -if len(v.Pre) > 0 { - fmt.Println("Prerelease versions:") - for i, pre := range v.Pre { - fmt.Printf("%d: %q\n", i, pre) - } -} - -// Build meta data array -if len(v.Build) > 0 { - fmt.Println("Build meta data:") - for i, build := range v.Build { - fmt.Printf("%d: %q\n", i, build) - } -} - -v001, err := semver.Make("0.0.1") -// Compare using helpers: v.GT(v2), v.LT, v.GTE, v.LTE -v001.GT(v) == true -v.LT(v001) == true -v.GTE(v) == true -v.LTE(v) == true - -// Or use v.Compare(v2) for comparisons (-1, 0, 1): -v001.Compare(v) == 1 -v.Compare(v001) == -1 -v.Compare(v) == 0 - -// Manipulate Version in place: -v.Pre[0], err = semver.NewPRVersion("beta") -if err != nil { - fmt.Printf("Error parsing pre release version: %q", err) -} - -fmt.Println("\nValidate versions:") -v.Build[0] = "?" 
- -err = v.Validate() -if err != nil { - fmt.Printf("Validation failed: %s\n", err) -} -``` - - -Benchmarks ------ - - BenchmarkParseSimple-4 5000000 390 ns/op 48 B/op 1 allocs/op - BenchmarkParseComplex-4 1000000 1813 ns/op 256 B/op 7 allocs/op - BenchmarkParseAverage-4 1000000 1171 ns/op 163 B/op 4 allocs/op - BenchmarkStringSimple-4 20000000 119 ns/op 16 B/op 1 allocs/op - BenchmarkStringLarger-4 10000000 206 ns/op 32 B/op 2 allocs/op - BenchmarkStringComplex-4 5000000 324 ns/op 80 B/op 3 allocs/op - BenchmarkStringAverage-4 5000000 273 ns/op 53 B/op 2 allocs/op - BenchmarkValidateSimple-4 200000000 9.33 ns/op 0 B/op 0 allocs/op - BenchmarkValidateComplex-4 3000000 469 ns/op 0 B/op 0 allocs/op - BenchmarkValidateAverage-4 5000000 256 ns/op 0 B/op 0 allocs/op - BenchmarkCompareSimple-4 100000000 11.8 ns/op 0 B/op 0 allocs/op - BenchmarkCompareComplex-4 50000000 30.8 ns/op 0 B/op 0 allocs/op - BenchmarkCompareAverage-4 30000000 41.5 ns/op 0 B/op 0 allocs/op - BenchmarkSort-4 3000000 419 ns/op 256 B/op 2 allocs/op - BenchmarkRangeParseSimple-4 2000000 850 ns/op 192 B/op 5 allocs/op - BenchmarkRangeParseAverage-4 1000000 1677 ns/op 400 B/op 10 allocs/op - BenchmarkRangeParseComplex-4 300000 5214 ns/op 1440 B/op 30 allocs/op - BenchmarkRangeMatchSimple-4 50000000 25.6 ns/op 0 B/op 0 allocs/op - BenchmarkRangeMatchAverage-4 30000000 56.4 ns/op 0 B/op 0 allocs/op - BenchmarkRangeMatchComplex-4 10000000 153 ns/op 0 B/op 0 allocs/op - -See benchmark cases at [semver_test.go](semver_test.go) - - -Motivation ------ - -I simply couldn't find any lib supporting the full spec. Others were just wrong or used reflection and regex which i don't like. - - -Contribution ------ - -Feel free to make a pull request. For bigger changes create a issue first to discuss about it. - - -License ------ - -See [LICENSE](LICENSE) file. diff --git a/vendor/github.com/blang/semver/json.go b/vendor/github.com/blang/semver/json.go deleted file mode 100644 index a74bf7c449..0000000000 --- a/vendor/github.com/blang/semver/json.go +++ /dev/null @@ -1,23 +0,0 @@ -package semver - -import ( - "encoding/json" -) - -// MarshalJSON implements the encoding/json.Marshaler interface. -func (v Version) MarshalJSON() ([]byte, error) { - return json.Marshal(v.String()) -} - -// UnmarshalJSON implements the encoding/json.Unmarshaler interface. 
-func (v *Version) UnmarshalJSON(data []byte) (err error) { - var versionString string - - if err = json.Unmarshal(data, &versionString); err != nil { - return - } - - *v, err = Parse(versionString) - - return -} diff --git a/vendor/github.com/blang/semver/package.json b/vendor/github.com/blang/semver/package.json deleted file mode 100644 index 1cf8ebdd9c..0000000000 --- a/vendor/github.com/blang/semver/package.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "author": "blang", - "bugs": { - "URL": "https://github.com/blang/semver/issues", - "url": "https://github.com/blang/semver/issues" - }, - "gx": { - "dvcsimport": "github.com/blang/semver" - }, - "gxVersion": "0.10.0", - "language": "go", - "license": "MIT", - "name": "semver", - "releaseCmd": "git commit -a -m \"gx publish $VERSION\"", - "version": "3.5.1" -} - diff --git a/vendor/github.com/blang/semver/range.go b/vendor/github.com/blang/semver/range.go deleted file mode 100644 index fca406d479..0000000000 --- a/vendor/github.com/blang/semver/range.go +++ /dev/null @@ -1,416 +0,0 @@ -package semver - -import ( - "fmt" - "strconv" - "strings" - "unicode" -) - -type wildcardType int - -const ( - noneWildcard wildcardType = iota - majorWildcard wildcardType = 1 - minorWildcard wildcardType = 2 - patchWildcard wildcardType = 3 -) - -func wildcardTypefromInt(i int) wildcardType { - switch i { - case 1: - return majorWildcard - case 2: - return minorWildcard - case 3: - return patchWildcard - default: - return noneWildcard - } -} - -type comparator func(Version, Version) bool - -var ( - compEQ comparator = func(v1 Version, v2 Version) bool { - return v1.Compare(v2) == 0 - } - compNE = func(v1 Version, v2 Version) bool { - return v1.Compare(v2) != 0 - } - compGT = func(v1 Version, v2 Version) bool { - return v1.Compare(v2) == 1 - } - compGE = func(v1 Version, v2 Version) bool { - return v1.Compare(v2) >= 0 - } - compLT = func(v1 Version, v2 Version) bool { - return v1.Compare(v2) == -1 - } - compLE = func(v1 Version, v2 Version) bool { - return v1.Compare(v2) <= 0 - } -) - -type versionRange struct { - v Version - c comparator -} - -// rangeFunc creates a Range from the given versionRange. -func (vr *versionRange) rangeFunc() Range { - return Range(func(v Version) bool { - return vr.c(v, vr.v) - }) -} - -// Range represents a range of versions. -// A Range can be used to check if a Version satisfies it: -// -// range, err := semver.ParseRange(">1.0.0 <2.0.0") -// range(semver.MustParse("1.1.1") // returns true -type Range func(Version) bool - -// OR combines the existing Range with another Range using logical OR. -func (rf Range) OR(f Range) Range { - return Range(func(v Version) bool { - return rf(v) || f(v) - }) -} - -// AND combines the existing Range with another Range using logical AND. -func (rf Range) AND(f Range) Range { - return Range(func(v Version) bool { - return rf(v) && f(v) - }) -} - -// ParseRange parses a range and returns a Range. -// If the range could not be parsed an error is returned. 
-// -// Valid ranges are: -// - "<1.0.0" -// - "<=1.0.0" -// - ">1.0.0" -// - ">=1.0.0" -// - "1.0.0", "=1.0.0", "==1.0.0" -// - "!1.0.0", "!=1.0.0" -// -// A Range can consist of multiple ranges separated by space: -// Ranges can be linked by logical AND: -// - ">1.0.0 <2.0.0" would match between both ranges, so "1.1.1" and "1.8.7" but not "1.0.0" or "2.0.0" -// - ">1.0.0 <3.0.0 !2.0.3-beta.2" would match every version between 1.0.0 and 3.0.0 except 2.0.3-beta.2 -// -// Ranges can also be linked by logical OR: -// - "<2.0.0 || >=3.0.0" would match "1.x.x" and "3.x.x" but not "2.x.x" -// -// AND has a higher precedence than OR. It's not possible to use brackets. -// -// Ranges can be combined by both AND and OR -// -// - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1` -func ParseRange(s string) (Range, error) { - parts := splitAndTrim(s) - orParts, err := splitORParts(parts) - if err != nil { - return nil, err - } - expandedParts, err := expandWildcardVersion(orParts) - if err != nil { - return nil, err - } - var orFn Range - for _, p := range expandedParts { - var andFn Range - for _, ap := range p { - opStr, vStr, err := splitComparatorVersion(ap) - if err != nil { - return nil, err - } - vr, err := buildVersionRange(opStr, vStr) - if err != nil { - return nil, fmt.Errorf("Could not parse Range %q: %s", ap, err) - } - rf := vr.rangeFunc() - - // Set function - if andFn == nil { - andFn = rf - } else { // Combine with existing function - andFn = andFn.AND(rf) - } - } - if orFn == nil { - orFn = andFn - } else { - orFn = orFn.OR(andFn) - } - - } - return orFn, nil -} - -// splitORParts splits the already cleaned parts by '||'. -// Checks for invalid positions of the operator and returns an -// error if found. -func splitORParts(parts []string) ([][]string, error) { - var ORparts [][]string - last := 0 - for i, p := range parts { - if p == "||" { - if i == 0 { - return nil, fmt.Errorf("First element in range is '||'") - } - ORparts = append(ORparts, parts[last:i]) - last = i + 1 - } - } - if last == len(parts) { - return nil, fmt.Errorf("Last element in range is '||'") - } - ORparts = append(ORparts, parts[last:]) - return ORparts, nil -} - -// buildVersionRange takes a slice of 2: operator and version -// and builds a versionRange, otherwise an error. 
-func buildVersionRange(opStr, vStr string) (*versionRange, error) { - c := parseComparator(opStr) - if c == nil { - return nil, fmt.Errorf("Could not parse comparator %q in %q", opStr, strings.Join([]string{opStr, vStr}, "")) - } - v, err := Parse(vStr) - if err != nil { - return nil, fmt.Errorf("Could not parse version %q in %q: %s", vStr, strings.Join([]string{opStr, vStr}, ""), err) - } - - return &versionRange{ - v: v, - c: c, - }, nil - -} - -// inArray checks if a byte is contained in an array of bytes -func inArray(s byte, list []byte) bool { - for _, el := range list { - if el == s { - return true - } - } - return false -} - -// splitAndTrim splits a range string by spaces and cleans whitespaces -func splitAndTrim(s string) (result []string) { - last := 0 - var lastChar byte - excludeFromSplit := []byte{'>', '<', '='} - for i := 0; i < len(s); i++ { - if s[i] == ' ' && !inArray(lastChar, excludeFromSplit) { - if last < i-1 { - result = append(result, s[last:i]) - } - last = i + 1 - } else if s[i] != ' ' { - lastChar = s[i] - } - } - if last < len(s)-1 { - result = append(result, s[last:]) - } - - for i, v := range result { - result[i] = strings.Replace(v, " ", "", -1) - } - - // parts := strings.Split(s, " ") - // for _, x := range parts { - // if s := strings.TrimSpace(x); len(s) != 0 { - // result = append(result, s) - // } - // } - return -} - -// splitComparatorVersion splits the comparator from the version. -// Input must be free of leading or trailing spaces. -func splitComparatorVersion(s string) (string, string, error) { - i := strings.IndexFunc(s, unicode.IsDigit) - if i == -1 { - return "", "", fmt.Errorf("Could not get version from string: %q", s) - } - return strings.TrimSpace(s[0:i]), s[i:], nil -} - -// getWildcardType will return the type of wildcard that the -// passed version contains -func getWildcardType(vStr string) wildcardType { - parts := strings.Split(vStr, ".") - nparts := len(parts) - wildcard := parts[nparts-1] - - possibleWildcardType := wildcardTypefromInt(nparts) - if wildcard == "x" { - return possibleWildcardType - } - - return noneWildcard -} - -// createVersionFromWildcard will convert a wildcard version -// into a regular version, replacing 'x's with '0's, handling -// special cases like '1.x.x' and '1.x' -func createVersionFromWildcard(vStr string) string { - // handle 1.x.x - vStr2 := strings.Replace(vStr, ".x.x", ".x", 1) - vStr2 = strings.Replace(vStr2, ".x", ".0", 1) - parts := strings.Split(vStr2, ".") - - // handle 1.x - if len(parts) == 2 { - return vStr2 + ".0" - } - - return vStr2 -} - -// incrementMajorVersion will increment the major version -// of the passed version -func incrementMajorVersion(vStr string) (string, error) { - parts := strings.Split(vStr, ".") - i, err := strconv.Atoi(parts[0]) - if err != nil { - return "", err - } - parts[0] = strconv.Itoa(i + 1) - - return strings.Join(parts, "."), nil -} - -// incrementMajorVersion will increment the minor version -// of the passed version -func incrementMinorVersion(vStr string) (string, error) { - parts := strings.Split(vStr, ".") - i, err := strconv.Atoi(parts[1]) - if err != nil { - return "", err - } - parts[1] = strconv.Itoa(i + 1) - - return strings.Join(parts, "."), nil -} - -// expandWildcardVersion will expand wildcards inside versions -// following these rules: -// -// * when dealing with patch wildcards: -// >= 1.2.x will become >= 1.2.0 -// <= 1.2.x will become < 1.3.0 -// > 1.2.x will become >= 1.3.0 -// < 1.2.x will become < 1.2.0 -// != 1.2.x will become < 1.2.0 >= 
1.3.0 -// -// * when dealing with minor wildcards: -// >= 1.x will become >= 1.0.0 -// <= 1.x will become < 2.0.0 -// > 1.x will become >= 2.0.0 -// < 1.0 will become < 1.0.0 -// != 1.x will become < 1.0.0 >= 2.0.0 -// -// * when dealing with wildcards without -// version operator: -// 1.2.x will become >= 1.2.0 < 1.3.0 -// 1.x will become >= 1.0.0 < 2.0.0 -func expandWildcardVersion(parts [][]string) ([][]string, error) { - var expandedParts [][]string - for _, p := range parts { - var newParts []string - for _, ap := range p { - if strings.Index(ap, "x") != -1 { - opStr, vStr, err := splitComparatorVersion(ap) - if err != nil { - return nil, err - } - - versionWildcardType := getWildcardType(vStr) - flatVersion := createVersionFromWildcard(vStr) - - var resultOperator string - var shouldIncrementVersion bool - switch opStr { - case ">": - resultOperator = ">=" - shouldIncrementVersion = true - case ">=": - resultOperator = ">=" - case "<": - resultOperator = "<" - case "<=": - resultOperator = "<" - shouldIncrementVersion = true - case "", "=", "==": - newParts = append(newParts, ">="+flatVersion) - resultOperator = "<" - shouldIncrementVersion = true - case "!=", "!": - newParts = append(newParts, "<"+flatVersion) - resultOperator = ">=" - shouldIncrementVersion = true - } - - var resultVersion string - if shouldIncrementVersion { - switch versionWildcardType { - case patchWildcard: - resultVersion, _ = incrementMinorVersion(flatVersion) - case minorWildcard: - resultVersion, _ = incrementMajorVersion(flatVersion) - } - } else { - resultVersion = flatVersion - } - - ap = resultOperator + resultVersion - } - newParts = append(newParts, ap) - } - expandedParts = append(expandedParts, newParts) - } - - return expandedParts, nil -} - -func parseComparator(s string) comparator { - switch s { - case "==": - fallthrough - case "": - fallthrough - case "=": - return compEQ - case ">": - return compGT - case ">=": - return compGE - case "<": - return compLT - case "<=": - return compLE - case "!": - fallthrough - case "!=": - return compNE - } - - return nil -} - -// MustParseRange is like ParseRange but panics if the range cannot be parsed. -func MustParseRange(s string) Range { - r, err := ParseRange(s) - if err != nil { - panic(`semver: ParseRange(` + s + `): ` + err.Error()) - } - return r -} diff --git a/vendor/github.com/blang/semver/semver.go b/vendor/github.com/blang/semver/semver.go deleted file mode 100644 index 8ee0842e6a..0000000000 --- a/vendor/github.com/blang/semver/semver.go +++ /dev/null @@ -1,418 +0,0 @@ -package semver - -import ( - "errors" - "fmt" - "strconv" - "strings" -) - -const ( - numbers string = "0123456789" - alphas = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-" - alphanum = alphas + numbers -) - -// SpecVersion is the latest fully supported spec version of semver -var SpecVersion = Version{ - Major: 2, - Minor: 0, - Patch: 0, -} - -// Version represents a semver compatible version -type Version struct { - Major uint64 - Minor uint64 - Patch uint64 - Pre []PRVersion - Build []string //No Precendence -} - -// Version to string -func (v Version) String() string { - b := make([]byte, 0, 5) - b = strconv.AppendUint(b, v.Major, 10) - b = append(b, '.') - b = strconv.AppendUint(b, v.Minor, 10) - b = append(b, '.') - b = strconv.AppendUint(b, v.Patch, 10) - - if len(v.Pre) > 0 { - b = append(b, '-') - b = append(b, v.Pre[0].String()...) - - for _, pre := range v.Pre[1:] { - b = append(b, '.') - b = append(b, pre.String()...) 
- } - } - - if len(v.Build) > 0 { - b = append(b, '+') - b = append(b, v.Build[0]...) - - for _, build := range v.Build[1:] { - b = append(b, '.') - b = append(b, build...) - } - } - - return string(b) -} - -// Equals checks if v is equal to o. -func (v Version) Equals(o Version) bool { - return (v.Compare(o) == 0) -} - -// EQ checks if v is equal to o. -func (v Version) EQ(o Version) bool { - return (v.Compare(o) == 0) -} - -// NE checks if v is not equal to o. -func (v Version) NE(o Version) bool { - return (v.Compare(o) != 0) -} - -// GT checks if v is greater than o. -func (v Version) GT(o Version) bool { - return (v.Compare(o) == 1) -} - -// GTE checks if v is greater than or equal to o. -func (v Version) GTE(o Version) bool { - return (v.Compare(o) >= 0) -} - -// GE checks if v is greater than or equal to o. -func (v Version) GE(o Version) bool { - return (v.Compare(o) >= 0) -} - -// LT checks if v is less than o. -func (v Version) LT(o Version) bool { - return (v.Compare(o) == -1) -} - -// LTE checks if v is less than or equal to o. -func (v Version) LTE(o Version) bool { - return (v.Compare(o) <= 0) -} - -// LE checks if v is less than or equal to o. -func (v Version) LE(o Version) bool { - return (v.Compare(o) <= 0) -} - -// Compare compares Versions v to o: -// -1 == v is less than o -// 0 == v is equal to o -// 1 == v is greater than o -func (v Version) Compare(o Version) int { - if v.Major != o.Major { - if v.Major > o.Major { - return 1 - } - return -1 - } - if v.Minor != o.Minor { - if v.Minor > o.Minor { - return 1 - } - return -1 - } - if v.Patch != o.Patch { - if v.Patch > o.Patch { - return 1 - } - return -1 - } - - // Quick comparison if a version has no prerelease versions - if len(v.Pre) == 0 && len(o.Pre) == 0 { - return 0 - } else if len(v.Pre) == 0 && len(o.Pre) > 0 { - return 1 - } else if len(v.Pre) > 0 && len(o.Pre) == 0 { - return -1 - } - - i := 0 - for ; i < len(v.Pre) && i < len(o.Pre); i++ { - if comp := v.Pre[i].Compare(o.Pre[i]); comp == 0 { - continue - } else if comp == 1 { - return 1 - } else { - return -1 - } - } - - // If all pr versions are the equal but one has further prversion, this one greater - if i == len(v.Pre) && i == len(o.Pre) { - return 0 - } else if i == len(v.Pre) && i < len(o.Pre) { - return -1 - } else { - return 1 - } - -} - -// Validate validates v and returns error in case -func (v Version) Validate() error { - // Major, Minor, Patch already validated using uint64 - - for _, pre := range v.Pre { - if !pre.IsNum { //Numeric prerelease versions already uint64 - if len(pre.VersionStr) == 0 { - return fmt.Errorf("Prerelease can not be empty %q", pre.VersionStr) - } - if !containsOnly(pre.VersionStr, alphanum) { - return fmt.Errorf("Invalid character(s) found in prerelease %q", pre.VersionStr) - } - } - } - - for _, build := range v.Build { - if len(build) == 0 { - return fmt.Errorf("Build meta data can not be empty %q", build) - } - if !containsOnly(build, alphanum) { - return fmt.Errorf("Invalid character(s) found in build meta data %q", build) - } - } - - return nil -} - -// New is an alias for Parse and returns a pointer, parses version string and returns a validated Version or error -func New(s string) (vp *Version, err error) { - v, err := Parse(s) - vp = &v - return -} - -// Make is an alias for Parse, parses version string and returns a validated Version or error -func Make(s string) (Version, error) { - return Parse(s) -} - -// ParseTolerant allows for certain version specifications that do not strictly adhere to semver -// 
specs to be parsed by this library. It does so by normalizing versions before passing them to -// Parse(). It currently trims spaces, removes a "v" prefix, and adds a 0 patch number to versions -// with only major and minor components specified -func ParseTolerant(s string) (Version, error) { - s = strings.TrimSpace(s) - s = strings.TrimPrefix(s, "v") - - // Split into major.minor.(patch+pr+meta) - parts := strings.SplitN(s, ".", 3) - if len(parts) < 3 { - if strings.ContainsAny(parts[len(parts)-1], "+-") { - return Version{}, errors.New("Short version cannot contain PreRelease/Build meta data") - } - for len(parts) < 3 { - parts = append(parts, "0") - } - s = strings.Join(parts, ".") - } - - return Parse(s) -} - -// Parse parses version string and returns a validated Version or error -func Parse(s string) (Version, error) { - if len(s) == 0 { - return Version{}, errors.New("Version string empty") - } - - // Split into major.minor.(patch+pr+meta) - parts := strings.SplitN(s, ".", 3) - if len(parts) != 3 { - return Version{}, errors.New("No Major.Minor.Patch elements found") - } - - // Major - if !containsOnly(parts[0], numbers) { - return Version{}, fmt.Errorf("Invalid character(s) found in major number %q", parts[0]) - } - if hasLeadingZeroes(parts[0]) { - return Version{}, fmt.Errorf("Major number must not contain leading zeroes %q", parts[0]) - } - major, err := strconv.ParseUint(parts[0], 10, 64) - if err != nil { - return Version{}, err - } - - // Minor - if !containsOnly(parts[1], numbers) { - return Version{}, fmt.Errorf("Invalid character(s) found in minor number %q", parts[1]) - } - if hasLeadingZeroes(parts[1]) { - return Version{}, fmt.Errorf("Minor number must not contain leading zeroes %q", parts[1]) - } - minor, err := strconv.ParseUint(parts[1], 10, 64) - if err != nil { - return Version{}, err - } - - v := Version{} - v.Major = major - v.Minor = minor - - var build, prerelease []string - patchStr := parts[2] - - if buildIndex := strings.IndexRune(patchStr, '+'); buildIndex != -1 { - build = strings.Split(patchStr[buildIndex+1:], ".") - patchStr = patchStr[:buildIndex] - } - - if preIndex := strings.IndexRune(patchStr, '-'); preIndex != -1 { - prerelease = strings.Split(patchStr[preIndex+1:], ".") - patchStr = patchStr[:preIndex] - } - - if !containsOnly(patchStr, numbers) { - return Version{}, fmt.Errorf("Invalid character(s) found in patch number %q", patchStr) - } - if hasLeadingZeroes(patchStr) { - return Version{}, fmt.Errorf("Patch number must not contain leading zeroes %q", patchStr) - } - patch, err := strconv.ParseUint(patchStr, 10, 64) - if err != nil { - return Version{}, err - } - - v.Patch = patch - - // Prerelease - for _, prstr := range prerelease { - parsedPR, err := NewPRVersion(prstr) - if err != nil { - return Version{}, err - } - v.Pre = append(v.Pre, parsedPR) - } - - // Build meta data - for _, str := range build { - if len(str) == 0 { - return Version{}, errors.New("Build meta data is empty") - } - if !containsOnly(str, alphanum) { - return Version{}, fmt.Errorf("Invalid character(s) found in build meta data %q", str) - } - v.Build = append(v.Build, str) - } - - return v, nil -} - -// MustParse is like Parse but panics if the version cannot be parsed. 
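The ParseTolerant/Parse pair deleted above carries over into the blang/semver/v4 module that replaces it in this vendor bump. A minimal sketch of the difference between the two entry points, assuming the v4 import path that the version.go hunk later in this diff switches to:

```go
package main

import (
	"fmt"

	semver "github.com/blang/semver/v4"
)

func main() {
	// Strict parsing requires a full MAJOR.MINOR.PATCH version.
	if _, err := semver.Parse("1.2"); err != nil {
		fmt.Println("Parse rejects short versions:", err)
	}

	// ParseTolerant trims whitespace, strips a leading "v", and pads a
	// missing patch (or minor) component with zeros before parsing.
	v, err := semver.ParseTolerant("v1.2")
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // 1.2.0
}
```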
-func MustParse(s string) Version { - v, err := Parse(s) - if err != nil { - panic(`semver: Parse(` + s + `): ` + err.Error()) - } - return v -} - -// PRVersion represents a PreRelease Version -type PRVersion struct { - VersionStr string - VersionNum uint64 - IsNum bool -} - -// NewPRVersion creates a new valid prerelease version -func NewPRVersion(s string) (PRVersion, error) { - if len(s) == 0 { - return PRVersion{}, errors.New("Prerelease is empty") - } - v := PRVersion{} - if containsOnly(s, numbers) { - if hasLeadingZeroes(s) { - return PRVersion{}, fmt.Errorf("Numeric PreRelease version must not contain leading zeroes %q", s) - } - num, err := strconv.ParseUint(s, 10, 64) - - // Might never be hit, but just in case - if err != nil { - return PRVersion{}, err - } - v.VersionNum = num - v.IsNum = true - } else if containsOnly(s, alphanum) { - v.VersionStr = s - v.IsNum = false - } else { - return PRVersion{}, fmt.Errorf("Invalid character(s) found in prerelease %q", s) - } - return v, nil -} - -// IsNumeric checks if prerelease-version is numeric -func (v PRVersion) IsNumeric() bool { - return v.IsNum -} - -// Compare compares two PreRelease Versions v and o: -// -1 == v is less than o -// 0 == v is equal to o -// 1 == v is greater than o -func (v PRVersion) Compare(o PRVersion) int { - if v.IsNum && !o.IsNum { - return -1 - } else if !v.IsNum && o.IsNum { - return 1 - } else if v.IsNum && o.IsNum { - if v.VersionNum == o.VersionNum { - return 0 - } else if v.VersionNum > o.VersionNum { - return 1 - } else { - return -1 - } - } else { // both are Alphas - if v.VersionStr == o.VersionStr { - return 0 - } else if v.VersionStr > o.VersionStr { - return 1 - } else { - return -1 - } - } -} - -// PreRelease version to string -func (v PRVersion) String() string { - if v.IsNum { - return strconv.FormatUint(v.VersionNum, 10) - } - return v.VersionStr -} - -func containsOnly(s string, set string) bool { - return strings.IndexFunc(s, func(r rune) bool { - return !strings.ContainsRune(set, r) - }) == -1 -} - -func hasLeadingZeroes(s string) bool { - return len(s) > 1 && s[0] == '0' -} - -// NewBuildVersion creates a new valid build version -func NewBuildVersion(s string) (string, error) { - if len(s) == 0 { - return "", errors.New("Buildversion is empty") - } - if !containsOnly(s, alphanum) { - return "", fmt.Errorf("Invalid character(s) found in build meta data %q", s) - } - return s, nil -} diff --git a/vendor/github.com/blang/semver/sort.go b/vendor/github.com/blang/semver/sort.go deleted file mode 100644 index e18f880826..0000000000 --- a/vendor/github.com/blang/semver/sort.go +++ /dev/null @@ -1,28 +0,0 @@ -package semver - -import ( - "sort" -) - -// Versions represents multiple versions. 
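The precedence rules implemented by Version.Compare and PRVersion.Compare above (numeric pre-release identifiers compare numerically, alphanumeric ones lexically, and a release outranks all of its pre-releases) are easiest to see through the sort and range helpers, which the v4 module also keeps. A short sketch, again assuming blang/semver/v4:

```go
package main

import (
	"fmt"

	semver "github.com/blang/semver/v4"
)

func main() {
	versions := []semver.Version{
		semver.MustParse("1.0.0"),
		semver.MustParse("1.0.0-beta.2"),
		semver.MustParse("1.0.0-alpha"),
		semver.MustParse("1.0.0-beta.11"),
	}

	// Sort applies the precedence rules documented above; note beta.2
	// sorts before beta.11 because the identifiers compare numerically.
	semver.Sort(versions)
	fmt.Println(versions) // [1.0.0-alpha 1.0.0-beta.2 1.0.0-beta.11 1.0.0]

	// Ranges (including the wildcard forms expanded earlier in this diff)
	// are predicates over versions.
	stable := semver.MustParseRange(">=1.0.0 <2.0.0")
	fmt.Println(stable(versions[len(versions)-1])) // true
}
```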
-type Versions []Version - -// Len returns length of version collection -func (s Versions) Len() int { - return len(s) -} - -// Swap swaps two versions inside the collection by its indices -func (s Versions) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -// Less checks if version at index i is less than version at index j -func (s Versions) Less(i, j int) bool { - return s[i].LT(s[j]) -} - -// Sort sorts a slice of versions -func Sort(versions []Version) { - sort.Sort(Versions(versions)) -} diff --git a/vendor/github.com/blang/semver/sql.go b/vendor/github.com/blang/semver/sql.go deleted file mode 100644 index eb4d802666..0000000000 --- a/vendor/github.com/blang/semver/sql.go +++ /dev/null @@ -1,30 +0,0 @@ -package semver - -import ( - "database/sql/driver" - "fmt" -) - -// Scan implements the database/sql.Scanner interface. -func (v *Version) Scan(src interface{}) (err error) { - var str string - switch src := src.(type) { - case string: - str = src - case []byte: - str = string(src) - default: - return fmt.Errorf("Version.Scan: cannot convert %T to string.", src) - } - - if t, err := Parse(str); err == nil { - *v = t - } - - return -} - -// Value implements the database/sql/driver.Valuer interface. -func (v Version) Value() (driver.Value, error) { - return v.String(), nil -} diff --git a/vendor/github.com/operator-framework/api/LICENSE b/vendor/github.com/operator-framework/api/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/github.com/operator-framework/api/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
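For consumers, the practical effect of dropping blang/semver v3 from vendor/ is the import-path change applied in the version.go hunk just below; call sites stay as they are. A sketch of the before/after, with an illustrative wrapper type (the type name is not the upstream one):

```go
package example

// Before this bump the wrapper imported the v3 module:
//
//	import "github.com/blang/semver"
//
// After it, only the aliased import changes; Parse, Compare, and String
// behave the same between v3 and v4, so existing call sites keep working.
import (
	semver "github.com/blang/semver/v4"
)

// VersionWrapperSketch is illustrative only: it mirrors the embed-the-Version
// pattern used by the version package touched below, so the semver comparison
// helpers remain available on the wrapping type.
type VersionWrapperSketch struct {
	semver.Version
}
```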
diff --git a/vendor/github.com/operator-framework/api/pkg/lib/version/version.go b/vendor/github.com/operator-framework/api/pkg/lib/version/version.go index 713c5779bb..a0ffb9fcbe 100644 --- a/vendor/github.com/operator-framework/api/pkg/lib/version/version.go +++ b/vendor/github.com/operator-framework/api/pkg/lib/version/version.go @@ -3,7 +3,7 @@ package version import ( "encoding/json" - "github.com/blang/semver" + semver "github.com/blang/semver/v4" ) // +k8s:openapi-gen=true diff --git a/vendor/github.com/operator-framework/api/pkg/operators/catalogsource_types.go b/vendor/github.com/operator-framework/api/pkg/operators/catalogsource_types.go deleted file mode 100644 index ff810f550a..0000000000 --- a/vendor/github.com/operator-framework/api/pkg/operators/catalogsource_types.go +++ /dev/null @@ -1,142 +0,0 @@ -package operators - -import ( - "fmt" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" -) - -// CatalogSourceKind is the PascalCase name of a CatalogSource's kind. -const CatalogSourceKind = "CatalogSource" - -// SourceType indicates the type of backing store for a CatalogSource -type SourceType string - -const ( - // SourceTypeInternal (deprecated) specifies a CatalogSource of type SourceTypeConfigmap - SourceTypeInternal SourceType = "internal" - - // SourceTypeConfigmap specifies a CatalogSource that generates a configmap-server registry - SourceTypeConfigmap SourceType = "configmap" - - // SourceTypeGrpc specifies a CatalogSource that can use an operator registry image to generate a - // registry-server or connect to a pre-existing registry at an address. - SourceTypeGrpc SourceType = "grpc" -) - -type CatalogSourceSpec struct { - // SourceType is the type of source - SourceType SourceType - - // ConfigMap is the name of the ConfigMap to be used to back a configmap-server registry. - // Only used when SourceType = SourceTypeConfigmap or SourceTypeInternal. - // +Optional - ConfigMap string - - // Address is a host that OLM can use to connect to a pre-existing registry. - // Format: : - // Only used when SourceType = SourceTypeGrpc. - // Ignored when the Image field is set. - // +Optional - Address string - - // Image is an operator-registry container image to instantiate a registry-server with. - // Only used when SourceType = SourceTypeGrpc. - // If present, the address field is ignored. - // +Optional - Image string - - // UpdateStrategy defines how updated catalog source images can be discovered - // Consists of an interval that defines polling duration and an embedded strategy type - // +Optional - UpdateStrategy *UpdateStrategy - - // Secrets represent set of secrets that can be used to access the contents of the catalog. - // It is best to keep this list small, since each will need to be tried for every catalog entry. - // +Optional - Secrets []string - - // Metadata - DisplayName string - Description string - Publisher string - Icon Icon -} - -// UpdateStrategy holds all the different types of catalog source update strategies -// Currently only registry polling strategy is implemented -type UpdateStrategy struct { - *RegistryPoll -} - -type RegistryPoll struct { - // Interval is used to determine the time interval between checks of the latest catalog source version. - // The catalog operator polls to see if a new version of the catalog source is available. - // If available, the latest image is pulled and gRPC traffic is directed to the latest catalog source. 
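The RegistryPoll fields above describe how OLM re-pulls a grpc catalog image on an interval. As a usage sketch, here is roughly what a polled CatalogSource looks like when built with the external operators.coreos.com/v1alpha1 Go types from the newly vendored operator-framework/api; the v1alpha1 package, image reference, and names are assumptions mirroring the internal structs shown here, not part of this diff:

```go
package example

import (
	"time"

	operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// newPolledCatalogSource sketches a grpc-type CatalogSource whose index
// image is re-pulled on an interval, i.e. the RegistryPoll behaviour
// described by the deleted internal type above. Image and names are
// placeholders.
func newPolledCatalogSource() *operatorsv1alpha1.CatalogSource {
	return &operatorsv1alpha1.CatalogSource{
		ObjectMeta: metav1.ObjectMeta{Name: "example-catalog", Namespace: "olm"},
		Spec: operatorsv1alpha1.CatalogSourceSpec{
			SourceType: operatorsv1alpha1.SourceTypeGrpc,
			Image:      "quay.io/example/catalog-index:latest",
			UpdateStrategy: &operatorsv1alpha1.UpdateStrategy{
				RegistryPoll: &operatorsv1alpha1.RegistryPoll{
					Interval: &metav1.Duration{Duration: 30 * time.Minute},
				},
			},
		},
	}
}
```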
- Interval *metav1.Duration -} - -type RegistryServiceStatus struct { - Protocol string - ServiceName string - ServiceNamespace string - Port string - CreatedAt metav1.Time -} - -type GRPCConnectionState struct { - Address string - LastObservedState string - LastConnectTime metav1.Time -} - -func (s *RegistryServiceStatus) Address() string { - return fmt.Sprintf("%s.%s.svc:%s", s.ServiceName, s.ServiceNamespace, s.Port) -} - -type CatalogSourceStatus struct { - Message string `json:"message,omitempty"` - Reason ConditionReason `json:"reason,omitempty"` - ConfigMapResource *ConfigMapResourceReference - RegistryServiceStatus *RegistryServiceStatus - GRPCConnectionState *GRPCConnectionState - LatestImageRegistryPoll *metav1.Time -} - -type ConfigMapResourceReference struct { - Name string - Namespace string - UID types.UID - ResourceVersion string - LastUpdateTime metav1.Time -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +genclient - -// CatalogSource is a repository of CSVs, CRDs, and operator packages. -type CatalogSource struct { - metav1.TypeMeta - metav1.ObjectMeta - - Spec CatalogSourceSpec - Status CatalogSourceStatus -} - -func (c *CatalogSource) Address() string { - if c.Spec.Address != "" { - return c.Spec.Address - } - return c.Status.RegistryServiceStatus.Address() -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// CatalogSourceList is a list of CatalogSource resources. -type CatalogSourceList struct { - metav1.TypeMeta - metav1.ListMeta - - Items []CatalogSource -} diff --git a/vendor/github.com/operator-framework/api/pkg/operators/clusterserviceversion_types.go b/vendor/github.com/operator-framework/api/pkg/operators/clusterserviceversion_types.go deleted file mode 100644 index fded5c3cb6..0000000000 --- a/vendor/github.com/operator-framework/api/pkg/operators/clusterserviceversion_types.go +++ /dev/null @@ -1,516 +0,0 @@ -package operators - -import ( - "encoding/json" - "fmt" - "sort" - - appsv1 "k8s.io/api/apps/v1" - rbac "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/operator-framework/api/pkg/lib/version" -) - -// ClusterServiceVersionKind is the PascalCase name of a CSV's kind. -const ClusterServiceVersionKind = "ClusterServiceVersion" - -// InstallModeType is a supported type of install mode for CSV installation -type InstallModeType string - -const ( - // InstallModeTypeOwnNamespace indicates that the operator can be a member of an `OperatorGroup` that selects its own namespace. - InstallModeTypeOwnNamespace InstallModeType = "OwnNamespace" - // InstallModeTypeSingleNamespace indicates that the operator can be a member of an `OperatorGroup` that selects one namespace. - InstallModeTypeSingleNamespace InstallModeType = "SingleNamespace" - // InstallModeTypeMultiNamespace indicates that the operator can be a member of an `OperatorGroup` that selects more than one namespace. - InstallModeTypeMultiNamespace InstallModeType = "MultiNamespace" - // InstallModeTypeAllNamespaces indicates that the operator can be a member of an `OperatorGroup` that selects all namespaces (target namespace set is the empty string ""). 
- InstallModeTypeAllNamespaces InstallModeType = "AllNamespaces" - - InstallStrategyNameDeployment = "deployment" -) - -// InstallMode associates an InstallModeType with a flag representing if the CSV supports it -type InstallMode struct { - Type InstallModeType - Supported bool -} - -// InstallModeSet is a mapping of unique InstallModeTypes to whether they are supported. -type InstallModeSet map[InstallModeType]bool - -// NamedInstallStrategy represents the block of an ClusterServiceVersion resource -// where the install strategy is specified. -type NamedInstallStrategy struct { - StrategyName string - StrategySpec StrategyDetailsDeployment -} - -// StrategyDeploymentPermissions describe the rbac rules and service account needed by the install strategy -type StrategyDeploymentPermissions struct { - ServiceAccountName string - Rules []rbac.PolicyRule -} - -// StrategyDeploymentSpec contains the name and spec for the deployment ALM should create -type StrategyDeploymentSpec struct { - Name string - Spec appsv1.DeploymentSpec -} - -// StrategyDetailsDeployment represents the parsed details of a Deployment -// InstallStrategy. -type StrategyDetailsDeployment struct { - DeploymentSpecs []StrategyDeploymentSpec - Permissions []StrategyDeploymentPermissions - ClusterPermissions []StrategyDeploymentPermissions -} - -func (d *StrategyDetailsDeployment) GetStrategyName() string { - return InstallStrategyNameDeployment -} - -// StatusDescriptor describes a field in a status block of a CRD so that OLM can consume it -type StatusDescriptor struct { - Path string - DisplayName string - Description string - XDescriptors []string - Value json.RawMessage -} - -// SpecDescriptor describes a field in a spec block of a CRD so that OLM can consume it -type SpecDescriptor struct { - Path string - DisplayName string - Description string - XDescriptors []string - Value json.RawMessage -} - -// ActionDescriptor describes a declarative action that can be performed on a custom resource instance -type ActionDescriptor struct { - Path string - DisplayName string - Description string - XDescriptors []string - Value json.RawMessage -} - -// CRDDescription provides details to OLM about the CRDs -type CRDDescription struct { - Name string - Version string - Kind string - DisplayName string - Description string - Resources []APIResourceReference - StatusDescriptors []StatusDescriptor - SpecDescriptors []SpecDescriptor - ActionDescriptor []ActionDescriptor -} - -// APIServiceDescription provides details to OLM about apis provided via aggregation -type APIServiceDescription struct { - Name string - Group string - Version string - Kind string - DeploymentName string - ContainerPort int32 - DisplayName string - Description string - Resources []APIResourceReference - StatusDescriptors []StatusDescriptor - SpecDescriptors []SpecDescriptor - ActionDescriptor []ActionDescriptor -} - -// APIResourceReference is a Kubernetes resource type used by a custom resource -type APIResourceReference struct { - Name string - Kind string - Version string -} - -// GetName returns the name of an APIService as derived from its group and version. -func (d APIServiceDescription) GetName() string { - return fmt.Sprintf("%s.%s", d.Version, d.Group) -} - -// CustomResourceDefinitions declares all of the CRDs managed or required by -// an operator being ran by ClusterServiceVersion. -// -// If the CRD is present in the Owned list, it is implicitly required. 
-type CustomResourceDefinitions struct { - Owned []CRDDescription - Required []CRDDescription -} - -// APIServiceDefinitions declares all of the extension apis managed or required by -// an operator being ran by ClusterServiceVersion. -type APIServiceDefinitions struct { - Owned []APIServiceDescription - Required []APIServiceDescription -} - -// ClusterServiceVersionSpec declarations tell OLM how to install an operator -// that can manage apps for a given version. -type ClusterServiceVersionSpec struct { - InstallStrategy NamedInstallStrategy - Version version.OperatorVersion - Maturity string - CustomResourceDefinitions CustomResourceDefinitions - APIServiceDefinitions APIServiceDefinitions - NativeAPIs []metav1.GroupVersionKind - MinKubeVersion string - DisplayName string - Description string - Keywords []string - Maintainers []Maintainer - Provider AppLink - Links []AppLink - Icon []Icon - - // InstallModes specify supported installation types - // +optional - InstallModes []InstallMode - - // The name of a CSV this one replaces. Should match the `metadata.Name` field of the old CSV. - // +optional - Replaces string - - // Map of string keys and values that can be used to organize and categorize - // (scope and select) objects. - // +optional - Labels map[string]string - - // Annotations is an unstructured key value map stored with a resource that may be - // set by external tools to store and retrieve arbitrary metadata. - // +optional - Annotations map[string]string - - // Label selector for related resources. - // +optional - Selector *metav1.LabelSelector -} - -type Maintainer struct { - Name string - Email string -} - -type AppLink struct { - Name string - URL string -} - -type Icon struct { - Data string - MediaType string -} - -// ClusterServiceVersionPhase is a label for the condition of a ClusterServiceVersion at the current time. -type ClusterServiceVersionPhase string - -// These are the valid phases of ClusterServiceVersion -const ( - CSVPhaseNone = "" - // CSVPhasePending means the csv has been accepted by the system, but the install strategy has not been attempted. - // This is likely because there are unmet requirements. - CSVPhasePending ClusterServiceVersionPhase = "Pending" - // CSVPhaseInstallReady means that the requirements are met but the install strategy has not been run. - CSVPhaseInstallReady ClusterServiceVersionPhase = "InstallReady" - // CSVPhaseInstalling means that the install strategy has been initiated but not completed. - CSVPhaseInstalling ClusterServiceVersionPhase = "Installing" - // CSVPhaseSucceeded means that the resources in the CSV were created successfully. - CSVPhaseSucceeded ClusterServiceVersionPhase = "Succeeded" - // CSVPhaseFailed means that the install strategy could not be successfully completed. - CSVPhaseFailed ClusterServiceVersionPhase = "Failed" - // CSVPhaseUnknown means that for some reason the state of the csv could not be obtained. - CSVPhaseUnknown ClusterServiceVersionPhase = "Unknown" - // CSVPhaseReplacing means that a newer CSV has been created and the csv's resources will be transitioned to a new owner. 
- CSVPhaseReplacing ClusterServiceVersionPhase = "Replacing" - // CSVPhaseDeleting means that a CSV has been replaced by a new one and will be checked for safety before being deleted - CSVPhaseDeleting ClusterServiceVersionPhase = "Deleting" - // CSVPhaseAny matches all other phases in CSV queries - CSVPhaseAny ClusterServiceVersionPhase = "" -) - -// ConditionReason is a camelcased reason for the state transition -type ConditionReason string - -const ( - CSVReasonRequirementsUnknown ConditionReason = "RequirementsUnknown" - CSVReasonRequirementsNotMet ConditionReason = "RequirementsNotMet" - CSVReasonRequirementsMet ConditionReason = "AllRequirementsMet" - CSVReasonOwnerConflict ConditionReason = "OwnerConflict" - CSVReasonComponentFailed ConditionReason = "InstallComponentFailed" - CSVReasonInvalidStrategy ConditionReason = "InvalidInstallStrategy" - CSVReasonWaiting ConditionReason = "InstallWaiting" - CSVReasonInstallSuccessful ConditionReason = "InstallSucceeded" - CSVReasonInstallCheckFailed ConditionReason = "InstallCheckFailed" - CSVReasonComponentUnhealthy ConditionReason = "ComponentUnhealthy" - CSVReasonBeingReplaced ConditionReason = "BeingReplaced" - CSVReasonReplaced ConditionReason = "Replaced" - CSVReasonNeedsReinstall ConditionReason = "NeedsReinstall" - CSVReasonNeedsCertRotation ConditionReason = "NeedsCertRotation" - CSVReasonAPIServiceResourceIssue ConditionReason = "APIServiceResourceIssue" - CSVReasonAPIServiceResourcesNeedReinstall ConditionReason = "APIServiceResourcesNeedReinstall" - CSVReasonAPIServiceInstallFailed ConditionReason = "APIServiceInstallFailed" - CSVReasonCopied ConditionReason = "Copied" - CSVReasonInvalidInstallModes ConditionReason = "InvalidInstallModes" - CSVReasonNoTargetNamespaces ConditionReason = "NoTargetNamespaces" - CSVReasonUnsupportedOperatorGroup ConditionReason = "UnsupportedOperatorGroup" - CSVReasonNoOperatorGroup ConditionReason = "NoOperatorGroup" - CSVReasonTooManyOperatorGroups ConditionReason = "TooManyOperatorGroups" - CSVReasonInterOperatorGroupOwnerConflict ConditionReason = "InterOperatorGroupOwnerConflict" - CSVReasonCannotModifyStaticOperatorGroupProvidedAPIs ConditionReason = "CannotModifyStaticOperatorGroupProvidedAPIs" -) - -// Conditions appear in the status as a record of state transitions on the ClusterServiceVersion -type ClusterServiceVersionCondition struct { - // Condition of the ClusterServiceVersion - Phase ClusterServiceVersionPhase - // A human readable message indicating details about why the ClusterServiceVersion is in this condition. - // +optional - Message string - // A brief CamelCase message indicating details about why the ClusterServiceVersion is in this state. - // e.g. 'RequirementsNotMet' - // +optional - Reason ConditionReason - // Last time we updated the status - // +optional - LastUpdateTime *metav1.Time - // Last time the status transitioned from one status to another. - // +optional - LastTransitionTime *metav1.Time -} - -// OwnsCRD determines whether the current CSV owns a paritcular CRD. -func (csv ClusterServiceVersion) OwnsCRD(name string) bool { - for _, desc := range csv.Spec.CustomResourceDefinitions.Owned { - if desc.Name == name { - return true - } - } - - return false -} - -// OwnsAPIService determines whether the current CSV owns a paritcular APIService. 
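The phase and condition-reason constants above drive how callers decide whether a CSV install has finished. A hedged sketch using the v1alpha1 mirrors of these constants (the external package is an assumption here; this diff only removes the internal copy):

```go
package example

import (
	operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
)

// csvInstallFinished reports whether a ClusterServiceVersion has reached a
// terminal phase: Succeeded means the install strategy completed, Failed
// means it cannot progress without intervention. Any other phase is still
// in flight.
func csvInstallFinished(csv *operatorsv1alpha1.ClusterServiceVersion) (done, succeeded bool) {
	switch csv.Status.Phase {
	case operatorsv1alpha1.CSVPhaseSucceeded:
		return true, true
	case operatorsv1alpha1.CSVPhaseFailed:
		return true, false
	default:
		return false, false
	}
}
```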
-func (csv ClusterServiceVersion) OwnsAPIService(name string) bool { - for _, desc := range csv.Spec.APIServiceDefinitions.Owned { - apiServiceName := fmt.Sprintf("%s.%s", desc.Version, desc.Group) - if apiServiceName == name { - return true - } - } - - return false -} - -// StatusReason is a camelcased reason for the status of a RequirementStatus or DependentStatus -type StatusReason string - -const ( - RequirementStatusReasonPresent StatusReason = "Present" - RequirementStatusReasonNotPresent StatusReason = "NotPresent" - RequirementStatusReasonPresentNotSatisfied StatusReason = "PresentNotSatisfied" - // The CRD is present but the Established condition is False (not available) - RequirementStatusReasonNotAvailable StatusReason = "PresentNotAvailable" - DependentStatusReasonSatisfied StatusReason = "Satisfied" - DependentStatusReasonNotSatisfied StatusReason = "NotSatisfied" -) - -// DependentStatus is the status for a dependent requirement (to prevent infinite nesting) -type DependentStatus struct { - Group string - Version string - Kind string - Status StatusReason - UUID string - Message string -} - -type RequirementStatus struct { - Group string - Version string - Kind string - Name string - Status StatusReason - Message string - UUID string - Dependents []DependentStatus -} - -// ClusterServiceVersionStatus represents information about the status of a pod. Status may trail the actual -// state of a system. -type ClusterServiceVersionStatus struct { - // Current condition of the ClusterServiceVersion - Phase ClusterServiceVersionPhase - // A human readable message indicating details about why the ClusterServiceVersion is in this condition. - // +optional - Message string - // A brief CamelCase message indicating details about why the ClusterServiceVersion is in this state. - // e.g. 'RequirementsNotMet' - // +optional - Reason ConditionReason - // Last time we updated the status - // +optional - LastUpdateTime *metav1.Time - // Last time the status transitioned from one status to another. - // +optional - LastTransitionTime *metav1.Time - // List of conditions, a history of state transitions - Conditions []ClusterServiceVersionCondition - // The status of each requirement for this CSV - RequirementStatus []RequirementStatus - // Last time the owned APIService certs were updated - // +optional - CertsLastUpdated *metav1.Time - // Time the owned APIService certs will rotate next - // +optional - CertsRotateAt *metav1.Time -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +genclient - -// ClusterServiceVersion is a Custom Resource of type `ClusterServiceVersionSpec`. -type ClusterServiceVersion struct { - metav1.TypeMeta - metav1.ObjectMeta - - Spec ClusterServiceVersionSpec - Status ClusterServiceVersionStatus -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ClusterServiceVersionList represents a list of ClusterServiceVersions. -type ClusterServiceVersionList struct { - metav1.TypeMeta - metav1.ListMeta - - Items []ClusterServiceVersion -} - -// GetAllCRDDescriptions returns a deduplicated set of CRDDescriptions that is -// the union of the owned and required CRDDescriptions. -// -// Descriptions with the same name prefer the value in Owned. -// Descriptions are returned in alphabetical order. 
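GetAllCRDDescriptions and the APIService variants below all follow the union rule spelled out in the comment above: required entries go in first, owned entries win on name collisions, and the output comes back in alphabetical order. A self-contained sketch of that pattern with stand-in types:

```go
package example

import "sort"

// desc stands in for the CRDDescription/APIServiceDescription types above;
// entries are keyed by Name.
type desc struct {
	Name        string
	DisplayName string
}

// mergeDescriptions mirrors the union logic of GetAllCRDDescriptions:
// required entries are inserted first, owned entries overwrite a required
// entry with the same Name, and the result is returned alphabetically.
func mergeDescriptions(owned, required []desc) []desc {
	set := make(map[string]desc)
	for _, d := range required {
		set[d.Name] = d
	}
	for _, d := range owned {
		set[d.Name] = d // owned wins on name collisions
	}

	keys := make([]string, 0, len(set))
	for k := range set {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	out := make([]desc, 0, len(keys))
	for _, k := range keys {
		out = append(out, set[k])
	}
	return out
}
```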
-func (csv ClusterServiceVersion) GetAllCRDDescriptions() []CRDDescription { - set := make(map[string]CRDDescription) - for _, required := range csv.Spec.CustomResourceDefinitions.Required { - set[required.Name] = required - } - - for _, owned := range csv.Spec.CustomResourceDefinitions.Owned { - set[owned.Name] = owned - } - - keys := make([]string, 0) - for key := range set { - keys = append(keys, key) - } - sort.StringSlice(keys).Sort() - - descs := make([]CRDDescription, 0) - for _, key := range keys { - descs = append(descs, set[key]) - } - - return descs -} - -// GetAllAPIServiceDescriptions returns a deduplicated set of APIServiceDescriptions that is -// the union of the owned and required APIServiceDescriptions. -// -// Descriptions with the same name prefer the value in Owned. -// Descriptions are returned in alphabetical order. -func (csv ClusterServiceVersion) GetAllAPIServiceDescriptions() []APIServiceDescription { - set := make(map[string]APIServiceDescription) - for _, required := range csv.Spec.APIServiceDefinitions.Required { - name := fmt.Sprintf("%s.%s", required.Version, required.Group) - set[name] = required - } - - for _, owned := range csv.Spec.APIServiceDefinitions.Owned { - name := fmt.Sprintf("%s.%s", owned.Version, owned.Group) - set[name] = owned - } - - keys := make([]string, 0) - for key := range set { - keys = append(keys, key) - } - sort.StringSlice(keys).Sort() - - descs := make([]APIServiceDescription, 0) - for _, key := range keys { - descs = append(descs, set[key]) - } - - return descs -} - -// GetRequiredAPIServiceDescriptions returns a deduplicated set of required APIServiceDescriptions -// with the intersection of required and owned removed -// Equivalent to the set subtraction required - owned -// -// Descriptions are returned in alphabetical order. -func (csv ClusterServiceVersion) GetRequiredAPIServiceDescriptions() []APIServiceDescription { - set := make(map[string]APIServiceDescription) - for _, required := range csv.Spec.APIServiceDefinitions.Required { - name := fmt.Sprintf("%s.%s", required.Version, required.Group) - set[name] = required - } - - // Remove any shared owned from the set - for _, owned := range csv.Spec.APIServiceDefinitions.Owned { - name := fmt.Sprintf("%s.%s", owned.Version, owned.Group) - if _, ok := set[name]; ok { - delete(set, name) - } - } - - keys := make([]string, 0) - for key := range set { - keys = append(keys, key) - } - sort.StringSlice(keys).Sort() - - descs := make([]APIServiceDescription, 0) - for _, key := range keys { - descs = append(descs, set[key]) - } - - return descs -} - -// GetOwnedAPIServiceDescriptions returns a deduplicated set of owned APIServiceDescriptions -// -// Descriptions are returned in alphabetical order. 
-func (csv ClusterServiceVersion) GetOwnedAPIServiceDescriptions() []APIServiceDescription { - set := make(map[string]APIServiceDescription) - for _, owned := range csv.Spec.APIServiceDefinitions.Owned { - name := owned.GetName() - set[name] = owned - } - - keys := make([]string, 0) - for key := range set { - keys = append(keys, key) - } - sort.StringSlice(keys).Sort() - - descs := make([]APIServiceDescription, 0) - for _, key := range keys { - descs = append(descs, set[key]) - } - - return descs -} diff --git a/vendor/github.com/operator-framework/api/pkg/operators/doc.go b/vendor/github.com/operator-framework/api/pkg/operators/doc.go index ee6b62a7d2..7eba794488 100644 --- a/vendor/github.com/operator-framework/api/pkg/operators/doc.go +++ b/vendor/github.com/operator-framework/api/pkg/operators/doc.go @@ -1,5 +1,4 @@ -// +k8s:deepcopy-gen=package -// +groupName=operators.coreos.com // +kubebuilder:skip + // Package operators contains all resource types of the operators.coreos.com API group. package operators diff --git a/vendor/github.com/operator-framework/api/pkg/operators/installplan_types.go b/vendor/github.com/operator-framework/api/pkg/operators/installplan_types.go deleted file mode 100644 index fcd7764e87..0000000000 --- a/vendor/github.com/operator-framework/api/pkg/operators/installplan_types.go +++ /dev/null @@ -1,392 +0,0 @@ -package operators - -import ( - "errors" - "fmt" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// InstallPlanKind is the PascalCase name of an InstallPlan's kind. -const InstallPlanKind = "InstallPlan" - -// Approval is the user approval policy for an InstallPlan. -type Approval string - -const ( - ApprovalAutomatic Approval = "Automatic" - ApprovalManual Approval = "Manual" -) - -// InstallPlanSpec defines a set of Application resources to be installed -type InstallPlanSpec struct { - CatalogSource string - CatalogSourceNamespace string - ClusterServiceVersionNames []string - Approval Approval - Approved bool -} - -// InstallPlanPhase is the current status of a InstallPlan as a whole. -type InstallPlanPhase string - -const ( - InstallPlanPhaseNone InstallPlanPhase = "" - InstallPlanPhasePlanning InstallPlanPhase = "Planning" - InstallPlanPhaseRequiresApproval InstallPlanPhase = "RequiresApproval" - InstallPlanPhaseInstalling InstallPlanPhase = "Installing" - InstallPlanPhaseComplete InstallPlanPhase = "Complete" - InstallPlanPhaseFailed InstallPlanPhase = "Failed" -) - -// InstallPlanConditionType describes the state of an InstallPlan at a certain point as a whole. -type InstallPlanConditionType string - -const ( - InstallPlanResolved InstallPlanConditionType = "Resolved" - InstallPlanInstalled InstallPlanConditionType = "Installed" -) - -// ConditionReason is a camelcased reason for the state transition. 
-type InstallPlanConditionReason string - -const ( - InstallPlanReasonPlanUnknown InstallPlanConditionReason = "PlanUnknown" - InstallPlanReasonInstallCheckFailed InstallPlanConditionReason = "InstallCheckFailed" - InstallPlanReasonDependencyConflict InstallPlanConditionReason = "DependenciesConflict" - InstallPlanReasonComponentFailed InstallPlanConditionReason = "InstallComponentFailed" -) - -// StepStatus is the current status of a particular resource an in -// InstallPlan -type StepStatus string - -const ( - StepStatusUnknown StepStatus = "Unknown" - StepStatusNotPresent StepStatus = "NotPresent" - StepStatusPresent StepStatus = "Present" - StepStatusCreated StepStatus = "Created" - StepStatusWaitingForAPI StepStatus = "WaitingForApi" - StepStatusUnsupportedResource StepStatus = "UnsupportedResource" -) - -// ErrInvalidInstallPlan is the error returned by functions that operate on -// InstallPlans when the InstallPlan does not contain totally valid data. -var ErrInvalidInstallPlan = errors.New("the InstallPlan contains invalid data") - -// InstallPlanStatus represents the information about the status of -// steps required to complete installation. -// -// Status may trail the actual state of a system. -type InstallPlanStatus struct { - Phase InstallPlanPhase - Conditions []InstallPlanCondition - CatalogSources []string - Plan []*Step - // BundleLookups is the set of in-progress requests to pull and unpackage bundle content to the cluster. - // +optional - BundleLookups []BundleLookup - // AttenuatedServiceAccountRef references the service account that is used - // to do scoped operator install. - AttenuatedServiceAccountRef *corev1.ObjectReference -} - -// InstallPlanCondition represents the overall status of the execution of -// an InstallPlan. -type InstallPlanCondition struct { - Type InstallPlanConditionType - Status corev1.ConditionStatus // True, False, or Unknown - LastUpdateTime *metav1.Time - LastTransitionTime *metav1.Time - Reason InstallPlanConditionReason - Message string -} - -// allow overwriting `now` function for deterministic tests -var now = metav1.Now - -// GetCondition returns the InstallPlanCondition of the given type if it exists in the InstallPlanStatus' Conditions. -// Returns a condition of the given type with a ConditionStatus of "Unknown" if not found. -func (s InstallPlanStatus) GetCondition(conditionType InstallPlanConditionType) InstallPlanCondition { - for _, cond := range s.Conditions { - if cond.Type == conditionType { - return cond - } - } - - return InstallPlanCondition{ - Type: conditionType, - Status: corev1.ConditionUnknown, - } -} - -// SetCondition adds or updates a condition, using `Type` as merge key. 
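SetCondition below merges by condition Type and keeps the previous LastTransitionTime whenever the status has not actually changed, so the transition timestamp only moves on a real status flip. A self-contained sketch of that merge rule with stand-in types:

```go
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// condition stands in for InstallPlanCondition/BundleLookupCondition above.
type condition struct {
	Type               string
	Status             string
	LastTransitionTime *metav1.Time
}

// setCondition updates the condition of the same Type in place, preserving
// LastTransitionTime when the status is unchanged, and appends the condition
// if no entry of that Type exists yet.
func setCondition(conds []condition, next condition) []condition {
	for i, existing := range conds {
		if existing.Type != next.Type {
			continue
		}
		if existing.Status == next.Status {
			next.LastTransitionTime = existing.LastTransitionTime
		}
		conds[i] = next
		return conds
	}
	return append(conds, next)
}
```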
-func (s *InstallPlanStatus) SetCondition(cond InstallPlanCondition) InstallPlanCondition { - for i, existing := range s.Conditions { - if existing.Type != cond.Type { - continue - } - if existing.Status == cond.Status { - cond.LastTransitionTime = existing.LastTransitionTime - } - s.Conditions[i] = cond - return cond - } - s.Conditions = append(s.Conditions, cond) - return cond -} - -func ConditionFailed(cond InstallPlanConditionType, reason InstallPlanConditionReason, message string, now *metav1.Time) InstallPlanCondition { - return InstallPlanCondition{ - Type: cond, - Status: corev1.ConditionFalse, - Reason: reason, - Message: message, - LastUpdateTime: now, - LastTransitionTime: now, - } -} - -func ConditionMet(cond InstallPlanConditionType, now *metav1.Time) InstallPlanCondition { - return InstallPlanCondition{ - Type: cond, - Status: corev1.ConditionTrue, - LastUpdateTime: now, - LastTransitionTime: now, - } -} - -// Step represents the status of an individual step in an InstallPlan. -type Step struct { - Resolving string - Resource StepResource - Status StepStatus -} - -// BundleLookupConditionType is a category of the overall state of a BundleLookup. -type BundleLookupConditionType string - -const ( - // BundleLookupPending describes BundleLookups that are not complete. - BundleLookupPending BundleLookupConditionType = "BundleLookupPending" - - crdKind = "CustomResourceDefinition" -) - -type BundleLookupCondition struct { - // Type of condition. - Type BundleLookupConditionType - // Status of the condition, one of True, False, Unknown. - Status corev1.ConditionStatus - // The reason for the condition's last transition. - // +optional - Reason string - // A human readable message indicating details about the transition. - // +optional - Message string - // Last time the condition was probed - // +optional - LastUpdateTime *metav1.Time - // Last time the condition transitioned from one status to another. - // +optional - LastTransitionTime *metav1.Time -} - -// BundleLookup is a request to pull and unpackage the content of a bundle to the cluster. -type BundleLookup struct { - // Path refers to the location of a bundle to pull. - // It's typically an image reference. - Path string - // Replaces is the name of the bundle to replace with the one found at Path. - Replaces string - // CatalogSourceRef is a reference to the CatalogSource the bundle path was resolved from. - CatalogSourceRef *corev1.ObjectReference - // Conditions represents the overall state of a BundleLookup. - // +optional - Conditions []BundleLookupCondition -} - -// GetCondition returns the BundleLookupCondition of the given type if it exists in the BundleLookup's Conditions. -// Returns a condition of the given type with a ConditionStatus of "Unknown" if not found. -func (b BundleLookup) GetCondition(conditionType BundleLookupConditionType) BundleLookupCondition { - for _, cond := range b.Conditions { - if cond.Type == conditionType { - return cond - } - } - - return BundleLookupCondition{ - Type: conditionType, - Status: corev1.ConditionUnknown, - } -} - -// RemoveCondition removes the BundleLookupCondition of the given type from the BundleLookup's Conditions if it exists. -func (b *BundleLookup) RemoveCondition(conditionType BundleLookupConditionType) { - for i, cond := range b.Conditions { - if cond.Type == conditionType { - b.Conditions = append(b.Conditions[:i], b.Conditions[i+1:]...) 
- if len(b.Conditions) == 0 { - b.Conditions = nil - } - return - } - } -} - -// SetCondition replaces the existing BundleLookupCondition of the same type, or adds it if it was not found. -func (b *BundleLookup) SetCondition(cond BundleLookupCondition) BundleLookupCondition { - for i, existing := range b.Conditions { - if existing.Type != cond.Type { - continue - } - if existing.Status == cond.Status { - cond.LastTransitionTime = existing.LastTransitionTime - } - b.Conditions[i] = cond - return cond - } - b.Conditions = append(b.Conditions, cond) - - return cond -} - -func OrderSteps(steps []*Step) []*Step { - // CSVs must be applied first - csvList := []*Step{} - - // CRDs must be applied second - crdList := []*Step{} - - // Other resources may be applied in any order - remainingResources := []*Step{} - for _, step := range steps { - switch step.Resource.Kind { - case crdKind: - crdList = append(crdList, step) - case ClusterServiceVersionKind: - csvList = append(csvList, step) - default: - remainingResources = append(remainingResources, step) - } - } - - result := make([]*Step, len(steps)) - i := 0 - - for j := range csvList { - result[i] = csvList[j] - i++ - } - - for j := range crdList { - result[i] = crdList[j] - i++ - } - - for j := range remainingResources { - result[i] = remainingResources[j] - i++ - } - - return result -} - -func (s InstallPlanStatus) NeedsRequeue() bool { - for _, step := range s.Plan { - switch step.Status { - case StepStatusWaitingForAPI: - return true - } - } - - return false -} - -// ManifestsMatch returns true if the CSV manifests in the StepResources of the given list of steps -// matches those in the InstallPlanStatus. -func (s *InstallPlanStatus) CSVManifestsMatch(steps []*Step) bool { - if s.Plan == nil && steps == nil { - return true - } - if s.Plan == nil || steps == nil { - return false - } - - manifests := make(map[string]struct{}) - for _, step := range s.Plan { - resource := step.Resource - if resource.Kind != ClusterServiceVersionKind { - continue - } - manifests[resource.Manifest] = struct{}{} - } - - for _, step := range steps { - resource := step.Resource - if resource.Kind != ClusterServiceVersionKind { - continue - } - if _, ok := manifests[resource.Manifest]; !ok { - return false - } - delete(manifests, resource.Manifest) - } - - return len(manifests) == 0 -} - -func (s *Step) String() string { - return fmt.Sprintf("%s: %s (%s)", s.Resolving, s.Resource, s.Status) -} - -// StepResource represents the status of a resource to be tracked by an -// InstallPlan. -type StepResource struct { - CatalogSource string - CatalogSourceNamespace string - Group string - Version string - Kind string - Name string - Manifest string -} - -func (r StepResource) String() string { - return fmt.Sprintf("%s[%s/%s/%s (%s/%s)]", r.Name, r.Group, r.Version, r.Kind, r.CatalogSource, r.CatalogSourceNamespace) -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +genclient - -// InstallPlan defines the installation of a set of operators. -type InstallPlan struct { - metav1.TypeMeta - metav1.ObjectMeta - - Spec InstallPlanSpec - Status InstallPlanStatus -} - -// EnsureCatalogSource ensures that a CatalogSource is present in the Status -// block of an InstallPlan. 
-func (p *InstallPlan) EnsureCatalogSource(sourceName string) { - for _, srcName := range p.Status.CatalogSources { - if srcName == sourceName { - return - } - } - - p.Status.CatalogSources = append(p.Status.CatalogSources, sourceName) -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// InstallPlanList is a list of InstallPlan resources. -type InstallPlanList struct { - metav1.TypeMeta - metav1.ListMeta - - Items []InstallPlan -} diff --git a/vendor/github.com/operator-framework/api/pkg/operators/operatorgroup_types.go b/vendor/github.com/operator-framework/api/pkg/operators/operatorgroup_types.go deleted file mode 100644 index e94386c778..0000000000 --- a/vendor/github.com/operator-framework/api/pkg/operators/operatorgroup_types.go +++ /dev/null @@ -1,79 +0,0 @@ -package operators - -import ( - "sort" - "strings" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// OperatorGroupKind is the PascalCase name of an OperatorGroup's kind. -const OperatorGroupKind = "OperatorGroup" - -const ( - OperatorGroupAnnotationKey = "olm.operatorGroup" - OperatorGroupNamespaceAnnotationKey = "olm.operatorNamespace" - OperatorGroupTargetsAnnotationKey = "olm.targetNamespaces" - OperatorGroupProvidedAPIsAnnotationKey = "olm.providedAPIs" -) - -// OperatorGroupSpec is the spec for an OperatorGroup resource. -type OperatorGroupSpec struct { - // Selector selects the OperatorGroup's target namespaces. - // +optional - Selector *metav1.LabelSelector - - // TargetNamespaces is an explicit set of namespaces to target. - // If it is set, Selector is ignored. - // +optional - TargetNamespaces []string - - // ServiceAccountName is the admin specified service account which will be - // used to deploy operator(s) in this operator group. - ServiceAccountName string - - // Static tells OLM not to update the OperatorGroup's providedAPIs annotation - // +optional - StaticProvidedAPIs bool -} - -// OperatorGroupStatus is the status for an OperatorGroupResource. -type OperatorGroupStatus struct { - // Namespaces is the set of target namespaces for the OperatorGroup. - Namespaces []string - - // ServiceAccountRef references the service account object specified. - ServiceAccountRef *corev1.ObjectReference - - // LastUpdated is a timestamp of the last time the OperatorGroup's status was Updated. - LastUpdated *metav1.Time -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +genclient - -// OperatorGroup is the unit of multitenancy for OLM managed operators. -// It constrains the installation of operators in its namespace to a specified set of target namespaces. -type OperatorGroup struct { - metav1.TypeMeta - metav1.ObjectMeta - - Spec OperatorGroupSpec - Status OperatorGroupStatus -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// OperatorGroupList is a list of OperatorGroup resources. 
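The olm.targetNamespaces annotation key defined above is populated from the OperatorGroup's resolved namespaces by sorting and comma-joining them (see BuildTargetNamespaces just below). A tiny sketch of that derivation:

```go
package example

import (
	"sort"
	"strings"
)

// targetNamespacesAnnotation derives the olm.targetNamespaces annotation
// value from an OperatorGroup's resolved namespaces: sorted, then
// comma-joined, so the all-namespaces case ([""]) stays an empty string.
func targetNamespacesAnnotation(namespaces []string) string {
	sorted := append([]string(nil), namespaces...)
	sort.Strings(sorted)
	return strings.Join(sorted, ",")
}
```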
-type OperatorGroupList struct { - metav1.TypeMeta - metav1.ListMeta - - Items []OperatorGroup -} - -func (o *OperatorGroup) BuildTargetNamespaces() string { - sort.Strings(o.Status.Namespaces) - return strings.Join(o.Status.Namespaces, ",") -} diff --git a/vendor/github.com/operator-framework/api/pkg/operators/register.go b/vendor/github.com/operator-framework/api/pkg/operators/register.go index 784b040264..e3c31d51ac 100644 --- a/vendor/github.com/operator-framework/api/pkg/operators/register.go +++ b/vendor/github.com/operator-framework/api/pkg/operators/register.go @@ -10,41 +10,22 @@ const ( GroupName = "operators.coreos.com" // GroupVersion is the group version used in this package. GroupVersion = runtime.APIVersionInternal -) - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} -// Kind takes an unqualified kind and returns back a Group qualified GroupKind -func Kind(kind string) schema.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} + // LEGACY: Exported kind names, remove after major version bump -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - // SchemeBuilder initializes a scheme builder - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - // AddToScheme is a global function that registers this API group & version to a scheme - AddToScheme = SchemeBuilder.AddToScheme + // ClusterServiceVersionKind is the kind name for ClusterServiceVersion resources. + ClusterServiceVersionKind = "ClusterServiceVersion" + // CatalogSourceKind is the kind name for CatalogSource resources. + CatalogSourceKind = "CatalogSource" + // InstallPlanKind is the kind name for InstallPlan resources. + InstallPlanKind = "InstallPlan" + // SubscriptionKind is the kind name for Subscription resources. + SubscriptionKind = "Subscription" + // OperatorKind is the kind name for Operator resources. + OperatorKind = "Operator" + // OperatorGroupKind is the kind name for OperatorGroup resources. + OperatorGroupKind = "OperatorGroup" ) -// addKnownTypes adds the list of known types to Scheme -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &CatalogSource{}, - &CatalogSourceList{}, - &InstallPlan{}, - &InstallPlanList{}, - &Subscription{}, - &SubscriptionList{}, - &ClusterServiceVersion{}, - &ClusterServiceVersionList{}, - &OperatorGroup{}, - &OperatorGroupList{}, - ) - return nil -} +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} diff --git a/vendor/github.com/operator-framework/api/pkg/operators/subscription_types.go b/vendor/github.com/operator-framework/api/pkg/operators/subscription_types.go deleted file mode 100644 index 931207681e..0000000000 --- a/vendor/github.com/operator-framework/api/pkg/operators/subscription_types.go +++ /dev/null @@ -1,312 +0,0 @@ -package operators - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" -) - -// SubscriptionKind is the PascalCase name of a Subscription's kind. 
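With the internal addKnownTypes removed in the register.go hunk above, scheme registration is expected to go through the versioned API group instead. A hedged sketch, assuming the operators.coreos.com/v1alpha1 package from the newly vendored operator-framework/api and its AddToScheme helper:

```go
package example

import (
	operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
	"k8s.io/apimachinery/pkg/runtime"
)

// newScheme registers the versioned operators.coreos.com/v1alpha1 types on a
// fresh scheme; the internal package now only exports the group name and the
// legacy kind constants kept in the hunk above.
func newScheme() (*runtime.Scheme, error) {
	scheme := runtime.NewScheme()
	if err := operatorsv1alpha1.AddToScheme(scheme); err != nil {
		return nil, err
	}
	return scheme, nil
}
```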
-const SubscriptionKind = "Subscription" - -// SubscriptionState tracks when updates are available, installing, or service is up to date -type SubscriptionState string - -const ( - SubscriptionStateNone = "" - SubscriptionStateFailed = "UpgradeFailed" - SubscriptionStateUpgradeAvailable = "UpgradeAvailable" - SubscriptionStateUpgradePending = "UpgradePending" - SubscriptionStateAtLatest = "AtLatestKnown" -) - -const ( - SubscriptionReasonInvalidCatalog ConditionReason = "InvalidCatalog" - SubscriptionReasonUpgradeSucceeded ConditionReason = "UpgradeSucceeded" -) - -// SubscriptionSpec defines an Application that can be installed -type SubscriptionSpec struct { - CatalogSource string - CatalogSourceNamespace string - Package string - Channel string - StartingCSV string - InstallPlanApproval Approval - Config SubscriptionConfig -} - -// SubscriptionConfig contains configuration specified for a subscription. -type SubscriptionConfig struct { - // Label selector for pods. Existing ReplicaSets whose pods are - // selected by this will be the ones affected by this deployment. - // It must match the pod template's labels. - Selector *metav1.LabelSelector `json:"selector,omitempty"` - - // NodeSelector is a selector which must be true for the pod to fit on a node. - // Selector which must match a node's labels for the pod to be scheduled on that node. - // More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - // +optional - NodeSelector map[string]string `json:"nodeSelector,omitempty"` - - // If specified, the pod's tolerations. - // +optional - Tolerations []corev1.Toleration `json:"tolerations,omitempty"` - - // Compute Resources required by this container. - // Cannot be updated. - // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ - // +optional - Resources corev1.ResourceRequirements `json:"resources,omitempty"` - - // List of sources to populate environment variables in the container. - // The keys defined within a source must be a C_IDENTIFIER. All invalid keys - // will be reported as an event when the container is starting. When a key exists in multiple - // sources, the value associated with the last source will take precedence. - // Values defined by an Env with a duplicate key will take precedence. - // Cannot be updated. - // +optional - EnvFrom []corev1.EnvFromSource `json:"envFrom,omitempty"` - // List of environment variables to set in the container. - // Cannot be updated. - // +optional - // +patchMergeKey=name - // +patchStrategy=merge - Env []corev1.EnvVar `json:"env,omitempty"` - - // List of Volumes to set in the podSpec. - // +optional - Volumes []corev1.Volume `json:"volumes,omitempty"` - - // List of VolumeMounts to set in the container. - // +optional - VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"` -} - -// SubscriptionConditionType indicates an explicit state condition about a Subscription in "abnormal-true" -// polarity form (see https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties). -type SubscriptionConditionType string - -const ( - // SubscriptionCatalogSourcesUnhealthy indicates that some or all of the CatalogSources to be used in resolution are unhealthy. - SubscriptionCatalogSourcesUnhealthy SubscriptionConditionType = "CatalogSourcesUnhealthy" - - // SubscriptionInstallPlanMissing indicates that a Subscription's InstallPlan is missing. 
- SubscriptionInstallPlanMissing SubscriptionConditionType = "InstallPlanMissing" - - // SubscriptionInstallPlanPending indicates that a Subscription's InstallPlan is pending installation. - SubscriptionInstallPlanPending SubscriptionConditionType = "InstallPlanPending" - - // SubscriptionInstallPlanFailed indicates that the installation of a Subscription's InstallPlan has failed. - SubscriptionInstallPlanFailed SubscriptionConditionType = "InstallPlanFailed" -) - -const ( - // NoCatalogSourcesFound is a reason string for Subscriptions with unhealthy CatalogSources due to none being available. - NoCatalogSourcesFound = "NoCatalogSourcesFound" - - // AllCatalogSourcesHealthy is a reason string for Subscriptions that transitioned due to all CatalogSources being healthy. - AllCatalogSourcesHealthy = "AllCatalogSourcesHealthy" - - // CatalogSourcesAdded is a reason string for Subscriptions that transitioned due to CatalogSources being added. - CatalogSourcesAdded = "CatalogSourcesAdded" - - // CatalogSourcesUpdated is a reason string for Subscriptions that transitioned due to CatalogSource being updated. - CatalogSourcesUpdated = "CatalogSourcesUpdated" - - // CatalogSourcesDeleted is a reason string for Subscriptions that transitioned due to CatalogSources being removed. - CatalogSourcesDeleted = "CatalogSourcesDeleted" - - // UnhealthyCatalogSourceFound is a reason string for Subscriptions that transitioned because an unhealthy CatalogSource was found. - UnhealthyCatalogSourceFound = "UnhealthyCatalogSourceFound" - - // ReferencedInstallPlanNotFound is a reason string for Subscriptions that transitioned due to a referenced InstallPlan not being found. - ReferencedInstallPlanNotFound = "ReferencedInstallPlanNotFound" - - // InstallPlanNotYetReconciled is a reason string for Subscriptions that transitioned due to a referenced InstallPlan not being reconciled yet. - InstallPlanNotYetReconciled = "InstallPlanNotYetReconciled" - - // InstallPlanFailed is a reason string for Subscriptions that transitioned due to a referenced InstallPlan failing without setting an explicit failure condition. - InstallPlanFailed = "InstallPlanFailed" -) - -// SubscriptionCondition represents the latest available observations of a Subscription's state. -type SubscriptionCondition struct { - // Type is the type of Subscription condition. - Type SubscriptionConditionType - - // Status is the status of the condition, one of True, False, Unknown. - Status corev1.ConditionStatus - - // Reason is a one-word CamelCase reason for the condition's last transition. - // +optional - Reason string - - // Message is a human-readable message indicating details about last transition. - // +optional - Message string - - // LastHeartbeatTime is the last time we got an update on a given condition - // +optional - LastHeartbeatTime *metav1.Time - - // LastTransitionTime is the last time the condition transit from one status to another - // +optional - LastTransitionTime *metav1.Time -} - -// Equals returns true if a SubscriptionCondition equals the one given, false otherwise. -// Equality is determined by the equality of the type, status, reason, and message fields ONLY. -func (s SubscriptionCondition) Equals(condition SubscriptionCondition) bool { - return s.Type == condition.Type && s.Status == condition.Status && s.Reason == condition.Reason && s.Message == condition.Message -} - -type SubscriptionStatus struct { - // CurrentCSV is the CSV the Subscription is progressing to. 
- // +optional - CurrentCSV string - - // InstalledCSV is the CSV currently installed by the Subscription. - // +optional - InstalledCSV string - - // Install is a reference to the latest InstallPlan generated for the Subscription. - // DEPRECATED: InstallPlanRef - // +optional - Install *InstallPlanReference - - // State represents the current state of the Subscription - // +optional - State SubscriptionState - - // Reason is the reason the Subscription was transitioned to its current state. - // +optional - Reason ConditionReason - - // InstallPlanRef is a reference to the latest InstallPlan that contains the Subscription's current CSV. - // +optional - InstallPlanRef *corev1.ObjectReference - - // CatalogHealth contains the Subscription's view of its relevant CatalogSources' status. - // It is used to determine SubscriptionStatusConditions related to CatalogSources. - // +optional - CatalogHealth []SubscriptionCatalogHealth - - // Conditions is a list of the latest available observations about a Subscription's current state. - // +optional - Conditions []SubscriptionCondition `hash:"set"` - - // LastUpdated represents the last time that the Subscription status was updated. - LastUpdated metav1.Time -} - -// GetCondition returns the SubscriptionCondition of the given type if it exists in the SubscriptionStatus' Conditions. -// Returns a condition of the given type with a ConditionStatus of "Unknown" if not found. -func (s SubscriptionStatus) GetCondition(conditionType SubscriptionConditionType) SubscriptionCondition { - for _, cond := range s.Conditions { - if cond.Type == conditionType { - return cond - } - } - - return SubscriptionCondition{ - Type: conditionType, - Status: corev1.ConditionUnknown, - } -} - -// SetCondition sets the given SubscriptionCondition in the SubscriptionStatus' Conditions. -func (s *SubscriptionStatus) SetCondition(condition SubscriptionCondition) { - for i, cond := range s.Conditions { - if cond.Type == condition.Type { - s.Conditions[i] = condition - return - } - } - - s.Conditions = append(s.Conditions, condition) -} - -// RemoveConditions removes any conditions of the given types from the SubscriptionStatus' Conditions. -func (s *SubscriptionStatus) RemoveConditions(remove ...SubscriptionConditionType) { - exclusions := map[SubscriptionConditionType]struct{}{} - for _, r := range remove { - exclusions[r] = struct{}{} - } - - var filtered []SubscriptionCondition - for _, cond := range s.Conditions { - if _, ok := exclusions[cond.Type]; ok { - // Skip excluded condition types - continue - } - filtered = append(filtered, cond) - } - - s.Conditions = filtered -} - -type InstallPlanReference struct { - APIVersion string - Kind string - Name string - UID types.UID -} - -// SubscriptionCatalogHealth describes the health of a CatalogSource the Subscription knows about. -type SubscriptionCatalogHealth struct { - // CatalogSourceRef is a reference to a CatalogSource. - CatalogSourceRef *corev1.ObjectReference - - // LastUpdated represents the last time that the CatalogSourceHealth changed - LastUpdated *metav1.Time - - // Healthy is true if the CatalogSource is healthy; false otherwise. - Healthy bool -} - -// Equals returns true if a SubscriptionCatalogHealth equals the one given, false otherwise. -// Equality is based SOLEY on health and UID. 
-func (s SubscriptionCatalogHealth) Equals(health SubscriptionCatalogHealth) bool { - return s.Healthy == health.Healthy && s.CatalogSourceRef.UID == health.CatalogSourceRef.UID -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +genclient - -// Subscription keeps operators up to date by tracking changes to Catalogs. -type Subscription struct { - metav1.TypeMeta - metav1.ObjectMeta - - Spec *SubscriptionSpec - Status SubscriptionStatus -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// SubscriptionList is a list of Subscription resources. -type SubscriptionList struct { - metav1.TypeMeta - metav1.ListMeta - - Items []Subscription -} - -// GetInstallPlanApproval gets the configured install plan approval or the default -func (s *Subscription) GetInstallPlanApproval() Approval { - if s.Spec.InstallPlanApproval == ApprovalManual { - return ApprovalManual - } - return ApprovalAutomatic -} - -// NewInstallPlanReference returns an InstallPlanReference for the given ObjectReference. -func NewInstallPlanReference(ref *corev1.ObjectReference) *InstallPlanReference { - return &InstallPlanReference{ - APIVersion: ref.APIVersion, - Kind: ref.Kind, - Name: ref.Name, - UID: ref.UID, - } -} diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/catalogsource_types.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/catalogsource_types.go index e7d041fa7a..726f8e1aa1 100644 --- a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/catalogsource_types.go +++ b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/catalogsource_types.go @@ -1,17 +1,19 @@ package v1alpha1 import ( + "encoding/json" "fmt" - "time" - "github.com/sirupsen/logrus" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "time" ) const ( - CatalogSourceCRDAPIVersion = GroupName + "/" + GroupVersion - CatalogSourceKind = "CatalogSource" + CatalogSourceCRDAPIVersion = GroupName + "/" + GroupVersion + CatalogSourceKind = "CatalogSource" + DefaultRegistryPollDuration = 15 * time.Minute ) // SourceType indicates the type of backing store for a CatalogSource @@ -36,38 +38,54 @@ const ( CatalogSourceConfigMapError ConditionReason = "ConfigMapError" // CatalogSourceRegistryServerError denotes when there is an issue querying the specified registry server. CatalogSourceRegistryServerError ConditionReason = "RegistryServerError" + // CatalogSourceIntervalInvalidError denotes if the registry polling interval is invalid. + CatalogSourceIntervalInvalidError ConditionReason = "InvalidIntervalError" ) type CatalogSourceSpec struct { // SourceType is the type of source SourceType SourceType `json:"sourceType"` + // Priority field assigns a weight to the catalog source to prioritize them so that it can be consumed by the dependency resolver. + // Usage: + // Higher weight indicates that this catalog source is preferred over lower weighted catalog sources during dependency resolution. + // The range of the priority value can go from positive to negative in the range of int32. + // The default value to a catalog source with unassigned priority would be 0. + // The catalog source with the same priority values will be ranked lexicographically based on its name. + // +optional + Priority int `json:"priority,omitempty"` + // ConfigMap is the name of the ConfigMap to be used to back a configmap-server registry. 
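// --- Illustrative sketch (editor's annotation, not part of the vendored diff) ---
// The CatalogSourceSpec above gains a Priority field; a minimal sketch of how a consumer
// might set it so the dependency resolver prefers this catalog. The catalog name, namespace,
// and image below are placeholders, not values taken from this repository.
package main

import (
	"fmt"

	operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	cs := &operatorsv1alpha1.CatalogSource{
		ObjectMeta: metav1.ObjectMeta{Name: "example-catalog", Namespace: "olm"},
		Spec: operatorsv1alpha1.CatalogSourceSpec{
			SourceType: operatorsv1alpha1.SourceTypeGrpc,
			Image:      "quay.io/example/catalog:latest",
			// Higher values are preferred during dependency resolution; unset defaults to 0.
			Priority: 100,
		},
	}
	fmt.Println(cs.Name, cs.Spec.Priority)
}
// --- end sketch ---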
// Only used when SourceType = SourceTypeConfigmap or SourceTypeInternal. - // +Optional + // +optional ConfigMap string `json:"configMap,omitempty"` // Address is a host that OLM can use to connect to a pre-existing registry. // Format: : // Only used when SourceType = SourceTypeGrpc. // Ignored when the Image field is set. - // +Optional + // +optional Address string `json:"address,omitempty"` // Image is an operator-registry container image to instantiate a registry-server with. // Only used when SourceType = SourceTypeGrpc. // If present, the address field is ignored. - // +Optional + // +optional Image string `json:"image,omitempty"` + // GrpcPodConfig exposes different overrides for the pod spec of the CatalogSource Pod. + // Only used when SourceType = SourceTypeGrpc and Image is set. + // +optional + GrpcPodConfig *GrpcPodConfig `json:"grpcPodConfig,omitempty"` + // UpdateStrategy defines how updated catalog source images can be discovered // Consists of an interval that defines polling duration and an embedded strategy type - // +Optional + // +optional UpdateStrategy *UpdateStrategy `json:"updateStrategy,omitempty"` // Secrets represent set of secrets that can be used to access the contents of the catalog. // It is best to keep this list small, since each will need to be tried for every catalog entry. - // +Optional + // +optional Secrets []string `json:"secrets,omitempty"` // Metadata @@ -77,6 +95,24 @@ type CatalogSourceSpec struct { Icon Icon `json:"icon,omitempty"` } +// GrpcPodConfig contains configuration specified for a catalog source +type GrpcPodConfig struct { + // NodeSelector is a selector which must be true for the pod to fit on a node. + // Selector which must match a node's labels for the pod to be scheduled on that node. + // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + + // Tolerations are the catalog source's pod's tolerations. + // +optional + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` + + // If specified, indicates the pod's priority. + // If not specified, the pod priority will be default or zero if there is no + // default. + // +optional + PriorityClassName *string `json:"priorityClassName,omitempty"` +} + // UpdateStrategy holds all the different types of catalog source update strategies // Currently only registry polling strategy is implemented type UpdateStrategy struct { @@ -87,7 +123,32 @@ type RegistryPoll struct { // Interval is used to determine the time interval between checks of the latest catalog source version. // The catalog operator polls to see if a new version of the catalog source is available. // If available, the latest image is pulled and gRPC traffic is directed to the latest catalog source. - Interval *metav1.Duration `json:"interval,omitempty"` + RawInterval string `json:"interval,omitempty"` + Interval *metav1.Duration `json:"-"` + ParsingError string `json:"-"` +} + +// UnmarshalJSON implements the encoding/json.Unmarshaler interface. +func (u *UpdateStrategy) UnmarshalJSON(data []byte) (err error) { + type alias struct { + *RegistryPoll `json:"registryPoll,omitempty"` + } + us := alias{} + if err = json.Unmarshal(data, &us); err != nil { + return err + } + registryPoll := &RegistryPoll{ + RawInterval: us.RegistryPoll.RawInterval, + } + duration, err := time.ParseDuration(registryPoll.RawInterval) + if err != nil { + registryPoll.ParsingError = fmt.Sprintf("error parsing spec.updateStrategy.registryPoll.interval. Using the default value of %s instead. 
Error: %s", DefaultRegistryPollDuration, err) + registryPoll.Interval = &metav1.Duration{Duration: DefaultRegistryPollDuration} + } else { + registryPoll.Interval = &metav1.Duration{Duration: duration} + } + u.RegistryPoll = registryPoll + return nil } type RegistryServiceStatus struct { @@ -122,6 +183,16 @@ type CatalogSourceStatus struct { ConfigMapResource *ConfigMapResourceReference `json:"configMapReference,omitempty"` RegistryServiceStatus *RegistryServiceStatus `json:"registryService,omitempty"` GRPCConnectionState *GRPCConnectionState `json:"connectionState,omitempty"` + + // Represents the state of a CatalogSource. Note that Message and Reason represent the original + // status information, which may be migrated to be conditions based in the future. Any new features + // introduced will use conditions. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` } type ConfigMapResourceReference struct { diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/clusterserviceversion_types.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/clusterserviceversion_types.go index 4127bc9167..eb4d1635ea 100644 --- a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/clusterserviceversion_types.go +++ b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/clusterserviceversion_types.go @@ -7,10 +7,11 @@ import ( "strings" admissionregistrationv1 "k8s.io/api/admissionregistration/v1" - appsv1 "k8s.io/api/apps/v1" rbac "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/intstr" "github.com/operator-framework/api/pkg/lib/version" ) @@ -20,6 +21,7 @@ const ( ClusterServiceVersionKind = "ClusterServiceVersion" OperatorGroupNamespaceAnnotationKey = "olm.operatorNamespace" InstallStrategyNameDeployment = "deployment" + SkipRangeAnnotationKey = "olm.skipRange" ) // InstallModeType is a supported type of install mode for CSV installation @@ -48,25 +50,30 @@ type InstallModeSet map[InstallModeType]bool // NamedInstallStrategy represents the block of an ClusterServiceVersion resource // where the install strategy is specified. +// +k8s:openapi-gen=true type NamedInstallStrategy struct { StrategyName string `json:"strategy"` StrategySpec StrategyDetailsDeployment `json:"spec,omitempty"` } // StrategyDeploymentPermissions describe the rbac rules and service account needed by the install strategy +// +k8s:openapi-gen=true type StrategyDeploymentPermissions struct { ServiceAccountName string `json:"serviceAccountName"` Rules []rbac.PolicyRule `json:"rules"` } -// StrategyDeploymentSpec contains the name and spec for the deployment ALM should create +// StrategyDeploymentSpec contains the name, spec and labels for the deployment ALM should create +// +k8s:openapi-gen=true type StrategyDeploymentSpec struct { - Name string `json:"name"` - Spec appsv1.DeploymentSpec `json:"spec"` + Name string `json:"name"` + Spec appsv1.DeploymentSpec `json:"spec"` + Label labels.Set `json:"label,omitempty"` } // StrategyDetailsDeployment represents the parsed details of a Deployment // InstallStrategy. 
+// +k8s:openapi-gen=true type StrategyDetailsDeployment struct { DeploymentSpecs []StrategyDeploymentSpec `json:"deployments"` Permissions []StrategyDeploymentPermissions `json:"permissions,omitempty"` @@ -159,16 +166,22 @@ const ( ValidatingAdmissionWebhook WebhookAdmissionType = "ValidatingAdmissionWebhook" // MutatingAdmissionWebhook is for mutating admission webhooks MutatingAdmissionWebhook WebhookAdmissionType = "MutatingAdmissionWebhook" + // ConversionWebhook is for conversion webhooks + ConversionWebhook WebhookAdmissionType = "ConversionWebhook" ) // WebhookDescription provides details to OLM about required webhooks // +k8s:openapi-gen=true type WebhookDescription struct { GenerateName string `json:"generateName"` - // +kubebuilder:validation:Enum=ValidatingAdmissionWebhook;MutatingAdmissionWebhook - Type WebhookAdmissionType `json:"type"` - DeploymentName string `json:"deploymentName,omitempty"` + // +kubebuilder:validation:Enum=ValidatingAdmissionWebhook;MutatingAdmissionWebhook;ConversionWebhook + Type WebhookAdmissionType `json:"type"` + DeploymentName string `json:"deploymentName,omitempty"` + // +kubebuilder:validation:Maximum=65535 + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:default=443 ContainerPort int32 `json:"containerPort,omitempty"` + TargetPort *intstr.IntOrString `json:"targetPort,omitempty"` Rules []admissionregistrationv1.RuleWithOperations `json:"rules,omitempty"` FailurePolicy *admissionregistrationv1.FailurePolicyType `json:"failurePolicy,omitempty"` MatchPolicy *admissionregistrationv1.MatchPolicyType `json:"matchPolicy,omitempty"` @@ -178,6 +191,7 @@ type WebhookDescription struct { AdmissionReviewVersions []string `json:"admissionReviewVersions"` ReinvocationPolicy *admissionregistrationv1.ReinvocationPolicyType `json:"reinvocationPolicy,omitempty"` WebhookPath *string `json:"webhookPath,omitempty"` + ConversionCRDs []string `json:"conversionCRDs,omitempty"` } // GetValidatingWebhook returns a ValidatingWebhook generated from the WebhookDescription @@ -255,6 +269,7 @@ type APIServiceDefinitions struct { // ClusterServiceVersionSpec declarations tell OLM how to install an operator // that can manage apps for a given version. +// +k8s:openapi-gen=true type ClusterServiceVersionSpec struct { InstallStrategy NamedInstallStrategy `json:"install"` Version version.OperatorVersion `json:"version,omitempty"` @@ -293,23 +308,53 @@ type ClusterServiceVersionSpec struct { // Label selector for related resources. // +optional Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` + + // Cleanup specifies the cleanup behaviour when the CSV gets deleted + // +optional + Cleanup CleanupSpec `json:"cleanup,omitempty"` + + // The name(s) of one or more CSV(s) that should be skipped in the upgrade graph. + // Should match the `metadata.Name` field of the CSV that should be skipped. + // This field is only used during catalog creation and plays no part in cluster runtime. + // +optional + Skips []string `json:"skips,omitempty"` + + // List any related images, or other container images that your Operator might require to perform their functions. + // This list should also include operand images as well. All image references should be specified by + // digest (SHA) and not by tag. This field is only used during catalog creation and plays no part in cluster runtime. 
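// --- Illustrative sketch (editor's annotation, not part of the vendored diff) ---
// The WebhookDescription changes above add a ConversionWebhook admission type, an optional
// TargetPort, and a ConversionCRDs list. A minimal sketch; the deployment and CRD names are
// placeholders.
package main

import (
	"fmt"

	operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	port := intstr.FromInt(8443)
	wh := operatorsv1alpha1.WebhookDescription{
		GenerateName:   "convert.example.com",
		Type:           operatorsv1alpha1.ConversionWebhook,
		DeploymentName: "example-operator",
		ContainerPort:  443,
		TargetPort:     &port,
		ConversionCRDs: []string{"examples.example.com"},
	}
	fmt.Println(wh.Type, wh.TargetPort.IntValue())
}
// --- end sketch ---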
+ // +optional + RelatedImages []RelatedImage `json:"relatedImages,omitempty"` } +// +k8s:openapi-gen=true +type CleanupSpec struct { + Enabled bool `json:"enabled"` +} + +// +k8s:openapi-gen=true type Maintainer struct { Name string `json:"name,omitempty"` Email string `json:"email,omitempty"` } +// +k8s:openapi-gen=true type AppLink struct { Name string `json:"name,omitempty"` URL string `json:"url,omitempty"` } +// +k8s:openapi-gen=true type Icon struct { Data string `json:"base64data"` MediaType string `json:"mediatype"` } +// +k8s:openapi-gen=true +type RelatedImage struct { + Name string `json:"name"` + Image string `json:"image"` +} + // ClusterServiceVersionPhase is a label for the condition of a ClusterServiceVersion at the current time. type ClusterServiceVersionPhase string @@ -369,6 +414,8 @@ const ( CSVReasonCannotModifyStaticOperatorGroupProvidedAPIs ConditionReason = "CannotModifyStaticOperatorGroupProvidedAPIs" CSVReasonDetectedClusterChange ConditionReason = "DetectedClusterChange" CSVReasonInvalidWebhookDescription ConditionReason = "InvalidWebhookDescription" + CSVReasonOperatorConditionNotUpgradeable ConditionReason = "OperatorConditionNotUpgradeable" + CSVReasonWaitingForCleanupToComplete ConditionReason = "WaitingOnCleanup" ) // HasCaResources returns true if the CSV has owned APIServices or Webhooks. @@ -381,6 +428,7 @@ func (c *ClusterServiceVersion) HasCAResources() bool { } // Conditions appear in the status as a record of state transitions on the ClusterServiceVersion +// +k8s:openapi-gen=true type ClusterServiceVersionCondition struct { // Condition of the ClusterServiceVersion Phase ClusterServiceVersionPhase `json:"phase,omitempty"` @@ -436,6 +484,7 @@ const ( ) // DependentStatus is the status for a dependent requirement (to prevent infinite nesting) +// +k8s:openapi-gen=true type DependentStatus struct { Group string `json:"group"` Version string `json:"version"` @@ -445,6 +494,7 @@ type DependentStatus struct { Message string `json:"message,omitempty"` } +// +k8s:openapi-gen=true type RequirementStatus struct { Group string `json:"group"` Version string `json:"version"` @@ -456,8 +506,9 @@ type RequirementStatus struct { Dependents []DependentStatus `json:"dependents,omitempty"` } -// ClusterServiceVersionStatus represents information about the status of a pod. Status may trail the actual +// ClusterServiceVersionStatus represents information about the status of a CSV. Status may trail the actual // state of a system. +// +k8s:openapi-gen=true type ClusterServiceVersionStatus struct { // Current condition of the ClusterServiceVersion Phase ClusterServiceVersionPhase `json:"phase,omitempty"` @@ -484,6 +535,33 @@ type ClusterServiceVersionStatus struct { // Time the owned APIService certs will rotate next // +optional CertsRotateAt *metav1.Time `json:"certsRotateAt,omitempty"` + // CleanupStatus represents information about the status of cleanup while a CSV is pending deletion + // +optional + Cleanup CleanupStatus `json:"cleanup,omitempty"` +} + +// CleanupStatus represents information about the status of cleanup while a CSV is pending deletion +// +k8s:openapi-gen=true +type CleanupStatus struct { + // PendingDeletion is the list of custom resource objects that are pending deletion and blocked on finalizers. + // This indicates the progress of cleanup that is blocking CSV deletion or operator uninstall. 
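// --- Illustrative sketch (editor's annotation, not part of the vendored diff) ---
// The ClusterServiceVersionSpec above gains Cleanup, Skips, and RelatedImages. A minimal
// sketch of populating them; the display name, skipped version, and image digest are placeholders.
package main

import (
	"fmt"

	operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
)

func main() {
	spec := operatorsv1alpha1.ClusterServiceVersionSpec{
		DisplayName: "Example Operator",
		Cleanup:     operatorsv1alpha1.CleanupSpec{Enabled: true},
		Skips:       []string{"example-operator.v1.0.1"},
		RelatedImages: []operatorsv1alpha1.RelatedImage{
			{Name: "operand", Image: "quay.io/example/operand@sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"},
		},
	}
	fmt.Println(spec.DisplayName, spec.Cleanup.Enabled, len(spec.RelatedImages))
}
// --- end sketch ---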
+ // +optional + PendingDeletion []ResourceList `json:"pendingDeletion,omitempty"` +} + +// ResourceList represents a list of resources which are of the same Group/Kind +// +k8s:openapi-gen=true +type ResourceList struct { + Group string `json:"group"` + Kind string `json:"kind"` + Instances []ResourceInstance `json:"instances"` +} + +// +k8s:openapi-gen=true +type ResourceInstance struct { + Name string `json:"name"` + // Namespace can be empty for cluster-scoped resources + Namespace string `json:"namespace,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/doc.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/doc.go index 3938a475ee..74bc9b819a 100644 --- a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/doc.go +++ b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/doc.go @@ -1,6 +1,6 @@ +// +groupName=operators.coreos.com // +k8s:deepcopy-gen=package // +k8s:conversion-gen=github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators -// +groupName=operators.coreos.com // Package v1alpha1 contains resources types for version v1alpha1 of the operators.coreos.com API group. package v1alpha1 diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/installplan_types.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/installplan_types.go index 41bbd429f7..5210436d98 100644 --- a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/installplan_types.go +++ b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/installplan_types.go @@ -14,6 +14,7 @@ const ( ) // Approval is the user approval policy for an InstallPlan. +// It must be one of "Automatic" or "Manual". type Approval string const ( @@ -70,6 +71,7 @@ const ( StepStatusNotPresent StepStatus = "NotPresent" StepStatusPresent StepStatus = "Present" StepStatusCreated StepStatus = "Created" + StepStatusNotCreated StepStatus = "NotCreated" StepStatusWaitingForAPI StepStatus = "WaitingForApi" StepStatusUnsupportedResource StepStatus = "UnsupportedResource" ) @@ -93,6 +95,17 @@ type InstallPlanStatus struct { // AttenuatedServiceAccountRef references the service account that is used // to do scoped operator install. AttenuatedServiceAccountRef *corev1.ObjectReference `json:"attenuatedServiceAccountRef,omitempty"` + + // StartTime is the time when the controller began applying + // the resources listed in the plan to the cluster. + // +optional + StartTime *metav1.Time `json:"startTime,omitempty"` + + // Message is a human-readable message containing detailed + // information that may be important to understanding why the + // plan has its current status. + // +optional + Message string `json:"message,omitempty"` } // InstallPlanCondition represents the overall status of the execution of @@ -215,6 +228,7 @@ func ConditionMet(cond InstallPlanConditionType, now *metav1.Time) InstallPlanCo type Step struct { Resolving string `json:"resolving"` Resource StepResource `json:"resource"` + Optional bool `json:"optional,omitempty"` Status StepStatus `json:"status"` } @@ -261,6 +275,9 @@ type BundleLookup struct { // Conditions represents the overall state of a BundleLookup. // +optional Conditions []BundleLookupCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + // The effective properties of the unpacked bundle. 
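// --- Illustrative sketch (editor's annotation, not part of the vendored diff) ---
// InstallPlanStatus above gains StartTime and Message, and Step gains an Optional flag
// (plus the new StepStatusNotCreated status). A minimal sketch; the resolving CSV name and
// message are placeholders.
package main

import (
	"fmt"

	operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	now := metav1.Now()
	status := operatorsv1alpha1.InstallPlanStatus{
		Phase:     operatorsv1alpha1.InstallPlanPhaseInstalling,
		StartTime: &now,
		Message:   "applying plan resources",
		Plan: []*operatorsv1alpha1.Step{
			{Resolving: "example-operator.v1.0.2", Optional: true, Status: operatorsv1alpha1.StepStatusNotCreated},
		},
	}
	fmt.Println(status.Phase, status.Message, status.Plan[0].Optional)
}
// --- end sketch ---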
+ // +optional + Properties string `json:"properties,omitempty"` } // GetCondition returns the BundleLookupCondition of the given type if it exists in the BundleLookup's Conditions. diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/subscription_types.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/subscription_types.go index 4664d14723..e048d4988c 100644 --- a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/subscription_types.go +++ b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/subscription_types.go @@ -29,13 +29,13 @@ const ( // SubscriptionSpec defines an Application that can be installed type SubscriptionSpec struct { - CatalogSource string `json:"source"` - CatalogSourceNamespace string `json:"sourceNamespace"` - Package string `json:"name"` - Channel string `json:"channel,omitempty"` - StartingCSV string `json:"startingCSV,omitempty"` - InstallPlanApproval Approval `json:"installPlanApproval,omitempty"` - Config SubscriptionConfig `json:"config,omitempty"` + CatalogSource string `json:"source"` + CatalogSourceNamespace string `json:"sourceNamespace"` + Package string `json:"name"` + Channel string `json:"channel,omitempty"` + StartingCSV string `json:"startingCSV,omitempty"` + InstallPlanApproval Approval `json:"installPlanApproval,omitempty"` + Config *SubscriptionConfig `json:"config,omitempty"` } // SubscriptionConfig contains configuration specified for a subscription. @@ -60,7 +60,7 @@ type SubscriptionConfig struct { // Immutable. // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ // +optional - Resources corev1.ResourceRequirements `json:"resources,omitempty"` + Resources *corev1.ResourceRequirements `json:"resources,omitempty"` // EnvFrom is a list of sources to populate environment variables in the container. // The keys defined within a source must be a C_IDENTIFIER. All invalid keys @@ -102,6 +102,9 @@ const ( // SubscriptionInstallPlanFailed indicates that the installation of a Subscription's InstallPlan has failed. SubscriptionInstallPlanFailed SubscriptionConditionType = "InstallPlanFailed" + + // SubscriptionResolutionFailed indicates that the dependency resolution in the namespace in which the subscription is created has failed + SubscriptionResolutionFailed SubscriptionConditionType = "ResolutionFailed" ) const ( diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/zz_generated.deepcopy.go index a3d8a11ec1..c094738eed 100644 --- a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* @@ -22,10 +23,12 @@ package v1alpha1 import ( "encoding/json" admissionregistrationv1 "k8s.io/api/admissionregistration/v1" - corev1 "k8s.io/api/core/v1" + "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
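// --- Illustrative sketch (editor's annotation, not part of the vendored diff) ---
// In the v1alpha1 subscription_types.go change above, SubscriptionSpec.Config and
// SubscriptionConfig.Resources become pointers, so callers construct them explicitly and may
// leave them nil. A minimal sketch; names and resource values are placeholders.
package main

import (
	"fmt"

	operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	spec := &operatorsv1alpha1.SubscriptionSpec{
		CatalogSource:          "example-catalog",
		CatalogSourceNamespace: "olm",
		Package:                "example-operator",
		Channel:                "stable",
		InstallPlanApproval:    operatorsv1alpha1.ApprovalAutomatic,
		Config: &operatorsv1alpha1.SubscriptionConfig{
			Resources: &corev1.ResourceRequirements{
				Limits: corev1.ResourceList{corev1.ResourceMemory: resource.MustParse("128Mi")},
			},
		},
	}
	fmt.Println(spec.Package, spec.Config.Resources.Limits.Memory().String())
}
// --- end sketch ---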
@@ -158,7 +161,7 @@ func (in *BundleLookup) DeepCopyInto(out *BundleLookup) { *out = *in if in.CatalogSourceRef != nil { in, out := &in.CatalogSourceRef, &out.CatalogSourceRef - *out = new(corev1.ObjectReference) + *out = new(v1.ObjectReference) **out = **in } if in.Conditions != nil { @@ -306,6 +309,11 @@ func (in *CatalogSourceList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CatalogSourceSpec) DeepCopyInto(out *CatalogSourceSpec) { *out = *in + if in.GrpcPodConfig != nil { + in, out := &in.GrpcPodConfig, &out.GrpcPodConfig + *out = new(GrpcPodConfig) + (*in).DeepCopyInto(*out) + } if in.UpdateStrategy != nil { in, out := &in.UpdateStrategy, &out.UpdateStrategy *out = new(UpdateStrategy) @@ -351,6 +359,13 @@ func (in *CatalogSourceStatus) DeepCopyInto(out *CatalogSourceStatus) { *out = new(GRPCConnectionState) (*in).DeepCopyInto(*out) } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogSourceStatus. @@ -363,6 +378,43 @@ func (in *CatalogSourceStatus) DeepCopy() *CatalogSourceStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CleanupSpec) DeepCopyInto(out *CleanupSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CleanupSpec. +func (in *CleanupSpec) DeepCopy() *CleanupSpec { + if in == nil { + return nil + } + out := new(CleanupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CleanupStatus) DeepCopyInto(out *CleanupStatus) { + *out = *in + if in.PendingDeletion != nil { + in, out := &in.PendingDeletion, &out.PendingDeletion + *out = make([]ResourceList, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CleanupStatus. +func (in *CleanupStatus) DeepCopy() *CleanupStatus { + if in == nil { + return nil + } + out := new(CleanupStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ClusterServiceVersion) DeepCopyInto(out *ClusterServiceVersion) { *out = *in @@ -461,7 +513,7 @@ func (in *ClusterServiceVersionSpec) DeepCopyInto(out *ClusterServiceVersionSpec } if in.NativeAPIs != nil { in, out := &in.NativeAPIs, &out.NativeAPIs - *out = make([]v1.GroupVersionKind, len(*in)) + *out = make([]metav1.GroupVersionKind, len(*in)) copy(*out, *in) } if in.Keywords != nil { @@ -506,9 +558,20 @@ func (in *ClusterServiceVersionSpec) DeepCopyInto(out *ClusterServiceVersionSpec } if in.Selector != nil { in, out := &in.Selector, &out.Selector - *out = new(v1.LabelSelector) + *out = new(metav1.LabelSelector) (*in).DeepCopyInto(*out) } + out.Cleanup = in.Cleanup + if in.Skips != nil { + in, out := &in.Skips, &out.Skips + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.RelatedImages != nil { + in, out := &in.RelatedImages, &out.RelatedImages + *out = make([]RelatedImage, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterServiceVersionSpec. @@ -554,6 +617,7 @@ func (in *ClusterServiceVersionStatus) DeepCopyInto(out *ClusterServiceVersionSt in, out := &in.CertsRotateAt, &out.CertsRotateAt *out = (*in).DeepCopy() } + in.Cleanup.DeepCopyInto(&out.Cleanup) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterServiceVersionStatus. @@ -642,6 +706,40 @@ func (in *GRPCConnectionState) DeepCopy() *GRPCConnectionState { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GrpcPodConfig) DeepCopyInto(out *GrpcPodConfig) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]v1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PriorityClassName != nil { + in, out := &in.PriorityClassName, &out.PriorityClassName + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GrpcPodConfig. +func (in *GrpcPodConfig) DeepCopy() *GrpcPodConfig { + if in == nil { + return nil + } + out := new(GrpcPodConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Icon) DeepCopyInto(out *Icon) { *out = *in @@ -845,9 +943,13 @@ func (in *InstallPlanStatus) DeepCopyInto(out *InstallPlanStatus) { } if in.AttenuatedServiceAccountRef != nil { in, out := &in.AttenuatedServiceAccountRef, &out.AttenuatedServiceAccountRef - *out = new(corev1.ObjectReference) + *out = new(v1.ObjectReference) **out = **in } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = (*in).DeepCopy() + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallPlanStatus. @@ -896,7 +998,7 @@ func (in *RegistryPoll) DeepCopyInto(out *RegistryPoll) { *out = *in if in.Interval != nil { in, out := &in.Interval, &out.Interval - *out = new(v1.Duration) + *out = new(metav1.Duration) **out = **in } } @@ -927,6 +1029,21 @@ func (in *RegistryServiceStatus) DeepCopy() *RegistryServiceStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *RelatedImage) DeepCopyInto(out *RelatedImage) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RelatedImage. +func (in *RelatedImage) DeepCopy() *RelatedImage { + if in == nil { + return nil + } + out := new(RelatedImage) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RequirementStatus) DeepCopyInto(out *RequirementStatus) { *out = *in @@ -947,6 +1064,41 @@ func (in *RequirementStatus) DeepCopy() *RequirementStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceInstance) DeepCopyInto(out *ResourceInstance) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceInstance. +func (in *ResourceInstance) DeepCopy() *ResourceInstance { + if in == nil { + return nil + } + out := new(ResourceInstance) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceList) DeepCopyInto(out *ResourceList) { + *out = *in + if in.Instances != nil { + in, out := &in.Instances, &out.Instances + *out = make([]ResourceInstance, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceList. +func (in *ResourceList) DeepCopy() *ResourceList { + if in == nil { + return nil + } + out := new(ResourceList) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SpecDescriptor) DeepCopyInto(out *SpecDescriptor) { *out = *in @@ -1054,6 +1206,13 @@ func (in *StrategyDeploymentPermissions) DeepCopy() *StrategyDeploymentPermissio func (in *StrategyDeploymentSpec) DeepCopyInto(out *StrategyDeploymentSpec) { *out = *in in.Spec.DeepCopyInto(&out.Spec) + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = make(labels.Set, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyDeploymentSpec. 
@@ -1138,7 +1297,7 @@ func (in *SubscriptionCatalogHealth) DeepCopyInto(out *SubscriptionCatalogHealth *out = *in if in.CatalogSourceRef != nil { in, out := &in.CatalogSourceRef, &out.CatalogSourceRef - *out = new(corev1.ObjectReference) + *out = new(v1.ObjectReference) **out = **in } if in.LastUpdated != nil { @@ -1185,7 +1344,7 @@ func (in *SubscriptionConfig) DeepCopyInto(out *SubscriptionConfig) { *out = *in if in.Selector != nil { in, out := &in.Selector, &out.Selector - *out = new(v1.LabelSelector) + *out = new(metav1.LabelSelector) (*in).DeepCopyInto(*out) } if in.NodeSelector != nil { @@ -1197,36 +1356,40 @@ func (in *SubscriptionConfig) DeepCopyInto(out *SubscriptionConfig) { } if in.Tolerations != nil { in, out := &in.Tolerations, &out.Tolerations - *out = make([]corev1.Toleration, len(*in)) + *out = make([]v1.Toleration, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } - in.Resources.DeepCopyInto(&out.Resources) + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } if in.EnvFrom != nil { in, out := &in.EnvFrom, &out.EnvFrom - *out = make([]corev1.EnvFromSource, len(*in)) + *out = make([]v1.EnvFromSource, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.Env != nil { in, out := &in.Env, &out.Env - *out = make([]corev1.EnvVar, len(*in)) + *out = make([]v1.EnvVar, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.Volumes != nil { in, out := &in.Volumes, &out.Volumes - *out = make([]corev1.Volume, len(*in)) + *out = make([]v1.Volume, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.VolumeMounts != nil { in, out := &in.VolumeMounts, &out.VolumeMounts - *out = make([]corev1.VolumeMount, len(*in)) + *out = make([]v1.VolumeMount, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1278,7 +1441,11 @@ func (in *SubscriptionList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SubscriptionSpec) DeepCopyInto(out *SubscriptionSpec) { *out = *in - in.Config.DeepCopyInto(&out.Config) + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = new(SubscriptionConfig) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionSpec. @@ -1301,7 +1468,7 @@ func (in *SubscriptionStatus) DeepCopyInto(out *SubscriptionStatus) { } if in.InstallPlanRef != nil { in, out := &in.InstallPlanRef, &out.InstallPlanRef - *out = new(corev1.ObjectReference) + *out = new(v1.ObjectReference) **out = **in } if in.CatalogHealth != nil { @@ -1354,6 +1521,11 @@ func (in *UpdateStrategy) DeepCopy() *UpdateStrategy { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *WebhookDescription) DeepCopyInto(out *WebhookDescription) { *out = *in + if in.TargetPort != nil { + in, out := &in.TargetPort, &out.TargetPort + *out = new(intstr.IntOrString) + **out = **in + } if in.Rules != nil { in, out := &in.Rules, &out.Rules *out = make([]admissionregistrationv1.RuleWithOperations, len(*in)) @@ -1373,7 +1545,7 @@ func (in *WebhookDescription) DeepCopyInto(out *WebhookDescription) { } if in.ObjectSelector != nil { in, out := &in.ObjectSelector, &out.ObjectSelector - *out = new(v1.LabelSelector) + *out = new(metav1.LabelSelector) (*in).DeepCopyInto(*out) } if in.SideEffects != nil { @@ -1401,6 +1573,11 @@ func (in *WebhookDescription) DeepCopyInto(out *WebhookDescription) { *out = new(string) **out = **in } + if in.ConversionCRDs != nil { + in, out := &in.ConversionCRDs, &out.ConversionCRDs + *out = make([]string, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookDescription. diff --git a/vendor/github.com/operator-framework/api/pkg/operators/zz_generated.deepcopy.go b/vendor/github.com/operator-framework/api/pkg/operators/zz_generated.deepcopy.go deleted file mode 100644 index 8b8760842f..0000000000 --- a/vendor/github.com/operator-framework/api/pkg/operators/zz_generated.deepcopy.go +++ /dev/null @@ -1,1457 +0,0 @@ -// +build !ignore_autogenerated - -/* - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by controller-gen. DO NOT EDIT. - -package operators - -import ( - "encoding/json" - corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *APIResourceReference) DeepCopyInto(out *APIResourceReference) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIResourceReference. -func (in *APIResourceReference) DeepCopy() *APIResourceReference { - if in == nil { - return nil - } - out := new(APIResourceReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *APIServiceDefinitions) DeepCopyInto(out *APIServiceDefinitions) { - *out = *in - if in.Owned != nil { - in, out := &in.Owned, &out.Owned - *out = make([]APIServiceDescription, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Required != nil { - in, out := &in.Required, &out.Required - *out = make([]APIServiceDescription, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServiceDefinitions. 
-func (in *APIServiceDefinitions) DeepCopy() *APIServiceDefinitions { - if in == nil { - return nil - } - out := new(APIServiceDefinitions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *APIServiceDescription) DeepCopyInto(out *APIServiceDescription) { - *out = *in - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = make([]APIResourceReference, len(*in)) - copy(*out, *in) - } - if in.StatusDescriptors != nil { - in, out := &in.StatusDescriptors, &out.StatusDescriptors - *out = make([]StatusDescriptor, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.SpecDescriptors != nil { - in, out := &in.SpecDescriptors, &out.SpecDescriptors - *out = make([]SpecDescriptor, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.ActionDescriptor != nil { - in, out := &in.ActionDescriptor, &out.ActionDescriptor - *out = make([]ActionDescriptor, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServiceDescription. -func (in *APIServiceDescription) DeepCopy() *APIServiceDescription { - if in == nil { - return nil - } - out := new(APIServiceDescription) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ActionDescriptor) DeepCopyInto(out *ActionDescriptor) { - *out = *in - if in.XDescriptors != nil { - in, out := &in.XDescriptors, &out.XDescriptors - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Value != nil { - in, out := &in.Value, &out.Value - *out = make(json.RawMessage, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionDescriptor. -func (in *ActionDescriptor) DeepCopy() *ActionDescriptor { - if in == nil { - return nil - } - out := new(ActionDescriptor) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AppLink) DeepCopyInto(out *AppLink) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppLink. -func (in *AppLink) DeepCopy() *AppLink { - if in == nil { - return nil - } - out := new(AppLink) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BundleLookup) DeepCopyInto(out *BundleLookup) { - *out = *in - if in.CatalogSourceRef != nil { - in, out := &in.CatalogSourceRef, &out.CatalogSourceRef - *out = new(corev1.ObjectReference) - **out = **in - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]BundleLookupCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BundleLookup. -func (in *BundleLookup) DeepCopy() *BundleLookup { - if in == nil { - return nil - } - out := new(BundleLookup) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *BundleLookupCondition) DeepCopyInto(out *BundleLookupCondition) { - *out = *in - if in.LastUpdateTime != nil { - in, out := &in.LastUpdateTime, &out.LastUpdateTime - *out = (*in).DeepCopy() - } - if in.LastTransitionTime != nil { - in, out := &in.LastTransitionTime, &out.LastTransitionTime - *out = (*in).DeepCopy() - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BundleLookupCondition. -func (in *BundleLookupCondition) DeepCopy() *BundleLookupCondition { - if in == nil { - return nil - } - out := new(BundleLookupCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CRDDescription) DeepCopyInto(out *CRDDescription) { - *out = *in - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = make([]APIResourceReference, len(*in)) - copy(*out, *in) - } - if in.StatusDescriptors != nil { - in, out := &in.StatusDescriptors, &out.StatusDescriptors - *out = make([]StatusDescriptor, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.SpecDescriptors != nil { - in, out := &in.SpecDescriptors, &out.SpecDescriptors - *out = make([]SpecDescriptor, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.ActionDescriptor != nil { - in, out := &in.ActionDescriptor, &out.ActionDescriptor - *out = make([]ActionDescriptor, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CRDDescription. -func (in *CRDDescription) DeepCopy() *CRDDescription { - if in == nil { - return nil - } - out := new(CRDDescription) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CatalogSource) DeepCopyInto(out *CatalogSource) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogSource. -func (in *CatalogSource) DeepCopy() *CatalogSource { - if in == nil { - return nil - } - out := new(CatalogSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CatalogSource) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CatalogSourceList) DeepCopyInto(out *CatalogSourceList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CatalogSource, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogSourceList. -func (in *CatalogSourceList) DeepCopy() *CatalogSourceList { - if in == nil { - return nil - } - out := new(CatalogSourceList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *CatalogSourceList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CatalogSourceSpec) DeepCopyInto(out *CatalogSourceSpec) { - *out = *in - if in.UpdateStrategy != nil { - in, out := &in.UpdateStrategy, &out.UpdateStrategy - *out = new(UpdateStrategy) - (*in).DeepCopyInto(*out) - } - if in.Secrets != nil { - in, out := &in.Secrets, &out.Secrets - *out = make([]string, len(*in)) - copy(*out, *in) - } - out.Icon = in.Icon -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogSourceSpec. -func (in *CatalogSourceSpec) DeepCopy() *CatalogSourceSpec { - if in == nil { - return nil - } - out := new(CatalogSourceSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CatalogSourceStatus) DeepCopyInto(out *CatalogSourceStatus) { - *out = *in - if in.ConfigMapResource != nil { - in, out := &in.ConfigMapResource, &out.ConfigMapResource - *out = new(ConfigMapResourceReference) - (*in).DeepCopyInto(*out) - } - if in.RegistryServiceStatus != nil { - in, out := &in.RegistryServiceStatus, &out.RegistryServiceStatus - *out = new(RegistryServiceStatus) - (*in).DeepCopyInto(*out) - } - if in.GRPCConnectionState != nil { - in, out := &in.GRPCConnectionState, &out.GRPCConnectionState - *out = new(GRPCConnectionState) - (*in).DeepCopyInto(*out) - } - if in.LatestImageRegistryPoll != nil { - in, out := &in.LatestImageRegistryPoll, &out.LatestImageRegistryPoll - *out = (*in).DeepCopy() - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogSourceStatus. -func (in *CatalogSourceStatus) DeepCopy() *CatalogSourceStatus { - if in == nil { - return nil - } - out := new(CatalogSourceStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterServiceVersion) DeepCopyInto(out *ClusterServiceVersion) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterServiceVersion. -func (in *ClusterServiceVersion) DeepCopy() *ClusterServiceVersion { - if in == nil { - return nil - } - out := new(ClusterServiceVersion) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ClusterServiceVersion) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterServiceVersionCondition) DeepCopyInto(out *ClusterServiceVersionCondition) { - *out = *in - if in.LastUpdateTime != nil { - in, out := &in.LastUpdateTime, &out.LastUpdateTime - *out = (*in).DeepCopy() - } - if in.LastTransitionTime != nil { - in, out := &in.LastTransitionTime, &out.LastTransitionTime - *out = (*in).DeepCopy() - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterServiceVersionCondition. 
-func (in *ClusterServiceVersionCondition) DeepCopy() *ClusterServiceVersionCondition { - if in == nil { - return nil - } - out := new(ClusterServiceVersionCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterServiceVersionList) DeepCopyInto(out *ClusterServiceVersionList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ClusterServiceVersion, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterServiceVersionList. -func (in *ClusterServiceVersionList) DeepCopy() *ClusterServiceVersionList { - if in == nil { - return nil - } - out := new(ClusterServiceVersionList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ClusterServiceVersionList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterServiceVersionSpec) DeepCopyInto(out *ClusterServiceVersionSpec) { - *out = *in - in.InstallStrategy.DeepCopyInto(&out.InstallStrategy) - in.Version.DeepCopyInto(&out.Version) - in.CustomResourceDefinitions.DeepCopyInto(&out.CustomResourceDefinitions) - in.APIServiceDefinitions.DeepCopyInto(&out.APIServiceDefinitions) - if in.NativeAPIs != nil { - in, out := &in.NativeAPIs, &out.NativeAPIs - *out = make([]v1.GroupVersionKind, len(*in)) - copy(*out, *in) - } - if in.Keywords != nil { - in, out := &in.Keywords, &out.Keywords - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Maintainers != nil { - in, out := &in.Maintainers, &out.Maintainers - *out = make([]Maintainer, len(*in)) - copy(*out, *in) - } - out.Provider = in.Provider - if in.Links != nil { - in, out := &in.Links, &out.Links - *out = make([]AppLink, len(*in)) - copy(*out, *in) - } - if in.Icon != nil { - in, out := &in.Icon, &out.Icon - *out = make([]Icon, len(*in)) - copy(*out, *in) - } - if in.InstallModes != nil { - in, out := &in.InstallModes, &out.InstallModes - *out = make([]InstallMode, len(*in)) - copy(*out, *in) - } - if in.Labels != nil { - in, out := &in.Labels, &out.Labels - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Annotations != nil { - in, out := &in.Annotations, &out.Annotations - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterServiceVersionSpec. -func (in *ClusterServiceVersionSpec) DeepCopy() *ClusterServiceVersionSpec { - if in == nil { - return nil - } - out := new(ClusterServiceVersionSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ClusterServiceVersionStatus) DeepCopyInto(out *ClusterServiceVersionStatus) { - *out = *in - if in.LastUpdateTime != nil { - in, out := &in.LastUpdateTime, &out.LastUpdateTime - *out = (*in).DeepCopy() - } - if in.LastTransitionTime != nil { - in, out := &in.LastTransitionTime, &out.LastTransitionTime - *out = (*in).DeepCopy() - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]ClusterServiceVersionCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.RequirementStatus != nil { - in, out := &in.RequirementStatus, &out.RequirementStatus - *out = make([]RequirementStatus, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.CertsLastUpdated != nil { - in, out := &in.CertsLastUpdated, &out.CertsLastUpdated - *out = (*in).DeepCopy() - } - if in.CertsRotateAt != nil { - in, out := &in.CertsRotateAt, &out.CertsRotateAt - *out = (*in).DeepCopy() - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterServiceVersionStatus. -func (in *ClusterServiceVersionStatus) DeepCopy() *ClusterServiceVersionStatus { - if in == nil { - return nil - } - out := new(ClusterServiceVersionStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConfigMapResourceReference) DeepCopyInto(out *ConfigMapResourceReference) { - *out = *in - in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapResourceReference. -func (in *ConfigMapResourceReference) DeepCopy() *ConfigMapResourceReference { - if in == nil { - return nil - } - out := new(ConfigMapResourceReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomResourceDefinitions) DeepCopyInto(out *CustomResourceDefinitions) { - *out = *in - if in.Owned != nil { - in, out := &in.Owned, &out.Owned - *out = make([]CRDDescription, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Required != nil { - in, out := &in.Required, &out.Required - *out = make([]CRDDescription, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitions. -func (in *CustomResourceDefinitions) DeepCopy() *CustomResourceDefinitions { - if in == nil { - return nil - } - out := new(CustomResourceDefinitions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DependentStatus) DeepCopyInto(out *DependentStatus) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DependentStatus. -func (in *DependentStatus) DeepCopy() *DependentStatus { - if in == nil { - return nil - } - out := new(DependentStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *GRPCConnectionState) DeepCopyInto(out *GRPCConnectionState) { - *out = *in - in.LastConnectTime.DeepCopyInto(&out.LastConnectTime) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCConnectionState. -func (in *GRPCConnectionState) DeepCopy() *GRPCConnectionState { - if in == nil { - return nil - } - out := new(GRPCConnectionState) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Icon) DeepCopyInto(out *Icon) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Icon. -func (in *Icon) DeepCopy() *Icon { - if in == nil { - return nil - } - out := new(Icon) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *InstallMode) DeepCopyInto(out *InstallMode) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallMode. -func (in *InstallMode) DeepCopy() *InstallMode { - if in == nil { - return nil - } - out := new(InstallMode) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in InstallModeSet) DeepCopyInto(out *InstallModeSet) { - { - in := &in - *out = make(InstallModeSet, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallModeSet. -func (in InstallModeSet) DeepCopy() InstallModeSet { - if in == nil { - return nil - } - out := new(InstallModeSet) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *InstallPlan) DeepCopyInto(out *InstallPlan) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallPlan. -func (in *InstallPlan) DeepCopy() *InstallPlan { - if in == nil { - return nil - } - out := new(InstallPlan) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *InstallPlan) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *InstallPlanCondition) DeepCopyInto(out *InstallPlanCondition) { - *out = *in - if in.LastUpdateTime != nil { - in, out := &in.LastUpdateTime, &out.LastUpdateTime - *out = (*in).DeepCopy() - } - if in.LastTransitionTime != nil { - in, out := &in.LastTransitionTime, &out.LastTransitionTime - *out = (*in).DeepCopy() - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallPlanCondition. -func (in *InstallPlanCondition) DeepCopy() *InstallPlanCondition { - if in == nil { - return nil - } - out := new(InstallPlanCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *InstallPlanList) DeepCopyInto(out *InstallPlanList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]InstallPlan, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallPlanList. -func (in *InstallPlanList) DeepCopy() *InstallPlanList { - if in == nil { - return nil - } - out := new(InstallPlanList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *InstallPlanList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *InstallPlanReference) DeepCopyInto(out *InstallPlanReference) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallPlanReference. -func (in *InstallPlanReference) DeepCopy() *InstallPlanReference { - if in == nil { - return nil - } - out := new(InstallPlanReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *InstallPlanSpec) DeepCopyInto(out *InstallPlanSpec) { - *out = *in - if in.ClusterServiceVersionNames != nil { - in, out := &in.ClusterServiceVersionNames, &out.ClusterServiceVersionNames - *out = make([]string, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallPlanSpec. -func (in *InstallPlanSpec) DeepCopy() *InstallPlanSpec { - if in == nil { - return nil - } - out := new(InstallPlanSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *InstallPlanStatus) DeepCopyInto(out *InstallPlanStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]InstallPlanCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.CatalogSources != nil { - in, out := &in.CatalogSources, &out.CatalogSources - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Plan != nil { - in, out := &in.Plan, &out.Plan - *out = make([]*Step, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(Step) - **out = **in - } - } - } - if in.BundleLookups != nil { - in, out := &in.BundleLookups, &out.BundleLookups - *out = make([]BundleLookup, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.AttenuatedServiceAccountRef != nil { - in, out := &in.AttenuatedServiceAccountRef, &out.AttenuatedServiceAccountRef - *out = new(corev1.ObjectReference) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallPlanStatus. -func (in *InstallPlanStatus) DeepCopy() *InstallPlanStatus { - if in == nil { - return nil - } - out := new(InstallPlanStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Maintainer) DeepCopyInto(out *Maintainer) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Maintainer. -func (in *Maintainer) DeepCopy() *Maintainer { - if in == nil { - return nil - } - out := new(Maintainer) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NamedInstallStrategy) DeepCopyInto(out *NamedInstallStrategy) { - *out = *in - in.StrategySpec.DeepCopyInto(&out.StrategySpec) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedInstallStrategy. -func (in *NamedInstallStrategy) DeepCopy() *NamedInstallStrategy { - if in == nil { - return nil - } - out := new(NamedInstallStrategy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OperatorGroup) DeepCopyInto(out *OperatorGroup) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorGroup. -func (in *OperatorGroup) DeepCopy() *OperatorGroup { - if in == nil { - return nil - } - out := new(OperatorGroup) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *OperatorGroup) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OperatorGroupList) DeepCopyInto(out *OperatorGroupList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]OperatorGroup, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorGroupList. -func (in *OperatorGroupList) DeepCopy() *OperatorGroupList { - if in == nil { - return nil - } - out := new(OperatorGroupList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *OperatorGroupList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OperatorGroupSpec) DeepCopyInto(out *OperatorGroupSpec) { - *out = *in - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.TargetNamespaces != nil { - in, out := &in.TargetNamespaces, &out.TargetNamespaces - *out = make([]string, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorGroupSpec. -func (in *OperatorGroupSpec) DeepCopy() *OperatorGroupSpec { - if in == nil { - return nil - } - out := new(OperatorGroupSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *OperatorGroupStatus) DeepCopyInto(out *OperatorGroupStatus) { - *out = *in - if in.Namespaces != nil { - in, out := &in.Namespaces, &out.Namespaces - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ServiceAccountRef != nil { - in, out := &in.ServiceAccountRef, &out.ServiceAccountRef - *out = new(corev1.ObjectReference) - **out = **in - } - if in.LastUpdated != nil { - in, out := &in.LastUpdated, &out.LastUpdated - *out = (*in).DeepCopy() - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorGroupStatus. -func (in *OperatorGroupStatus) DeepCopy() *OperatorGroupStatus { - if in == nil { - return nil - } - out := new(OperatorGroupStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RegistryPoll) DeepCopyInto(out *RegistryPoll) { - *out = *in - if in.Interval != nil { - in, out := &in.Interval, &out.Interval - *out = new(v1.Duration) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryPoll. -func (in *RegistryPoll) DeepCopy() *RegistryPoll { - if in == nil { - return nil - } - out := new(RegistryPoll) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RegistryServiceStatus) DeepCopyInto(out *RegistryServiceStatus) { - *out = *in - in.CreatedAt.DeepCopyInto(&out.CreatedAt) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryServiceStatus. -func (in *RegistryServiceStatus) DeepCopy() *RegistryServiceStatus { - if in == nil { - return nil - } - out := new(RegistryServiceStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RequirementStatus) DeepCopyInto(out *RequirementStatus) { - *out = *in - if in.Dependents != nil { - in, out := &in.Dependents, &out.Dependents - *out = make([]DependentStatus, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequirementStatus. -func (in *RequirementStatus) DeepCopy() *RequirementStatus { - if in == nil { - return nil - } - out := new(RequirementStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SpecDescriptor) DeepCopyInto(out *SpecDescriptor) { - *out = *in - if in.XDescriptors != nil { - in, out := &in.XDescriptors, &out.XDescriptors - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Value != nil { - in, out := &in.Value, &out.Value - *out = make(json.RawMessage, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecDescriptor. -func (in *SpecDescriptor) DeepCopy() *SpecDescriptor { - if in == nil { - return nil - } - out := new(SpecDescriptor) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *StatusDescriptor) DeepCopyInto(out *StatusDescriptor) { - *out = *in - if in.XDescriptors != nil { - in, out := &in.XDescriptors, &out.XDescriptors - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Value != nil { - in, out := &in.Value, &out.Value - *out = make(json.RawMessage, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatusDescriptor. -func (in *StatusDescriptor) DeepCopy() *StatusDescriptor { - if in == nil { - return nil - } - out := new(StatusDescriptor) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Step) DeepCopyInto(out *Step) { - *out = *in - out.Resource = in.Resource -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Step. -func (in *Step) DeepCopy() *Step { - if in == nil { - return nil - } - out := new(Step) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StepResource) DeepCopyInto(out *StepResource) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepResource. -func (in *StepResource) DeepCopy() *StepResource { - if in == nil { - return nil - } - out := new(StepResource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StrategyDeploymentPermissions) DeepCopyInto(out *StrategyDeploymentPermissions) { - *out = *in - if in.Rules != nil { - in, out := &in.Rules, &out.Rules - *out = make([]rbacv1.PolicyRule, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyDeploymentPermissions. -func (in *StrategyDeploymentPermissions) DeepCopy() *StrategyDeploymentPermissions { - if in == nil { - return nil - } - out := new(StrategyDeploymentPermissions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StrategyDeploymentSpec) DeepCopyInto(out *StrategyDeploymentSpec) { - *out = *in - in.Spec.DeepCopyInto(&out.Spec) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyDeploymentSpec. -func (in *StrategyDeploymentSpec) DeepCopy() *StrategyDeploymentSpec { - if in == nil { - return nil - } - out := new(StrategyDeploymentSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *StrategyDetailsDeployment) DeepCopyInto(out *StrategyDetailsDeployment) { - *out = *in - if in.DeploymentSpecs != nil { - in, out := &in.DeploymentSpecs, &out.DeploymentSpecs - *out = make([]StrategyDeploymentSpec, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Permissions != nil { - in, out := &in.Permissions, &out.Permissions - *out = make([]StrategyDeploymentPermissions, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.ClusterPermissions != nil { - in, out := &in.ClusterPermissions, &out.ClusterPermissions - *out = make([]StrategyDeploymentPermissions, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyDetailsDeployment. -func (in *StrategyDetailsDeployment) DeepCopy() *StrategyDetailsDeployment { - if in == nil { - return nil - } - out := new(StrategyDetailsDeployment) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Subscription) DeepCopyInto(out *Subscription) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Spec != nil { - in, out := &in.Spec, &out.Spec - *out = new(SubscriptionSpec) - (*in).DeepCopyInto(*out) - } - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subscription. -func (in *Subscription) DeepCopy() *Subscription { - if in == nil { - return nil - } - out := new(Subscription) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Subscription) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SubscriptionCatalogHealth) DeepCopyInto(out *SubscriptionCatalogHealth) { - *out = *in - if in.CatalogSourceRef != nil { - in, out := &in.CatalogSourceRef, &out.CatalogSourceRef - *out = new(corev1.ObjectReference) - **out = **in - } - if in.LastUpdated != nil { - in, out := &in.LastUpdated, &out.LastUpdated - *out = (*in).DeepCopy() - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionCatalogHealth. -func (in *SubscriptionCatalogHealth) DeepCopy() *SubscriptionCatalogHealth { - if in == nil { - return nil - } - out := new(SubscriptionCatalogHealth) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SubscriptionCondition) DeepCopyInto(out *SubscriptionCondition) { - *out = *in - if in.LastHeartbeatTime != nil { - in, out := &in.LastHeartbeatTime, &out.LastHeartbeatTime - *out = (*in).DeepCopy() - } - if in.LastTransitionTime != nil { - in, out := &in.LastTransitionTime, &out.LastTransitionTime - *out = (*in).DeepCopy() - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionCondition. 
-func (in *SubscriptionCondition) DeepCopy() *SubscriptionCondition { - if in == nil { - return nil - } - out := new(SubscriptionCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SubscriptionConfig) DeepCopyInto(out *SubscriptionConfig) { - *out = *in - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.NodeSelector != nil { - in, out := &in.NodeSelector, &out.NodeSelector - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Tolerations != nil { - in, out := &in.Tolerations, &out.Tolerations - *out = make([]corev1.Toleration, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - in.Resources.DeepCopyInto(&out.Resources) - if in.EnvFrom != nil { - in, out := &in.EnvFrom, &out.EnvFrom - *out = make([]corev1.EnvFromSource, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]corev1.EnvVar, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Volumes != nil { - in, out := &in.Volumes, &out.Volumes - *out = make([]corev1.Volume, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.VolumeMounts != nil { - in, out := &in.VolumeMounts, &out.VolumeMounts - *out = make([]corev1.VolumeMount, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionConfig. -func (in *SubscriptionConfig) DeepCopy() *SubscriptionConfig { - if in == nil { - return nil - } - out := new(SubscriptionConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SubscriptionList) DeepCopyInto(out *SubscriptionList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Subscription, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionList. -func (in *SubscriptionList) DeepCopy() *SubscriptionList { - if in == nil { - return nil - } - out := new(SubscriptionList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *SubscriptionList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SubscriptionSpec) DeepCopyInto(out *SubscriptionSpec) { - *out = *in - in.Config.DeepCopyInto(&out.Config) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionSpec. -func (in *SubscriptionSpec) DeepCopy() *SubscriptionSpec { - if in == nil { - return nil - } - out := new(SubscriptionSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *SubscriptionStatus) DeepCopyInto(out *SubscriptionStatus) { - *out = *in - if in.Install != nil { - in, out := &in.Install, &out.Install - *out = new(InstallPlanReference) - **out = **in - } - if in.InstallPlanRef != nil { - in, out := &in.InstallPlanRef, &out.InstallPlanRef - *out = new(corev1.ObjectReference) - **out = **in - } - if in.CatalogHealth != nil { - in, out := &in.CatalogHealth, &out.CatalogHealth - *out = make([]SubscriptionCatalogHealth, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]SubscriptionCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - in.LastUpdated.DeepCopyInto(&out.LastUpdated) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionStatus. -func (in *SubscriptionStatus) DeepCopy() *SubscriptionStatus { - if in == nil { - return nil - } - out := new(SubscriptionStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *UpdateStrategy) DeepCopyInto(out *UpdateStrategy) { - *out = *in - if in.RegistryPoll != nil { - in, out := &in.RegistryPoll, &out.RegistryPoll - *out = new(RegistryPoll) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdateStrategy. -func (in *UpdateStrategy) DeepCopy() *UpdateStrategy { - if in == nil { - return nil - } - out := new(UpdateStrategy) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/github.com/operator-framework/operator-lib/LICENSE b/vendor/github.com/operator-framework/operator-lib/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/github.com/operator-framework/operator-lib/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/operator-framework/operator-lib/proxy/doc.go b/vendor/github.com/operator-framework/operator-lib/proxy/doc.go new file mode 100644 index 0000000000..5b6802a0b1 --- /dev/null +++ b/vendor/github.com/operator-framework/operator-lib/proxy/doc.go @@ -0,0 +1,33 @@ +// Copyright 2021 The Operator-SDK Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package proxy implements helper functions to facilitate making operators proxy-aware. 
+
+This package assumes that proxy environment variables `HTTPS_PROXY`,
+`HTTP_PROXY`, and/or `NO_PROXY` are set in the Operator environment, typically
+via the Operator deployment.
+
+Proxy-aware operators can use ReadProxyVarsFromEnv to retrieve these values
+as a slice of corev1 EnvVars. Each of the proxy variables is duplicated in
+upper and lower case to support applications that use either. In their
+reconcile functions, Operator authors are then responsible for setting these
+variables in the Container Envs that must use the proxy. For example:
+
+	// Pods with Kubernetes < 1.22
+	for i, cSpec := range (myPod.Spec.Containers) {
+		myPod.Spec.Containers[i].Env = append(cSpec.Env, ReadProxyVarsFromEnv()...)
+	}
+*/
+package proxy
diff --git a/vendor/github.com/operator-framework/operator-lib/proxy/proxy.go b/vendor/github.com/operator-framework/operator-lib/proxy/proxy.go
new file mode 100644
index 0000000000..fc9b9a863f
--- /dev/null
+++ b/vendor/github.com/operator-framework/operator-lib/proxy/proxy.go
@@ -0,0 +1,45 @@
+// Copyright 2021 The Operator-SDK Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package proxy
+
+import (
+	"os"
+	"strings"
+
+	corev1 "k8s.io/api/core/v1"
+)
+
+// ProxyEnvNames are standard environment variables for proxies
+var ProxyEnvNames = []string{"HTTP_PROXY", "HTTPS_PROXY", "NO_PROXY"}
+
+// ReadProxyVarsFromEnv retrieves the standard proxy-related environment
+// variables from the running environment and returns a slice of corev1 EnvVar
+// containing upper and lower case versions of those variables.
+func ReadProxyVarsFromEnv() []corev1.EnvVar {
+	envVars := []corev1.EnvVar{}
+	for _, s := range ProxyEnvNames {
+		value, isSet := os.LookupEnv(s)
+		if isSet {
+			envVars = append(envVars, corev1.EnvVar{
+				Name:  s,
+				Value: value,
+			}, corev1.EnvVar{
+				Name:  strings.ToLower(s),
+				Value: value,
+			})
+		}
+	}
+	return envVars
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 0e5dc912dd..4b405ae5a9 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -155,9 +155,6 @@ github.com/aws/aws-sdk-go/service/sts/stsiface
 # github.com/beorn7/perks v1.0.1
 ## explicit; go 1.11
 github.com/beorn7/perks/quantile
-# github.com/blang/semver v3.5.1+incompatible
-## explicit
-github.com/blang/semver
 # github.com/blang/semver/v4 v4.0.0
 ## explicit; go 1.14
 github.com/blang/semver/v4
@@ -450,11 +447,14 @@ github.com/openshift/library-go/pkg/operator/resource/resourceread
 github.com/openshift/library-go/pkg/operator/resourcesynccontroller
 github.com/openshift/library-go/pkg/operator/v1helpers
 github.com/openshift/library-go/test/library/metrics
-# github.com/operator-framework/api v0.3.7-0.20200528122852-759ca0d84007
-## explicit; go 1.13
+# github.com/operator-framework/api v0.15.0
+## explicit; go 1.17
 github.com/operator-framework/api/pkg/lib/version
 github.com/operator-framework/api/pkg/operators
 github.com/operator-framework/api/pkg/operators/v1alpha1
+# github.com/operator-framework/operator-lib v0.11.0
+## explicit; go 1.17
+github.com/operator-framework/operator-lib/proxy
 # github.com/operator-framework/operator-sdk v0.18.0
 ## explicit; go 1.13
 github.com/operator-framework/operator-sdk/version
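
The vendored ReadProxyVarsFromEnv helper above returns the proxy settings from the operator's own environment as a slice of env vars. A minimal sketch of how a consumer could wire that into a pod spec during reconciliation follows; the injectProxyEnv helper, the "router" container name, and the main function are illustrative assumptions and not part of this change:

	package main

	import (
		"fmt"

		"github.com/operator-framework/operator-lib/proxy"
		corev1 "k8s.io/api/core/v1"
	)

	// injectProxyEnv appends the env vars returned by proxy.ReadProxyVarsFromEnv
	// (upper- and lower-case forms of HTTP_PROXY, HTTPS_PROXY, and NO_PROXY)
	// to every container in the given pod spec.
	func injectProxyEnv(spec *corev1.PodSpec) {
		proxyVars := proxy.ReadProxyVarsFromEnv()
		for i := range spec.Containers {
			spec.Containers[i].Env = append(spec.Containers[i].Env, proxyVars...)
		}
	}

	func main() {
		spec := &corev1.PodSpec{Containers: []corev1.Container{{Name: "router"}}}
		injectProxyEnv(spec)
		fmt.Println(spec.Containers[0].Env)
	}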