From 4012aa00a44894f568c6254bcd74939ece177af2 Mon Sep 17 00:00:00 2001 From: Ricardo Maraschini Date: Thu, 10 Jun 2021 15:38:55 +0200 Subject: [PATCH 1/3] Vendoring needed openshift/api dependencies --- ...rsion-operator_01_clusteroperator.crd.yaml | 165 + ...ersion-operator_01_clusterversion.crd.yaml | 335 ++ ...03_config-operator_01_operatorhub.crd.yaml | 106 + .../0000_03_config-operator_01_proxy.crd.yaml | 104 + ...0_10_config-operator_01_apiserver.crd.yaml | 261 ++ ...config-operator_01_authentication.crd.yaml | 162 + .../0000_10_config-operator_01_build.crd.yaml | 401 ++ ...000_10_config-operator_01_console.crd.yaml | 74 + .../0000_10_config-operator_01_dns.crd.yaml | 104 + ...10_config-operator_01_featuregate.crd.yaml | 79 + .../0000_10_config-operator_01_image.crd.yaml | 162 + ...config-operator_01_infrastructure.crd.yaml | 542 +++ ...000_10_config-operator_01_ingress.crd.yaml | 300 ++ ...000_10_config-operator_01_network.crd.yaml | 173 + .../0000_10_config-operator_01_oauth.crd.yaml | 677 +++ ...000_10_config-operator_01_project.crd.yaml | 67 + ...0_10_config-operator_01_scheduler.crd.yaml | 107 + .../github.com/openshift/api/config/v1/doc.go | 8 + .../openshift/api/config/v1/register.go | 70 + .../openshift/api/config/v1/stringsource.go | 31 + .../openshift/api/config/v1/types.go | 312 ++ .../api/config/v1/types_apiserver.go | 158 + .../api/config/v1/types_authentication.go | 151 + .../openshift/api/config/v1/types_build.go | 116 + .../api/config/v1/types_cluster_operator.go | 187 + .../api/config/v1/types_cluster_version.go | 294 ++ .../openshift/api/config/v1/types_console.go | 64 + .../openshift/api/config/v1/types_dns.go | 87 + .../openshift/api/config/v1/types_feature.go | 208 + .../openshift/api/config/v1/types_image.go | 123 + .../api/config/v1/types_infrastructure.go | 557 +++ .../openshift/api/config/v1/types_ingress.go | 181 + .../openshift/api/config/v1/types_network.go | 144 + .../openshift/api/config/v1/types_oauth.go | 562 +++ .../api/config/v1/types_operatorhub.go | 79 + .../openshift/api/config/v1/types_project.go | 54 + .../openshift/api/config/v1/types_proxy.go | 94 + .../api/config/v1/types_scheduling.go | 100 + .../api/config/v1/types_tlssecurityprofile.go | 262 ++ .../api/config/v1/zz_generated.deepcopy.go | 3889 +++++++++++++++++ .../v1/zz_generated.swagger_doc_generated.go | 1552 +++++++ ...0000_10_config-operator_01_config.crd.yaml | 165 + .../0000_12_etcd-operator_01_config.crd.yaml | 231 + ...kube-apiserver-operator_01_config.crd.yaml | 233 + ...piserver-operator_01_config.crd.yaml-patch | 3 + ...roller-manager-operator_01_config.crd.yaml | 235 + ...-manager-operator_01_config.crd.yaml-patch | 3 + ...kube-scheduler-operator_01_config.crd.yaml | 235 + ...cheduler-operator_01_config.crd.yaml-patch | 3 + ...hift-apiserver-operator_01_config.crd.yaml | 171 + ...oud-credential-operator_00_config.crd.yaml | 179 + ...rsion-migrator-operator_00_config.crd.yaml | 160 + ...authentication-operator_01_config.crd.yaml | 169 + ...roller-manager-operator_02_config.crd.yaml | 161 + ...00_50_cluster_storage_operator_01_crd.yaml | 162 + ...ess-operator_00-ingresscontroller.crd.yaml | 1288 ++++++ ...erator_00-ingresscontroller.crd.yaml-patch | 9 + .../0000_50_service-ca-operator_02_crd.yaml | 163 + ...00_70_cluster-network-operator_01_crd.yaml | 626 +++ .../v1/0000_70_console-operator.crd.yaml | 370 ++ ...perator_00-custom-resource-definition.yaml | 206 + ...i_snapshot_controller_operator_01_crd.yaml | 162 + ...0_90_cluster_csi_driver_01_config.crd.yaml | 176 + 
...luster_csi_driver_01_config.crd.yaml-patch | 14 + .../openshift/api/operator/v1/doc.go | 7 + .../openshift/api/operator/v1/register.go | 76 + .../openshift/api/operator/v1/types.go | 227 + .../api/operator/v1/types_authentication.go | 51 + .../api/operator/v1/types_cloudcredential.go | 76 + .../openshift/api/operator/v1/types_config.go | 43 + .../api/operator/v1/types_console.go | 228 + .../operator/v1/types_csi_cluster_driver.go | 71 + .../api/operator/v1/types_csi_snapshot.go | 44 + .../openshift/api/operator/v1/types_dns.go | 178 + .../openshift/api/operator/v1/types_etcd.go | 40 + .../api/operator/v1/types_ingress.go | 1170 +++++ .../api/operator/v1/types_kubeapiserver.go | 43 + .../v1/types_kubecontrollermanager.go | 43 + .../v1/types_kubestorageversionmigrator.go | 40 + .../api/operator/v1/types_network.go | 539 +++ .../operator/v1/types_openshiftapiserver.go | 50 + .../v1/types_openshiftcontrollermanager.go | 40 + .../api/operator/v1/types_scheduler.go | 43 + .../api/operator/v1/types_serviceca.go | 42 + .../v1/types_servicecatalogapiserver.go | 42 + .../types_servicecatalogcontrollermanager.go | 42 + .../api/operator/v1/types_storage.go | 44 + .../api/operator/v1/zz_generated.deepcopy.go | 3565 +++++++++++++++ .../v1/zz_generated.swagger_doc_generated.go | 1219 ++++++ ...rator_01_imagecontentsourcepolicy.crd.yaml | 94 + .../openshift/api/operator/v1alpha1/doc.go | 6 + .../api/operator/v1alpha1/register.go | 41 + .../openshift/api/operator/v1alpha1/types.go | 180 + .../types_image_content_source_policy.go | 67 + .../v1alpha1/zz_generated.deepcopy.go | 344 ++ .../zz_generated.swagger_doc_generated.go | 174 + .../operator/clientset/versioned/clientset.go | 95 + .../operator/clientset/versioned/doc.go | 4 + .../versioned/fake/clientset_generated.go | 73 + .../operator/clientset/versioned/fake/doc.go | 4 + .../clientset/versioned/fake/register.go | 42 + .../clientset/versioned/scheme/doc.go | 4 + .../clientset/versioned/scheme/register.go | 42 + .../typed/operator/v1/authentication.go | 168 + .../typed/operator/v1/cloudcredential.go | 168 + .../typed/operator/v1/clustercsidriver.go | 168 + .../versioned/typed/operator/v1/config.go | 168 + .../versioned/typed/operator/v1/console.go | 168 + .../operator/v1/csisnapshotcontroller.go | 168 + .../versioned/typed/operator/v1/dns.go | 168 + .../versioned/typed/operator/v1/doc.go | 4 + .../versioned/typed/operator/v1/etcd.go | 168 + .../versioned/typed/operator/v1/fake/doc.go | 4 + .../operator/v1/fake/fake_authentication.go | 117 + .../operator/v1/fake/fake_cloudcredential.go | 117 + .../operator/v1/fake/fake_clustercsidriver.go | 117 + .../typed/operator/v1/fake/fake_config.go | 117 + .../typed/operator/v1/fake/fake_console.go | 117 + .../v1/fake/fake_csisnapshotcontroller.go | 117 + .../typed/operator/v1/fake/fake_dns.go | 117 + .../typed/operator/v1/fake/fake_etcd.go | 117 + .../v1/fake/fake_ingresscontroller.go | 126 + .../operator/v1/fake/fake_kubeapiserver.go | 117 + .../v1/fake/fake_kubecontrollermanager.go | 117 + .../operator/v1/fake/fake_kubescheduler.go | 117 + .../fake/fake_kubestorageversionmigrator.go | 117 + .../typed/operator/v1/fake/fake_network.go | 117 + .../v1/fake/fake_openshiftapiserver.go | 117 + .../fake/fake_openshiftcontrollermanager.go | 117 + .../operator/v1/fake/fake_operator_client.go | 100 + .../typed/operator/v1/fake/fake_serviceca.go | 117 + .../v1/fake/fake_servicecatalogapiserver.go | 117 + .../fake_servicecatalogcontrollermanager.go | 117 + .../typed/operator/v1/fake/fake_storage.go | 117 + 
.../typed/operator/v1/generated_expansion.go | 43 + .../typed/operator/v1/ingresscontroller.go | 179 + .../typed/operator/v1/kubeapiserver.go | 168 + .../operator/v1/kubecontrollermanager.go | 168 + .../typed/operator/v1/kubescheduler.go | 168 + .../operator/v1/kubestorageversionmigrator.go | 168 + .../versioned/typed/operator/v1/network.go | 168 + .../typed/operator/v1/openshiftapiserver.go | 168 + .../operator/v1/openshiftcontrollermanager.go | 168 + .../typed/operator/v1/operator_client.go | 168 + .../versioned/typed/operator/v1/serviceca.go | 168 + .../operator/v1/servicecatalogapiserver.go | 168 + .../v1/servicecatalogcontrollermanager.go | 168 + .../versioned/typed/operator/v1/storage.go | 168 + .../versioned/typed/operator/v1alpha1/doc.go | 4 + .../typed/operator/v1alpha1/fake/doc.go | 4 + .../fake/fake_imagecontentsourcepolicy.go | 106 + .../v1alpha1/fake/fake_operator_client.go | 24 + .../operator/v1alpha1/generated_expansion.go | 5 + .../v1alpha1/imagecontentsourcepolicy.go | 152 + .../operator/v1alpha1/operator_client.go | 73 + .../client-go/discovery/fake/discovery.go | 160 + vendor/modules.txt | 11 + 157 files changed, 33937 insertions(+) create mode 100644 vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusteroperator.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_operatorhub.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_console.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_featuregate.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_image.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_oauth.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_project.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/doc.go create mode 100644 vendor/github.com/openshift/api/config/v1/register.go create mode 100644 vendor/github.com/openshift/api/config/v1/stringsource.go create mode 100644 vendor/github.com/openshift/api/config/v1/types.go create mode 100644 vendor/github.com/openshift/api/config/v1/types_apiserver.go create mode 100644 vendor/github.com/openshift/api/config/v1/types_authentication.go create mode 100644 vendor/github.com/openshift/api/config/v1/types_build.go create mode 100644 
vendor/github.com/openshift/api/config/v1/types_cluster_operator.go create mode 100644 vendor/github.com/openshift/api/config/v1/types_cluster_version.go create mode 100644 vendor/github.com/openshift/api/config/v1/types_console.go create mode 100644 vendor/github.com/openshift/api/config/v1/types_dns.go create mode 100644 vendor/github.com/openshift/api/config/v1/types_feature.go create mode 100644 vendor/github.com/openshift/api/config/v1/types_image.go create mode 100644 vendor/github.com/openshift/api/config/v1/types_infrastructure.go create mode 100644 vendor/github.com/openshift/api/config/v1/types_ingress.go create mode 100644 vendor/github.com/openshift/api/config/v1/types_network.go create mode 100644 vendor/github.com/openshift/api/config/v1/types_oauth.go create mode 100644 vendor/github.com/openshift/api/config/v1/types_operatorhub.go create mode 100644 vendor/github.com/openshift/api/config/v1/types_project.go create mode 100644 vendor/github.com/openshift/api/config/v1/types_proxy.go create mode 100644 vendor/github.com/openshift/api/config/v1/types_scheduling.go create mode 100644 vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go create mode 100644 vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go create mode 100644 vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go create mode 100644 vendor/github.com/openshift/api/operator/v1/0000_10_config-operator_01_config.crd.yaml create mode 100644 vendor/github.com/openshift/api/operator/v1/0000_12_etcd-operator_01_config.crd.yaml create mode 100644 vendor/github.com/openshift/api/operator/v1/0000_20_kube-apiserver-operator_01_config.crd.yaml create mode 100644 vendor/github.com/openshift/api/operator/v1/0000_20_kube-apiserver-operator_01_config.crd.yaml-patch create mode 100644 vendor/github.com/openshift/api/operator/v1/0000_25_kube-controller-manager-operator_01_config.crd.yaml create mode 100644 vendor/github.com/openshift/api/operator/v1/0000_25_kube-controller-manager-operator_01_config.crd.yaml-patch create mode 100644 vendor/github.com/openshift/api/operator/v1/0000_25_kube-scheduler-operator_01_config.crd.yaml create mode 100644 vendor/github.com/openshift/api/operator/v1/0000_25_kube-scheduler-operator_01_config.crd.yaml-patch create mode 100644 vendor/github.com/openshift/api/operator/v1/0000_30_openshift-apiserver-operator_01_config.crd.yaml create mode 100644 vendor/github.com/openshift/api/operator/v1/0000_40_cloud-credential-operator_00_config.crd.yaml create mode 100644 vendor/github.com/openshift/api/operator/v1/0000_40_kube-storage-version-migrator-operator_00_config.crd.yaml create mode 100644 vendor/github.com/openshift/api/operator/v1/0000_50_cluster-authentication-operator_01_config.crd.yaml create mode 100644 vendor/github.com/openshift/api/operator/v1/0000_50_cluster-openshift-controller-manager-operator_02_config.crd.yaml create mode 100644 vendor/github.com/openshift/api/operator/v1/0000_50_cluster_storage_operator_01_crd.yaml create mode 100644 vendor/github.com/openshift/api/operator/v1/0000_50_ingress-operator_00-ingresscontroller.crd.yaml create mode 100644 vendor/github.com/openshift/api/operator/v1/0000_50_ingress-operator_00-ingresscontroller.crd.yaml-patch create mode 100644 vendor/github.com/openshift/api/operator/v1/0000_50_service-ca-operator_02_crd.yaml create mode 100644 vendor/github.com/openshift/api/operator/v1/0000_70_cluster-network-operator_01_crd.yaml create mode 100644 
vendor/github.com/openshift/api/operator/v1/0000_70_console-operator.crd.yaml create mode 100644 vendor/github.com/openshift/api/operator/v1/0000_70_dns-operator_00-custom-resource-definition.yaml create mode 100644 vendor/github.com/openshift/api/operator/v1/0000_80_csi_snapshot_controller_operator_01_crd.yaml create mode 100644 vendor/github.com/openshift/api/operator/v1/0000_90_cluster_csi_driver_01_config.crd.yaml create mode 100644 vendor/github.com/openshift/api/operator/v1/0000_90_cluster_csi_driver_01_config.crd.yaml-patch create mode 100644 vendor/github.com/openshift/api/operator/v1/doc.go create mode 100644 vendor/github.com/openshift/api/operator/v1/register.go create mode 100644 vendor/github.com/openshift/api/operator/v1/types.go create mode 100644 vendor/github.com/openshift/api/operator/v1/types_authentication.go create mode 100644 vendor/github.com/openshift/api/operator/v1/types_cloudcredential.go create mode 100644 vendor/github.com/openshift/api/operator/v1/types_config.go create mode 100644 vendor/github.com/openshift/api/operator/v1/types_console.go create mode 100644 vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go create mode 100644 vendor/github.com/openshift/api/operator/v1/types_csi_snapshot.go create mode 100644 vendor/github.com/openshift/api/operator/v1/types_dns.go create mode 100644 vendor/github.com/openshift/api/operator/v1/types_etcd.go create mode 100644 vendor/github.com/openshift/api/operator/v1/types_ingress.go create mode 100644 vendor/github.com/openshift/api/operator/v1/types_kubeapiserver.go create mode 100644 vendor/github.com/openshift/api/operator/v1/types_kubecontrollermanager.go create mode 100644 vendor/github.com/openshift/api/operator/v1/types_kubestorageversionmigrator.go create mode 100644 vendor/github.com/openshift/api/operator/v1/types_network.go create mode 100644 vendor/github.com/openshift/api/operator/v1/types_openshiftapiserver.go create mode 100644 vendor/github.com/openshift/api/operator/v1/types_openshiftcontrollermanager.go create mode 100644 vendor/github.com/openshift/api/operator/v1/types_scheduler.go create mode 100644 vendor/github.com/openshift/api/operator/v1/types_serviceca.go create mode 100644 vendor/github.com/openshift/api/operator/v1/types_servicecatalogapiserver.go create mode 100644 vendor/github.com/openshift/api/operator/v1/types_servicecatalogcontrollermanager.go create mode 100644 vendor/github.com/openshift/api/operator/v1/types_storage.go create mode 100644 vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go create mode 100644 vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go create mode 100644 vendor/github.com/openshift/api/operator/v1alpha1/0000_10_config-operator_01_imagecontentsourcepolicy.crd.yaml create mode 100644 vendor/github.com/openshift/api/operator/v1alpha1/doc.go create mode 100644 vendor/github.com/openshift/api/operator/v1alpha1/register.go create mode 100644 vendor/github.com/openshift/api/operator/v1alpha1/types.go create mode 100644 vendor/github.com/openshift/api/operator/v1alpha1/types_image_content_source_policy.go create mode 100644 vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.swagger_doc_generated.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/clientset.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/doc.go create mode 
100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/fake/clientset_generated.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/fake/doc.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/fake/register.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/scheme/doc.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/scheme/register.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/authentication.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/cloudcredential.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/clustercsidriver.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/config.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/console.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/csisnapshotcontroller.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/dns.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/doc.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/etcd.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/doc.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_authentication.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_cloudcredential.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_clustercsidriver.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_config.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_console.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_csisnapshotcontroller.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_dns.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_etcd.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_ingresscontroller.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_kubeapiserver.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_kubecontrollermanager.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_kubescheduler.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_kubestorageversionmigrator.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_network.go create mode 100644 
vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_openshiftapiserver.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_openshiftcontrollermanager.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_operator_client.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_serviceca.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_servicecatalogapiserver.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_servicecatalogcontrollermanager.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_storage.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/generated_expansion.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/ingresscontroller.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/kubeapiserver.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/kubecontrollermanager.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/kubescheduler.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/kubestorageversionmigrator.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/network.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/openshiftapiserver.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/openshiftcontrollermanager.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/operator_client.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/serviceca.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/servicecatalogapiserver.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/servicecatalogcontrollermanager.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/storage.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1/doc.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1/fake/doc.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1/fake/fake_imagecontentsourcepolicy.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1/fake/fake_operator_client.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1/generated_expansion.go create mode 100644 vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1/imagecontentsourcepolicy.go create mode 100644 
vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1/operator_client.go create mode 100644 vendor/k8s.io/client-go/discovery/fake/discovery.go diff --git a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusteroperator.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusteroperator.crd.yaml new file mode 100644 index 0000000000..7e265559ee --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusteroperator.crd.yaml @@ -0,0 +1,165 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/497 + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: clusteroperators.config.openshift.io +spec: + group: config.openshift.io + names: + kind: ClusterOperator + listKind: ClusterOperatorList + plural: clusteroperators + shortNames: + - co + singular: clusteroperator + scope: Cluster + versions: + - additionalPrinterColumns: + - description: The version the operator is at. + jsonPath: .status.versions[?(@.name=="operator")].version + name: Version + type: string + - description: Whether the operator is running and stable. + jsonPath: .status.conditions[?(@.type=="Available")].status + name: Available + type: string + - description: Whether the operator is processing changes. + jsonPath: .status.conditions[?(@.type=="Progressing")].status + name: Progressing + type: string + - description: Whether the operator is degraded. + jsonPath: .status.conditions[?(@.type=="Degraded")].status + name: Degraded + type: string + - description: The time the operator's Available status last changed. + jsonPath: .status.conditions[?(@.type=="Available")].lastTransitionTime + name: Since + type: date + name: v1 + schema: + openAPIV3Schema: + description: ClusterOperator is the Custom Resource object which holds the + current state of an operator. This object is used by operators to convey + their state to the rest of the cluster. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds configuration that could apply to any operator. + type: object + status: + description: status holds the information about the state of an operator. It + is consistent with status information across the Kubernetes ecosystem. + type: object + properties: + conditions: + description: conditions describes the state of the operator's managed + and monitored components. + type: array + items: + description: ClusterOperatorStatusCondition represents the state + of the operator's managed and monitored components. 
+ type: object + required: + - lastTransitionTime + - status + - type + properties: + lastTransitionTime: + description: lastTransitionTime is the time of the last update + to the current status property. + type: string + format: date-time + message: + description: message provides additional information about the + current condition. This is only to be consumed by humans. It + may contain Line Feed characters (U+000A), which should be + rendered as new lines. + type: string + reason: + description: reason is the CamelCase reason for the condition's + current status. + type: string + status: + description: status of the condition, one of True, False, Unknown. + type: string + type: + description: type specifies the aspect reported by this condition. + type: string + extension: + description: extension contains any additional status information + specific to the operator which owns this status object. + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + relatedObjects: + description: 'relatedObjects is a list of objects that are "interesting" + or related to this operator. Common uses are: 1. the detailed resource + driving the operator 2. operator namespaces 3. operand namespaces' + type: array + items: + description: ObjectReference contains enough information to let + you inspect or modify the referred object. + type: object + required: + - group + - name + - resource + properties: + group: + description: group of the referent. + type: string + name: + description: name of the referent. + type: string + namespace: + description: namespace of the referent. + type: string + resource: + description: resource of the referent. + type: string + versions: + description: versions is a slice of operator and operand version tuples. Operators + which manage multiple operands will have multiple operand entries + in the array. Available operators must report the version of the + operator itself with the name "operator". An operator reports a + new "operator" version when it has rolled out the new version to + all of its operands. + type: array + items: + type: object + required: + - name + - version + properties: + name: + description: name is the name of the particular operand this + version is for. It usually matches container images, not + operators. + type: string + version: + description: version indicates which version of a particular + operand is currently being managed. It must always match + the Available operand. 
If 1.0.0 is Available, then this must + indicate 1.0.0 even if the operator is trying to roll out 1.1.0 + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml new file mode 100644 index 0000000000..fbc45d3628 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml @@ -0,0 +1,335 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/495 + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: clusterversions.config.openshift.io +spec: + group: config.openshift.io + names: + kind: ClusterVersion + plural: clusterversions + singular: clusterversion + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.history[?(@.state=="Completed")].version + name: Version + type: string + - jsonPath: .status.conditions[?(@.type=="Available")].status + name: Available + type: string + - jsonPath: .status.conditions[?(@.type=="Progressing")].status + name: Progressing + type: string + - jsonPath: .status.conditions[?(@.type=="Progressing")].lastTransitionTime + name: Since + type: date + - jsonPath: .status.conditions[?(@.type=="Progressing")].message + name: Status + type: string + name: v1 + schema: + openAPIV3Schema: + description: ClusterVersion is the configuration for the ClusterVersionOperator. + This is where parameters related to automatic updates can be set. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the desired state of the cluster version - the operator + will work to ensure that the desired version is applied to the cluster. + type: object + required: + - clusterID + properties: + channel: + description: channel is an identifier for explicitly requesting that + a non-default set of updates be applied to this cluster. The default + channel will contain stable updates that are appropriate for + production clusters. + type: string + clusterID: + description: clusterID uniquely identifies this cluster. This is expected + to be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + in hexadecimal values). This is a required field. + type: string + desiredUpdate: + description: "desiredUpdate is an optional field that indicates the + desired value of the cluster version. Setting this value will trigger + an upgrade (if the current version does not match the desired version). 
+ The set of recommended update values is listed as part of available + updates in status, and setting values outside that range may cause + the upgrade to fail. You may specify the version field without setting + image if an update exists with that version in the availableUpdates + or history. \n If an upgrade fails the operator will halt and report + status about the failing component. Setting the desired update value + back to the previous version will cause a rollback to be attempted. + Not all rollbacks will succeed." + type: object + properties: + force: + description: "force allows an administrator to update to an image + that has failed verification, does not appear in the availableUpdates + list, or otherwise would be blocked by normal protections on + update. This option should only be used when the authenticity + of the provided image has been verified out of band because + the provided image will run with full administrative access + to the cluster. Do not use this flag with images that come + from unknown or potentially malicious sources. \n This flag + does not override other forms of consistency checking that are + required before a new update is deployed." + type: boolean + image: + description: image is a container image location that contains + the update. When this field is part of spec, image is optional + if version is specified and the availableUpdates field contains + a matching version. + type: string + version: + description: version is a semantic versioning identifying the + update version. When this field is part of spec, version is + optional if image is specified. + type: string + overrides: + description: overrides is a list of overrides for components that are + managed by the cluster version operator. Marking a component unmanaged + will prevent the operator from creating or updating the object. + type: array + items: + description: ComponentOverride allows overriding cluster version + operator's behavior for a component. + type: object + required: + - group + - kind + - name + - namespace + - unmanaged + properties: + group: + description: group identifies the API group that the kind is + in. + type: string + kind: + description: kind identifies which object to override. + type: string + name: + description: name is the component's name. + type: string + namespace: + description: namespace is the component's namespace. If the + resource is cluster scoped, the namespace should be empty. + type: string + unmanaged: + description: 'unmanaged controls if cluster version operator + should stop managing the resources in this cluster. Default: + false' + type: boolean + upstream: + description: upstream may be used to specify the preferred update + server. By default it will use the appropriate update server for + the cluster and region. + type: string + status: + description: status contains information about the available updates and + any in-progress updates. + type: object + required: + - availableUpdates + - desired + - observedGeneration + - versionHash + properties: + availableUpdates: + description: availableUpdates contains the list of updates that are + appropriate for this cluster. This list may be empty if no updates + are recommended, if the update service is unavailable, or if an + invalid channel has been specified. + type: array + items: + description: Release represents an OpenShift release image and associated + metadata. 
+ type: object + properties: + channels: + description: channels is the set of Cincinnati channels to which + the release currently belongs. + type: array + items: + type: string + image: + description: image is a container image location that contains + the update. When this field is part of spec, image is optional + if version is specified and the availableUpdates field contains + a matching version. + type: string + url: + description: url contains information about this release. This + URL is set by the 'url' metadata property on a release or + the metadata returned by the update API and should be displayed + as a link in user interfaces. The URL field may not be set + for test or nightly releases. + type: string + version: + description: version is a semantic versioning identifying the + update version. When this field is part of spec, version is + optional if image is specified. + type: string + nullable: true + conditions: + description: conditions provides information about the cluster version. + The condition "Available" is set to true if the desiredUpdate has + been reached. The condition "Progressing" is set to true if an update + is being applied. The condition "Degraded" is set to true if an + update is currently blocked by a temporary or permanent error. Conditions + are only valid for the current desiredUpdate when metadata.generation + is equal to status.generation. + type: array + items: + description: ClusterOperatorStatusCondition represents the state + of the operator's managed and monitored components. + type: object + required: + - lastTransitionTime + - status + - type + properties: + lastTransitionTime: + description: lastTransitionTime is the time of the last update + to the current status property. + type: string + format: date-time + message: + description: message provides additional information about the + current condition. This is only to be consumed by humans. It + may contain Line Feed characters (U+000A), which should be + rendered as new lines. + type: string + reason: + description: reason is the CamelCase reason for the condition's + current status. + type: string + status: + description: status of the condition, one of True, False, Unknown. + type: string + type: + description: type specifies the aspect reported by this condition. + type: string + desired: + description: desired is the version that the cluster is reconciling + towards. If the cluster is not yet fully initialized desired will + be set with the information available, which may be an image or + a tag. + type: object + properties: + channels: + description: channels is the set of Cincinnati channels to which + the release currently belongs. + type: array + items: + type: string + image: + description: image is a container image location that contains + the update. When this field is part of spec, image is optional + if version is specified and the availableUpdates field contains + a matching version. + type: string + url: + description: url contains information about this release. This + URL is set by the 'url' metadata property on a release or the + metadata returned by the update API and should be displayed + as a link in user interfaces. The URL field may not be set for + test or nightly releases. + type: string + version: + description: version is a semantic versioning identifying the + update version. When this field is part of spec, version is + optional if image is specified. 
+ type: string + history: + description: history contains a list of the most recent versions applied + to the cluster. This value may be empty during cluster startup, + and then will be updated when a new update is being applied. The + newest update is first in the list and it is ordered by recency. + Updates in the history have state Completed if the rollout completed + - if an update was failing or halfway applied the state will be + Partial. Only a limited amount of update history is preserved. + type: array + items: + description: UpdateHistory is a single attempted update to the cluster. + type: object + required: + - completionTime + - image + - startedTime + - state + - verified + properties: + completionTime: + description: completionTime, if set, is when the update was + fully applied. The update that is currently being applied + will have a null completion time. Completion time will always + be set for entries that are not the current update (usually + to the started time of the next update). + type: string + format: date-time + nullable: true + image: + description: image is a container image location that contains + the update. This value is always populated. + type: string + startedTime: + description: startedTime is the time at which the update was + started. + type: string + format: date-time + state: + description: state reflects whether the update was fully applied. + The Partial state indicates the update is not fully applied, + while the Completed state indicates the update was successfully + rolled out at least once (all parts of the update successfully + applied). + type: string + verified: + description: verified indicates whether the provided update + was properly verified before it was installed. If this is + false the cluster may not be trusted. + type: boolean + version: + description: version is a semantic versioning identifying the + update version. If the requested image does not define a version, + or if a failure occurs retrieving the image, this value may + be empty. + type: string + observedGeneration: + description: observedGeneration reports which version of the spec + is being synced. If this value is not equal to metadata.generation, + then the desired and conditions fields may represent a previous + version. + type: integer + format: int64 + versionHash: + description: versionHash is a fingerprint of the content that the + cluster will be updated with. It is used by the operator to avoid + unnecessary work and is for internal use only. 
+ type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_operatorhub.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_operatorhub.crd.yaml new file mode 100644 index 0000000000..dee8a50130 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_operatorhub.crd.yaml @@ -0,0 +1,106 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: operatorhubs.config.openshift.io +spec: + group: config.openshift.io + names: + kind: OperatorHub + listKind: OperatorHubList + plural: operatorhubs + singular: operatorhub + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: OperatorHub is the Schema for the operatorhubs API. It can be + used to change the state of the default hub sources for OperatorHub on the + cluster from enabled to disabled and vice versa. + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: OperatorHubSpec defines the desired state of OperatorHub + type: object + properties: + disableAllDefaultSources: + description: disableAllDefaultSources allows you to disable all the + default hub sources. If this is true, a specific entry in sources + can be used to enable a default source. If this is false, a specific + entry in sources can be used to disable or enable a default source. + type: boolean + sources: + description: sources is the list of default hub sources and their + configuration. If the list is empty, it implies that the default + hub sources are enabled on the cluster unless disableAllDefaultSources + is true. If disableAllDefaultSources is true and sources is not + empty, the configuration present in sources will take precedence. + The list of default hub sources and their current state will always + be reflected in the status block. + type: array + items: + description: HubSource is used to specify the hub source and its + configuration + type: object + properties: + disabled: + description: disabled is used to disable a default hub source + on cluster + type: boolean + name: + description: name is the name of one of the default hub sources + type: string + maxLength: 253 + minLength: 1 + status: + description: OperatorHubStatus defines the observed state of OperatorHub. + The current state of the default hub sources will always be reflected + here. 
+ type: object + properties: + sources: + description: sources encapsulates the result of applying the configuration + for each hub source + type: array + items: + description: HubSourceStatus is used to reflect the current state + of applying the configuration to a default source + type: object + properties: + disabled: + description: disabled is used to disable a default hub source + on cluster + type: boolean + message: + description: message provides more information regarding failures + type: string + name: + description: name is the name of one of the default hub sources + type: string + maxLength: 253 + minLength: 1 + status: + description: status indicates success or failure in applying + the configuration + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml new file mode 100644 index 0000000000..ab7307a45a --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml @@ -0,0 +1,104 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: proxies.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Proxy + listKind: ProxyList + plural: proxies + singular: proxy + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Proxy holds cluster-wide information on how to configure default + proxies for the cluster. The canonical name is `cluster` + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec holds user-settable values for the proxy configuration + type: object + properties: + httpProxy: + description: httpProxy is the URL of the proxy for HTTP requests. Empty + means unset and will not result in an env var. + type: string + httpsProxy: + description: httpsProxy is the URL of the proxy for HTTPS requests. Empty + means unset and will not result in an env var. + type: string + noProxy: + description: noProxy is a comma-separated list of hostnames and/or + CIDRs for which the proxy should not be used. Empty means unset + and will not result in an env var. + type: string + readinessEndpoints: + description: readinessEndpoints is a list of endpoints used to verify + readiness of the proxy. + type: array + items: + type: string + trustedCA: + description: "trustedCA is a reference to a ConfigMap containing a + CA certificate bundle. 
The trustedCA field should only be consumed + by a proxy validator. The validator is responsible for reading the + certificate bundle from the required key \"ca-bundle.crt\", merging + it with the system default trust bundle, and writing the merged + trust bundle to a ConfigMap named \"trusted-ca-bundle\" in the \"openshift-config-managed\" + namespace. Clients that expect to make proxy connections must use + the trusted-ca-bundle for all HTTPS requests to the proxy, and may + use the trusted-ca-bundle for non-proxy HTTPS requests as well. + \n The namespace for the ConfigMap referenced by trustedCA is \"openshift-config\". + Here is an example ConfigMap (in yaml): \n apiVersion: v1 kind: + ConfigMap metadata: name: user-ca-bundle namespace: openshift-config + \ data: ca-bundle.crt: | -----BEGIN CERTIFICATE----- Custom + CA certificate bundle. -----END CERTIFICATE-----" + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config + map + type: string + status: + description: status holds observed values from the cluster. They may not + be overridden. + type: object + properties: + httpProxy: + description: httpProxy is the URL of the proxy for HTTP requests. + type: string + httpsProxy: + description: httpsProxy is the URL of the proxy for HTTPS requests. + type: string + noProxy: + description: noProxy is a comma-separated list of hostnames and/or + CIDRs for which the proxy should not be used. + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml new file mode 100644 index 0000000000..f7534c0668 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml @@ -0,0 +1,261 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: apiservers.config.openshift.io +spec: + group: config.openshift.io + names: + kind: APIServer + listKind: APIServerList + plural: apiservers + singular: apiserver + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: APIServer holds configuration (like serving certificates, client + CA and CORS domains) shared by all API servers in the system, among them + especially kube-apiserver and openshift-apiserver. The canonical name of + an instance is 'cluster'. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + additionalCORSAllowedOrigins: + description: additionalCORSAllowedOrigins lists additional, user-defined + regular expressions describing hosts for which the API server allows + access using the CORS headers. This may be needed to access the + API and the integrated OAuth server from JavaScript applications. + The values are regular expressions that correspond to the Golang + regular expression language. + type: array + items: + type: string + audit: + description: audit specifies the settings for audit configuration + to be applied to all OpenShift-provided API servers in the cluster. + type: object + default: + profile: Default + properties: + profile: + description: "profile specifies the name of the desired audit + policy configuration to be deployed to all OpenShift-provided + API servers in the cluster. \n The following profiles are provided: + - Default: the existing default policy. - WriteRequestBodies: + like 'Default', but logs request and response HTTP payloads + for write requests (create, update, patch). - AllRequestBodies: + like 'WriteRequestBodies', but also logs request and response + HTTP payloads for read requests (get, list). \n If unset, the + 'Default' profile is used as the default." + type: string + default: Default + enum: + - Default + - WriteRequestBodies + - AllRequestBodies + clientCA: + description: 'clientCA references a ConfigMap containing a certificate + bundle for the signers that will be recognized for incoming client + certificates in addition to the operator managed signers. If this + is empty, then only operator managed signers are valid. You usually + only have to set this if you have your own PKI you wish to honor + client certificates from. The ConfigMap must exist in the openshift-config + namespace and contain the following required fields: - ConfigMap.Data["ca-bundle.crt"] + - CA bundle.' + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config + map + type: string + encryption: + description: encryption allows the configuration of encryption of + resources at the datastore layer. + type: object + properties: + type: + description: "type defines what encryption type should be used + to encrypt resources at the datastore layer. When this field + is unset (i.e. when it is set to the empty string), identity + is implied. The behavior of unset can and will change over time. + \ Even if encryption is enabled by default, the meaning of unset + may change to a different encryption type based on changes in + best practices. \n When encryption is enabled, all sensitive + resources shipped with the platform are encrypted. This list + of sensitive resources can and will change over time. The current + authoritative list is: \n 1. secrets 2. configmaps 3. + routes.route.openshift.io 4. oauthaccesstokens.oauth.openshift.io + \ 5. oauthauthorizetokens.oauth.openshift.io" + type: string + enum: + - "" + - identity + - aescbc + servingCerts: + description: servingCert is the TLS cert info for serving secure traffic. + If not specified, operator managed certificates will be used for + serving secure traffic. 
+ type: object + properties: + namedCertificates: + description: namedCertificates references secrets containing the + TLS cert info for serving secure traffic to specific hostnames. + If no named certificates are provided, or no named certificates + match the server name as understood by a client, the defaultServingCertificate + will be used. + type: array + items: + description: APIServerNamedServingCert maps a server DNS name, + as understood by a client, to a certificate. + type: object + properties: + names: + description: names is an optional list of explicit DNS names + (leading wildcards allowed) that should use this certificate + to serve secure traffic. If no names are provided, the + implicit names will be extracted from the certificates. + Exact names take precedence over wildcard names. Explicit names + defined here take precedence over extracted implicit names. + type: array + items: + type: string + servingCertificate: + description: 'servingCertificate references a kubernetes.io/tls + type secret containing the TLS cert info for serving secure + traffic. The secret must exist in the openshift-config + namespace and contain the following required fields: - + Secret.Data["tls.key"] - TLS private key. - Secret.Data["tls.crt"] + - TLS certificate.' + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced + secret + type: string + tlsSecurityProfile: + description: "tlsSecurityProfile specifies settings for TLS connections + for externally exposed servers. \n If unset, a default (which may + change between releases) is chosen. Note that only Old and Intermediate + profiles are currently supported, and the maximum available minTLSVersion + is VersionTLS12." + type: object + properties: + custom: + description: "custom is a user-defined TLS security profile. Be + extremely careful using a custom profile as invalid configurations + can be catastrophic. An example custom profile looks like this: + \n ciphers: - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 + \ - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 + \ minTLSVersion: TLSv1.1" + type: object + properties: + ciphers: + description: "ciphers is used to specify the cipher algorithms + that are negotiated during the TLS handshake. Operators + may remove entries their operands do not support. For example, + to use DES-CBC3-SHA (yaml): \n ciphers: - DES-CBC3-SHA" + type: array + items: + type: string + minTLSVersion: + description: "minTLSVersion is used to specify the minimal + version of the TLS protocol that is negotiated during the + TLS handshake.
For example, to use TLS versions 1.1, 1.2 + and 1.3 (yaml): \n minTLSVersion: TLSv1.1 \n NOTE: currently + the highest minTLSVersion allowed is VersionTLS12" + type: string + enum: + - VersionTLS10 + - VersionTLS11 + - VersionTLS12 + - VersionTLS13 + nullable: true + intermediate: + description: "intermediate is a TLS security profile based on: + \n https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29 + \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 + \ - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 + \ - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 + \ - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 + \ - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 + \ - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 + \ minTLSVersion: TLSv1.2" + type: object + nullable: true + modern: + description: "modern is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility + \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 + \ - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 + \ minTLSVersion: TLSv1.3 \n NOTE: Currently unsupported." + type: object + nullable: true + old: + description: "old is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility + \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 + \ - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 + \ - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 + \ - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 + \ - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 + \ - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 + \ - DHE-RSA-CHACHA20-POLY1305 - ECDHE-ECDSA-AES128-SHA256 + \ - ECDHE-RSA-AES128-SHA256 - ECDHE-ECDSA-AES128-SHA + \ - ECDHE-RSA-AES128-SHA - ECDHE-ECDSA-AES256-SHA384 + \ - ECDHE-RSA-AES256-SHA384 - ECDHE-ECDSA-AES256-SHA + \ - ECDHE-RSA-AES256-SHA - DHE-RSA-AES128-SHA256 - + DHE-RSA-AES256-SHA256 - AES128-GCM-SHA256 - AES256-GCM-SHA384 + \ - AES128-SHA256 - AES256-SHA256 - AES128-SHA - + AES256-SHA - DES-CBC3-SHA minTLSVersion: TLSv1.0" + type: object + nullable: true + type: + description: "type is one of Old, Intermediate, Modern or Custom. + Custom provides the ability to specify individual TLS security + profile parameters. Old, Intermediate and Modern are TLS security + profiles based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations + \n The profiles are intent based, so they may change over time + as new ciphers are developed and existing ciphers are found + to be insecure. Depending on precisely which ciphers are available + to a process, the list may be reduced. \n Note that the Modern + profile is currently not supported because it is not yet well + adopted by common software libraries." + type: string + enum: + - Old + - Intermediate + - Modern + - Custom + status: + description: status holds observed values from the cluster. They may not + be overridden. 
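Tying the servingCerts and tlsSecurityProfile schemas above together, a sketch with hypothetical names:

apiVersion: config.openshift.io/v1
kind: APIServer
metadata:
  name: cluster
spec:
  servingCerts:
    namedCertificates:
    - names:
      - api.example.com            # hypothetical DNS name; leading wildcards allowed
      servingCertificate:
        name: api-example-tls      # kubernetes.io/tls secret in openshift-config
  tlsSecurityProfile:
    type: Intermediate             # only Old and Intermediate are currently supported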
+ type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml new file mode 100644 index 0000000000..910a4c65b1 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml @@ -0,0 +1,162 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: authentications.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Authentication + listKind: AuthenticationList + plural: authentications + singular: authentication + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Authentication specifies cluster-wide settings for authentication + (like OAuth and webhook token authenticators). The canonical name of an + instance is `cluster`. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + oauthMetadata: + description: 'oauthMetadata contains the discovery endpoint data for + OAuth 2.0 Authorization Server Metadata for an external OAuth server. + This discovery document can be viewed from its served location: + oc get --raw ''/.well-known/oauth-authorization-server'' For further + details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 + If oauthMetadata.name is non-empty, this value has precedence over + any metadata reference stored in status. The key "oauthMetadata" + is used to locate the data. If specified and the config map or expected + key is not found, no metadata is served. If the specified metadata + is not valid, no metadata is served. The namespace for this config + map is openshift-config.' + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config + map + type: string + serviceAccountIssuer: + description: 'serviceAccountIssuer is the identifier of the bound + service account token issuer. The default is https://kubernetes.default.svc + WARNING: Updating this field will result in the invalidation of + all bound tokens with the previous issuer value. Unless the holder + of a bound token has explicit support for a change in issuer, they + will not request a new bound token until pod restart or until their + existing token exceeds 80% of its duration.' 
+ type: string + type: + description: type identifies the cluster managed, user facing authentication + mode in use. Specifically, it manages the component that responds + to login attempts. The default is IntegratedOAuth. + type: string + webhookTokenAuthenticator: + description: webhookTokenAuthenticator configures a remote token reviewer. + These remote authentication webhooks can be used to verify bearer + tokens via the tokenreviews.authentication.k8s.io REST API. This + is required to honor bearer tokens that are provisioned by an external + authentication service. + type: object + required: + - kubeConfig + properties: + kubeConfig: + description: "kubeConfig references a secret that contains kube + config file data which describes how to access the remote webhook + service. The namespace for the referenced secret is openshift-config. + \n For further details, see: \n https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication + \n The key \"kubeConfig\" is used to locate the data. If the + secret or expected key is not found, the webhook is not honored. + If the specified kube config data is not valid, the webhook + is not honored." + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + webhookTokenAuthenticators: + description: webhookTokenAuthenticators is DEPRECATED, setting it + has no effect. + type: array + items: + description: deprecatedWebhookTokenAuthenticator holds the necessary + configuration options for a remote token authenticator. It's the + same as WebhookTokenAuthenticator but it's missing the 'required' + validation on KubeConfig field. + type: object + properties: + kubeConfig: + description: 'kubeConfig contains kube config file data which + describes how to access the remote webhook service. For further + details, see: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication + The key "kubeConfig" is used to locate the data. If the secret + or expected key is not found, the webhook is not honored. + If the specified kube config data is not valid, the webhook + is not honored. The namespace for this secret is determined + by the point of use.' + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced + secret + type: string + status: + description: status holds observed values from the cluster. They may not + be overridden. + type: object + properties: + integratedOAuthMetadata: + description: 'integratedOAuthMetadata contains the discovery endpoint + data for OAuth 2.0 Authorization Server Metadata for the in-cluster + integrated OAuth server. This discovery document can be viewed from + its served location: oc get --raw ''/.well-known/oauth-authorization-server'' + For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 + This contains the observed value based on cluster state. An explicitly + set value in spec.oauthMetadata has precedence over this field. + This field has no meaning if authentication spec.type is not set + to IntegratedOAuth. The key "oauthMetadata" is used to locate the + data. If the config map or expected key is not found, no metadata + is served. If the specified metadata is not valid, no metadata is + served. The namespace for this config map is openshift-config-managed.' 
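As a sketch of the Authentication spec fields described above (the issuer URL and secret name are hypothetical):

apiVersion: config.openshift.io/v1
kind: Authentication
metadata:
  name: cluster
spec:
  serviceAccountIssuer: https://issuer.example.com    # changing this invalidates existing bound tokens
  webhookTokenAuthenticator:
    kubeConfig:
      name: webhook-authenticator-kubeconfig          # secret in openshift-config, key "kubeConfig"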
+ type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config + map + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml new file mode 100644 index 0000000000..5c67235fe9 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml @@ -0,0 +1,401 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: builds.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Build + listKind: BuildList + plural: builds + singular: build + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Build configures the behavior of OpenShift builds for the entire + cluster. This includes default settings that can be overridden in BuildConfig + objects, and overrides which are applied to all builds. \n The canonical + name is \"cluster\"" + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec holds user-settable values for the build controller + configuration + type: object + properties: + additionalTrustedCA: + description: "AdditionalTrustedCA is a reference to a ConfigMap containing + additional CAs that should be trusted for image pushes and pulls + during builds. The namespace for this config map is openshift-config. + \n DEPRECATED: Additional CAs for image pull and push should be + set on image.config.openshift.io/cluster instead." + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config + map + type: string + buildDefaults: + description: BuildDefaults controls the default information for Builds + type: object + properties: + defaultProxy: + description: "DefaultProxy contains the default proxy settings + for all build operations, including image pull/push and source + download. \n Values can be overridden by setting the `HTTP_PROXY`, + `HTTPS_PROXY`, and `NO_PROXY` environment variables in the build + config's strategy." + type: object + properties: + httpProxy: + description: httpProxy is the URL of the proxy for HTTP requests. Empty + means unset and will not result in an env var. + type: string + httpsProxy: + description: httpsProxy is the URL of the proxy for HTTPS + requests. Empty means unset and will not result in an env + var.
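A sketch of buildDefaults.defaultProxy under the schema above (the endpoints are hypothetical):

apiVersion: config.openshift.io/v1
kind: Build
metadata:
  name: cluster
spec:
  buildDefaults:
    defaultProxy:
      httpProxy: http://proxy.example.com:3128
      httpsProxy: http://proxy.example.com:3128
      noProxy: .cluster.local,.svc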
+ type: string + noProxy: + description: noProxy is a comma-separated list of hostnames + and/or CIDRs for which the proxy should not be used. Empty + means unset and will not result in an env var. + type: string + readinessEndpoints: + description: readinessEndpoints is a list of endpoints used + to verify readiness of the proxy. + type: array + items: + type: string + trustedCA: + description: "trustedCA is a reference to a ConfigMap containing + a CA certificate bundle. The trustedCA field should only + be consumed by a proxy validator. The validator is responsible + for reading the certificate bundle from the required key + \"ca-bundle.crt\", merging it with the system default trust + bundle, and writing the merged trust bundle to a ConfigMap + named \"trusted-ca-bundle\" in the \"openshift-config-managed\" + namespace. Clients that expect to make proxy connections + must use the trusted-ca-bundle for all HTTPS requests to + the proxy, and may use the trusted-ca-bundle for non-proxy + HTTPS requests as well. \n The namespace for the ConfigMap + referenced by trustedCA is \"openshift-config\". Here is + an example ConfigMap (in yaml): \n apiVersion: v1 kind: + ConfigMap metadata: name: user-ca-bundle namespace: openshift-config + \ data: ca-bundle.crt: | -----BEGIN CERTIFICATE----- + \ Custom CA certificate bundle. -----END CERTIFICATE-----" + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced + config map + type: string + env: + description: Env is a set of default environment variables that + will be applied to the build if the specified variables do not + exist on the build + type: array + items: + description: EnvVar represents an environment variable present + in a Container. + type: object + required: + - name + properties: + name: + description: Name of the environment variable. Must be a + C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the + container and any service environment variables. If a + variable cannot be resolved, the reference in the input + string will be unchanged. The $(VAR_NAME) syntax can be + escaped with a double $$, i.e. $$(VAR_NAME). Escaped references + will never be expanded, regardless of whether the variable + exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + type: object + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + type: object + required: + - key + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels[''<KEY>'']`, + `metadata.annotations[''<KEY>'']`, spec.nodeName, + spec.serviceAccountName, status.hostIP, status.podIP, + status.podIPs.' + type: object + required: + - fieldPath + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version.
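For instance, the fieldRef machinery above lets a default build environment variable be filled from the downward API; a sketch with a hypothetical variable name:

spec:
  buildDefaults:
    env:
    - name: BUILD_POD_NAMESPACE        # hypothetical variable name
      valueFrom:
        fieldRef:
          fieldPath: metadata.namespace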
+ type: string + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + type: object + required: + - resource + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + type: object + required: + - key + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + gitProxy: + description: "GitProxy contains the proxy settings for git operations + only. If set, this will override any Proxy settings for all + git commands, such as git clone. \n Values that are not set + here will be inherited from DefaultProxy." + type: object + properties: + httpProxy: + description: httpProxy is the URL of the proxy for HTTP requests. Empty + means unset and will not result in an env var. + type: string + httpsProxy: + description: httpsProxy is the URL of the proxy for HTTPS + requests. Empty means unset and will not result in an env + var. + type: string + noProxy: + description: noProxy is a comma-separated list of hostnames + and/or CIDRs for which the proxy should not be used. Empty + means unset and will not result in an env var. + type: string + readinessEndpoints: + description: readinessEndpoints is a list of endpoints used + to verify readiness of the proxy. + type: array + items: + type: string + trustedCA: + description: "trustedCA is a reference to a ConfigMap containing + a CA certificate bundle. The trustedCA field should only + be consumed by a proxy validator. The validator is responsible + for reading the certificate bundle from the required key + \"ca-bundle.crt\", merging it with the system default trust + bundle, and writing the merged trust bundle to a ConfigMap + named \"trusted-ca-bundle\" in the \"openshift-config-managed\" + namespace. Clients that expect to make proxy connections + must use the trusted-ca-bundle for all HTTPS requests to + the proxy, and may use the trusted-ca-bundle for non-proxy + HTTPS requests as well. \n The namespace for the ConfigMap + referenced by trustedCA is \"openshift-config\". Here is + an example ConfigMap (in yaml): \n apiVersion: v1 kind: + ConfigMap metadata: name: user-ca-bundle namespace: openshift-config + \ data: ca-bundle.crt: | -----BEGIN CERTIFICATE----- + \ Custom CA certificate bundle. -----END CERTIFICATE-----" + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced + config map + type: string + imageLabels: + description: ImageLabels is a list of docker labels that are applied + to the resulting image. 
A user can override a default label by + providing a label with the same name in their Build/BuildConfig. + type: array + items: + type: object + properties: + name: + description: Name defines the name of the label. It must + have non-zero length. + type: string + value: + description: Value defines the literal value of the label. + type: string + resources: + description: Resources defines resource requirements to execute + the build. + type: object + properties: + limits: + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + additionalProperties: + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + requests: + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + additionalProperties: + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + buildOverrides: + description: BuildOverrides controls override settings for builds + type: object + properties: + forcePull: + description: ForcePull overrides, if set, the equivalent value + in the builds, i.e. false disables force pull for all builds, + true enables force pull for all builds, independently of what + each build specifies itself + type: boolean + imageLabels: + description: ImageLabels is a list of docker labels that are applied + to the resulting image. If a user provides a label in their Build/BuildConfig + with the same name as one in this list, the user's label will + be overwritten. + type: array + items: + type: object + properties: + name: + description: Name defines the name of the label. It must + have non-zero length. + type: string + value: + description: Value defines the literal value of the label. + type: string + nodeSelector: + description: NodeSelector is a selector which must be true for + the build pod to fit on a node + type: object + additionalProperties: + type: string + tolerations: + description: Tolerations is a list of Tolerations that will override + any existing tolerations set on a build pod. + type: array + items: + description: The pod this Toleration is attached to tolerates + any taint that matches the triple <key,value,effect> using + the matching operator <operator>. + type: object + properties: + effect: + description: Effect indicates the taint effect to match. + Empty means match all taint effects. When specified, allowed + values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies + to. Empty means match all taint keys. If the key is empty, + operator must be Exists; this combination means to match + all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to + the value. Valid operators are Exists and Equal. Defaults + to Equal. Exists is equivalent to wildcard for value, + so that a pod can tolerate all taints of a particular + category.
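Pulling the override knobs above together, a buildOverrides sketch (the node label and taint key are hypothetical):

spec:
  buildOverrides:
    forcePull: true
    nodeSelector:
      node-role.kubernetes.io/builder: ""    # hypothetical role label
    tolerations:
    - key: builds-only                       # hypothetical taint key
      operator: Exists
      effect: NoSchedule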
+ type: string + tolerationSeconds: + description: TolerationSeconds represents the period of + time the toleration (which must be of effect NoExecute, + otherwise this field is ignored) tolerates the taint. + By default, it is not set, which means tolerate the taint + forever (do not evict). Zero and negative values will + be treated as 0 (evict immediately) by the system. + type: integer + format: int64 + value: + description: Value is the taint value the toleration matches + to. If the operator is Exists, the value should be empty, + otherwise just a regular string. + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_console.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_console.crd.yaml new file mode 100644 index 0000000000..2e3a826ef6 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_console.crd.yaml @@ -0,0 +1,74 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: consoles.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Console + listKind: ConsoleList + plural: consoles + singular: console + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Console holds cluster-wide configuration for the web console, + including the logout URL, and reports the public URL of the console. The + canonical name is `cluster`. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + authentication: + description: ConsoleAuthentication defines a list of optional configuration + for console authentication. + type: object + properties: + logoutRedirect: + description: 'An optional, absolute URL to redirect web browsers + to after logging out of the console. If not specified, it will + redirect to the default login page. This is required when using + an identity provider that supports single sign-on (SSO) such + as: - OpenID (Keycloak, Azure) - RequestHeader (GSSAPI, SSPI, + SAML) - OAuth (GitHub, GitLab, Google) Logging out of the console + will destroy the user''s token. The logoutRedirect provides + the user the option to perform single logout (SLO) through the + identity provider to destroy their single sign-on session.' + type: string + pattern: ^$|^((https):\/\/?)[^\s()<>]+(?:\([\w\d]+\)|([^[:punct:]\s]|\/?))$ + status: + description: status holds observed values from the cluster. 
They may not + be overridden. + type: object + properties: + consoleURL: + description: The URL for the console. This will be derived from the + host for the route that is created for the console. + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns.crd.yaml new file mode 100644 index 0000000000..7550f6e28b --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns.crd.yaml @@ -0,0 +1,104 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: dnses.config.openshift.io +spec: + group: config.openshift.io + names: + kind: DNS + listKind: DNSList + plural: dnses + singular: dns + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: DNS holds cluster-wide information about DNS. The canonical name + is `cluster` + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + baseDomain: + description: "baseDomain is the base domain of the cluster. All managed + DNS records will be sub-domains of this base. \n For example, given + the base domain `openshift.example.com`, an API server DNS record + may be created for `cluster-api.openshift.example.com`. \n Once + set, this field cannot be changed." + type: string + privateZone: + description: "privateZone is the location where all the DNS records + that are only available internally to the cluster exist. \n If this + field is nil, no private records should be created. \n Once set, + this field cannot be changed." + type: object + properties: + id: + description: "id is the identifier that can be used to find the + DNS hosted zone. \n on AWS zone can be fetched using `ID` as + id in [1] on Azure zone can be fetched using `ID` as a pre-determined + name in [2], on GCP zone can be fetched using `ID` as a pre-determined + name in [3]. \n [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options + [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show + [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get" + type: string + tags: + description: "tags can be used to query the DNS hosted zone. 
\n + on AWS, resourcegroupstaggingapi [1] can be used to fetch a + zone using `Tags` as tag-filters, \n [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options" + type: object + additionalProperties: + type: string + publicZone: + description: "publicZone is the location where all the DNS records + that are publicly accessible to the internet exist. \n If this field + is nil, no public records should be created. \n Once set, this field + cannot be changed." + type: object + properties: + id: + description: "id is the identifier that can be used to find the + DNS hosted zone. \n on AWS zone can be fetched using `ID` as + id in [1] on Azure zone can be fetched using `ID` as a pre-determined + name in [2], on GCP zone can be fetched using `ID` as a pre-determined + name in [3]. \n [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options + [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show + [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get" + type: string + tags: + description: "tags can be used to query the DNS hosted zone. \n + on AWS, resourcegroupstaggingapi [1] can be used to fetch a + zone using `Tags` as tag-filters, \n [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options" + type: object + additionalProperties: + type: string + status: + description: status holds observed values from the cluster. They may not + be overridden. + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_featuregate.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_featuregate.crd.yaml new file mode 100644 index 0000000000..10f85be096 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_featuregate.crd.yaml @@ -0,0 +1,79 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: featuregates.config.openshift.io +spec: + group: config.openshift.io + names: + kind: FeatureGate + listKind: FeatureGateList + plural: featuregates + singular: featuregate + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Feature holds cluster-wide information about feature gates. The + canonical name is `cluster` + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + customNoUpgrade: + description: customNoUpgrade allows the enabling or disabling of any + feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE + UNDONE, and PREVENTS UPGRADES. Because of its nature, this setting + cannot be validated. If you have any typos or accidentally apply + invalid combinations, your cluster may fail in an unrecoverable way. featureSet + must equal "CustomNoUpgrade" to use this field. + type: object + properties: + disabled: + description: disabled is a list of all feature gates that you + want to force off + type: array + items: + type: string + enabled: + description: enabled is a list of all feature gates that you want + to force on + type: array + items: + type: string + nullable: true + featureSet: + description: featureSet changes the list of features in the cluster. The + default is empty. Be very careful adjusting this setting. Turning + on or off features may cause irreversible changes in your cluster + which cannot be undone. + type: string + status: + description: status holds observed values from the cluster. They may not + be overridden. + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_image.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_image.crd.yaml new file mode 100644 index 0000000000..71ddd49afc --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_image.crd.yaml @@ -0,0 +1,162 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: images.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Image + listKind: ImageList + plural: images + singular: image + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Image governs policies related to imagestream imports and runtime + configuration for external registries. It allows cluster admins to configure + which registries OpenShift is allowed to import images from, extra CA trust + bundles for external registries, and policies to block or allow registry + hostnames. When exposing OpenShift's image registry to the public, this + also lets cluster admins specify the external hostname. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase.
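Referring back to the FeatureGate schema above, a customNoUpgrade sketch (the gate names are hypothetical placeholders):

apiVersion: config.openshift.io/v1
kind: FeatureGate
metadata:
  name: cluster
spec:
  featureSet: CustomNoUpgrade    # required for customNoUpgrade to be honored; cannot be undone
  customNoUpgrade:
    enabled:
    - ExampleGateA               # hypothetical feature gate names
    disabled:
    - ExampleGateB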
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + additionalTrustedCA: + description: additionalTrustedCA is a reference to a ConfigMap containing + additional CAs that should be trusted during imagestream import, + pod image pull, build image pull, and imageregistry pullthrough. + The namespace for this config map is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config + map + type: string + allowedRegistriesForImport: + description: allowedRegistriesForImport limits the container image + registries that normal users may import images from. Set this list + to the registries that you trust to contain valid Docker images + and that you want applications to be able to import from. Users + with permission to create Images or ImageStreamMappings via the + API are not affected by this policy - typically only administrators + or system integrations will have those permissions. + type: array + items: + description: RegistryLocation contains a location of the registry + specified by the registry domain name. The domain name might include + wildcards, like '*' or '??'. + type: object + properties: + domainName: + description: domainName specifies a domain name for the registry. + If the registry uses a non-standard port (other than 80 or 443), the + port should be included in the domain name as well. + type: string + insecure: + description: insecure indicates whether the registry is secure + (https) or insecure (http). By default (if not specified) the + registry is assumed to be secure. + type: boolean + externalRegistryHostnames: + description: externalRegistryHostnames provides the hostnames for + the default external image registry. The external hostname should + be set only when the image registry is exposed externally. The first + value is used in 'publicDockerImageRepository' field in ImageStreams. + The value must be in "hostname[:port]" format. + type: array + items: + type: string + registrySources: + description: registrySources contains configuration that determines + how the container runtime should treat individual registries when + accessing images for builds+pods. (e.g. whether or not to allow + insecure access). It does not contain configuration for the internal + cluster registry. + type: object + properties: + allowedRegistries: + description: "allowedRegistries are the only registries permitted + for image pull and push actions. All other registries are denied. + \n Only one of BlockedRegistries or AllowedRegistries may be + set." + type: array + items: + type: string + blockedRegistries: + description: "blockedRegistries cannot be used for image pull + and push actions. All other registries are permitted. \n Only + one of BlockedRegistries or AllowedRegistries may be set." + type: array + items: + type: string + containerRuntimeSearchRegistries: + description: 'containerRuntimeSearchRegistries are registries + that will be searched when pulling images that do not have fully + qualified domains in their pull specs. Registries will be searched + in the order provided in the list. Note: this search list only + works with the container runtime, i.e. CRI-O. Will NOT work with + builds or imagestream imports.'
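A registrySources sketch under the schema above (registry hosts are hypothetical; allowedRegistries and blockedRegistries are mutually exclusive, while insecureRegistries may accompany either):

apiVersion: config.openshift.io/v1
kind: Image
metadata:
  name: cluster
spec:
  registrySources:
    allowedRegistries:                       # all other registries are denied
    - quay.io
    - registry.redhat.io
    - registry.internal.example.com:5000
    insecureRegistries:
    - registry.internal.example.com:5000     # HTTP or untrusted-TLS registry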
+ type: array + format: hostname + minItems: 1 + items: + type: string + x-kubernetes-list-type: set + insecureRegistries: + description: insecureRegistries are registries which do not have + a valid TLS certificates or only support HTTP connections. + type: array + items: + type: string + status: + description: status holds observed values from the cluster. They may not + be overridden. + type: object + properties: + externalRegistryHostnames: + description: externalRegistryHostnames provides the hostnames for + the default external image registry. The external hostname should + be set only when the image registry is exposed externally. The first + value is used in 'publicDockerImageRepository' field in ImageStreams. + The value must be in "hostname[:port]" format. + type: array + items: + type: string + internalRegistryHostname: + description: internalRegistryHostname sets the hostname for the default + internal image registry. The value must be in "hostname[:port]" + format. This value is set by the image registry operator which controls + the internal registry hostname. For backward compatibility, users + can still use OPENSHIFT_DEFAULT_REGISTRY environment variable but + this setting overrides the environment variable. + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure.crd.yaml new file mode 100644 index 0000000000..9205a4347b --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure.crd.yaml @@ -0,0 +1,542 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: infrastructures.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Infrastructure + listKind: InfrastructureList + plural: infrastructures + singular: infrastructure + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Infrastructure holds cluster-wide information about Infrastructure. The + canonical name is `cluster` + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + cloudConfig: + description: "cloudConfig is a reference to a ConfigMap containing + the cloud provider configuration file. 
This configuration file is + used to configure the Kubernetes cloud provider integration when + using the built-in cloud provider integration or the external cloud + controller manager. The namespace for this config map is openshift-config. + \n cloudConfig should only be consumed by the kube_cloud_config + controller. The controller is responsible for using the user configuration + in the spec for various platforms and combining that with the user + provided ConfigMap in this field to create a stitched kube cloud + config. The controller generates a ConfigMap `kube-cloud-config` + in `openshift-config-managed` namespace with the kube cloud config + is stored in `cloud.conf` key. All the clients are expected to use + the generated ConfigMap only." + type: object + properties: + key: + description: Key allows pointing to a specific key/value inside + of the configmap. This is useful for logical file references. + type: string + name: + type: string + platformSpec: + description: platformSpec holds desired information specific to the + underlying infrastructure provider. + type: object + properties: + aws: + description: AWS contains settings specific to the Amazon Web + Services infrastructure provider. + type: object + properties: + serviceEndpoints: + description: serviceEndpoints list contains custom endpoints + which will override default service endpoint of AWS Services. + There must be only one ServiceEndpoint for a service. + type: array + items: + description: AWSServiceEndpoint store the configuration + of a custom url to override existing defaults of AWS Services. + type: object + properties: + name: + description: name is the name of the AWS service. The + list of all the service names can be found at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html + This must be provided and cannot be empty. + type: string + pattern: ^[a-z0-9-]+$ + url: + description: url is fully qualified URI with scheme + https, that overrides the default generated endpoint + for a client. This must be provided and cannot be + empty. + type: string + pattern: ^https:// + azure: + description: Azure contains settings specific to the Azure infrastructure + provider. + type: object + baremetal: + description: BareMetal contains settings specific to the BareMetal + platform. + type: object + equinixMetal: + description: EquinixMetal contains settings specific to the Equinix + Metal infrastructure provider. + type: object + gcp: + description: GCP contains settings specific to the Google Cloud + Platform infrastructure provider. + type: object + ibmcloud: + description: IBMCloud contains settings specific to the IBMCloud + infrastructure provider. + type: object + kubevirt: + description: Kubevirt contains settings specific to the kubevirt + infrastructure provider. + type: object + openstack: + description: OpenStack contains settings specific to the OpenStack + infrastructure provider. + type: object + ovirt: + description: Ovirt contains settings specific to the oVirt infrastructure + provider. + type: object + type: + description: type is the underlying infrastructure provider for + the cluster. This value controls whether infrastructure automation + such as service load balancers, dynamic volume provisioning, + machine creation and deletion, and other integrations are enabled. + If None, no infrastructure automation is enabled. Allowed values + are "AWS", "Azure", "BareMetal", "GCP", "Libvirt", "OpenStack", + "VSphere", "oVirt", "KubeVirt", "EquinixMetal", and "None". 
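As a sketch of platformSpec with a custom AWS service endpoint (the URL is hypothetical; the name and url patterns follow the schema above):

apiVersion: config.openshift.io/v1
kind: Infrastructure
metadata:
  name: cluster
spec:
  platformSpec:
    type: AWS
    aws:
      serviceEndpoints:
      - name: ec2                                    # AWS service name, lowercase
        url: https://ec2.us-gov-west-1.example.com   # must use scheme https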
+ Individual components may not support all platforms, and must + handle unrecognized platforms as None if they do not support + that platform. + type: string + enum: + - "" + - AWS + - Azure + - BareMetal + - GCP + - Libvirt + - OpenStack + - None + - VSphere + - oVirt + - IBMCloud + - KubeVirt + - EquinixMetal + vsphere: + description: VSphere contains settings specific to the VSphere + infrastructure provider. + type: object + status: + description: status holds observed values from the cluster. They may not + be overridden. + type: object + properties: + apiServerInternalURI: + description: apiServerInternalURI is a valid URI with scheme 'https', + address and optionally a port (defaulting to 443). apiServerInternalURI + can be used by components like kubelets to contact the Kubernetes + API server using the infrastructure provider rather than Kubernetes + networking. + type: string + apiServerURL: + description: apiServerURL is a valid URI with scheme 'https', address + and optionally a port (defaulting to 443). apiServerURL can be + used by components like the web console to tell users where to find + the Kubernetes API. + type: string + controlPlaneTopology: + description: controlPlaneTopology expresses the expectations for operands + that normally run on control nodes. The default is 'HighlyAvailable', + which represents the behavior operators have in a "normal" cluster. + The 'SingleReplica' mode will be used in single-node deployments + and the operators should not configure the operand for highly-available + operation. + type: string + default: HighlyAvailable + enum: + - HighlyAvailable + - SingleReplica + etcdDiscoveryDomain: + description: 'etcdDiscoveryDomain is the domain used to fetch the + SRV records for discovering etcd servers and clients. For more info: + https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery + deprecated: as of 4.7, this field is no longer set or honored. It + will be removed in a future release.' + type: string + infrastructureName: + description: infrastructureName uniquely identifies a cluster with + a human-friendly name. Once set it should not be changed. Must be + of max length 27 and must have only alphanumeric or hyphen characters. + type: string + infrastructureTopology: + description: infrastructureTopology expresses the expectations for + infrastructure services that do not run on control plane nodes, + usually indicated by a node selector for a `role` value other than + `master`. The default is 'HighlyAvailable', which represents the + behavior operators have in a "normal" cluster. The 'SingleReplica' + mode will be used in single-node deployments and the operators should + not configure the operand for highly-available operation. + type: string + default: HighlyAvailable + enum: + - HighlyAvailable + - SingleReplica + platform: + description: "platform is the underlying infrastructure provider for + the cluster. \n Deprecated: Use platformStatus.type instead." + type: string + enum: + - "" + - AWS + - Azure + - BareMetal + - GCP + - Libvirt + - OpenStack + - None + - VSphere + - oVirt + - IBMCloud + - KubeVirt + - EquinixMetal + platformStatus: + description: platformStatus holds status information specific to the + underlying infrastructure provider. + type: object + properties: + aws: + description: AWS contains settings specific to the Amazon Web + Services infrastructure provider.
+ type: object + properties: + region: + description: region holds the default AWS region for new AWS + resources created by the cluster. + type: string + resourceTags: + description: resourceTags is a list of additional tags to + apply to AWS resources created for the cluster. See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html + for information on tagging AWS resources. AWS supports a + maximum of 50 tags per resource. OpenShift reserves 25 tags + for its use, leaving 25 tags available for the user. + type: array + maxItems: 25 + items: + description: AWSResourceTag is a tag to apply to AWS resources + created for the cluster. + type: object + required: + - key + - value + properties: + key: + description: key is the key of the tag + type: string + maxLength: 128 + minLength: 1 + pattern: ^[0-9A-Za-z_.:/=+-@]+$ + value: + description: value is the value of the tag. Some AWS + service do not support empty values. Since tags are + added to resources in many services, the length of + the tag value must meet the requirements of all services. + type: string + maxLength: 256 + minLength: 1 + pattern: ^[0-9A-Za-z_.:/=+-@]+$ + serviceEndpoints: + description: ServiceEndpoints list contains custom endpoints + which will override default service endpoint of AWS Services. + There must be only one ServiceEndpoint for a service. + type: array + items: + description: AWSServiceEndpoint store the configuration + of a custom url to override existing defaults of AWS Services. + type: object + properties: + name: + description: name is the name of the AWS service. The + list of all the service names can be found at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html + This must be provided and cannot be empty. + type: string + pattern: ^[a-z0-9-]+$ + url: + description: url is fully qualified URI with scheme + https, that overrides the default generated endpoint + for a client. This must be provided and cannot be + empty. + type: string + pattern: ^https:// + azure: + description: Azure contains settings specific to the Azure infrastructure + provider. + type: object + properties: + cloudName: + description: cloudName is the name of the Azure cloud environment + which can be used to configure the Azure SDK with the appropriate + Azure API endpoints. If empty, the value is equal to `AzurePublicCloud`. + type: string + enum: + - "" + - AzurePublicCloud + - AzureUSGovernmentCloud + - AzureChinaCloud + - AzureGermanCloud + networkResourceGroupName: + description: networkResourceGroupName is the Resource Group + for network resources like the Virtual Network and Subnets + used by the cluster. If empty, the value is same as ResourceGroupName. + type: string + resourceGroupName: + description: resourceGroupName is the Resource Group for new + Azure resources created for the cluster. + type: string + baremetal: + description: BareMetal contains settings specific to the BareMetal + platform. + type: object + properties: + apiServerInternalIP: + description: apiServerInternalIP is an IP address to contact + the Kubernetes API server that can be used by components + inside the cluster, like kubelets using the infrastructure + rather than Kubernetes networking. It is the IP that the + Infrastructure.status.apiServerInternalURI points to. It + is the IP for a self-hosted load balancer in front of the + API servers. + type: string + ingressIP: + description: ingressIP is an external IP which routes to the + default ingress controller. 
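For reference, the AWS platformStatus fields above are populated by the installer rather than by users; an observed status might look like this sketch (values hypothetical):

status:
  platformStatus:
    type: AWS
    aws:
      region: us-east-1        # hypothetical region
      resourceTags:
      - key: team              # hypothetical tag applied to cluster resources
        value: platform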
+ of a wildcard DNS record used to resolve default route host
+ names.
+ type: string
+ nodeDNSIP:
+ description: nodeDNSIP is the IP address for the internal
+ DNS used by the nodes. Unlike the one managed by the DNS
+ operator, `NodeDNSIP` provides name resolution for the nodes
+ themselves. There is no DNS-as-a-service for BareMetal deployments.
+ In order to minimize necessary changes to the datacenter
+ DNS, a DNS service is hosted as a static pod to serve those
+ hostnames to the nodes in the cluster.
+ type: string
+ equinixMetal:
+ description: EquinixMetal contains settings specific to the Equinix
+ Metal infrastructure provider.
+ type: object
+ properties:
+ apiServerInternalIP:
+ description: apiServerInternalIP is an IP address to contact
+ the Kubernetes API server that can be used by components
+ inside the cluster, like kubelets using the infrastructure
+ rather than Kubernetes networking. It is the IP that the
+ Infrastructure.status.apiServerInternalURI points to. It
+ is the IP for a self-hosted load balancer in front of the
+ API servers.
+ type: string
+ ingressIP:
+ description: ingressIP is an external IP which routes to the
+ default ingress controller. The IP is a suitable target
+ of a wildcard DNS record used to resolve default route host
+ names.
+ type: string
+ gcp:
+ description: GCP contains settings specific to the Google Cloud
+ Platform infrastructure provider.
+ type: object
+ properties:
+ projectID:
+ description: projectID is the Project ID for new GCP
+ resources created for the cluster.
+ type: string
+ region:
+ description: region holds the region for new GCP resources
+ created for the cluster.
+ type: string
+ ibmcloud:
+ description: IBMCloud contains settings specific to the IBMCloud
+ infrastructure provider.
+ type: object
+ properties:
+ location:
+ description: Location is where the cluster has been deployed.
+ type: string
+ providerType:
+ description: ProviderType indicates the type of cluster that
+ was created.
+ type: string
+ resourceGroupName:
+ description: ResourceGroupName is the Resource Group for new
+ IBMCloud resources created for the cluster.
+ type: string
+ kubevirt:
+ description: Kubevirt contains settings specific to the kubevirt
+ infrastructure provider.
+ type: object
+ properties:
+ apiServerInternalIP:
+ description: apiServerInternalIP is an IP address to contact
+ the Kubernetes API server that can be used by components
+ inside the cluster, like kubelets using the infrastructure
+ rather than Kubernetes networking. It is the IP that the
+ Infrastructure.status.apiServerInternalURI points to. It
+ is the IP for a self-hosted load balancer in front of the
+ API servers.
+ type: string
+ ingressIP:
+ description: ingressIP is an external IP which routes to the
+ default ingress controller. The IP is a suitable target
+ of a wildcard DNS record used to resolve default route host
+ names.
+ type: string
+ openstack:
+ description: OpenStack contains settings specific to the OpenStack
+ infrastructure provider.
+ type: object
+ properties:
+ apiServerInternalIP:
+ description: apiServerInternalIP is an IP address to contact
+ the Kubernetes API server that can be used by components
+ inside the cluster, like kubelets using the infrastructure
+ rather than Kubernetes networking. It is the IP that the
+ Infrastructure.status.apiServerInternalURI points to. It
+ is the IP for a self-hosted load balancer in front of the
+ API servers.
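+ # For illustration only: a hypothetical platformStatus stanza for an
+ # on-premises cluster, using the self-hosted load balancer IPs described
+ # above (addresses are placeholders from the TEST-NET documentation range):
+ #   platformStatus:
+ #     type: OpenStack
+ #     openstack:
+ #       apiServerInternalIP: 192.0.2.10
+ #       ingressIP: 192.0.2.11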
+ type: string
+ cloudName:
+ description: cloudName is the name of the desired OpenStack
+ cloud in the client configuration file (`clouds.yaml`).
+ type: string
+ ingressIP:
+ description: ingressIP is an external IP which routes to the
+ default ingress controller. The IP is a suitable target
+ of a wildcard DNS record used to resolve default route host
+ names.
+ type: string
+ nodeDNSIP:
+ description: nodeDNSIP is the IP address for the internal
+ DNS used by the nodes. Unlike the one managed by the DNS
+ operator, `NodeDNSIP` provides name resolution for the nodes
+ themselves. There is no DNS-as-a-service for OpenStack deployments.
+ In order to minimize necessary changes to the datacenter
+ DNS, a DNS service is hosted as a static pod to serve those
+ hostnames to the nodes in the cluster.
+ type: string
+ ovirt:
+ description: Ovirt contains settings specific to the oVirt infrastructure
+ provider.
+ type: object
+ properties:
+ apiServerInternalIP:
+ description: apiServerInternalIP is an IP address to contact
+ the Kubernetes API server that can be used by components
+ inside the cluster, like kubelets using the infrastructure
+ rather than Kubernetes networking. It is the IP that the
+ Infrastructure.status.apiServerInternalURI points to. It
+ is the IP for a self-hosted load balancer in front of the
+ API servers.
+ type: string
+ ingressIP:
+ description: ingressIP is an external IP which routes to the
+ default ingress controller. The IP is a suitable target
+ of a wildcard DNS record used to resolve default route host
+ names.
+ type: string
+ nodeDNSIP:
+ description: 'deprecated: as of 4.6, this field is no longer
+ set or honored. It will be removed in a future release.'
+ type: string
+ type:
+ description: "type is the underlying infrastructure provider for
+ the cluster. This value controls whether infrastructure automation
+ such as service load balancers, dynamic volume provisioning,
+ machine creation and deletion, and other integrations are enabled.
+ If None, no infrastructure automation is enabled. Allowed values
+ are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\",
+ \"OpenStack\", \"VSphere\", \"oVirt\", \"EquinixMetal\", and
+ \"None\". Individual components may not support all platforms,
+ and must handle unrecognized platforms as None if they do not
+ support that platform. \n This value will be synced to
+ the `status.platform` and `status.platformStatus.type`. Currently,
+ this value cannot be changed once set."
+ type: string
+ enum:
+ - ""
+ - AWS
+ - Azure
+ - BareMetal
+ - GCP
+ - Libvirt
+ - OpenStack
+ - None
+ - VSphere
+ - oVirt
+ - IBMCloud
+ - KubeVirt
+ - EquinixMetal
+ vsphere:
+ description: VSphere contains settings specific to the VSphere
+ infrastructure provider.
+ type: object
+ properties:
+ apiServerInternalIP:
+ description: apiServerInternalIP is an IP address to contact
+ the Kubernetes API server that can be used by components
+ inside the cluster, like kubelets using the infrastructure
+ rather than Kubernetes networking. It is the IP that the
+ Infrastructure.status.apiServerInternalURI points to. It
+ is the IP for a self-hosted load balancer in front of the
+ API servers.
+ type: string
+ ingressIP:
+ description: ingressIP is an external IP which routes to the
+ default ingress controller. The IP is a suitable target
+ of a wildcard DNS record used to resolve default route host
+ names.
+ type: string
+ nodeDNSIP:
+ description: nodeDNSIP is the IP address for the internal
+ DNS used by the nodes. Unlike the one managed by the DNS
+ operator, `NodeDNSIP` provides name resolution for the nodes
+ themselves. There is no DNS-as-a-service for vSphere deployments.
+ In order to minimize necessary changes to the datacenter
+ DNS, a DNS service is hosted as a static pod to serve those
+ hostnames to the nodes in the cluster.
+ type: string
+ served: true
+ storage: true
+ subresources:
+ status: {}
diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml
new file mode 100644
index 0000000000..1a7c294c0e
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml
@@ -0,0 +1,300 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ api-approved.openshift.io: https://github.com/openshift/api/pull/470
+ include.release.openshift.io/ibm-cloud-managed: "true"
+ include.release.openshift.io/self-managed-high-availability: "true"
+ include.release.openshift.io/single-node-developer: "true"
+ name: ingresses.config.openshift.io
+spec:
+ group: config.openshift.io
+ names:
+ kind: Ingress
+ listKind: IngressList
+ plural: ingresses
+ singular: ingress
+ scope: Cluster
+ versions:
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ description: Ingress holds cluster-wide information about ingress, including
+ the default ingress domain used for routes. The canonical name is `cluster`.
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: spec holds user settable values for configuration
+ type: object
+ properties:
+ appsDomain:
+ description: appsDomain is an optional domain to use instead of the
+ one specified in the domain field when a Route is created without
+ specifying an explicit host. If appsDomain is nonempty, this value
+ is used to generate default host values for Route. Unlike domain,
+ appsDomain may be modified after installation. This assumes a new
+ ingresscontroller has been set up with a wildcard certificate.
+ type: string
+ componentRoutes:
+ description: "componentRoutes is an optional list of routes, managed
+ by OpenShift components, for which a cluster-admin can configure
+ the hostname and serving certificate. The namespace
+ and name of each route in this list should match an existing entry
+ in the status.componentRoutes list. \n To determine the set of configurable
+ Routes, look at namespace and name of entries in the .status.componentRoutes
+ list, where participating operators write the status of configurable
+ routes."
+ type: array
+ items:
+ description: ComponentRouteSpec allows for configuration of a route's
+ hostname and serving certificate.
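+ # For illustration only: a hypothetical spec customizing one component
+ # route; field names follow the schema below, values are assumptions:
+ #   spec:
+ #     domain: apps.example.com
+ #     componentRoutes:
+ #     - name: console
+ #       namespace: openshift-console
+ #       hostname: console.example.com
+ #       servingCertKeyPairSecret:
+ #         name: console-custom-tls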
+ type: object
+ required:
+ - hostname
+ - name
+ - namespace
+ properties:
+ hostname:
+ description: hostname is the hostname that should be used by
+ the route.
+ type: string
+ format: hostname
+ name:
+ description: "name is the logical name of the route to customize.
+ \n The namespace and name of this componentRoute must match
+ a corresponding entry in the list of status.componentRoutes
+ if the route is to be customized."
+ type: string
+ maxLength: 256
+ minLength: 1
+ namespace:
+ description: "namespace is the namespace of the route to customize.
+ \n The namespace and name of this componentRoute must match
+ a corresponding entry in the list of status.componentRoutes
+ if the route is to be customized."
+ type: string
+ maxLength: 63
+ minLength: 1
+ pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
+ servingCertKeyPairSecret:
+ description: servingCertKeyPairSecret is a reference to a secret
+ of type `kubernetes.io/tls` in the openshift-config namespace.
+ The serving cert/key pair must match and will be used by the
+ operator to fulfill the intent of serving with this name.
+ If the custom hostname uses the default routing suffix of
+ the cluster, the Secret specification for a serving certificate
+ will not be needed.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ secret
+ type: string
+ domain:
+ description: "domain is used to generate a default host name for a
+ route when the route's host name is empty. The generated host name
+ will follow this pattern: \"<route-name>.<route-namespace>.<domain>\".
+ \n It is also used as the default wildcard domain suffix for ingress.
+ The default ingresscontroller domain will follow this pattern: \"*.<domain>\".
+ \n Once set, changing domain is not currently supported."
+ type: string
+ status:
+ description: status holds observed values from the cluster. They may not
+ be overridden.
+ type: object
+ properties:
+ componentRoutes:
+ description: componentRoutes is where participating operators place
+ the current route status for routes whose hostnames and serving
+ certificates can be customized by the cluster-admin.
+ type: array
+ items:
+ description: ComponentRouteStatus contains information allowing
+ configuration of a route's hostname and serving certificate.
+ type: object
+ required:
+ - defaultHostname
+ - name
+ - namespace
+ - relatedObjects
+ properties:
+ conditions:
+ description: "conditions are used to communicate the state of
+ the componentRoutes entry. \n Supported conditions include
+ Available, Degraded and Progressing. \n If available is true,
+ the content served by the route can be accessed by users.
+ This includes cases where a default may continue to serve
+ content while the customized route specified by the cluster-admin
+ is being configured. \n If Degraded is true, that means something
+ has gone wrong trying to handle the componentRoutes entry.
+ The currentHostnames field may or may not be in effect. \n
+ If Progressing is true, that means the component is taking
+ some action related to the componentRoutes entry."
+ type: array
+ items:
+ description: "Condition contains details for one aspect of
+ the current state of this API Resource. --- This struct
+ is intended for direct use as an array at the field path
+ .status.conditions. For example, type FooStatus struct{
+ \ // Represents the observations of a foo's current state.
+ \ // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type + \ // +patchStrategy=merge // +listType=map // + +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` + \n // other fields }" + type: object + required: + - lastTransitionTime + - message + - reason + - status + - type + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should + be when the underlying condition changed. If that is + not known, then using the time when the API field changed + is acceptable. + type: string + format: date-time + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + type: string + maxLength: 32768 + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, + if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the + current state of the instance. + type: integer + format: int64 + minimum: 0 + reason: + description: reason contains a programmatic identifier + indicating the reason for the condition's last transition. + Producers of specific condition types may define expected + values and meanings for this field, and whether the + values are considered a guaranteed API. The value should + be a CamelCase string. This field may not be empty. + type: string + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + status: + description: status of the condition, one of True, False, + Unknown. + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across + resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability + to deconflict is important. The regex it matches is + (dns1123SubdomainFmt/)?(qualifiedNameFmt) + type: string + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + consumingUsers: + description: consumingUsers is a slice of ServiceAccounts that + need to have read permission on the servingCertKeyPairSecret + secret. + type: array + maxItems: 5 + items: + description: ConsumingUser is an alias for string which we + add validation to. Currently only service accounts are supported. + type: string + maxLength: 512 + minLength: 1 + pattern: ^system:serviceaccount:[a-z0-9]([-a-z0-9]*[a-z0-9])?:[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + currentHostnames: + description: currentHostnames is the list of current names used + by the route. Typically, this list should consist of a single + hostname, but if multiple hostnames are supported by the route + the operator may write multiple entries to this list. + type: array + minItems: 1 + items: + description: Hostname is an alias for hostname string validation. + type: string + format: hostname + defaultHostname: + description: defaultHostname is the hostname of this route prior + to customization. + type: string + format: hostname + name: + description: "name is the logical name of the route to customize. 
+ It does not have to be the actual name of a route resource + but it cannot be renamed. \n The namespace and name of this + componentRoute must match a corresponding entry in the list + of spec.componentRoutes if the route is to be customized." + type: string + maxLength: 256 + minLength: 1 + namespace: + description: "namespace is the namespace of the route to customize. + It must be a real namespace. Using an actual namespace ensures + that no two components will conflict and the same component + can be installed multiple times. \n The namespace and name + of this componentRoute must match a corresponding entry in + the list of spec.componentRoutes if the route is to be customized." + type: string + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + relatedObjects: + description: relatedObjects is a list of resources which are + useful when debugging or inspecting how spec.componentRoutes + is applied. + type: array + minItems: 1 + items: + description: ObjectReference contains enough information to + let you inspect or modify the referred object. + type: object + required: + - group + - name + - resource + properties: + group: + description: group of the referent. + type: string + name: + description: name of the referent. + type: string + namespace: + description: namespace of the referent. + type: string + resource: + description: resource of the referent. + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network.crd.yaml new file mode 100644 index 0000000000..ae5b3a7332 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network.crd.yaml @@ -0,0 +1,173 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: networks.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Network + listKind: NetworkList + plural: networks + singular: network + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: 'Network holds cluster-wide information about Network. The canonical + name is `cluster`. It is used to configure the desired network configuration, + such as: IP address pools for services/pod IPs, network plugin, etc. Please + view network.spec for an explanation on what applies when configuring this + resource.' + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration. As a general + rule, this SHOULD NOT be read directly. Instead, you should consume + the NetworkStatus, as it indicates the currently deployed configuration. + Currently, most spec fields are immutable after installation. Please + view the individual ones for further details on each. + type: object + properties: + clusterNetwork: + description: IP address pool to use for pod IPs. This field is immutable + after installation. + type: array + items: + description: ClusterNetworkEntry is a contiguous block of IP addresses + from which pod IPs are allocated. + type: object + properties: + cidr: + description: The complete block for pod IPs. + type: string + hostPrefix: + description: The size (prefix) of block to allocate to each + node. If this field is not used by the plugin, it can be left + unset. + type: integer + format: int32 + minimum: 0 + externalIP: + description: externalIP defines configuration for controllers that + affect Service.ExternalIP. If nil, then ExternalIP is not allowed + to be set. + type: object + properties: + autoAssignCIDRs: + description: autoAssignCIDRs is a list of CIDRs from which to + automatically assign Service.ExternalIP. These are assigned + when the service is of type LoadBalancer. In general, this is + only useful for bare-metal clusters. In Openshift 3.x, this + was misleadingly called "IngressIPs". Automatically assigned + External IPs are not affected by any ExternalIPPolicy rules. + Currently, only one entry may be provided. + type: array + items: + type: string + policy: + description: policy is a set of restrictions applied to the ExternalIP + field. If nil or empty, then ExternalIP is not allowed to be + set. + type: object + properties: + allowedCIDRs: + description: allowedCIDRs is the list of allowed CIDRs. + type: array + items: + type: string + rejectedCIDRs: + description: rejectedCIDRs is the list of disallowed CIDRs. + These take precedence over allowedCIDRs. + type: array + items: + type: string + networkType: + description: 'NetworkType is the plugin that is to be deployed (e.g. + OpenShiftSDN). This should match a value that the cluster-network-operator + understands, or else no networking will be installed. Currently + supported values are: - OpenShiftSDN This field is immutable after + installation.' + type: string + serviceNetwork: + description: IP address pool for services. Currently, we only support + a single entry here. This field is immutable after installation. + type: array + items: + type: string + serviceNodePortRange: + description: The port range allowed for Services of type NodePort. + If not specified, the default of 30000-32767 will be used. Such + Services without a NodePort specified will have one automatically + allocated from this range. This parameter can be updated after the + cluster is installed. + type: string + pattern: ^([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])-([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$ + status: + description: status holds observed values from the cluster. They may not + be overridden. + type: object + properties: + clusterNetwork: + description: IP address pool to use for pod IPs. 
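+ # For illustration only: a hypothetical Network spec matching this
+ # schema, using commonly documented example values (assumptions, not
+ # defaults mandated by this CRD):
+ #   spec:
+ #     clusterNetwork:
+ #     - cidr: 10.128.0.0/14
+ #       hostPrefix: 23
+ #     serviceNetwork:
+ #     - 172.30.0.0/16
+ #     networkType: OpenShiftSDN
+ #     serviceNodePortRange: "30000-32767"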
+ type: array + items: + description: ClusterNetworkEntry is a contiguous block of IP addresses + from which pod IPs are allocated. + type: object + properties: + cidr: + description: The complete block for pod IPs. + type: string + hostPrefix: + description: The size (prefix) of block to allocate to each + node. If this field is not used by the plugin, it can be left + unset. + type: integer + format: int32 + minimum: 0 + clusterNetworkMTU: + description: ClusterNetworkMTU is the MTU for inter-pod networking. + type: integer + migration: + description: Migration contains the cluster network migration configuration. + type: object + properties: + networkType: + description: 'NetworkType is the target plugin that is to be deployed. + Currently supported values are: OpenShiftSDN, OVNKubernetes' + type: string + enum: + - OpenShiftSDN + - OVNKubernetes + networkType: + description: NetworkType is the plugin that is deployed (e.g. OpenShiftSDN). + type: string + serviceNetwork: + description: IP address pool for services. Currently, we only support + a single entry here. + type: array + items: + type: string + served: true + storage: true diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_oauth.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_oauth.crd.yaml new file mode 100644 index 0000000000..cb4e6d6c55 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_oauth.crd.yaml @@ -0,0 +1,677 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: oauths.config.openshift.io +spec: + group: config.openshift.io + names: + kind: OAuth + listKind: OAuthList + plural: oauths + singular: oauth + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: OAuth holds cluster-wide information about OAuth. The canonical + name is `cluster`. It is used to configure the integrated OAuth server. + This configuration is only honored when the top level Authentication config + has type set to IntegratedOAuth. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + identityProviders: + description: identityProviders is an ordered list of ways for a user + to identify themselves. When this list is empty, no identities are + provisioned for users. 
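+ # For illustration only: a minimal, hypothetical spec with a single
+ # htpasswd identity provider (the referenced secret name is an assumption):
+ #   spec:
+ #     identityProviders:
+ #     - name: local-users
+ #       mappingMethod: claim
+ #       type: HTPasswd
+ #       htpasswd:
+ #         fileData:
+ #           name: htpass-secret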
+ type: array + items: + description: IdentityProvider provides identities for users authenticating + using credentials + type: object + properties: + basicAuth: + description: basicAuth contains configuration options for the + BasicAuth IdP + type: object + properties: + ca: + description: ca is an optional reference to a config map + by name containing the PEM-encoded CA bundle. It is used + as a trust anchor to validate the TLS certificate presented + by the remote server. The key "ca.crt" is used to locate + the data. If specified and the config map or expected + key is not found, the identity provider is not honored. + If the specified ca data is not valid, the identity provider + is not honored. If empty, the default system roots are + used. The namespace for this config map is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced + config map + type: string + tlsClientCert: + description: tlsClientCert is an optional reference to a + secret by name that contains the PEM-encoded TLS client + certificate to present when connecting to the server. + The key "tls.crt" is used to locate the data. If specified + and the secret or expected key is not found, the identity + provider is not honored. If the specified certificate + data is not valid, the identity provider is not honored. + The namespace for this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced + secret + type: string + tlsClientKey: + description: tlsClientKey is an optional reference to a + secret by name that contains the PEM-encoded TLS private + key for the client certificate referenced in tlsClientCert. + The key "tls.key" is used to locate the data. If specified + and the secret or expected key is not found, the identity + provider is not honored. If the specified certificate + data is not valid, the identity provider is not honored. + The namespace for this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced + secret + type: string + url: + description: url is the remote URL to connect to + type: string + github: + description: github enables user authentication using GitHub + credentials + type: object + properties: + ca: + description: ca is an optional reference to a config map + by name containing the PEM-encoded CA bundle. It is used + as a trust anchor to validate the TLS certificate presented + by the remote server. The key "ca.crt" is used to locate + the data. If specified and the config map or expected + key is not found, the identity provider is not honored. + If the specified ca data is not valid, the identity provider + is not honored. If empty, the default system roots are + used. This can only be configured when hostname is set + to a non-empty value. The namespace for this config map + is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced + config map + type: string + clientID: + description: clientID is the oauth client ID + type: string + clientSecret: + description: clientSecret is a required reference to the + secret by name containing the oauth client secret. The + key "clientSecret" is used to locate the data. If the + secret or expected key is not found, the identity provider + is not honored. The namespace for this secret is openshift-config. 
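+ # For illustration only: a hypothetical GitHub provider entry using
+ # the fields described above (identifiers are assumptions):
+ #   - name: github-sso
+ #     type: GitHub
+ #     github:
+ #       clientID: my-client-id
+ #       clientSecret:
+ #         name: github-client-secret
+ #       organizations:
+ #       - my-org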
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ secret
+ type: string
+ hostname:
+ description: hostname is the optional domain (e.g. "mycompany.com")
+ for use with a hosted instance of GitHub Enterprise. It
+ must match the GitHub Enterprise settings value configured
+ at /setup/settings#hostname.
+ type: string
+ organizations:
+ description: organizations optionally restricts which organizations
+ are allowed to log in
+ type: array
+ items:
+ type: string
+ teams:
+ description: teams optionally restricts which teams are
+ allowed to log in. Format is <org>/<team>.
+ type: array
+ items:
+ type: string
+ gitlab:
+ description: gitlab enables user authentication using GitLab
+ credentials
+ type: object
+ properties:
+ ca:
+ description: ca is an optional reference to a config map
+ by name containing the PEM-encoded CA bundle. It is used
+ as a trust anchor to validate the TLS certificate presented
+ by the remote server. The key "ca.crt" is used to locate
+ the data. If specified and the config map or expected
+ key is not found, the identity provider is not honored.
+ If the specified ca data is not valid, the identity provider
+ is not honored. If empty, the default system roots are
+ used. The namespace for this config map is openshift-config.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ config map
+ type: string
+ clientID:
+ description: clientID is the oauth client ID
+ type: string
+ clientSecret:
+ description: clientSecret is a required reference to the
+ secret by name containing the oauth client secret. The
+ key "clientSecret" is used to locate the data. If the
+ secret or expected key is not found, the identity provider
+ is not honored. The namespace for this secret is openshift-config.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ secret
+ type: string
+ url:
+ description: url is the oauth server base URL
+ type: string
+ google:
+ description: google enables user authentication using Google
+ credentials
+ type: object
+ properties:
+ clientID:
+ description: clientID is the oauth client ID
+ type: string
+ clientSecret:
+ description: clientSecret is a required reference to the
+ secret by name containing the oauth client secret. The
+ key "clientSecret" is used to locate the data. If the
+ secret or expected key is not found, the identity provider
+ is not honored. The namespace for this secret is openshift-config.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced
+ secret
+ type: string
+ hostedDomain:
+ description: hostedDomain is the optional Google App domain
+ (e.g. "mycompany.com") to restrict logins to.
+ type: string
+ htpasswd:
+ description: htpasswd enables user authentication using an HTPasswd
+ file to validate credentials
+ type: object
+ properties:
+ fileData:
+ description: fileData is a required reference to a secret
+ by name containing the data to use as the htpasswd file.
+ The key "htpasswd" is used to locate the data. If the
+ secret or expected key is not found, the identity provider
+ is not honored. If the specified htpasswd data is not
+ valid, the identity provider is not honored. The namespace
+ for this secret is openshift-config.
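+ # One way to create the secret referenced by fileData (hypothetical
+ # names; the key must be "htpasswd" and the namespace openshift-config):
+ #   oc create secret generic htpass-secret \
+ #     --from-file=htpasswd=./users.htpasswd -n openshift-config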
+ type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced + secret + type: string + keystone: + description: keystone enables user authentication using keystone + password credentials + type: object + properties: + ca: + description: ca is an optional reference to a config map + by name containing the PEM-encoded CA bundle. It is used + as a trust anchor to validate the TLS certificate presented + by the remote server. The key "ca.crt" is used to locate + the data. If specified and the config map or expected + key is not found, the identity provider is not honored. + If the specified ca data is not valid, the identity provider + is not honored. If empty, the default system roots are + used. The namespace for this config map is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced + config map + type: string + domainName: + description: domainName is required for keystone v3 + type: string + tlsClientCert: + description: tlsClientCert is an optional reference to a + secret by name that contains the PEM-encoded TLS client + certificate to present when connecting to the server. + The key "tls.crt" is used to locate the data. If specified + and the secret or expected key is not found, the identity + provider is not honored. If the specified certificate + data is not valid, the identity provider is not honored. + The namespace for this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced + secret + type: string + tlsClientKey: + description: tlsClientKey is an optional reference to a + secret by name that contains the PEM-encoded TLS private + key for the client certificate referenced in tlsClientCert. + The key "tls.key" is used to locate the data. If specified + and the secret or expected key is not found, the identity + provider is not honored. If the specified certificate + data is not valid, the identity provider is not honored. + The namespace for this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced + secret + type: string + url: + description: url is the remote URL to connect to + type: string + ldap: + description: ldap enables user authentication using LDAP credentials + type: object + properties: + attributes: + description: attributes maps LDAP attributes to identities + type: object + properties: + email: + description: email is the list of attributes whose values + should be used as the email address. Optional. If + unspecified, no email is set for the identity + type: array + items: + type: string + id: + description: id is the list of attributes whose values + should be used as the user ID. Required. First non-empty + attribute is used. At least one attribute is required. + If none of the listed attribute have a value, authentication + fails. LDAP standard identity attribute is "dn" + type: array + items: + type: string + name: + description: name is the list of attributes whose values + should be used as the display name. Optional. If unspecified, + no display name is set for the identity LDAP standard + display name attribute is "cn" + type: array + items: + type: string + preferredUsername: + description: preferredUsername is the list of attributes + whose values should be used as the preferred username. 
+ LDAP standard login attribute is "uid" + type: array + items: + type: string + bindDN: + description: bindDN is an optional DN to bind with during + the search phase. + type: string + bindPassword: + description: bindPassword is an optional reference to a + secret by name containing a password to bind with during + the search phase. The key "bindPassword" is used to locate + the data. If specified and the secret or expected key + is not found, the identity provider is not honored. The + namespace for this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced + secret + type: string + ca: + description: ca is an optional reference to a config map + by name containing the PEM-encoded CA bundle. It is used + as a trust anchor to validate the TLS certificate presented + by the remote server. The key "ca.crt" is used to locate + the data. If specified and the config map or expected + key is not found, the identity provider is not honored. + If the specified ca data is not valid, the identity provider + is not honored. If empty, the default system roots are + used. The namespace for this config map is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced + config map + type: string + insecure: + description: 'insecure, if true, indicates the connection + should not use TLS WARNING: Should not be set to `true` + with the URL scheme "ldaps://" as "ldaps://" URLs always attempt + to connect using TLS, even when `insecure` is set to `true` + When `true`, "ldap://" URLS connect insecurely. When `false`, + "ldap://" URLs are upgraded to a TLS connection using + StartTLS as specified in https://tools.ietf.org/html/rfc2830.' + type: boolean + url: + description: 'url is an RFC 2255 URL which specifies the + LDAP search parameters to use. The syntax of the URL is: + ldap://host:port/basedn?attribute?scope?filter' + type: string + mappingMethod: + description: mappingMethod determines how identities from this + provider are mapped to users Defaults to "claim" + type: string + name: + description: 'name is used to qualify the identities returned + by this provider. - It MUST be unique and not shared by any + other identity provider used - It MUST be a valid path segment: + name cannot equal "." or ".." or contain "/" or "%" or ":" Ref: + https://godoc.org/github.com/openshift/origin/pkg/user/apis/user/validation#ValidateIdentityProviderName' + type: string + openID: + description: openID enables user authentication using OpenID + credentials + type: object + properties: + ca: + description: ca is an optional reference to a config map + by name containing the PEM-encoded CA bundle. It is used + as a trust anchor to validate the TLS certificate presented + by the remote server. The key "ca.crt" is used to locate + the data. If specified and the config map or expected + key is not found, the identity provider is not honored. + If the specified ca data is not valid, the identity provider + is not honored. If empty, the default system roots are + used. The namespace for this config map is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced + config map + type: string + claims: + description: claims mappings + type: object + properties: + email: + description: email is the list of claims whose values + should be used as the email address. Optional. 
If + unspecified, no email is set for the identity + type: array + items: + type: string + name: + description: name is the list of claims whose values + should be used as the display name. Optional. If unspecified, + no display name is set for the identity + type: array + items: + type: string + preferredUsername: + description: preferredUsername is the list of claims + whose values should be used as the preferred username. + If unspecified, the preferred username is determined + from the value of the sub claim + type: array + items: + type: string + clientID: + description: clientID is the oauth client ID + type: string + clientSecret: + description: clientSecret is a required reference to the + secret by name containing the oauth client secret. The + key "clientSecret" is used to locate the data. If the + secret or expected key is not found, the identity provider + is not honored. The namespace for this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced + secret + type: string + extraAuthorizeParameters: + description: extraAuthorizeParameters are any custom parameters + to add to the authorize request. + type: object + additionalProperties: + type: string + extraScopes: + description: extraScopes are any scopes to request in addition + to the standard "openid" scope. + type: array + items: + type: string + issuer: + description: issuer is the URL that the OpenID Provider + asserts as its Issuer Identifier. It must use the https + scheme with no query or fragment component. + type: string + requestHeader: + description: requestHeader enables user authentication using + request header credentials + type: object + properties: + ca: + description: ca is a required reference to a config map + by name containing the PEM-encoded CA bundle. It is used + as a trust anchor to validate the TLS certificate presented + by the remote server. Specifically, it allows verification + of incoming requests to prevent header spoofing. The key + "ca.crt" is used to locate the data. If the config map + or expected key is not found, the identity provider is + not honored. If the specified ca data is not valid, the + identity provider is not honored. The namespace for this + config map is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced + config map + type: string + challengeURL: + description: challengeURL is a URL to redirect unauthenticated + /authorize requests to Unauthenticated requests from OAuth + clients which expect WWW-Authenticate challenges will + be redirected here. ${url} is replaced with the current + URL, escaped to be safe in a query parameter https://www.example.com/sso-login?then=${url} + ${query} is replaced with the current query string https://www.example.com/auth-proxy/oauth/authorize?${query} + Required when challenge is set to true. + type: string + clientCommonNames: + description: clientCommonNames is an optional list of common + names to require a match from. If empty, any client certificate + validated against the clientCA bundle is considered authoritative. 
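+ # For illustration only: a hypothetical requestHeader provider fragment
+ # using the fields described here (header and CA names are assumptions):
+ #   requestHeader:
+ #     ca:
+ #       name: sso-proxy-ca
+ #     clientCommonNames:
+ #     - auth-proxy.example.com
+ #     headers:
+ #     - X-Remote-User
+ #     emailHeaders:
+ #     - X-Remote-User-Email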
+ type: array + items: + type: string + emailHeaders: + description: emailHeaders is the set of headers to check + for the email address + type: array + items: + type: string + headers: + description: headers is the set of headers to check for + identity information + type: array + items: + type: string + loginURL: + description: loginURL is a URL to redirect unauthenticated + /authorize requests to Unauthenticated requests from OAuth + clients which expect interactive logins will be redirected + here ${url} is replaced with the current URL, escaped + to be safe in a query parameter https://www.example.com/sso-login?then=${url} + ${query} is replaced with the current query string https://www.example.com/auth-proxy/oauth/authorize?${query} + Required when login is set to true. + type: string + nameHeaders: + description: nameHeaders is the set of headers to check + for the display name + type: array + items: + type: string + preferredUsernameHeaders: + description: preferredUsernameHeaders is the set of headers + to check for the preferred username + type: array + items: + type: string + type: + description: type identifies the identity provider type for + this entry. + type: string + templates: + description: templates allow you to customize pages like the login + page. + type: object + properties: + error: + description: error is the name of a secret that specifies a go + template to use to render error pages during the authentication + or grant flow. The key "errors.html" is used to locate the template + data. If specified and the secret or expected key is not found, + the default error page is used. If the specified template is + not valid, the default error page is used. If unspecified, the + default error page is used. The namespace for this secret is + openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + login: + description: login is the name of a secret that specifies a go + template to use to render the login page. The key "login.html" + is used to locate the template data. If specified and the secret + or expected key is not found, the default login page is used. + If the specified template is not valid, the default login page + is used. If unspecified, the default login page is used. The + namespace for this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + providerSelection: + description: providerSelection is the name of a secret that specifies + a go template to use to render the provider selection page. + The key "providers.html" is used to locate the template data. + If specified and the secret or expected key is not found, the + default provider selection page is used. If the specified template + is not valid, the default provider selection page is used. If + unspecified, the default provider selection page is used. The + namespace for this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + tokenConfig: + description: tokenConfig contains options for authorization and access + tokens + type: object + properties: + accessTokenInactivityTimeout: + description: accessTokenInactivityTimeout defines the token inactivity + timeout for tokens granted by any client. 
The value represents + the maximum amount of time that can occur between consecutive + uses of the token. Tokens become invalid if they are not used + within this temporal window. The user will need to acquire a + new token to regain access once a token times out. Takes valid + time duration string such as "5m", "1.5h" or "2h45m". The minimum + allowed value for duration is 300s (5 minutes). If the timeout + is configured per client, then that value takes precedence. + If the timeout value is not specified and the client does not + override the value, then tokens are valid until their lifetime. + type: string + accessTokenInactivityTimeoutSeconds: + description: 'accessTokenInactivityTimeoutSeconds - DEPRECATED: + setting this field has no effect.' + type: integer + format: int32 + accessTokenMaxAgeSeconds: + description: accessTokenMaxAgeSeconds defines the maximum age + of access tokens + type: integer + format: int32 + status: + description: status holds observed values from the cluster. They may not + be overridden. + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_project.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_project.crd.yaml new file mode 100644 index 0000000000..0e433c57df --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_project.crd.yaml @@ -0,0 +1,67 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: projects.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Project + listKind: ProjectList + plural: projects + singular: project + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Project holds cluster-wide information about Project. The canonical + name is `cluster` + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + projectRequestMessage: + description: projectRequestMessage is the string presented to a user + if they are unable to request a project via the projectrequest api + endpoint + type: string + projectRequestTemplate: + description: projectRequestTemplate is the template to use for creating + projects in response to projectrequest. This must point to a template + in 'openshift-config' namespace. It is optional. If it is not specified, + a default template is used. 
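+ # For illustration only: a hypothetical Project configuration; the
+ # referenced template would live in the openshift-config namespace:
+ #   spec:
+ #     projectRequestMessage: Contact the platform team to request a project.
+ #     projectRequestTemplate:
+ #       name: project-request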
+ type: object + properties: + name: + description: name is the metadata.name of the referenced project + request template + type: string + status: + description: status holds observed values from the cluster. They may not + be overridden. + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler.crd.yaml new file mode 100644 index 0000000000..41663c4e43 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler.crd.yaml @@ -0,0 +1,107 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: schedulers.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Scheduler + listKind: SchedulerList + plural: schedulers + singular: scheduler + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Scheduler holds cluster-wide config information to run the Kubernetes + Scheduler and influence its placement decisions. The canonical name for + this config is `cluster`. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + defaultNodeSelector: + description: 'defaultNodeSelector helps set the cluster-wide default + node selector to restrict pod placement to specific nodes. This + is applied to the pods created in all namespaces and creates an + intersection with any existing nodeSelectors already set on a pod, + additionally constraining that pod''s selector. For example, defaultNodeSelector: + "type=user-node,region=east" would set nodeSelector field in pod + spec to "type=user-node,region=east" to all pods created in all + namespaces. Namespaces having project-wide node selectors won''t + be impacted even if this field is set. This adds an annotation section + to the namespace. For example, if a new namespace is created with + node-selector=''type=user-node,region=east'', the annotation openshift.io/node-selector: + type=user-node,region=east gets added to the project. When the openshift.io/node-selector + annotation is set on the project the value is used in preference + to the value we are setting for defaultNodeSelector field. For instance, + openshift.io/node-selector: "type=user-node,region=west" means that + the default of "type=user-node,region=east" set in defaultNodeSelector + would not be applied.' 
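+ # For illustration only: a hypothetical Scheduler configuration using
+ # the fields defined below (values are assumptions, not recommendations):
+ #   spec:
+ #     defaultNodeSelector: type=user-node,region=east
+ #     mastersSchedulable: false
+ #     profile: HighNodeUtilization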
+ type: string
+ mastersSchedulable:
+ description: 'MastersSchedulable allows master nodes to be schedulable.
+ When this flag is turned on, all the master nodes in the cluster
+ will be made schedulable, so that workload pods can run on them.
+ The default value for this field is false, meaning none of the master
+ nodes are schedulable. Important Note: Once the workload pods start
+ running on the master nodes, extreme care must be taken to ensure
+ that cluster-critical control plane components are not impacted.
+ Please turn on this field after doing due diligence.'
+ type: boolean
+ policy:
+ description: 'DEPRECATED: the scheduler Policy API has been deprecated
+ and will be removed in a future release. policy is a reference to
+ a ConfigMap containing scheduler policy which has user specified
+ predicates and priorities. If this ConfigMap is not available, the
+ scheduler will default to using DefaultAlgorithmProvider. The namespace
+ for this configmap is openshift-config.'
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: name is the metadata.name of the referenced config
+ map
+ type: string
+ profile:
+ description: "profile sets which scheduling profile should be used
+ to configure scheduling decisions for new pods. \n Valid
+ values are \"LowNodeUtilization\", \"HighNodeUtilization\", and \"NoScoring\".
+ Defaults to \"LowNodeUtilization\"."
+ type: string
+ enum:
+ - ""
+ - LowNodeUtilization
+ - HighNodeUtilization
+ - NoScoring
+ status:
+ description: status holds observed values from the cluster. They may not
+ be overridden.
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
diff --git a/vendor/github.com/openshift/api/config/v1/doc.go b/vendor/github.com/openshift/api/config/v1/doc.go
new file mode 100644
index 0000000000..4ff5208f2c
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/doc.go
@@ -0,0 +1,8 @@
+// +k8s:deepcopy-gen=package,register
+// +k8s:defaulter-gen=TypeMeta
+// +k8s:openapi-gen=true
+
+// +kubebuilder:validation:Optional
+// +groupName=config.openshift.io
+// Package v1 is the v1 version of the API.
+package v1
diff --git a/vendor/github.com/openshift/api/config/v1/register.go b/vendor/github.com/openshift/api/config/v1/register.go
new file mode 100644
index 0000000000..35eace3701
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/register.go
@@ -0,0 +1,70 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+ GroupName = "config.openshift.io"
+ GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+ schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ // Install is a function which adds this version to a scheme
+ Install = schemeBuilder.AddToScheme
+
+ // SchemeGroupVersion generated code relies on this name
+ // Deprecated
+ SchemeGroupVersion = GroupVersion
+ // AddToScheme exists solely to keep the old generators creating valid code
+ // DEPRECATED
+ AddToScheme = schemeBuilder.AddToScheme
+)
+
+// Resource generated code relies on this being here, but it logically belongs to the group
+// DEPRECATED
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+// Adds the list of known types to api.Scheme.
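+// For example, a consumer of this package would typically build a scheme
+// with Install before constructing a client (illustrative sketch, not part
+// of the vendored source):
+//
+//	scheme := runtime.NewScheme()
+//	if err := Install(scheme); err != nil {
+//		panic(err)
+//	}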
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(GroupVersion,
+		&APIServer{},
+		&APIServerList{},
+		&Authentication{},
+		&AuthenticationList{},
+		&Build{},
+		&BuildList{},
+		&ClusterOperator{},
+		&ClusterOperatorList{},
+		&ClusterVersion{},
+		&ClusterVersionList{},
+		&Console{},
+		&ConsoleList{},
+		&DNS{},
+		&DNSList{},
+		&FeatureGate{},
+		&FeatureGateList{},
+		&Image{},
+		&ImageList{},
+		&Infrastructure{},
+		&InfrastructureList{},
+		&Ingress{},
+		&IngressList{},
+		&Network{},
+		&NetworkList{},
+		&OAuth{},
+		&OAuthList{},
+		&OperatorHub{},
+		&OperatorHubList{},
+		&Project{},
+		&ProjectList{},
+		&Proxy{},
+		&ProxyList{},
+		&Scheduler{},
+		&SchedulerList{},
+	)
+	metav1.AddToGroupVersion(scheme, GroupVersion)
+	return nil
+}
diff --git a/vendor/github.com/openshift/api/config/v1/stringsource.go b/vendor/github.com/openshift/api/config/v1/stringsource.go
new file mode 100644
index 0000000000..6a5718c1db
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/stringsource.go
@@ -0,0 +1,31 @@
+package v1
+
+import "encoding/json"
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+// If the value is a string, it sets the Value field of the StringSource.
+// Otherwise, it is unmarshaled into the StringSourceSpec struct.
+func (s *StringSource) UnmarshalJSON(value []byte) error {
+	// If we can unmarshal to a simple string, just set the value
+	var simpleValue string
+	if err := json.Unmarshal(value, &simpleValue); err == nil {
+		s.Value = simpleValue
+		return nil
+	}
+
+	// Otherwise do the full struct unmarshal
+	return json.Unmarshal(value, &s.StringSourceSpec)
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+// If the StringSource contains only a string Value (or is empty), it is marshaled as a JSON string.
+// Otherwise, the StringSourceSpec struct is marshaled as a JSON object.
+func (s *StringSource) MarshalJSON() ([]byte, error) {
+	// If we have only a cleartext value set, do a simple string marshal
+	if s.StringSourceSpec == (StringSourceSpec{Value: s.Value}) {
+		return json.Marshal(s.Value)
+	}
+
+	// Otherwise do the full struct marshal of the externalized bits
+	return json.Marshal(s.StringSourceSpec)
+}
diff --git a/vendor/github.com/openshift/api/config/v1/types.go b/vendor/github.com/openshift/api/config/v1/types.go
new file mode 100644
index 0000000000..1427484236
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types.go
@@ -0,0 +1,312 @@
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// ConfigMapFileReference references a config map in a specific namespace.
+// The namespace must be specified at the point of use.
+type ConfigMapFileReference struct {
+	Name string `json:"name"`
+	// Key allows pointing to a specific key/value inside of the configmap. This is useful for logical file references.
+	Key string `json:"key,omitempty"`
+}
+
+// ConfigMapNameReference references a config map in a specific namespace.
+// The namespace must be specified at the point of use.
+type ConfigMapNameReference struct {
+	// name is the metadata.name of the referenced config map
+	// +kubebuilder:validation:Required
+	// +required
+	Name string `json:"name"`
+}
+
+// SecretNameReference references a secret in a specific namespace.
+// The namespace must be specified at the point of use.
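As a quick illustration of the two JSON forms handled by the marshaling code above, here is a minimal standalone sketch that round-trips a StringSource through encoding/json. The import path matches the package vendored by this patch; the secret value and file paths are illustrative placeholders.

package main

import (
	"encoding/json"
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	// A bare JSON string lands in the inline Value field.
	var inline configv1.StringSource
	if err := json.Unmarshal([]byte(`"hunter2"`), &inline); err != nil {
		panic(err)
	}
	fmt.Println(inline.Value) // hunter2

	// A JSON object is decoded into the full StringSourceSpec.
	var external configv1.StringSource
	if err := json.Unmarshal([]byte(`{"file":"/etc/secret","keyFile":"/etc/key"}`), &external); err != nil {
		panic(err)
	}
	fmt.Println(external.File, external.KeyFile) // /etc/secret /etc/key

	// Marshaling collapses back to the compact string form when only Value is set.
	// A pointer is passed so the pointer-receiver MarshalJSON is used.
	out, _ := json.Marshal(&inline)
	fmt.Println(string(out)) // "hunter2"
}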
+type SecretNameReference struct { + // name is the metadata.name of the referenced secret + // +kubebuilder:validation:Required + // +required + Name string `json:"name"` +} + +// HTTPServingInfo holds configuration for serving HTTP +type HTTPServingInfo struct { + // ServingInfo is the HTTP serving information + ServingInfo `json:",inline"` + // MaxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit. + MaxRequestsInFlight int64 `json:"maxRequestsInFlight"` + // RequestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes, if + // -1 there is no limit on requests. + RequestTimeoutSeconds int64 `json:"requestTimeoutSeconds"` +} + +// ServingInfo holds information about serving web pages +type ServingInfo struct { + // BindAddress is the ip:port to serve on + BindAddress string `json:"bindAddress"` + // BindNetwork is the type of network to bind to - defaults to "tcp4", accepts "tcp", + // "tcp4", and "tcp6" + BindNetwork string `json:"bindNetwork"` + // CertInfo is the TLS cert info for serving secure traffic. + // this is anonymous so that we can inline it for serialization + CertInfo `json:",inline"` + // ClientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates + // +optional + ClientCA string `json:"clientCA,omitempty"` + // NamedCertificates is a list of certificates to use to secure requests to specific hostnames + NamedCertificates []NamedCertificate `json:"namedCertificates,omitempty"` + // MinTLSVersion is the minimum TLS version supported. + // Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants + MinTLSVersion string `json:"minTLSVersion,omitempty"` + // CipherSuites contains an overridden list of ciphers for the server to support. + // Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants + CipherSuites []string `json:"cipherSuites,omitempty"` +} + +// CertInfo relates a certificate with a private key +type CertInfo struct { + // CertFile is a file containing a PEM-encoded certificate + CertFile string `json:"certFile"` + // KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile + KeyFile string `json:"keyFile"` +} + +// NamedCertificate specifies a certificate/key, and the names it should be served for +type NamedCertificate struct { + // Names is a list of DNS names this certificate should be used to secure + // A name can be a normal DNS name, or can contain leading wildcard segments. + Names []string `json:"names,omitempty"` + // CertInfo is the TLS cert info for serving secure traffic + CertInfo `json:",inline"` +} + +// LeaderElection provides information to elect a leader +type LeaderElection struct { + // disable allows leader election to be suspended while allowing a fully defaulted "normal" startup case. + Disable bool `json:"disable,omitempty"` + // namespace indicates which namespace the resource is in + Namespace string `json:"namespace,omitempty"` + // name indicates what name to use for the resource + Name string `json:"name,omitempty"` + + // leaseDuration is the duration that non-leader candidates will wait + // after observing a leadership renewal until attempting to acquire + // leadership of a led but unrenewed leader slot. This is effectively the + // maximum duration that a leader can be stopped before it is replaced + // by another candidate. This is only applicable if leader election is + // enabled. 
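To make the serving types above concrete, the following sketch builds a ServingInfo literal that serves one wildcard hostname from a dedicated certificate. The bind address, hostnames, and file paths are placeholders, not defaults mandated by these types.

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	serving := configv1.ServingInfo{
		BindAddress: "0.0.0.0:8443", // placeholder address
		BindNetwork: "tcp4",
		// The embedded CertInfo provides the default serving cert.
		CertInfo: configv1.CertInfo{
			CertFile: "/etc/serving/tls.crt", // placeholder path
			KeyFile:  "/etc/serving/tls.key", // placeholder path
		},
		MinTLSVersion: "VersionTLS12",
		NamedCertificates: []configv1.NamedCertificate{{
			// Leading wildcard segments are allowed in names.
			Names: []string{"*.apps.example.com"},
			CertInfo: configv1.CertInfo{
				CertFile: "/etc/serving/apps.crt", // placeholder path
				KeyFile:  "/etc/serving/apps.key", // placeholder path
			},
		}},
	}
	fmt.Printf("%+v\n", serving)
}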
+	// +nullable
+	LeaseDuration metav1.Duration `json:"leaseDuration"`
+	// renewDeadline is the interval between attempts by the acting master to
+	// renew a leadership slot before it stops leading. This must be less
+	// than or equal to the lease duration. This is only applicable if leader
+	// election is enabled.
+	// +nullable
+	RenewDeadline metav1.Duration `json:"renewDeadline"`
+	// retryPeriod is the duration the clients should wait between attempting
+	// acquisition and renewal of a leadership. This is only applicable if
+	// leader election is enabled.
+	// +nullable
+	RetryPeriod metav1.Duration `json:"retryPeriod"`
+}
+
+// StringSource allows specifying a string inline, or externally via env var or file.
+// When it contains only a string value, it marshals to a simple JSON string.
+type StringSource struct {
+	// StringSourceSpec specifies the string value, or external location
+	StringSourceSpec `json:",inline"`
+}
+
+// StringSourceSpec specifies a string value, or external location
+type StringSourceSpec struct {
+	// Value specifies the cleartext value, or an encrypted value if keyFile is specified.
+	Value string `json:"value"`
+
+	// Env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified.
+	Env string `json:"env"`
+
+	// File references a file containing the cleartext value, or an encrypted value if a keyFile is specified.
+	File string `json:"file"`
+
+	// KeyFile references a file containing the key to use to decrypt the value.
+	KeyFile string `json:"keyFile"`
+}
+
+// RemoteConnectionInfo holds information necessary for establishing a remote connection
+type RemoteConnectionInfo struct {
+	// URL is the remote URL to connect to
+	URL string `json:"url"`
+	// CA is the CA for verifying TLS connections
+	CA string `json:"ca"`
+	// CertInfo is the TLS client cert information to present
+	// this is anonymous so that we can inline it for serialization
+	CertInfo `json:",inline"`
+}
+
+type AdmissionConfig struct {
+	PluginConfig map[string]AdmissionPluginConfig `json:"pluginConfig,omitempty"`
+
+	// enabledPlugins is a list of admission plugins that must be on in addition to the default list.
+	// Some admission plugins are disabled by default, but certain configurations require them. This is fairly uncommon
+	// and can result in performance penalties and unexpected behavior.
+	EnabledAdmissionPlugins []string `json:"enabledPlugins,omitempty"`
+
+	// disabledPlugins is a list of admission plugins that must be off. Putting something in this list
+	// is almost always a mistake and likely to result in cluster instability.
+	DisabledAdmissionPlugins []string `json:"disabledPlugins,omitempty"`
+}
+
+// AdmissionPluginConfig holds the necessary configuration options for admission plugins
+type AdmissionPluginConfig struct {
+	// Location is the path to a configuration file that contains the plugin's
+	// configuration
+	Location string `json:"location"`
+
+	// Configuration is an embedded configuration object to be used as the plugin's
+	// configuration. If present, it will be used instead of the path to the configuration file.
+	// +nullable
+	// +kubebuilder:pruning:PreserveUnknownFields
+	Configuration runtime.RawExtension `json:"configuration"`
+}
+
+type LogFormatType string
+
+type WebHookModeType string
+
+const (
+	// LogFormatLegacy saves events in 1-line text format.
+	LogFormatLegacy LogFormatType = "legacy"
+	// LogFormatJson saves events in structured json format.
+	LogFormatJson LogFormatType = "json"
+
+	// WebHookModeBatch indicates that the webhook should buffer audit events
+	// internally, sending batch updates either once a certain number of
+	// events have been received or a certain amount of time has passed.
+	WebHookModeBatch WebHookModeType = "batch"
+	// WebHookModeBlocking causes the webhook to block on every attempt to process
+	// a set of events. This causes requests to the API server to wait for a
+	// round trip to the external audit service before sending a response.
+	WebHookModeBlocking WebHookModeType = "blocking"
+)
+
+// AuditConfig holds configuration for the audit capabilities
+type AuditConfig struct {
+	// If this flag is set, the audit log will be printed in the logs.
+	// The log contains the method, user, and requested URL.
+	Enabled bool `json:"enabled"`
+	// All requests coming to the apiserver will be logged to this file.
+	AuditFilePath string `json:"auditFilePath"`
+	// Maximum number of days to retain old log files based on the timestamp encoded in their filename.
+	MaximumFileRetentionDays int32 `json:"maximumFileRetentionDays"`
+	// Maximum number of old log files to retain.
+	MaximumRetainedFiles int32 `json:"maximumRetainedFiles"`
+	// Maximum size in megabytes of the log file before it gets rotated. Defaults to 100MB.
+	MaximumFileSizeMegabytes int32 `json:"maximumFileSizeMegabytes"`
+
+	// PolicyFile is a path to the file that defines the audit policy configuration.
+	PolicyFile string `json:"policyFile"`
+	// PolicyConfiguration is an embedded policy configuration object to be used
+	// as the audit policy configuration. If present, it will be used instead of
+	// the path to the policy file.
+	// +nullable
+	// +kubebuilder:pruning:PreserveUnknownFields
+	PolicyConfiguration runtime.RawExtension `json:"policyConfiguration"`
+
+	// Format of saved audits (legacy or json).
+	LogFormat LogFormatType `json:"logFormat"`
+
+	// Path to a .kubeconfig formatted file that defines the audit webhook configuration.
+	WebHookKubeConfig string `json:"webHookKubeConfig"`
+	// Strategy for sending audit events (blocking or batch).
+	WebHookMode WebHookModeType `json:"webHookMode"`
+}
+
+// EtcdConnectionInfo holds information necessary for connecting to an etcd server
+type EtcdConnectionInfo struct {
+	// URLs are the URLs for etcd
+	URLs []string `json:"urls,omitempty"`
+	// CA is a file containing trusted roots for the etcd server certificates
+	CA string `json:"ca"`
+	// CertInfo is the TLS client cert information for securing communication to etcd
+	// this is anonymous so that we can inline it for serialization
+	CertInfo `json:",inline"`
+}
+
+type EtcdStorageConfig struct {
+	EtcdConnectionInfo `json:",inline"`
+
+	// StoragePrefix is the path within etcd that the OpenShift resources will
+	// be rooted under. This value, if changed, will mean existing objects in etcd will
+	// no longer be located.
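A minimal sketch of an AuditConfig wired for batched webhook delivery, using only fields and constants defined in this file; the file paths and retention numbers are placeholders.

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	audit := configv1.AuditConfig{
		Enabled:                  true,
		AuditFilePath:            "/var/log/apiserver/audit.log", // placeholder path
		MaximumFileRetentionDays: 10,                             // placeholder retention
		MaximumRetainedFiles:     5,
		MaximumFileSizeMegabytes: 100, // matches the documented default
		LogFormat:                configv1.LogFormatJson,
		WebHookKubeConfig:        "/etc/audit/webhook.kubeconfig", // placeholder path
		// Batch mode buffers events instead of blocking API requests.
		WebHookMode: configv1.WebHookModeBatch,
	}
	fmt.Printf("%+v\n", audit)
}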
+	StoragePrefix string `json:"storagePrefix"`
+}
+
+// GenericAPIServerConfig is an inline-able struct for aggregated apiservers that need to store data in etcd
+type GenericAPIServerConfig struct {
+	// servingInfo describes how to start serving
+	ServingInfo HTTPServingInfo `json:"servingInfo"`
+
+	// corsAllowedOrigins lists the origins that are allowed to make CORS requests
+	CORSAllowedOrigins []string `json:"corsAllowedOrigins"`
+
+	// auditConfig describes how to configure audit information
+	AuditConfig AuditConfig `json:"auditConfig"`
+
+	// storageConfig contains information about how to use etcd
+	StorageConfig EtcdStorageConfig `json:"storageConfig"`
+
+	// admissionConfig holds information about how to configure admission.
+	AdmissionConfig AdmissionConfig `json:"admission"`
+
+	KubeClientConfig KubeClientConfig `json:"kubeClientConfig"`
+}
+
+type KubeClientConfig struct {
+	// kubeConfig is a .kubeconfig filename for going to the owning kube-apiserver. Empty uses an in-cluster-config
+	KubeConfig string `json:"kubeConfig"`
+
+	// connectionOverrides specifies client overrides for system components to loop back to this master.
+	ConnectionOverrides ClientConnectionOverrides `json:"connectionOverrides"`
+}
+
+type ClientConnectionOverrides struct {
+	// acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the
+	// default value of 'application/json'. This field will control all connections to the server used by a particular
+	// client.
+	AcceptContentTypes string `json:"acceptContentTypes"`
+	// contentType is the content type used when sending data to the server from this client.
+	ContentType string `json:"contentType"`
+
+	// qps controls the number of queries per second allowed for this connection.
+	QPS float32 `json:"qps"`
+	// burst allows extra queries to accumulate when a client is exceeding its rate.
+	Burst int32 `json:"burst"`
+}
+
+// GenericControllerConfig provides information to configure a controller
+type GenericControllerConfig struct {
+	// ServingInfo is the HTTP serving information for the controller's endpoints
+	ServingInfo HTTPServingInfo `json:"servingInfo"`
+
+	// leaderElection provides information to elect a leader. Only override this if you have a specific need
+	LeaderElection LeaderElection `json:"leaderElection"`
+
+	// authentication allows configuration of authentication for the endpoints
+	Authentication DelegatedAuthentication `json:"authentication"`
+	// authorization allows configuration of authorization for the endpoints
+	Authorization DelegatedAuthorization `json:"authorization"`
+}
+
+// DelegatedAuthentication allows authentication to be disabled.
+type DelegatedAuthentication struct {
+	// disabled indicates that authentication should be disabled. By default it will use delegated authentication.
+	Disabled bool `json:"disabled,omitempty"`
+}
+
+// DelegatedAuthorization allows authorization to be disabled.
+type DelegatedAuthorization struct {
+	// disabled indicates that authorization should be disabled. By default it will use delegated authorization.
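The sketch below shows how the generic controller pieces compose: HTTP serving, leader election, and delegated authentication/authorization. The namespace, lock name, and 15s/10s/2s timings are illustrative assumptions (the timings mirror common Kubernetes leader-election defaults, nothing in these types mandates them).

package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	cfg := configv1.GenericControllerConfig{
		ServingInfo: configv1.HTTPServingInfo{
			ServingInfo: configv1.ServingInfo{BindAddress: "0.0.0.0:8443"}, // placeholder
		},
		LeaderElection: configv1.LeaderElection{
			Namespace:     "openshift-example", // placeholder namespace
			Name:          "example-lock",      // placeholder lock name
			LeaseDuration: metav1.Duration{Duration: 15 * time.Second},
			RenewDeadline: metav1.Duration{Duration: 10 * time.Second},
			RetryPeriod:   metav1.Duration{Duration: 2 * time.Second},
		},
		// Zero values leave delegated authentication/authorization enabled.
		Authentication: configv1.DelegatedAuthentication{},
		Authorization:  configv1.DelegatedAuthorization{},
	}
	fmt.Printf("%+v\n", cfg)
}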
+ Disabled bool `json:"disabled,omitempty"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_apiserver.go b/vendor/github.com/openshift/api/config/v1/types_apiserver.go new file mode 100644 index 0000000000..42268db39b --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_apiserver.go @@ -0,0 +1,158 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// APIServer holds configuration (like serving certificates, client CA and CORS domains) +// shared by all API servers in the system, among them especially kube-apiserver +// and openshift-apiserver. The canonical name of an instance is 'cluster'. +type APIServer struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // spec holds user settable values for configuration + // +kubebuilder:validation:Required + // +required + Spec APIServerSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status APIServerStatus `json:"status"` +} + +type APIServerSpec struct { + // servingCert is the TLS cert info for serving secure traffic. If not specified, operator managed certificates + // will be used for serving secure traffic. + // +optional + ServingCerts APIServerServingCerts `json:"servingCerts"` + // clientCA references a ConfigMap containing a certificate bundle for the signers that will be recognized for + // incoming client certificates in addition to the operator managed signers. If this is empty, then only operator managed signers are valid. + // You usually only have to set this if you have your own PKI you wish to honor client certificates from. + // The ConfigMap must exist in the openshift-config namespace and contain the following required fields: + // - ConfigMap.Data["ca-bundle.crt"] - CA bundle. + // +optional + ClientCA ConfigMapNameReference `json:"clientCA"` + // additionalCORSAllowedOrigins lists additional, user-defined regular expressions describing hosts for which the + // API server allows access using the CORS headers. This may be needed to access the API and the integrated OAuth + // server from JavaScript applications. + // The values are regular expressions that correspond to the Golang regular expression language. + // +optional + AdditionalCORSAllowedOrigins []string `json:"additionalCORSAllowedOrigins,omitempty"` + // encryption allows the configuration of encryption of resources at the datastore layer. + // +optional + Encryption APIServerEncryption `json:"encryption"` + // tlsSecurityProfile specifies settings for TLS connections for externally exposed servers. + // + // If unset, a default (which may change between releases) is chosen. Note that only Old and + // Intermediate profiles are currently supported, and the maximum available MinTLSVersions + // is VersionTLS12. + // +optional + TLSSecurityProfile *TLSSecurityProfile `json:"tlsSecurityProfile,omitempty"` + // audit specifies the settings for audit configuration to be applied to all OpenShift-provided + // API servers in the cluster. + // +optional + // +kubebuilder:default={profile: Default} + Audit Audit `json:"audit"` +} + +// AuditProfileType defines the audit policy profile type. +// +kubebuilder:validation:Enum=Default;WriteRequestBodies;AllRequestBodies +type AuditProfileType string + +const ( + // "Default" is the existing default audit configuration policy. 
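Putting the APIServerSpec fields above together, here is a hedged sketch of the singleton APIServer object with datastore encryption and a stricter audit profile; it uses only constants defined in this file, and the choice of values is illustrative.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	apiserver := configv1.APIServer{
		// The canonical singleton is named "cluster".
		ObjectMeta: metav1.ObjectMeta{Name: "cluster"},
		Spec: configv1.APIServerSpec{
			// Encrypt the platform's sensitive resources at the datastore layer.
			Encryption: configv1.APIServerEncryption{Type: configv1.EncryptionTypeAESCBC},
			// Log request/response payloads for write requests.
			Audit: configv1.Audit{Profile: configv1.WriteRequestBodiesAuditProfileType},
		},
	}
	fmt.Printf("%s: %+v\n", apiserver.Name, apiserver.Spec)
}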
+	AuditProfileDefaultType AuditProfileType = "Default"
+
+	// "WriteRequestBodies" is similar to Default but it logs request and response
+	// HTTP payloads for write requests (create, update, patch).
+	WriteRequestBodiesAuditProfileType AuditProfileType = "WriteRequestBodies"
+
+	// "AllRequestBodies" is similar to WriteRequestBodies, but also logs request
+	// and response HTTP payloads for read requests (get, list).
+	AllRequestBodiesAuditProfileType AuditProfileType = "AllRequestBodies"
+)
+
+type Audit struct {
+	// profile specifies the name of the desired audit policy configuration to be deployed to
+	// all OpenShift-provided API servers in the cluster.
+	//
+	// The following profiles are provided:
+	// - Default: the existing default policy.
+	// - WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for
+	// write requests (create, update, patch).
+	// - AllRequestBodies: like 'WriteRequestBodies', but also logs request and response
+	// HTTP payloads for read requests (get, list).
+	//
+	// If unset, the 'Default' profile is used as the default.
+	// +kubebuilder:default=Default
+	Profile AuditProfileType `json:"profile,omitempty"`
+}
+
+type APIServerServingCerts struct {
+	// namedCertificates references secrets containing the TLS cert info for serving secure traffic to specific hostnames.
+	// If no named certificates are provided, or no named certificates match the server name as understood by a client,
+	// the defaultServingCertificate will be used.
+	// +optional
+	NamedCertificates []APIServerNamedServingCert `json:"namedCertificates,omitempty"`
+}
+
+// APIServerNamedServingCert maps a server DNS name, as understood by a client, to a certificate.
+type APIServerNamedServingCert struct {
+	// names is an optional list of explicit DNS names (leading wildcards allowed) that should use this certificate to
+	// serve secure traffic. If no names are provided, the implicit names will be extracted from the certificates.
+	// Exact names take precedence over wildcard names. Explicit names defined here take precedence over extracted implicit names.
+	// +optional
+	Names []string `json:"names,omitempty"`
+	// servingCertificate references a kubernetes.io/tls type secret containing the TLS cert info for serving secure traffic.
+	// The secret must exist in the openshift-config namespace and contain the following required fields:
+	// - Secret.Data["tls.key"] - TLS private key.
+	// - Secret.Data["tls.crt"] - TLS certificate.
+	ServingCertificate SecretNameReference `json:"servingCertificate"`
+}
+
+type APIServerEncryption struct {
+	// type defines what encryption type should be used to encrypt resources at the datastore layer.
+	// When this field is unset (i.e. when it is set to the empty string), identity is implied.
+	// The behavior of unset can and will change over time. Even if encryption is enabled by default,
+	// the meaning of unset may change to a different encryption type based on changes in best practices.
+	//
+	// When encryption is enabled, all sensitive resources shipped with the platform are encrypted.
+	// This list of sensitive resources can and will change over time. The current authoritative list is:
+	//
+	// 1. secrets
+	// 2. configmaps
+	// 3. routes.route.openshift.io
+	// 4. oauthaccesstokens.oauth.openshift.io
+	// 5.
oauthauthorizetokens.oauth.openshift.io + // + // +unionDiscriminator + // +optional + Type EncryptionType `json:"type,omitempty"` +} + +// +kubebuilder:validation:Enum="";identity;aescbc +type EncryptionType string + +const ( + // identity refers to a type where no encryption is performed at the datastore layer. + // Resources are written as-is without encryption. + EncryptionTypeIdentity EncryptionType = "identity" + + // aescbc refers to a type where AES-CBC with PKCS#7 padding and a 32-byte key + // is used to perform encryption at the datastore layer. + EncryptionTypeAESCBC EncryptionType = "aescbc" +) + +type APIServerStatus struct { +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type APIServerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []APIServer `json:"items"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_authentication.go b/vendor/github.com/openshift/api/config/v1/types_authentication.go new file mode 100644 index 0000000000..0d1041bd5f --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_authentication.go @@ -0,0 +1,151 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Authentication specifies cluster-wide settings for authentication (like OAuth and +// webhook token authenticators). The canonical name of an instance is `cluster`. +type Authentication struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +kubebuilder:validation:Required + // +required + Spec AuthenticationSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status AuthenticationStatus `json:"status"` +} + +type AuthenticationSpec struct { + // type identifies the cluster managed, user facing authentication mode in use. + // Specifically, it manages the component that responds to login attempts. + // The default is IntegratedOAuth. + // +optional + Type AuthenticationType `json:"type"` + + // oauthMetadata contains the discovery endpoint data for OAuth 2.0 + // Authorization Server Metadata for an external OAuth server. + // This discovery document can be viewed from its served location: + // oc get --raw '/.well-known/oauth-authorization-server' + // For further details, see the IETF Draft: + // https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 + // If oauthMetadata.name is non-empty, this value has precedence + // over any metadata reference stored in status. + // The key "oauthMetadata" is used to locate the data. + // If specified and the config map or expected key is not found, no metadata is served. + // If the specified metadata is not valid, no metadata is served. + // The namespace for this config map is openshift-config. + // +optional + OAuthMetadata ConfigMapNameReference `json:"oauthMetadata"` + + // webhookTokenAuthenticators is DEPRECATED, setting it has no effect. + WebhookTokenAuthenticators []DeprecatedWebhookTokenAuthenticator `json:"webhookTokenAuthenticators,omitempty"` + + // webhookTokenAuthenticator configures a remote token reviewer. + // These remote authentication webhooks can be used to verify bearer tokens + // via the tokenreviews.authentication.k8s.io REST API. 
This is required to + // honor bearer tokens that are provisioned by an external authentication service. + // +optional + WebhookTokenAuthenticator *WebhookTokenAuthenticator `json:"webhookTokenAuthenticator,omitempty"` + + // serviceAccountIssuer is the identifier of the bound service account token + // issuer. + // The default is https://kubernetes.default.svc + // WARNING: Updating this field will result in the invalidation of + // all bound tokens with the previous issuer value. Unless the + // holder of a bound token has explicit support for a change in + // issuer, they will not request a new bound token until pod + // restart or until their existing token exceeds 80% of its + // duration. + // +optional + ServiceAccountIssuer string `json:"serviceAccountIssuer"` +} + +type AuthenticationStatus struct { + // integratedOAuthMetadata contains the discovery endpoint data for OAuth 2.0 + // Authorization Server Metadata for the in-cluster integrated OAuth server. + // This discovery document can be viewed from its served location: + // oc get --raw '/.well-known/oauth-authorization-server' + // For further details, see the IETF Draft: + // https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 + // This contains the observed value based on cluster state. + // An explicitly set value in spec.oauthMetadata has precedence over this field. + // This field has no meaning if authentication spec.type is not set to IntegratedOAuth. + // The key "oauthMetadata" is used to locate the data. + // If the config map or expected key is not found, no metadata is served. + // If the specified metadata is not valid, no metadata is served. + // The namespace for this config map is openshift-config-managed. + IntegratedOAuthMetadata ConfigMapNameReference `json:"integratedOAuthMetadata"` + + // TODO if we add support for an in-cluster operator managed Keycloak instance + // KeycloakOAuthMetadata ConfigMapNameReference `json:"keycloakOAuthMetadata"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type AuthenticationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Authentication `json:"items"` +} + +type AuthenticationType string + +const ( + // None means that no cluster managed authentication system is in place. + // Note that user login will only work if a manually configured system is in place and + // referenced in authentication spec via oauthMetadata and webhookTokenAuthenticators. + AuthenticationTypeNone AuthenticationType = "None" + + // IntegratedOAuth refers to the cluster managed OAuth server. + // It is configured via the top level OAuth config. + AuthenticationTypeIntegratedOAuth AuthenticationType = "IntegratedOAuth" + + // TODO if we add support for an in-cluster operator managed Keycloak instance + // AuthenticationTypeKeycloak AuthenticationType = "Keycloak" +) + +// deprecatedWebhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator. +// It's the same as WebhookTokenAuthenticator but it's missing the 'required' validation on KubeConfig field. +type DeprecatedWebhookTokenAuthenticator struct { + // kubeConfig contains kube config file data which describes how to access the remote webhook service. + // For further details, see: + // https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication + // The key "kubeConfig" is used to locate the data. 
+	// If the secret or expected key is not found, the webhook is not honored.
+	// If the specified kube config data is not valid, the webhook is not honored.
+	// The namespace for this secret is determined by the point of use.
+	KubeConfig SecretNameReference `json:"kubeConfig"`
+}
+
+// WebhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator
+type WebhookTokenAuthenticator struct {
+	// kubeConfig references a secret that contains kube config file data which
+	// describes how to access the remote webhook service.
+	// The namespace for the referenced secret is openshift-config.
+	//
+	// For further details, see:
+	//
+	// https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication
+	//
+	// The key "kubeConfig" is used to locate the data.
+	// If the secret or expected key is not found, the webhook is not honored.
+	// If the specified kube config data is not valid, the webhook is not honored.
+	// +kubebuilder:validation:Required
+	// +required
+	KubeConfig SecretNameReference `json:"kubeConfig"`
+}
+
+const (
+	// OAuthMetadataKey is the key for the oauth authorization server metadata
+	OAuthMetadataKey = "oauthMetadata"
+
+	// KubeConfigKey is the key for the kube config file data in a secret
+	KubeConfigKey = "kubeConfig"
+)
diff --git a/vendor/github.com/openshift/api/config/v1/types_build.go b/vendor/github.com/openshift/api/config/v1/types_build.go
new file mode 100644
index 0000000000..16882e1caf
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_build.go
@@ -0,0 +1,116 @@
+package v1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Build configures the behavior of OpenShift builds for the entire cluster.
+// This includes default settings that can be overridden in BuildConfig objects, and overrides which are applied to all builds.
+//
+// The canonical name is "cluster".
+type Build struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	// Spec holds user-settable values for the build controller configuration
+	// +kubebuilder:validation:Required
+	// +required
+	Spec BuildSpec `json:"spec"`
+}
+
+type BuildSpec struct {
+	// AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that
+	// should be trusted for image pushes and pulls during builds.
+	// The namespace for this config map is openshift-config.
+	//
+	// DEPRECATED: Additional CAs for image pull and push should be set on
+	// image.config.openshift.io/cluster instead.
+	//
+	// +optional
+	AdditionalTrustedCA ConfigMapNameReference `json:"additionalTrustedCA"`
+	// BuildDefaults controls the default information for Builds
+	// +optional
+	BuildDefaults BuildDefaults `json:"buildDefaults"`
+	// BuildOverrides controls override settings for builds
+	// +optional
+	BuildOverrides BuildOverrides `json:"buildOverrides"`
+}
+
+type BuildDefaults struct {
+	// DefaultProxy contains the default proxy settings for all build operations, including image pull/push
+	// and source download.
+	//
+	// Values can be overridden by setting the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables
+	// in the build config's strategy.
+	// +optional
+	DefaultProxy *ProxySpec `json:"defaultProxy,omitempty"`
+
+	// GitProxy contains the proxy settings for git operations only.
If set, this will override
+	// any Proxy settings for all git commands, such as git clone.
+	//
+	// Values that are not set here will be inherited from DefaultProxy.
+	// +optional
+	GitProxy *ProxySpec `json:"gitProxy,omitempty"`
+
+	// Env is a set of default environment variables that will be applied to the
+	// build if the specified variables do not exist on the build.
+	// +optional
+	Env []corev1.EnvVar `json:"env,omitempty"`
+
+	// ImageLabels is a list of docker labels that are applied to the resulting image.
+	// A user can override a default label by providing a label with the same name in their
+	// Build/BuildConfig.
+	// +optional
+	ImageLabels []ImageLabel `json:"imageLabels,omitempty"`
+
+	// Resources defines resource requirements to execute the build.
+	// +optional
+	Resources corev1.ResourceRequirements `json:"resources"`
+}
+
+type ImageLabel struct {
+	// Name defines the name of the label. It must have non-zero length.
+	Name string `json:"name"`
+
+	// Value defines the literal value of the label.
+	// +optional
+	Value string `json:"value,omitempty"`
+}
+
+type BuildOverrides struct {
+	// ImageLabels is a list of docker labels that are applied to the resulting image.
+	// If a user provides a label in their Build/BuildConfig with the same name as one in this
+	// list, the user's label will be overwritten.
+	// +optional
+	ImageLabels []ImageLabel `json:"imageLabels,omitempty"`
+
+	// NodeSelector is a selector which must be true for the build pod to fit on a node
+	// +optional
+	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+
+	// Tolerations is a list of Tolerations that will override any existing
+	// tolerations set on a build pod.
+	// +optional
+	Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
+
+	// ForcePull overrides, if set, the equivalent value in the builds,
+	// i.e. false disables force pull for all builds,
+	// true enables force pull for all builds,
+	// independently of what each build specifies itself.
+	// +optional
+	ForcePull *bool `json:"forcePull,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+type BuildList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata"`
+
+	Items []Build `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go b/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go
new file mode 100644
index 0000000000..92f500dfd7
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go
@@ -0,0 +1,187 @@
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterOperator is the Custom Resource object which holds the current state
+// of an operator. This object is used by operators to convey their state to
+// the rest of the cluster.
+type ClusterOperator struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ObjectMeta `json:"metadata"`
+
+	// spec holds configuration that could apply to any operator.
+	// +kubebuilder:validation:Required
+	// +required
+	Spec ClusterOperatorSpec `json:"spec"`
+
+	// status holds the information about the state of an operator. It is consistent with status information across
+	// the Kubernetes ecosystem.
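To illustrate how the build defaults and overrides above interact, here is a hedged sketch of a BuildSpec: defaults apply only where a build does not set a value itself, while overrides always win. The environment variable, label, and node selector values are placeholders.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	spec := configv1.BuildSpec{
		BuildDefaults: configv1.BuildDefaults{
			// Applied only when a build does not set the variable itself.
			Env: []corev1.EnvVar{{Name: "EXAMPLE_DEFAULT", Value: "on"}}, // placeholder
			// A label of the same name in a Build/BuildConfig takes precedence.
			ImageLabels: []configv1.ImageLabel{{Name: "vendor", Value: "example"}}, // placeholder
		},
		BuildOverrides: configv1.BuildOverrides{
			// Overrides replace whatever the individual build specifies.
			NodeSelector: map[string]string{"node-role.kubernetes.io/worker": ""},
		},
	}
	fmt.Printf("%+v\n", spec)
}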
+	// +optional
+	Status ClusterOperatorStatus `json:"status"`
+}
+
+// ClusterOperatorSpec is empty for now, but you could imagine holding information like "pause".
+type ClusterOperatorSpec struct {
+}
+
+// ClusterOperatorStatus provides information about the status of the operator.
+// +k8s:deepcopy-gen=true
+type ClusterOperatorStatus struct {
+	// conditions describes the state of the operator's managed and monitored components.
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	// +optional
+	Conditions []ClusterOperatorStatusCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
+
+	// versions is a slice of operator and operand version tuples. Operators which manage multiple operands will have multiple
+	// operand entries in the array. Available operators must report the version of the operator itself with the name "operator".
+	// An operator reports a new "operator" version when it has rolled out the new version to all of its operands.
+	// +optional
+	Versions []OperandVersion `json:"versions,omitempty"`
+
+	// relatedObjects is a list of objects that are "interesting" or related to this operator. Common uses are:
+	// 1. the detailed resource driving the operator
+	// 2. operator namespaces
+	// 3. operand namespaces
+	// +optional
+	RelatedObjects []ObjectReference `json:"relatedObjects,omitempty"`
+
+	// extension contains any additional status information specific to the
+	// operator which owns this status object.
+	// +nullable
+	// +optional
+	// +kubebuilder:pruning:PreserveUnknownFields
+	Extension runtime.RawExtension `json:"extension"`
+}
+
+type OperandVersion struct {
+	// name is the name of the particular operand this version is for. It usually matches container images, not operators.
+	// +kubebuilder:validation:Required
+	// +required
+	Name string `json:"name"`
+
+	// version indicates which version of a particular operand is currently being managed. It must always match the Available
+	// operand. If 1.0.0 is Available, then this must indicate 1.0.0 even if the operator is trying to roll out
+	// 1.1.0.
+	// +kubebuilder:validation:Required
+	// +required
+	Version string `json:"version"`
+}
+
+// ObjectReference contains enough information to let you inspect or modify the referred object.
+type ObjectReference struct {
+	// group of the referent.
+	// +kubebuilder:validation:Required
+	// +required
+	Group string `json:"group"`
+	// resource of the referent.
+	// +kubebuilder:validation:Required
+	// +required
+	Resource string `json:"resource"`
+	// namespace of the referent.
+	// +optional
+	Namespace string `json:"namespace,omitempty"`
+	// name of the referent.
+	// +kubebuilder:validation:Required
+	// +required
+	Name string `json:"name"`
+}
+
+type ConditionStatus string
+
+// These are valid condition statuses. "ConditionTrue" means a resource is in the condition.
+// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes
+// can't decide if a resource is in the condition or not. In the future, we could add other
+// intermediate conditions, e.g. ConditionDegraded.
+const (
+	ConditionTrue    ConditionStatus = "True"
+	ConditionFalse   ConditionStatus = "False"
+	ConditionUnknown ConditionStatus = "Unknown"
+)
+
+// ClusterOperatorStatusCondition represents the state of the operator's
+// managed and monitored components.
+// +k8s:deepcopy-gen=true
+type ClusterOperatorStatusCondition struct {
+	// type specifies the aspect reported by this condition.
+	// +kubebuilder:validation:Required
+	// +required
+	Type ClusterStatusConditionType `json:"type"`
+
+	// status of the condition, one of True, False, Unknown.
+	// +kubebuilder:validation:Required
+	// +required
+	Status ConditionStatus `json:"status"`
+
+	// lastTransitionTime is the time of the last update to the current status property.
+	// +kubebuilder:validation:Required
+	// +required
+	LastTransitionTime metav1.Time `json:"lastTransitionTime"`
+
+	// reason is the CamelCase reason for the condition's current status.
+	// +optional
+	Reason string `json:"reason,omitempty"`
+
+	// message provides additional information about the current condition.
+	// This is only to be consumed by humans. It may contain Line Feed
+	// characters (U+000A), which should be rendered as new lines.
+	// +optional
+	Message string `json:"message,omitempty"`
+}
+
+// ClusterStatusConditionType is an aspect of operator state.
+type ClusterStatusConditionType string
+
+const (
+	// Available indicates that the operand (e.g. openshift-apiserver for the
+	// openshift-apiserver-operator) is functional and available in the cluster.
+	// Available=False means at least part of the component is non-functional,
+	// and that the condition requires immediate administrator intervention.
+	OperatorAvailable ClusterStatusConditionType = "Available"
+
+	// Progressing indicates that the operator is actively rolling out new code,
+	// propagating config changes, or otherwise moving from one steady state to
+	// another. Operators should not report progressing when they are reconciling
+	// a previously known state.
+	OperatorProgressing ClusterStatusConditionType = "Progressing"
+
+	// Degraded indicates that the operator's current state does not match its
+	// desired state over a period of time, resulting in a lower quality of service.
+	// The period of time may vary by component, but a Degraded state represents
+	// persistent observation of a condition. As a result, a component should not
+	// oscillate in and out of Degraded state. A service may be Available even
+	// if it is degraded. For example, a service may desire 3 running pods, but 1
+	// pod is crash-looping. The service is Available but Degraded because it
+	// may have a lower quality of service. A component may be Progressing but
+	// not Degraded because the transition from one state to another does not
+	// persist over a long enough period to report Degraded. A service should not
+	// report Degraded during the course of a normal upgrade. A service may report
+	// Degraded in response to a persistent infrastructure failure that requires
+	// eventual administrator intervention, for example when a control plane host
+	// is unhealthy and must be replaced. An operator should report Degraded if
+	// unexpected errors occur over a period, but the expectation is that all
+	// unexpected errors are handled as operators mature.
+	OperatorDegraded ClusterStatusConditionType = "Degraded"
+
+	// Upgradeable indicates whether the operator is in a state that is safe to upgrade. When status is `False`,
+	// administrators should not upgrade their cluster and the message field should contain a human readable description
+	// of what the administrator should do to allow the operator to successfully update. A missing condition, True,
+	// and Unknown are all treated by the CVO as allowing an upgrade.
+	OperatorUpgradeable ClusterStatusConditionType = "Upgradeable"
+)
+
+// ClusterOperatorList is a list of OperatorStatus resources.
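Consumers of these conditions typically scan the slice for a given type; the helper below is an illustrative sketch of that pattern and is not part of the vendored API.

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

// findCondition returns the condition with the given type, or nil if absent.
// It is a hypothetical helper written for this example.
func findCondition(conds []configv1.ClusterOperatorStatusCondition, t configv1.ClusterStatusConditionType) *configv1.ClusterOperatorStatusCondition {
	for i := range conds {
		if conds[i].Type == t {
			return &conds[i]
		}
	}
	return nil
}

func main() {
	status := configv1.ClusterOperatorStatus{
		Conditions: []configv1.ClusterOperatorStatusCondition{
			{Type: configv1.OperatorAvailable, Status: configv1.ConditionTrue},
			{Type: configv1.OperatorDegraded, Status: configv1.ConditionFalse},
		},
	}
	if c := findCondition(status.Conditions, configv1.OperatorAvailable); c != nil {
		fmt.Println("Available:", c.Status)
	}
}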
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type ClusterOperatorList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata"`
+
+	Items []ClusterOperator `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go
new file mode 100644
index 0000000000..58a65228da
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go
@@ -0,0 +1,294 @@
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterVersion is the configuration for the ClusterVersionOperator. This is where
+// parameters related to automatic updates can be set.
+type ClusterVersion struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	// spec is the desired state of the cluster version - the operator will work
+	// to ensure that the desired version is applied to the cluster.
+	// +kubebuilder:validation:Required
+	// +required
+	Spec ClusterVersionSpec `json:"spec"`
+	// status contains information about the available updates and any in-progress
+	// updates.
+	// +optional
+	Status ClusterVersionStatus `json:"status"`
+}
+
+// ClusterVersionSpec is the desired version state of the cluster. It includes
+// the version the cluster should be at, how the cluster is identified, and
+// where the cluster should look for version updates.
+// +k8s:deepcopy-gen=true
+type ClusterVersionSpec struct {
+	// clusterID uniquely identifies this cluster. This is expected to be
+	// an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in
+	// hexadecimal values). This is a required field.
+	// +kubebuilder:validation:Required
+	// +required
+	ClusterID ClusterID `json:"clusterID"`
+
+	// desiredUpdate is an optional field that indicates the desired value of
+	// the cluster version. Setting this value will trigger an upgrade (if
+	// the current version does not match the desired version). The set of
+	// recommended update values is listed as part of available updates in
+	// status, and setting values outside that range may cause the upgrade
+	// to fail. You may specify the version field without setting image if
+	// an update exists with that version in the availableUpdates or history.
+	//
+	// If an upgrade fails the operator will halt and report status
+	// about the failing component. Setting the desired update value back to
+	// the previous version will cause a rollback to be attempted. Not all
+	// rollbacks will succeed.
+	//
+	// +optional
+	DesiredUpdate *Update `json:"desiredUpdate,omitempty"`
+
+	// upstream may be used to specify the preferred update server. By default
+	// it will use the appropriate update server for the cluster and region.
+	//
+	// +optional
+	Upstream URL `json:"upstream,omitempty"`
+	// channel is an identifier for explicitly requesting that a non-default
+	// set of updates be applied to this cluster. The default channel will
+	// contain stable updates that are appropriate for production clusters.
+	//
+	// +optional
+	Channel string `json:"channel,omitempty"`
+
+	// overrides is a list of overrides for components that are managed by the
+	// cluster version operator. Marking a component unmanaged will prevent
+	// the operator from creating or updating the object.
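A hedged sketch of a ClusterVersionSpec exercising the fields described above; the UUID, channel, version, and override target are all placeholder values, and ComponentOverride is the type defined later in this file.

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	spec := configv1.ClusterVersionSpec{
		ClusterID: configv1.ClusterID("9a3478c2-1bb8-4bd7-9ee6-57f51eb7d0bd"), // placeholder UUID
		Channel:   "stable-4.8",                                               // placeholder channel
		// Requesting a version listed in availableUpdates or history;
		// image may be omitted in that case.
		DesiredUpdate: &configv1.Update{Version: "4.8.2"}, // placeholder version
		Overrides: []configv1.ComponentOverride{{
			// Marking a component unmanaged stops the operator from reconciling it.
			Kind:      "Deployment",
			Group:     "apps",
			Namespace: "openshift-example", // placeholder namespace
			Name:      "example-operator",  // placeholder name
			Unmanaged: true,
		}},
	}
	fmt.Printf("%+v\n", spec)
}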
+ // +optional + Overrides []ComponentOverride `json:"overrides,omitempty"` +} + +// ClusterVersionStatus reports the status of the cluster versioning, +// including any upgrades that are in progress. The current field will +// be set to whichever version the cluster is reconciling to, and the +// conditions array will report whether the update succeeded, is in +// progress, or is failing. +// +k8s:deepcopy-gen=true +type ClusterVersionStatus struct { + // desired is the version that the cluster is reconciling towards. + // If the cluster is not yet fully initialized desired will be set + // with the information available, which may be an image or a tag. + // +kubebuilder:validation:Required + // +required + Desired Release `json:"desired"` + + // history contains a list of the most recent versions applied to the cluster. + // This value may be empty during cluster startup, and then will be updated + // when a new update is being applied. The newest update is first in the + // list and it is ordered by recency. Updates in the history have state + // Completed if the rollout completed - if an update was failing or halfway + // applied the state will be Partial. Only a limited amount of update history + // is preserved. + // +optional + History []UpdateHistory `json:"history,omitempty"` + + // observedGeneration reports which version of the spec is being synced. + // If this value is not equal to metadata.generation, then the desired + // and conditions fields may represent a previous version. + // +kubebuilder:validation:Required + // +required + ObservedGeneration int64 `json:"observedGeneration"` + + // versionHash is a fingerprint of the content that the cluster will be + // updated with. It is used by the operator to avoid unnecessary work + // and is for internal use only. + // +kubebuilder:validation:Required + // +required + VersionHash string `json:"versionHash"` + + // conditions provides information about the cluster version. The condition + // "Available" is set to true if the desiredUpdate has been reached. The + // condition "Progressing" is set to true if an update is being applied. + // The condition "Degraded" is set to true if an update is currently blocked + // by a temporary or permanent error. Conditions are only valid for the + // current desiredUpdate when metadata.generation is equal to + // status.generation. + // +optional + Conditions []ClusterOperatorStatusCondition `json:"conditions,omitempty"` + + // availableUpdates contains the list of updates that are appropriate + // for this cluster. This list may be empty if no updates are recommended, + // if the update service is unavailable, or if an invalid channel has + // been specified. + // +nullable + // +kubebuilder:validation:Required + // +required + AvailableUpdates []Release `json:"availableUpdates"` +} + +// UpdateState is a constant representing whether an update was successfully +// applied to the cluster or not. +type UpdateState string + +const ( + // CompletedUpdate indicates an update was successfully applied + // to the cluster (all resource updates were successful). + CompletedUpdate UpdateState = "Completed" + // PartialUpdate indicates an update was never completely applied + // or is currently being applied. + PartialUpdate UpdateState = "Partial" +) + +// UpdateHistory is a single attempted update to the cluster. +type UpdateHistory struct { + // state reflects whether the update was fully applied. 
The Partial state
+	// indicates the update is not fully applied, while the Completed state
+	// indicates the update was successfully rolled out at least once (all
+	// parts of the update successfully applied).
+	// +kubebuilder:validation:Required
+	// +required
+	State UpdateState `json:"state"`
+
+	// startedTime is the time at which the update was started.
+	// +kubebuilder:validation:Required
+	// +required
+	StartedTime metav1.Time `json:"startedTime"`
+	// completionTime, if set, is when the update was fully applied. The update
+	// that is currently being applied will have a null completion time.
+	// Completion time will always be set for entries that are not the current
+	// update (usually to the started time of the next update).
+	// +kubebuilder:validation:Required
+	// +required
+	// +nullable
+	CompletionTime *metav1.Time `json:"completionTime"`
+
+	// version is a semantic version identifying the update version. If the
+	// requested image does not define a version, or if a failure occurs
+	// retrieving the image, this value may be empty.
+	//
+	// +optional
+	Version string `json:"version"`
+	// image is a container image location that contains the update. This value
+	// is always populated.
+	// +kubebuilder:validation:Required
+	// +required
+	Image string `json:"image"`
+	// verified indicates whether the provided update was properly verified
+	// before it was installed. If this is false the cluster may not be trusted.
+	// +kubebuilder:validation:Required
+	// +required
+	Verified bool `json:"verified"`
+}
+
+// ClusterID is a string RFC4122 UUID.
+type ClusterID string
+
+// ComponentOverride allows overriding cluster version operator's behavior
+// for a component.
+// +k8s:deepcopy-gen=true
+type ComponentOverride struct {
+	// kind identifies which object to override.
+	// +kubebuilder:validation:Required
+	// +required
+	Kind string `json:"kind"`
+	// group identifies the API group that the kind is in.
+	// +kubebuilder:validation:Required
+	// +required
+	Group string `json:"group"`
+
+	// namespace is the component's namespace. If the resource is cluster
+	// scoped, the namespace should be empty.
+	// +kubebuilder:validation:Required
+	// +required
+	Namespace string `json:"namespace"`
+	// name is the component's name.
+	// +kubebuilder:validation:Required
+	// +required
+	Name string `json:"name"`
+
+	// unmanaged controls if cluster version operator should stop managing the
+	// resources in this cluster.
+	// Default: false
+	// +kubebuilder:validation:Required
+	// +required
+	Unmanaged bool `json:"unmanaged"`
+}
+
+// URL is a thin wrapper around string that ensures the string is a valid URL.
+type URL string
+
+// Update represents an administrator update request.
+// +k8s:deepcopy-gen=true
+type Update struct {
+	// version is a semantic version identifying the update version. When this
+	// field is part of spec, version is optional if image is specified.
+	//
+	// +optional
+	Version string `json:"version"`
+	// image is a container image location that contains the update. When this
+	// field is part of spec, image is optional if version is specified and the
+	// availableUpdates field contains a matching version.
+	//
+	// +optional
+	Image string `json:"image"`
+	// force allows an administrator to update to an image that has failed
+	// verification, does not appear in the availableUpdates list, or otherwise
+	// would be blocked by normal protections on update.
This option should only
+	// be used when the authenticity of the provided image has been verified out
+	// of band because the provided image will run with full administrative access
+	// to the cluster. Do not use this flag with images that come from unknown
+	// or potentially malicious sources.
+	//
+	// This flag does not override other forms of consistency checking that are
+	// required before a new update is deployed.
+	//
+	// +optional
+	Force bool `json:"force"`
+}
+
+// Release represents an OpenShift release image and associated metadata.
+// +k8s:deepcopy-gen=true
+type Release struct {
+	// version is a semantic version identifying the update version. When this
+	// field is part of spec, version is optional if image is specified.
+	// +required
+	Version string `json:"version"`
+
+	// image is a container image location that contains the update. When this
+	// field is part of spec, image is optional if version is specified and the
+	// availableUpdates field contains a matching version.
+	// +required
+	Image string `json:"image"`
+
+	// url contains information about this release. This URL is set by
+	// the 'url' metadata property on a release or the metadata returned by
+	// the update API and should be displayed as a link in user
+	// interfaces. The URL field may not be set for test or nightly
+	// releases.
+	// +optional
+	URL URL `json:"url,omitempty"`
+
+	// channels is the set of Cincinnati channels to which the release
+	// currently belongs.
+	// +optional
+	Channels []string `json:"channels,omitempty"`
+}
+
+// RetrievedUpdates reports whether available updates have been retrieved from
+// the upstream update server. The condition is Unknown before retrieval, False
+// if the updates could not be retrieved or recently failed, or True if the
+// availableUpdates field is accurate and recent.
+const RetrievedUpdates ClusterStatusConditionType = "RetrievedUpdates"
+
+// ClusterVersionList is a list of ClusterVersion resources.
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type ClusterVersionList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata"`
+
+	Items []ClusterVersion `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/config/v1/types_console.go b/vendor/github.com/openshift/api/config/v1/types_console.go
new file mode 100644
index 0000000000..d64219300d
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_console.go
@@ -0,0 +1,64 @@
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Console holds cluster-wide configuration for the web console, including the
+// logout URL, and reports the public URL of the console. The canonical name is
+// `cluster`.
+type Console struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	// spec holds user settable values for configuration
+	// +kubebuilder:validation:Required
+	// +required
+	Spec ConsoleSpec `json:"spec"`
+	// status holds observed values from the cluster. They may not be overridden.
+	// +optional
+	Status ConsoleStatus `json:"status"`
+}
+
+// ConsoleSpec is the specification of the desired behavior of the Console.
+type ConsoleSpec struct {
+	// +optional
+	Authentication ConsoleAuthentication `json:"authentication"`
+}
+
+// ConsoleStatus defines the observed status of the Console.
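Stepping back to the update history semantics above: history is ordered newest-first, so the most recent fully applied version is the first Completed entry. The helper below is an illustrative sketch of that scan, not part of the vendored API.

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

// lastCompletedVersion is a hypothetical helper: it returns the newest
// version whose rollout reached the Completed state, if any.
func lastCompletedVersion(history []configv1.UpdateHistory) (string, bool) {
	for _, h := range history {
		if h.State == configv1.CompletedUpdate {
			return h.Version, true
		}
	}
	return "", false
}

func main() {
	history := []configv1.UpdateHistory{
		{State: configv1.PartialUpdate, Version: "4.8.2"},   // still rolling out
		{State: configv1.CompletedUpdate, Version: "4.8.1"}, // fully applied
	}
	if v, ok := lastCompletedVersion(history); ok {
		fmt.Println("last completed:", v) // last completed: 4.8.1
	}
}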
+type ConsoleStatus struct { + // The URL for the console. This will be derived from the host for the route that + // is created for the console. + ConsoleURL string `json:"consoleURL"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type ConsoleList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Console `json:"items"` +} + +// ConsoleAuthentication defines a list of optional configuration for console authentication. +type ConsoleAuthentication struct { + // An optional, absolute URL to redirect web browsers to after logging out of + // the console. If not specified, it will redirect to the default login page. + // This is required when using an identity provider that supports single + // sign-on (SSO) such as: + // - OpenID (Keycloak, Azure) + // - RequestHeader (GSSAPI, SSPI, SAML) + // - OAuth (GitHub, GitLab, Google) + // Logging out of the console will destroy the user's token. The logoutRedirect + // provides the user the option to perform single logout (SLO) through the identity + // provider to destroy their single sign-on session. + // +optional + // +kubebuilder:validation:Pattern=`^$|^((https):\/\/?)[^\s()<>]+(?:\([\w\d]+\)|([^[:punct:]\s]|\/?))$` + LogoutRedirect string `json:"logoutRedirect,omitempty"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_dns.go b/vendor/github.com/openshift/api/config/v1/types_dns.go new file mode 100644 index 0000000000..989ef99c3c --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_dns.go @@ -0,0 +1,87 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DNS holds cluster-wide information about DNS. The canonical name is `cluster` +type DNS struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +kubebuilder:validation:Required + // +required + Spec DNSSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status DNSStatus `json:"status"` +} + +type DNSSpec struct { + // baseDomain is the base domain of the cluster. All managed DNS records will + // be sub-domains of this base. + // + // For example, given the base domain `openshift.example.com`, an API server + // DNS record may be created for `cluster-api.openshift.example.com`. + // + // Once set, this field cannot be changed. + BaseDomain string `json:"baseDomain"` + // publicZone is the location where all the DNS records that are publicly accessible to + // the internet exist. + // + // If this field is nil, no public records should be created. + // + // Once set, this field cannot be changed. + // + // +optional + PublicZone *DNSZone `json:"publicZone,omitempty"` + // privateZone is the location where all the DNS records that are only available internally + // to the cluster exist. + // + // If this field is nil, no private records should be created. + // + // Once set, this field cannot be changed. + // + // +optional + PrivateZone *DNSZone `json:"privateZone,omitempty"` +} + +// DNSZone is used to define a DNS hosted zone. +// A zone can be identified by an ID or tags. +type DNSZone struct { + // id is the identifier that can be used to find the DNS hosted zone. 
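+ // (Illustrative only: on AWS Route 53 this would be a hosted zone ID such as
+ // "Z3M3LMPEXAMPLE"; on Azure and GCP it is the zone's configured name.)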
+ //
+ // on AWS, the zone can be fetched using `ID` as the id in [1];
+ // on Azure, the zone can be fetched using `ID` as a pre-determined name in [2];
+ // on GCP, the zone can be fetched using `ID` as a pre-determined name in [3].
+ //
+ // [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options
+ // [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show
+ // [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get
+ // +optional
+ ID string `json:"id,omitempty"`
+
+ // tags can be used to query the DNS hosted zone.
+ //
+ // on AWS, resourcegroupstaggingapi [1] can be used to fetch a zone using `Tags` as tag-filters.
+ //
+ // [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options
+ // +optional
+ Tags map[string]string `json:"tags,omitempty"`
+}
+
+type DNSStatus struct {
+ // dnsSuffix (service-ca amongst others)
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+type DNSList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ Items []DNS `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/config/v1/types_feature.go b/vendor/github.com/openshift/api/config/v1/types_feature.go
new file mode 100644
index 0000000000..b083e6d1f6
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_feature.go
@@ -0,0 +1,208 @@
+package v1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// FeatureGate holds cluster-wide information about feature gates. The canonical name is `cluster`
+type FeatureGate struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec holds user settable values for configuration
+ // +kubebuilder:validation:Required
+ // +required
+ Spec FeatureGateSpec `json:"spec"`
+ // status holds observed values from the cluster. They may not be overridden.
+ // +optional
+ Status FeatureGateStatus `json:"status"`
+}
+
+type FeatureSet string
+
+var (
+ // Default feature set that allows upgrades.
+ Default FeatureSet = ""
+
+ // TechPreviewNoUpgrade turns on tech preview features that are not part of the normal supported platform. Turning
+ // this feature set on CANNOT BE UNDONE and PREVENTS UPGRADES.
+ TechPreviewNoUpgrade FeatureSet = "TechPreviewNoUpgrade"
+
+ // CustomNoUpgrade allows the enabling or disabling of any feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE, and PREVENTS UPGRADES.
+ // Because of its nature, this setting cannot be validated. If you have any typos or accidentally apply invalid combinations
+ // your cluster may fail in an unrecoverable way.
+ CustomNoUpgrade FeatureSet = "CustomNoUpgrade"
+
+ // LatencySensitive enables TopologyManager support. Upgrades are enabled with this feature.
+ LatencySensitive FeatureSet = "LatencySensitive"
+
+ // IPv6DualStackNoUpgrade enables dual-stack. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE, and PREVENTS UPGRADES.
+ IPv6DualStackNoUpgrade FeatureSet = "IPv6DualStackNoUpgrade"
+)
+
+type FeatureGateSpec struct {
+ FeatureGateSelection `json:",inline"`
+}
+
+// +union
+type FeatureGateSelection struct {
+ // featureSet changes the list of features in the cluster. The default is empty. Be very careful adjusting this setting.
+ // Turning on or off features may cause irreversible changes in your cluster which cannot be undone.
+ // +unionDiscriminator
+ // +optional
+ FeatureSet FeatureSet `json:"featureSet,omitempty"`
+
+ // customNoUpgrade allows the enabling or disabling of any feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE, and PREVENTS UPGRADES.
+ // Because of its nature, this setting cannot be validated. If you have any typos or accidentally apply invalid combinations
+ // your cluster may fail in an unrecoverable way. featureSet must equal "CustomNoUpgrade" to use this field.
+ // +optional
+ // +nullable
+ CustomNoUpgrade *CustomFeatureGates `json:"customNoUpgrade,omitempty"`
+}
+
+type CustomFeatureGates struct {
+ // enabled is a list of all feature gates that you want to force on
+ // +optional
+ Enabled []string `json:"enabled,omitempty"`
+ // disabled is a list of all feature gates that you want to force off
+ // +optional
+ Disabled []string `json:"disabled,omitempty"`
+}
+
+type FeatureGateStatus struct {
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+type FeatureGateList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ Items []FeatureGate `json:"items"`
+}
+
+type FeatureGateEnabledDisabled struct {
+ Enabled  []string
+ Disabled []string
+}
+
+// FeatureSets contains a map of FeatureSet names to enabled/disabled features.
+//
+// NOTE: The caller needs to make sure to check for the existence of the value
+// using Go's two-value map lookup (the "comma ok" idiom). A possible scenario is an upgrade where new
+// FeatureSets are added and a controller has not been upgraded with a newer
+// version of this file. In this upgrade scenario the map could return nil.
+//
+// example:
+//   if featureSet, ok := FeatureSets["SomeNewFeature"]; ok { }
+//
+// If you put an item in either of these lists, put your area and name on it so we can find owners.
+var FeatureSets = map[FeatureSet]*FeatureGateEnabledDisabled{
+ Default: defaultFeatures,
+ CustomNoUpgrade: {
+  Enabled:  []string{},
+  Disabled: []string{},
+ },
+ TechPreviewNoUpgrade: newDefaultFeatures().
+  with("CSIDriverAzureDisk").    // sig-storage, jsafrane, OCP specific
+  with("CSIDriverVSphere").      // sig-storage, jsafrane, OCP specific
+  with("CSIMigrationAWS").       // sig-storage, jsafrane, Kubernetes feature gate
+  with("CSIMigrationOpenStack"). // sig-storage, jsafrane, Kubernetes feature gate
+  toFeatures(),
+ LatencySensitive: newDefaultFeatures().
+  with(
+   "TopologyManager", // sig-pod, sjenning
+  ).
+  toFeatures(),
+ IPv6DualStackNoUpgrade: newDefaultFeatures().
+  with(
+   "IPv6DualStack", // sig-network, danwinship
+  ).
+  toFeatures(),
+}
+
+var defaultFeatures = &FeatureGateEnabledDisabled{
+ Enabled: []string{
+  "APIPriorityAndFairness",         // sig-apimachinery, deads2k
+  "RotateKubeletServerCertificate", // sig-pod, sjenning
+  "SupportPodPidsLimit",            // sig-pod, sjenning
+  "NodeDisruptionExclusion",        // sig-scheduling, ccoleman
+  "ServiceNodeExclusion",           // sig-scheduling, ccoleman
+  "DownwardAPIHugePages",           // sig-node, rphillips
+ },
+ Disabled: []string{
+  "LegacyNodeRoleBehavior", // sig-scheduling, ccoleman
+ },
+}
+
+type featureSetBuilder struct {
+ forceOn  []string
+ forceOff []string
+}
+
+func newDefaultFeatures() *featureSetBuilder {
+ return &featureSetBuilder{}
+}
+
+func (f *featureSetBuilder) with(forceOn ...string) *featureSetBuilder {
+ f.forceOn = append(f.forceOn, forceOn...)
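+ // Returning the receiver keeps the builder chainable, e.g.
+ // newDefaultFeatures().with("FeatureA").with("FeatureB").toFeatures()
+ // (illustrative gate names only).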
+ return f +} + +func (f *featureSetBuilder) without(forceOff ...string) *featureSetBuilder { + f.forceOff = append(f.forceOff, forceOff...) + return f +} + +func (f *featureSetBuilder) isForcedOff(needle string) bool { + for _, forcedOff := range f.forceOff { + if needle == forcedOff { + return true + } + } + return false +} + +func (f *featureSetBuilder) isForcedOn(needle string) bool { + for _, forceOn := range f.forceOn { + if needle == forceOn { + return true + } + } + return false +} + +func (f *featureSetBuilder) toFeatures() *FeatureGateEnabledDisabled { + finalOn := []string{} + finalOff := []string{} + + // only add the default enabled features if they haven't been explicitly set off + for _, defaultOn := range defaultFeatures.Enabled { + if !f.isForcedOff(defaultOn) { + finalOn = append(finalOn, defaultOn) + } + } + for _, currOn := range f.forceOn { + if f.isForcedOff(currOn) { + panic("coding error, you can't have features both on and off") + } + finalOn = append(finalOn, currOn) + } + + // only add the default disabled features if they haven't been explicitly set on + for _, defaultOff := range defaultFeatures.Disabled { + if !f.isForcedOn(defaultOff) { + finalOff = append(finalOff, defaultOff) + } + } + for _, currOff := range f.forceOff { + finalOff = append(finalOff, currOff) + } + + return &FeatureGateEnabledDisabled{ + Enabled: finalOn, + Disabled: finalOff, + } +} diff --git a/vendor/github.com/openshift/api/config/v1/types_image.go b/vendor/github.com/openshift/api/config/v1/types_image.go new file mode 100644 index 0000000000..8b762a5a69 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_image.go @@ -0,0 +1,123 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Image governs policies related to imagestream imports and runtime configuration +// for external registries. It allows cluster admins to configure which registries +// OpenShift is allowed to import images from, extra CA trust bundles for external +// registries, and policies to block or allow registry hostnames. +// When exposing OpenShift's image registry to the public, this also lets cluster +// admins specify the external hostname. +type Image struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +kubebuilder:validation:Required + // +required + Spec ImageSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status ImageStatus `json:"status"` +} + +type ImageSpec struct { + // allowedRegistriesForImport limits the container image registries that normal users may import + // images from. Set this list to the registries that you trust to contain valid Docker + // images and that you want applications to be able to import from. Users with + // permission to create Images or ImageStreamMappings via the API are not affected by + // this policy - typically only administrators or system integrations will have those + // permissions. + // +optional + AllowedRegistriesForImport []RegistryLocation `json:"allowedRegistriesForImport,omitempty"` + + // externalRegistryHostnames provides the hostnames for the default external image + // registry. The external hostname should be set only when the image registry + // is exposed externally. 
The first value is used in 'publicDockerImageRepository'
+ // field in ImageStreams. The value must be in "hostname[:port]" format.
+ // +optional
+ ExternalRegistryHostnames []string `json:"externalRegistryHostnames,omitempty"`
+
+ // additionalTrustedCA is a reference to a ConfigMap containing additional CAs that
+ // should be trusted during imagestream import, pod image pull, build image pull, and
+ // imageregistry pullthrough.
+ // The namespace for this config map is openshift-config.
+ // +optional
+ AdditionalTrustedCA ConfigMapNameReference `json:"additionalTrustedCA"`
+
+ // registrySources contains configuration that determines how the container runtime
+ // should treat individual registries when accessing images for builds+pods. (e.g.
+ // whether or not to allow insecure access). It does not contain configuration for the
+ // internal cluster registry.
+ // +optional
+ RegistrySources RegistrySources `json:"registrySources"`
+}
+
+type ImageStatus struct {
+
+ // internalRegistryHostname sets the hostname for the default internal image
+ // registry. The value must be in "hostname[:port]" format.
+ // This value is set by the image registry operator which controls the internal registry
+ // hostname. For backward compatibility, users can still use OPENSHIFT_DEFAULT_REGISTRY
+ // environment variable but this setting overrides the environment variable.
+ // +optional
+ InternalRegistryHostname string `json:"internalRegistryHostname,omitempty"`
+
+ // externalRegistryHostnames provides the hostnames for the default external image
+ // registry. The external hostname should be set only when the image registry
+ // is exposed externally. The first value is used in 'publicDockerImageRepository'
+ // field in ImageStreams. The value must be in "hostname[:port]" format.
+ // +optional
+ ExternalRegistryHostnames []string `json:"externalRegistryHostnames,omitempty"`
}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+type ImageList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ Items []Image `json:"items"`
+}
+
+// RegistryLocation contains a location of the registry specified by the registry domain
+// name. The domain name might include wildcards, like '*' or '??'.
+type RegistryLocation struct {
+ // domainName specifies a domain name for the registry.
+ // If the registry uses a non-standard port (other than 80 or 443), the port should be included
+ // in the domain name as well.
+ DomainName string `json:"domainName"`
+ // insecure indicates whether the registry is secure (https) or insecure (http).
+ // By default (if not specified) the registry is assumed to be secure.
+ // +optional
+ Insecure bool `json:"insecure,omitempty"`
+}
+
+// RegistrySources holds cluster-wide information about how to handle the registries config.
+type RegistrySources struct {
+ // insecureRegistries are registries which do not have a valid TLS certificate or only support HTTP connections.
+ // +optional
+ InsecureRegistries []string `json:"insecureRegistries,omitempty"`
+ // blockedRegistries cannot be used for image pull and push actions. All other registries are permitted.
+ //
+ // Only one of BlockedRegistries or AllowedRegistries may be set.
+ // +optional
+ BlockedRegistries []string `json:"blockedRegistries,omitempty"`
+ // allowedRegistries are the only registries permitted for image pull and push actions. All other registries are denied.
+ //
+ // Only one of BlockedRegistries or AllowedRegistries may be set.
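+ //
+ // (Illustrative example only: setting allowedRegistries to
+ // ["quay.io", "registry.redhat.io"] would permit pulls and pushes solely
+ // from those two registries.)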
+ // +optional
+ AllowedRegistries []string `json:"allowedRegistries,omitempty"`
+ // containerRuntimeSearchRegistries are registries that will be searched when pulling images that do not have fully qualified
+ // domains in their pull specs. Registries will be searched in the order provided in the list.
+ // Note: this search list only works with the container runtime, i.e., CRI-O; it will NOT work with builds or imagestream imports.
+ // +optional
+ // +kubebuilder:validation:MinItems=1
+ // +kubebuilder:validation:Format=hostname
+ // +listType=set
+ ContainerRuntimeSearchRegistries []string `json:"containerRuntimeSearchRegistries,omitempty"`
+}
diff --git a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go
new file mode 100644
index 0000000000..6e78d5ea6d
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go
@@ -0,0 +1,557 @@
+package v1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:subresource:status
+
+// Infrastructure holds cluster-wide information about Infrastructure. The canonical name is `cluster`
+type Infrastructure struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec holds user settable values for configuration
+ // +kubebuilder:validation:Required
+ // +required
+ Spec InfrastructureSpec `json:"spec"`
+ // status holds observed values from the cluster. They may not be overridden.
+ // +optional
+ Status InfrastructureStatus `json:"status"`
+}
+
+// InfrastructureSpec contains settings that apply to the cluster infrastructure.
+type InfrastructureSpec struct {
+ // cloudConfig is a reference to a ConfigMap containing the cloud provider configuration file.
+ // This configuration file is used to configure the Kubernetes cloud provider integration
+ // when using the built-in cloud provider integration or the external cloud controller manager.
+ // The namespace for this config map is openshift-config.
+ //
+ // cloudConfig should only be consumed by the kube_cloud_config controller.
+ // The controller is responsible for using the user configuration in the spec
+ // for various platforms and combining that with the user provided ConfigMap in this field
+ // to create a stitched kube cloud config.
+ // The controller generates a ConfigMap `kube-cloud-config` in the `openshift-config-managed` namespace,
+ // with the kube cloud config stored under the `cloud.conf` key.
+ // All the clients are expected to use the generated ConfigMap only.
+ //
+ // +optional
+ CloudConfig ConfigMapFileReference `json:"cloudConfig"`
+
+ // platformSpec holds desired information specific to the underlying
+ // infrastructure provider.
+ PlatformSpec PlatformSpec `json:"platformSpec,omitempty"`
+}
+
+// InfrastructureStatus describes the infrastructure the cluster is leveraging.
+type InfrastructureStatus struct {
+ // infrastructureName uniquely identifies a cluster with a human-friendly name.
+ // Once set, it should not be changed. Must be of max length 27 and must have only
+ // alphanumeric or hyphen characters.
+ InfrastructureName string `json:"infrastructureName"`
+
+ // platform is the underlying infrastructure provider for the cluster.
+ //
+ // Deprecated: Use platformStatus.type instead.
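+ //
+ // (A hedged sketch, not from this patch: new consumers would typically read
+ //
+ //	if infra.Status.PlatformStatus != nil {
+ //		platformType = infra.Status.PlatformStatus.Type
+ //	}
+ //
+ // falling back to this deprecated field only for older clusters.)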
+ Platform PlatformType `json:"platform,omitempty"` + + // platformStatus holds status information specific to the underlying + // infrastructure provider. + // +optional + PlatformStatus *PlatformStatus `json:"platformStatus,omitempty"` + + // etcdDiscoveryDomain is the domain used to fetch the SRV records for discovering + // etcd servers and clients. + // For more info: https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery + // deprecated: as of 4.7, this field is no longer set or honored. It will be removed in a future release. + EtcdDiscoveryDomain string `json:"etcdDiscoveryDomain"` + + // apiServerURL is a valid URI with scheme 'https', address and + // optionally a port (defaulting to 443). apiServerURL can be used by components like the web console + // to tell users where to find the Kubernetes API. + APIServerURL string `json:"apiServerURL"` + + // apiServerInternalURL is a valid URI with scheme 'https', + // address and optionally a port (defaulting to 443). apiServerInternalURL can be used by components + // like kubelets, to contact the Kubernetes API server using the + // infrastructure provider rather than Kubernetes networking. + APIServerInternalURL string `json:"apiServerInternalURI"` + + // controlPlaneTopology expresses the expectations for operands that normally run on control nodes. + // The default is 'HighlyAvailable', which represents the behavior operators have in a "normal" cluster. + // The 'SingleReplica' mode will be used in single-node deployments + // and the operators should not configure the operand for highly-available operation + // +kubebuilder:default=HighlyAvailable + ControlPlaneTopology TopologyMode `json:"controlPlaneTopology"` + + // infrastructureTopology expresses the expectations for infrastructure services that do not run on control + // plane nodes, usually indicated by a node selector for a `role` value + // other than `master`. + // The default is 'HighlyAvailable', which represents the behavior operators have in a "normal" cluster. + // The 'SingleReplica' mode will be used in single-node deployments + // and the operators should not configure the operand for highly-available operation + // +kubebuilder:default=HighlyAvailable + InfrastructureTopology TopologyMode `json:"infrastructureTopology"` +} + +// TopologyMode defines the topology mode of the control/infra nodes. +// +kubebuilder:validation:Enum=HighlyAvailable;SingleReplica +type TopologyMode string + +const ( + // "HighlyAvailable" is for operators to configure high-availability as much as possible. + HighlyAvailableTopologyMode TopologyMode = "HighlyAvailable" + + // "SingleReplica" is for operators to avoid spending resources for high-availability purpose. + SingleReplicaTopologyMode TopologyMode = "SingleReplica" +) + +// PlatformType is a specific supported infrastructure provider. +// +kubebuilder:validation:Enum="";AWS;Azure;BareMetal;GCP;Libvirt;OpenStack;None;VSphere;oVirt;IBMCloud;KubeVirt;EquinixMetal +type PlatformType string + +const ( + // AWSPlatformType represents Amazon Web Services infrastructure. + AWSPlatformType PlatformType = "AWS" + + // AzurePlatformType represents Microsoft Azure infrastructure. + AzurePlatformType PlatformType = "Azure" + + // BareMetalPlatformType represents managed bare metal infrastructure. + BareMetalPlatformType PlatformType = "BareMetal" + + // GCPPlatformType represents Google Cloud Platform infrastructure. 
+ GCPPlatformType PlatformType = "GCP"
+
+ // LibvirtPlatformType represents libvirt infrastructure.
+ LibvirtPlatformType PlatformType = "Libvirt"
+
+ // OpenStackPlatformType represents OpenStack infrastructure.
+ OpenStackPlatformType PlatformType = "OpenStack"
+
+ // NonePlatformType means there is no infrastructure provider.
+ NonePlatformType PlatformType = "None"
+
+ // VSpherePlatformType represents VMWare vSphere infrastructure.
+ VSpherePlatformType PlatformType = "VSphere"
+
+ // OvirtPlatformType represents oVirt/RHV infrastructure.
+ OvirtPlatformType PlatformType = "oVirt"
+
+ // IBMCloudPlatformType represents IBM Cloud infrastructure.
+ IBMCloudPlatformType PlatformType = "IBMCloud"
+
+ // KubevirtPlatformType represents KubeVirt/OpenShift Virtualization infrastructure.
+ KubevirtPlatformType PlatformType = "KubeVirt"
+
+ // EquinixMetalPlatformType represents Equinix Metal infrastructure.
+ EquinixMetalPlatformType PlatformType = "EquinixMetal"
+)
+
+// IBMCloudProviderType is a specific supported IBM Cloud provider cluster type
+type IBMCloudProviderType string
+
+const (
+ // Classic means that the IBM Cloud cluster is using classic infrastructure
+ IBMCloudProviderTypeClassic IBMCloudProviderType = "Classic"
+
+ // VPC means that the IBM Cloud cluster is using VPC infrastructure
+ IBMCloudProviderTypeVPC IBMCloudProviderType = "VPC"
+)
+
+// PlatformSpec holds the desired state specific to the underlying infrastructure provider
+// of the current cluster. Since these are used at the spec level for the underlying cluster, it
+// is expected that only one of the spec structs is set.
+type PlatformSpec struct {
+ // type is the underlying infrastructure provider for the cluster. This
+ // value controls whether infrastructure automation such as service load
+ // balancers, dynamic volume provisioning, machine creation and deletion, and
+ // other integrations are enabled. If None, no infrastructure automation is
+ // enabled. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Libvirt",
+ // "OpenStack", "VSphere", "oVirt", "KubeVirt", "EquinixMetal", and "None". Individual components may not support
+ // all platforms, and must handle unrecognized platforms as None if they do
+ // not support that platform.
+ //
+ // +unionDiscriminator
+ Type PlatformType `json:"type"`
+
+ // AWS contains settings specific to the Amazon Web Services infrastructure provider.
+ // +optional
+ AWS *AWSPlatformSpec `json:"aws,omitempty"`
+
+ // Azure contains settings specific to the Azure infrastructure provider.
+ // +optional
+ Azure *AzurePlatformSpec `json:"azure,omitempty"`
+
+ // GCP contains settings specific to the Google Cloud Platform infrastructure provider.
+ // +optional
+ GCP *GCPPlatformSpec `json:"gcp,omitempty"`
+
+ // BareMetal contains settings specific to the BareMetal platform.
+ // +optional
+ BareMetal *BareMetalPlatformSpec `json:"baremetal,omitempty"`
+
+ // OpenStack contains settings specific to the OpenStack infrastructure provider.
+ // +optional
+ OpenStack *OpenStackPlatformSpec `json:"openstack,omitempty"`
+
+ // Ovirt contains settings specific to the oVirt infrastructure provider.
+ // +optional
+ Ovirt *OvirtPlatformSpec `json:"ovirt,omitempty"`
+
+ // VSphere contains settings specific to the VSphere infrastructure provider.
+ // +optional
+ VSphere *VSpherePlatformSpec `json:"vsphere,omitempty"`
+
+ // IBMCloud contains settings specific to the IBMCloud infrastructure provider.
+ // +optional
+ IBMCloud *IBMCloudPlatformSpec `json:"ibmcloud,omitempty"`
+
+ // Kubevirt contains settings specific to the kubevirt infrastructure provider.
+ // +optional
+ Kubevirt *KubevirtPlatformSpec `json:"kubevirt,omitempty"`
+
+ // EquinixMetal contains settings specific to the Equinix Metal infrastructure provider.
+ // +optional
+ EquinixMetal *EquinixMetalPlatformSpec `json:"equinixMetal,omitempty"`
+}
+
+// PlatformStatus holds the current status specific to the underlying infrastructure provider
+// of the current cluster. Since these are used at the status level for the underlying cluster, it
+// is expected that only one of the status structs is set.
+type PlatformStatus struct {
+ // type is the underlying infrastructure provider for the cluster. This
+ // value controls whether infrastructure automation such as service load
+ // balancers, dynamic volume provisioning, machine creation and deletion, and
+ // other integrations are enabled. If None, no infrastructure automation is
+ // enabled. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Libvirt",
+ // "OpenStack", "VSphere", "oVirt", "EquinixMetal", and "None". Individual components may not support
+ // all platforms, and must handle unrecognized platforms as None if they do
+ // not support that platform.
+ //
+ // This value will be synced to both `status.platform` and `status.platformStatus.type`.
+ // Currently this value cannot be changed once set.
+ Type PlatformType `json:"type"`
+
+ // AWS contains settings specific to the Amazon Web Services infrastructure provider.
+ // +optional
+ AWS *AWSPlatformStatus `json:"aws,omitempty"`
+
+ // Azure contains settings specific to the Azure infrastructure provider.
+ // +optional
+ Azure *AzurePlatformStatus `json:"azure,omitempty"`
+
+ // GCP contains settings specific to the Google Cloud Platform infrastructure provider.
+ // +optional
+ GCP *GCPPlatformStatus `json:"gcp,omitempty"`
+
+ // BareMetal contains settings specific to the BareMetal platform.
+ // +optional
+ BareMetal *BareMetalPlatformStatus `json:"baremetal,omitempty"`
+
+ // OpenStack contains settings specific to the OpenStack infrastructure provider.
+ // +optional
+ OpenStack *OpenStackPlatformStatus `json:"openstack,omitempty"`
+
+ // Ovirt contains settings specific to the oVirt infrastructure provider.
+ // +optional
+ Ovirt *OvirtPlatformStatus `json:"ovirt,omitempty"`
+
+ // VSphere contains settings specific to the VSphere infrastructure provider.
+ // +optional
+ VSphere *VSpherePlatformStatus `json:"vsphere,omitempty"`
+
+ // IBMCloud contains settings specific to the IBMCloud infrastructure provider.
+ // +optional
+ IBMCloud *IBMCloudPlatformStatus `json:"ibmcloud,omitempty"`
+
+ // Kubevirt contains settings specific to the kubevirt infrastructure provider.
+ // +optional
+ Kubevirt *KubevirtPlatformStatus `json:"kubevirt,omitempty"`
+
+ // EquinixMetal contains settings specific to the Equinix Metal infrastructure provider.
+ // +optional
+ EquinixMetal *EquinixMetalPlatformStatus `json:"equinixMetal,omitempty"`
+}
+
+// AWSServiceEndpoint stores the configuration of a custom url to
+// override existing defaults of AWS Services.
+type AWSServiceEndpoint struct {
+ // name is the name of the AWS service.
+ // The list of all the service names can be found at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html
+ // This must be provided and cannot be empty.
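+ // (Illustrative value only, e.g. "ec2".)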
+ //
+ // +kubebuilder:validation:Pattern=`^[a-z0-9-]+$`
+ Name string `json:"name"`
+
+ // url is a fully qualified URI with scheme https that overrides the default generated
+ // endpoint for a client.
+ // This must be provided and cannot be empty.
+ //
+ // +kubebuilder:validation:Pattern=`^https://`
+ URL string `json:"url"`
+}
+
+// AWSPlatformSpec holds the desired state of the Amazon Web Services infrastructure provider.
+// This only includes fields that can be modified in the cluster.
+type AWSPlatformSpec struct {
+ // serviceEndpoints list contains custom endpoints which will override the default
+ // service endpoint of AWS services.
+ // There must be only one ServiceEndpoint for a service.
+ // +optional
+ ServiceEndpoints []AWSServiceEndpoint `json:"serviceEndpoints,omitempty"`
+}
+
+// AWSPlatformStatus holds the current status of the Amazon Web Services infrastructure provider.
+type AWSPlatformStatus struct {
+ // region holds the default AWS region for new AWS resources created by the cluster.
+ Region string `json:"region"`
+
+ // serviceEndpoints list contains custom endpoints which will override the default
+ // service endpoint of AWS services.
+ // There must be only one ServiceEndpoint for a service.
+ // +optional
+ ServiceEndpoints []AWSServiceEndpoint `json:"serviceEndpoints,omitempty"`
+
+ // resourceTags is a list of additional tags to apply to AWS resources created for the cluster.
+ // See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html for information on tagging AWS resources.
+ // AWS supports a maximum of 50 tags per resource. OpenShift reserves 25 tags for its use, leaving 25 tags
+ // available for the user.
+ // +kubebuilder:validation:MaxItems=25
+ // +optional
+ ResourceTags []AWSResourceTag `json:"resourceTags,omitempty"`
+}
+
+// AWSResourceTag is a tag to apply to AWS resources created for the cluster.
+type AWSResourceTag struct {
+ // key is the key of the tag
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=128
+ // +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.:/=+-@]+$`
+ // +required
+ Key string `json:"key"`
+ // value is the value of the tag.
+ // Some AWS services do not support empty values. Since tags are added to resources in many services, the
+ // length of the tag value must meet the requirements of all services.
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:MaxLength=256
+ // +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.:/=+-@]+$`
+ // +required
+ Value string `json:"value"`
+}
+
+// AzurePlatformSpec holds the desired state of the Azure infrastructure provider.
+// This only includes fields that can be modified in the cluster.
+type AzurePlatformSpec struct{}
+
+// AzurePlatformStatus holds the current status of the Azure infrastructure provider.
+type AzurePlatformStatus struct {
+ // resourceGroupName is the Resource Group for new Azure resources created for the cluster.
+ ResourceGroupName string `json:"resourceGroupName"`
+
+ // networkResourceGroupName is the Resource Group for network resources like the Virtual Network and Subnets used by the cluster.
+ // If empty, the value is the same as ResourceGroupName.
+ // +optional
+ NetworkResourceGroupName string `json:"networkResourceGroupName,omitempty"`
+
+ // cloudName is the name of the Azure cloud environment which can be used to configure the Azure SDK
+ // with the appropriate Azure API endpoints.
If empty, the value is equal to `AzurePublicCloud`.
+ // +optional
+ CloudName AzureCloudEnvironment `json:"cloudName,omitempty"`
+}
+
+// AzureCloudEnvironment is the name of the Azure cloud environment
+// +kubebuilder:validation:Enum="";AzurePublicCloud;AzureUSGovernmentCloud;AzureChinaCloud;AzureGermanCloud
+type AzureCloudEnvironment string
+
+const (
+ // AzurePublicCloud is the general-purpose, public Azure cloud environment.
+ AzurePublicCloud AzureCloudEnvironment = "AzurePublicCloud"
+
+ // AzureUSGovernmentCloud is the Azure cloud environment for the US government.
+ AzureUSGovernmentCloud AzureCloudEnvironment = "AzureUSGovernmentCloud"
+
+ // AzureChinaCloud is the Azure cloud environment used in China.
+ AzureChinaCloud AzureCloudEnvironment = "AzureChinaCloud"
+
+ // AzureGermanCloud is the Azure cloud environment used in Germany.
+ AzureGermanCloud AzureCloudEnvironment = "AzureGermanCloud"
+)
+
+// GCPPlatformSpec holds the desired state of the Google Cloud Platform infrastructure provider.
+// This only includes fields that can be modified in the cluster.
+type GCPPlatformSpec struct{}
+
+// GCPPlatformStatus holds the current status of the Google Cloud Platform infrastructure provider.
+type GCPPlatformStatus struct {
+ // projectID is the Project ID for new GCP resources created for the cluster.
+ ProjectID string `json:"projectID"`
+
+ // region holds the region for new GCP resources created for the cluster.
+ Region string `json:"region"`
+}
+
+// BareMetalPlatformSpec holds the desired state of the BareMetal infrastructure provider.
+// This only includes fields that can be modified in the cluster.
+type BareMetalPlatformSpec struct{}
+
+// BareMetalPlatformStatus holds the current status of the BareMetal infrastructure provider.
+// For more information about the network architecture used with the BareMetal platform type, see:
+// https://github.com/openshift/installer/blob/master/docs/design/baremetal/networking-infrastructure.md
+type BareMetalPlatformStatus struct {
+ // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used
+ // by components inside the cluster, like kubelets using the infrastructure rather
+ // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI
+ // points to. It is the IP for a self-hosted load balancer in front of the API servers.
+ APIServerInternalIP string `json:"apiServerInternalIP,omitempty"`
+
+ // ingressIP is an external IP which routes to the default ingress controller.
+ // The IP is a suitable target of a wildcard DNS record used to resolve default route host names.
+ IngressIP string `json:"ingressIP,omitempty"`
+
+ // nodeDNSIP is the IP address for the internal DNS used by the
+ // nodes. Unlike the one managed by the DNS operator, `NodeDNSIP`
+ // provides name resolution for the nodes themselves. There is no DNS-as-a-service for
+ // BareMetal deployments. In order to minimize necessary changes to the
+ // datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames
+ // to the nodes in the cluster.
+ NodeDNSIP string `json:"nodeDNSIP,omitempty"`
+}
+
+// OpenStackPlatformSpec holds the desired state of the OpenStack infrastructure provider.
+// This only includes fields that can be modified in the cluster.
+type OpenStackPlatformSpec struct{}
+
+// OpenStackPlatformStatus holds the current status of the OpenStack infrastructure provider.
+type OpenStackPlatformStatus struct { + // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used + // by components inside the cluster, like kubelets using the infrastructure rather + // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI + // points to. It is the IP for a self-hosted load balancer in front of the API servers. + APIServerInternalIP string `json:"apiServerInternalIP,omitempty"` + + // cloudName is the name of the desired OpenStack cloud in the + // client configuration file (`clouds.yaml`). + CloudName string `json:"cloudName,omitempty"` + + // ingressIP is an external IP which routes to the default ingress controller. + // The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + IngressIP string `json:"ingressIP,omitempty"` + + // nodeDNSIP is the IP address for the internal DNS used by the + // nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` + // provides name resolution for the nodes themselves. There is no DNS-as-a-service for + // OpenStack deployments. In order to minimize necessary changes to the + // datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames + // to the nodes in the cluster. + NodeDNSIP string `json:"nodeDNSIP,omitempty"` +} + +// OvirtPlatformSpec holds the desired state of the oVirt infrastructure provider. +// This only includes fields that can be modified in the cluster. +type OvirtPlatformSpec struct{} + +// OvirtPlatformStatus holds the current status of the oVirt infrastructure provider. +type OvirtPlatformStatus struct { + // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used + // by components inside the cluster, like kubelets using the infrastructure rather + // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI + // points to. It is the IP for a self-hosted load balancer in front of the API servers. + APIServerInternalIP string `json:"apiServerInternalIP,omitempty"` + + // ingressIP is an external IP which routes to the default ingress controller. + // The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + IngressIP string `json:"ingressIP,omitempty"` + + // deprecated: as of 4.6, this field is no longer set or honored. It will be removed in a future release. + NodeDNSIP string `json:"nodeDNSIP,omitempty"` +} + +// VSpherePlatformSpec holds the desired state of the vSphere infrastructure provider. +// This only includes fields that can be modified in the cluster. +type VSpherePlatformSpec struct{} + +// VSpherePlatformStatus holds the current status of the vSphere infrastructure provider. +type VSpherePlatformStatus struct { + // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used + // by components inside the cluster, like kubelets using the infrastructure rather + // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI + // points to. It is the IP for a self-hosted load balancer in front of the API servers. + APIServerInternalIP string `json:"apiServerInternalIP,omitempty"` + + // ingressIP is an external IP which routes to the default ingress controller. + // The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + IngressIP string `json:"ingressIP,omitempty"` + + // nodeDNSIP is the IP address for the internal DNS used by the + // nodes. 
Unlike the one managed by the DNS operator, `NodeDNSIP`
+ // provides name resolution for the nodes themselves. There is no DNS-as-a-service for
+ // vSphere deployments. In order to minimize necessary changes to the
+ // datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames
+ // to the nodes in the cluster.
+ NodeDNSIP string `json:"nodeDNSIP,omitempty"`
+}
+
+// IBMCloudPlatformSpec holds the desired state of the IBMCloud infrastructure provider.
+// This only includes fields that can be modified in the cluster.
+type IBMCloudPlatformSpec struct{}
+
+// IBMCloudPlatformStatus holds the current status of the IBMCloud infrastructure provider.
+type IBMCloudPlatformStatus struct {
+ // Location is where the cluster has been deployed.
+ Location string `json:"location,omitempty"`
+
+ // ResourceGroupName is the Resource Group for new IBMCloud resources created for the cluster.
+ ResourceGroupName string `json:"resourceGroupName,omitempty"`
+
+ // ProviderType indicates the type of cluster that was created.
+ ProviderType IBMCloudProviderType `json:"providerType,omitempty"`
+}
+
+// KubevirtPlatformSpec holds the desired state of the kubevirt infrastructure provider.
+// This only includes fields that can be modified in the cluster.
+type KubevirtPlatformSpec struct{}
+
+// KubevirtPlatformStatus holds the current status of the kubevirt infrastructure provider.
+type KubevirtPlatformStatus struct {
+ // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used
+ // by components inside the cluster, like kubelets using the infrastructure rather
+ // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI
+ // points to. It is the IP for a self-hosted load balancer in front of the API servers.
+ APIServerInternalIP string `json:"apiServerInternalIP,omitempty"`
+
+ // ingressIP is an external IP which routes to the default ingress controller.
+ // The IP is a suitable target of a wildcard DNS record used to resolve default route host names.
+ IngressIP string `json:"ingressIP,omitempty"`
+}
+
+// EquinixMetalPlatformSpec holds the desired state of the Equinix Metal infrastructure provider.
+// This only includes fields that can be modified in the cluster.
+type EquinixMetalPlatformSpec struct{}
+
+// EquinixMetalPlatformStatus holds the current status of the Equinix Metal infrastructure provider.
+type EquinixMetalPlatformStatus struct {
+ // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used
+ // by components inside the cluster, like kubelets using the infrastructure rather
+ // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI
+ // points to. It is the IP for a self-hosted load balancer in front of the API servers.
+ APIServerInternalIP string `json:"apiServerInternalIP,omitempty"`
+
+ // ingressIP is an external IP which routes to the default ingress controller.
+ // The IP is a suitable target of a wildcard DNS record used to resolve default route host names.
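+ // (Illustrative: a wildcard record like *.apps.<cluster-domain> resolving to
+ // ingressIP would steer default route traffic to the ingress controller.)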
+ IngressIP string `json:"ingressIP,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// InfrastructureList is a list of Infrastructure resources.
+type InfrastructureList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ Items []Infrastructure `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/config/v1/types_ingress.go b/vendor/github.com/openshift/api/config/v1/types_ingress.go
new file mode 100644
index 0000000000..9451adc278
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_ingress.go
@@ -0,0 +1,181 @@
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Ingress holds cluster-wide information about ingress, including the default ingress domain
+// used for routes. The canonical name is `cluster`.
+type Ingress struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec holds user settable values for configuration
+ // +kubebuilder:validation:Required
+ // +required
+ Spec IngressSpec `json:"spec"`
+ // status holds observed values from the cluster. They may not be overridden.
+ // +optional
+ Status IngressStatus `json:"status"`
+}
+
+type IngressSpec struct {
+ // domain is used to generate a default host name for a route when the
+ // route's host name is empty. The generated host name will follow this
+ // pattern: "<route-name>.<route-namespace>.<domain>".
+ //
+ // It is also used as the default wildcard domain suffix for ingress. The
+ // default ingresscontroller domain will follow this pattern: "*.<domain>".
+ //
+ // Once set, changing domain is not currently supported.
+ Domain string `json:"domain"`
+
+ // appsDomain is an optional domain to use instead of the one specified
+ // in the domain field when a Route is created without specifying an explicit
+ // host. If appsDomain is nonempty, this value is used to generate default
+ // host values for Route. Unlike domain, appsDomain may be modified after
+ // installation.
+ // This assumes a new ingresscontroller has been set up with a wildcard
+ // certificate.
+ // +optional
+ AppsDomain string `json:"appsDomain,omitempty"`
+
+ // componentRoutes is an optional list of routes that are managed by OpenShift components
+ // that a cluster-admin is able to configure the hostname and serving certificate for.
+ // The namespace and name of each route in this list should match an existing entry in the
+ // status.componentRoutes list.
+ //
+ // To determine the set of configurable Routes, look at namespace and name of entries in the
+ // .status.componentRoutes list, where participating operators write the status of
+ // configurable routes.
+ // +optional
+ ComponentRoutes []ComponentRouteSpec `json:"componentRoutes,omitempty"`
+}
+
+// ConsumingUser is an alias for string to which we add validation. Currently only service accounts are supported.
+// +kubebuilder:validation:Pattern="^system:serviceaccount:[a-z0-9]([-a-z0-9]*[a-z0-9])?:[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$"
+// +kubebuilder:validation:MinLength=1
+// +kubebuilder:validation:MaxLength=512
+type ConsumingUser string
+
+// Hostname is an alias for hostname string validation.
+// +kubebuilder:validation:Format=hostname +type Hostname string + +type IngressStatus struct { + // componentRoutes is where participating operators place the current route status for routes whose + // hostnames and serving certificates can be customized by the cluster-admin. + // +optional + ComponentRoutes []ComponentRouteStatus `json:"componentRoutes,omitempty"` +} + +// ComponentRouteSpec allows for configuration of a route's hostname and serving certificate. +type ComponentRouteSpec struct { + // namespace is the namespace of the route to customize. + // + // The namespace and name of this componentRoute must match a corresponding + // entry in the list of status.componentRoutes if the route is to be customized. + // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=63 + // +kubebuilder:validation:Required + // +required + Namespace string `json:"namespace"` + + // name is the logical name of the route to customize. + // + // The namespace and name of this componentRoute must match a corresponding + // entry in the list of status.componentRoutes if the route is to be customized. + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=256 + // +kubebuilder:validation:Required + // +required + Name string `json:"name"` + + // hostname is the hostname that should be used by the route. + // +kubebuilder:validation:Required + // +required + Hostname Hostname `json:"hostname"` + + // servingCertKeyPairSecret is a reference to a secret of type `kubernetes.io/tls` in the openshift-config namespace. + // The serving cert/key pair must match and will be used by the operator to fulfill the intent of serving with this name. + // If the custom hostname uses the default routing suffix of the cluster, + // the Secret specification for a serving certificate will not be needed. + // +optional + ServingCertKeyPairSecret SecretNameReference `json:"servingCertKeyPairSecret"` +} + +// ComponentRouteStatus contains information allowing configuration of a route's hostname and serving certificate. +type ComponentRouteStatus struct { + // namespace is the namespace of the route to customize. It must be a real namespace. Using an actual namespace + // ensures that no two components will conflict and the same component can be installed multiple times. + // + // The namespace and name of this componentRoute must match a corresponding + // entry in the list of spec.componentRoutes if the route is to be customized. + // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=63 + // +kubebuilder:validation:Required + // +required + Namespace string `json:"namespace"` + + // name is the logical name of the route to customize. It does not have to be the actual name of a route resource + // but it cannot be renamed. + // + // The namespace and name of this componentRoute must match a corresponding + // entry in the list of spec.componentRoutes if the route is to be customized. + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=256 + // +kubebuilder:validation:Required + // +required + Name string `json:"name"` + + // defaultHostname is the hostname of this route prior to customization. 
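+ // (Hypothetical example: "console-openshift-console.apps.example.com".)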
+ // +kubebuilder:validation:Required + // +required + DefaultHostname Hostname `json:"defaultHostname"` + + // consumingUsers is a slice of ServiceAccounts that need to have read permission on the servingCertKeyPairSecret secret. + // +kubebuilder:validation:MaxItems=5 + // +optional + ConsumingUsers []ConsumingUser `json:"consumingUsers,omitempty"` + + // currentHostnames is the list of current names used by the route. Typically, this list should consist of a single + // hostname, but if multiple hostnames are supported by the route the operator may write multiple entries to this list. + // +kubebuilder:validation:MinItems=1 + // +optional + CurrentHostnames []Hostname `json:"currentHostnames,omitempty"` + + // conditions are used to communicate the state of the componentRoutes entry. + // + // Supported conditions include Available, Degraded and Progressing. + // + // If available is true, the content served by the route can be accessed by users. This includes cases + // where a default may continue to serve content while the customized route specified by the cluster-admin + // is being configured. + // + // If Degraded is true, that means something has gone wrong trying to handle the componentRoutes entry. + // The currentHostnames field may or may not be in effect. + // + // If Progressing is true, that means the component is taking some action related to the componentRoutes entry. + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // relatedObjects is a list of resources which are useful when debugging or inspecting how spec.componentRoutes is applied. + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:Required + // +required + RelatedObjects []ObjectReference `json:"relatedObjects"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type IngressList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Ingress `json:"items"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_network.go b/vendor/github.com/openshift/api/config/v1/types_network.go new file mode 100644 index 0000000000..ebfdf01626 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_network.go @@ -0,0 +1,144 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Network holds cluster-wide information about Network. The canonical name is `cluster`. It is used to configure the desired network configuration, such as: IP address pools for services/pod IPs, network plugin, etc. +// Please view network.spec for an explanation on what applies when configuring this resource. +type Network struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration. + // As a general rule, this SHOULD NOT be read directly. Instead, you should + // consume the NetworkStatus, as it indicates the currently deployed configuration. + // Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each. + // +kubebuilder:validation:Required + // +required + Spec NetworkSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status NetworkStatus `json:"status"` +} + +// NetworkSpec is the desired network configuration. +// As a general rule, this SHOULD NOT be read directly. 
Instead, you should +// consume the NetworkStatus, as it indicates the currently deployed configuration. +// Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each. +type NetworkSpec struct { + // IP address pool to use for pod IPs. + // This field is immutable after installation. + ClusterNetwork []ClusterNetworkEntry `json:"clusterNetwork"` + + // IP address pool for services. + // Currently, we only support a single entry here. + // This field is immutable after installation. + ServiceNetwork []string `json:"serviceNetwork"` + + // NetworkType is the plugin that is to be deployed (e.g. OpenShiftSDN). + // This should match a value that the cluster-network-operator understands, + // or else no networking will be installed. + // Currently supported values are: + // - OpenShiftSDN + // This field is immutable after installation. + NetworkType string `json:"networkType"` + + // externalIP defines configuration for controllers that + // affect Service.ExternalIP. If nil, then ExternalIP is + // not allowed to be set. + // +optional + ExternalIP *ExternalIPConfig `json:"externalIP,omitempty"` + + // The port range allowed for Services of type NodePort. + // If not specified, the default of 30000-32767 will be used. + // Such Services without a NodePort specified will have one + // automatically allocated from this range. + // This parameter can be updated after the cluster is + // installed. + // +kubebuilder:validation:Pattern=`^([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])-([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$` + ServiceNodePortRange string `json:"serviceNodePortRange,omitempty"` +} + +// NetworkStatus is the current network configuration. +type NetworkStatus struct { + // IP address pool to use for pod IPs. + ClusterNetwork []ClusterNetworkEntry `json:"clusterNetwork,omitempty"` + + // IP address pool for services. + // Currently, we only support a single entry here. + ServiceNetwork []string `json:"serviceNetwork,omitempty"` + + // NetworkType is the plugin that is deployed (e.g. OpenShiftSDN). + NetworkType string `json:"networkType,omitempty"` + + // ClusterNetworkMTU is the MTU for inter-pod networking. + ClusterNetworkMTU int `json:"clusterNetworkMTU,omitempty"` + + // Migration contains the cluster network migration configuration. + Migration *NetworkMigration `json:"migration,omitempty"` +} + +// ClusterNetworkEntry is a contiguous block of IP addresses from which pod IPs +// are allocated. +type ClusterNetworkEntry struct { + // The complete block for pod IPs. + CIDR string `json:"cidr"` + + // The size (prefix) of block to allocate to each node. If this + // field is not used by the plugin, it can be left unset. + // +kubebuilder:validation:Minimum=0 + // +optional + HostPrefix uint32 `json:"hostPrefix,omitempty"` +} + +// ExternalIPConfig specifies some IP blocks relevant for the ExternalIP field +// of a Service resource. +type ExternalIPConfig struct { + // policy is a set of restrictions applied to the ExternalIP field. + // If nil or empty, then ExternalIP is not allowed to be set. + // +optional + Policy *ExternalIPPolicy `json:"policy,omitempty"` + + // autoAssignCIDRs is a list of CIDRs from which to automatically assign + // Service.ExternalIP. These are assigned when the service is of type + // LoadBalancer. In general, this is only useful for bare-metal clusters. 
+ // In OpenShift 3.x, this was misleadingly called "IngressIPs".
+ // Automatically assigned External IPs are not affected by any
+ // ExternalIPPolicy rules.
+ // Currently, only one entry may be provided.
+ // +optional
+ AutoAssignCIDRs []string `json:"autoAssignCIDRs,omitempty"`
+}
+
+// ExternalIPPolicy configures exactly which IPs are allowed for the ExternalIP
+// field in a Service. If the zero struct is supplied, then none are permitted.
+// The policy controller always allows automatically assigned external IPs.
+type ExternalIPPolicy struct {
+ // allowedCIDRs is the list of allowed CIDRs.
+ AllowedCIDRs []string `json:"allowedCIDRs,omitempty"`
+
+ // rejectedCIDRs is the list of disallowed CIDRs. These take precedence
+ // over allowedCIDRs.
+ // +optional
+ RejectedCIDRs []string `json:"rejectedCIDRs,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+type NetworkList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ Items []Network `json:"items"`
+}
+
+// NetworkMigration represents the cluster network migration configuration.
+type NetworkMigration struct {
+ // NetworkType is the target plugin that is to be deployed.
+ // Currently supported values are: OpenShiftSDN, OVNKubernetes
+ // +kubebuilder:validation:Enum={"OpenShiftSDN","OVNKubernetes"}
+ NetworkType string `json:"networkType"`
+}
diff --git a/vendor/github.com/openshift/api/config/v1/types_oauth.go b/vendor/github.com/openshift/api/config/v1/types_oauth.go
new file mode 100644
index 0000000000..fcbd191aa6
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_oauth.go
@@ -0,0 +1,562 @@
+package v1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// OAuth Server and Identity Provider Config
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OAuth holds cluster-wide information about OAuth. The canonical name is `cluster`.
+// It is used to configure the integrated OAuth server.
+// This configuration is only honored when the top level Authentication config has type set to IntegratedOAuth.
+type OAuth struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata"`
+ // spec holds user settable values for configuration
+ // +kubebuilder:validation:Required
+ // +required
+ Spec OAuthSpec `json:"spec"`
+ // status holds observed values from the cluster. They may not be overridden.
+ // +optional
+ Status OAuthStatus `json:"status"`
+}
+
+// OAuthSpec contains desired cluster auth configuration
+type OAuthSpec struct {
+ // identityProviders is an ordered list of ways for a user to identify themselves.
+ // When this list is empty, no identities are provisioned for users.
+ // +optional
+ IdentityProviders []IdentityProvider `json:"identityProviders,omitempty"`
+
+ // tokenConfig contains options for authorization and access tokens
+ TokenConfig TokenConfig `json:"tokenConfig"`
+
+ // templates allow you to customize pages like the login page.
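+ // For illustration only (the secret name below is hypothetical), a
+ // customized login page could be referenced like this (yaml):
+ //
+ //   templates:
+ //     login:
+ //       name: custom-login-template
+ //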
+ // +optional
+ Templates OAuthTemplates `json:"templates"`
+}
+
+// OAuthStatus shows current known state of OAuth server in the cluster
+type OAuthStatus struct {
+ // TODO Fill in with status of identityProviders and templates (and maybe tokenConfig)
+}
+
+// TokenConfig holds the necessary configuration options for authorization and access tokens
+type TokenConfig struct {
+ // accessTokenMaxAgeSeconds defines the maximum age of access tokens
+ AccessTokenMaxAgeSeconds int32 `json:"accessTokenMaxAgeSeconds,omitempty"`
+
+ // accessTokenInactivityTimeoutSeconds - DEPRECATED: setting this field has no effect.
+ // +optional
+ AccessTokenInactivityTimeoutSeconds int32 `json:"accessTokenInactivityTimeoutSeconds,omitempty"`
+
+ // accessTokenInactivityTimeout defines the token inactivity timeout
+ // for tokens granted by any client.
+ // The value represents the maximum amount of time that can occur between
+ // consecutive uses of the token. Tokens become invalid if they are not
+ // used within this temporal window. The user will need to acquire a new
+ // token to regain access once a token times out. Takes a valid time
+ // duration string such as "5m", "1.5h" or "2h45m". The minimum allowed
+ // value for duration is 300s (5 minutes). If the timeout is configured
+ // per client, then that value takes precedence. If the timeout value is
+ // not specified and the client does not override the value, then tokens
+ // are valid for their full lifetime.
+ // +optional
+ AccessTokenInactivityTimeout *metav1.Duration `json:"accessTokenInactivityTimeout,omitempty"`
+}
+
+const (
+ // LoginTemplateKey is the key of the login template in a secret
+ LoginTemplateKey = "login.html"
+
+ // ProviderSelectionTemplateKey is the key for the provider selection template in a secret
+ ProviderSelectionTemplateKey = "providers.html"
+
+ // ErrorsTemplateKey is the key for the errors template in a secret
+ ErrorsTemplateKey = "errors.html"
+
+ // BindPasswordKey is the key for the LDAP bind password in a secret
+ BindPasswordKey = "bindPassword"
+
+ // ClientSecretKey is the key for the oauth client secret data in a secret
+ ClientSecretKey = "clientSecret"
+
+ // HTPasswdDataKey is the key for the htpasswd file data in a secret
+ HTPasswdDataKey = "htpasswd"
+)
+
+// OAuthTemplates allow for customization of pages like the login page
+type OAuthTemplates struct {
+ // login is the name of a secret that specifies a go template to use to render the login page.
+ // The key "login.html" is used to locate the template data.
+ // If specified and the secret or expected key is not found, the default login page is used.
+ // If the specified template is not valid, the default login page is used.
+ // If unspecified, the default login page is used.
+ // The namespace for this secret is openshift-config.
+ // +optional
+ Login SecretNameReference `json:"login"`
+
+ // providerSelection is the name of a secret that specifies a go template to use to render
+ // the provider selection page.
+ // The key "providers.html" is used to locate the template data.
+ // If specified and the secret or expected key is not found, the default provider selection page is used.
+ // If the specified template is not valid, the default provider selection page is used.
+ // If unspecified, the default provider selection page is used.
+ // The namespace for this secret is openshift-config.
+ // +optional + ProviderSelection SecretNameReference `json:"providerSelection"` + + // error is the name of a secret that specifies a go template to use to render error pages + // during the authentication or grant flow. + // The key "errors.html" is used to locate the template data. + // If specified and the secret or expected key is not found, the default error page is used. + // If the specified template is not valid, the default error page is used. + // If unspecified, the default error page is used. + // The namespace for this secret is openshift-config. + // +optional + Error SecretNameReference `json:"error"` +} + +// IdentityProvider provides identities for users authenticating using credentials +type IdentityProvider struct { + // name is used to qualify the identities returned by this provider. + // - It MUST be unique and not shared by any other identity provider used + // - It MUST be a valid path segment: name cannot equal "." or ".." or contain "/" or "%" or ":" + // Ref: https://godoc.org/github.com/openshift/origin/pkg/user/apis/user/validation#ValidateIdentityProviderName + Name string `json:"name"` + + // mappingMethod determines how identities from this provider are mapped to users + // Defaults to "claim" + // +optional + MappingMethod MappingMethodType `json:"mappingMethod,omitempty"` + + IdentityProviderConfig `json:",inline"` +} + +// MappingMethodType specifies how new identities should be mapped to users when they log in +type MappingMethodType string + +const ( + // MappingMethodClaim provisions a user with the identity’s preferred user name. Fails if a user + // with that user name is already mapped to another identity. + // Default. + MappingMethodClaim MappingMethodType = "claim" + + // MappingMethodLookup looks up existing users already mapped to an identity but does not + // automatically provision users or identities. Requires identities and users be set up + // manually or using an external process. + MappingMethodLookup MappingMethodType = "lookup" + + // MappingMethodAdd provisions a user with the identity’s preferred user name. If a user with + // that user name already exists, the identity is mapped to the existing user, adding to any + // existing identity mappings for the user. 
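+ // (Illustrative example: with mappingMethod "add", two identity providers
+ // that both resolve a login to the preferred user name "alice" would map
+ // both identities onto the single user "alice" rather than failing.)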
+ MappingMethodAdd MappingMethodType = "add"
+)
+
+type IdentityProviderType string
+
+const (
+ // IdentityProviderTypeBasicAuth provides identities for users authenticating with HTTP Basic Auth
+ IdentityProviderTypeBasicAuth IdentityProviderType = "BasicAuth"
+
+ // IdentityProviderTypeGitHub provides identities for users authenticating using GitHub credentials
+ IdentityProviderTypeGitHub IdentityProviderType = "GitHub"
+
+ // IdentityProviderTypeGitLab provides identities for users authenticating using GitLab credentials
+ IdentityProviderTypeGitLab IdentityProviderType = "GitLab"
+
+ // IdentityProviderTypeGoogle provides identities for users authenticating using Google credentials
+ IdentityProviderTypeGoogle IdentityProviderType = "Google"
+
+ // IdentityProviderTypeHTPasswd provides identities from an HTPasswd file
+ IdentityProviderTypeHTPasswd IdentityProviderType = "HTPasswd"
+
+ // IdentityProviderTypeKeystone provides identities for users authenticating using keystone password credentials
+ IdentityProviderTypeKeystone IdentityProviderType = "Keystone"
+
+ // IdentityProviderTypeLDAP provides identities for users authenticating using LDAP credentials
+ IdentityProviderTypeLDAP IdentityProviderType = "LDAP"
+
+ // IdentityProviderTypeOpenID provides identities for users authenticating using OpenID credentials
+ IdentityProviderTypeOpenID IdentityProviderType = "OpenID"
+
+ // IdentityProviderTypeRequestHeader provides identities for users authenticating using request header credentials
+ IdentityProviderTypeRequestHeader IdentityProviderType = "RequestHeader"
+)
+
+// IdentityProviderConfig contains configuration for using a specific identity provider
+type IdentityProviderConfig struct {
+ // type identifies the identity provider type for this entry.
+ Type IdentityProviderType `json:"type"`
+
+ // Provider-specific configuration
+ // The json tag MUST match the `Type` specified above, case-insensitively
+ // e.g.
For `Type: "LDAP"`, the `ldap` configuration should be provided
+
+ // basicAuth contains configuration options for the BasicAuth IdP
+ // +optional
+ BasicAuth *BasicAuthIdentityProvider `json:"basicAuth,omitempty"`
+
+ // github enables user authentication using GitHub credentials
+ // +optional
+ GitHub *GitHubIdentityProvider `json:"github,omitempty"`
+
+ // gitlab enables user authentication using GitLab credentials
+ // +optional
+ GitLab *GitLabIdentityProvider `json:"gitlab,omitempty"`
+
+ // google enables user authentication using Google credentials
+ // +optional
+ Google *GoogleIdentityProvider `json:"google,omitempty"`
+
+ // htpasswd enables user authentication using an HTPasswd file to validate credentials
+ // +optional
+ HTPasswd *HTPasswdIdentityProvider `json:"htpasswd,omitempty"`
+
+ // keystone enables user authentication using keystone password credentials
+ // +optional
+ Keystone *KeystoneIdentityProvider `json:"keystone,omitempty"`
+
+ // ldap enables user authentication using LDAP credentials
+ // +optional
+ LDAP *LDAPIdentityProvider `json:"ldap,omitempty"`
+
+ // openID enables user authentication using OpenID credentials
+ // +optional
+ OpenID *OpenIDIdentityProvider `json:"openID,omitempty"`
+
+ // requestHeader enables user authentication using request header credentials
+ // +optional
+ RequestHeader *RequestHeaderIdentityProvider `json:"requestHeader,omitempty"`
+}
+
+// BasicAuthIdentityProvider provides identities for users authenticating using HTTP basic auth credentials
+type BasicAuthIdentityProvider struct {
+ // OAuthRemoteConnectionInfo contains information about how to connect to the external basic auth server
+ OAuthRemoteConnectionInfo `json:",inline"`
+}
+
+// OAuthRemoteConnectionInfo holds information necessary for establishing a remote connection
+type OAuthRemoteConnectionInfo struct {
+ // url is the remote URL to connect to
+ URL string `json:"url"`
+
+ // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle.
+ // It is used as a trust anchor to validate the TLS certificate presented by the remote server.
+ // The key "ca.crt" is used to locate the data.
+ // If specified and the config map or expected key is not found, the identity provider is not honored.
+ // If the specified ca data is not valid, the identity provider is not honored.
+ // If empty, the default system roots are used.
+ // The namespace for this config map is openshift-config.
+ // +optional
+ CA ConfigMapNameReference `json:"ca"`
+
+ // tlsClientCert is an optional reference to a secret by name that contains the
+ // PEM-encoded TLS client certificate to present when connecting to the server.
+ // The key "tls.crt" is used to locate the data.
+ // If specified and the secret or expected key is not found, the identity provider is not honored.
+ // If the specified certificate data is not valid, the identity provider is not honored.
+ // The namespace for this secret is openshift-config.
+ // +optional
+ TLSClientCert SecretNameReference `json:"tlsClientCert"`
+
+ // tlsClientKey is an optional reference to a secret by name that contains the
+ // PEM-encoded TLS private key for the client certificate referenced in tlsClientCert.
+ // The key "tls.key" is used to locate the data.
+ // If specified and the secret or expected key is not found, the identity provider is not honored.
+ // If the specified certificate data is not valid, the identity provider is not honored.
+ // The namespace for this secret is openshift-config.
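+ // For illustration (the secret name is hypothetical), tlsClientCert and
+ // tlsClientKey would typically reference the same kubernetes.io/tls-style
+ // secret, which holds both keys (yaml):
+ //
+ //   tlsClientCert:
+ //     name: basic-auth-mtls
+ //   tlsClientKey:
+ //     name: basic-auth-mtls
+ //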
+ // +optional
+ TLSClientKey SecretNameReference `json:"tlsClientKey"`
+}
+
+// HTPasswdIdentityProvider provides identities for users authenticating using htpasswd credentials
+type HTPasswdIdentityProvider struct {
+ // fileData is a required reference to a secret by name containing the data to use as the htpasswd file.
+ // The key "htpasswd" is used to locate the data.
+ // If the secret or expected key is not found, the identity provider is not honored.
+ // If the specified htpasswd data is not valid, the identity provider is not honored.
+ // The namespace for this secret is openshift-config.
+ FileData SecretNameReference `json:"fileData"`
+}
+
+// LDAPIdentityProvider provides identities for users authenticating using LDAP credentials
+type LDAPIdentityProvider struct {
+ // url is an RFC 2255 URL which specifies the LDAP search parameters to use.
+ // The syntax of the URL is:
+ // ldap://host:port/basedn?attribute?scope?filter
+ URL string `json:"url"`
+
+ // bindDN is an optional DN to bind with during the search phase.
+ // +optional
+ BindDN string `json:"bindDN"`
+
+ // bindPassword is an optional reference to a secret by name
+ // containing a password to bind with during the search phase.
+ // The key "bindPassword" is used to locate the data.
+ // If specified and the secret or expected key is not found, the identity provider is not honored.
+ // The namespace for this secret is openshift-config.
+ // +optional
+ BindPassword SecretNameReference `json:"bindPassword"`
+
+ // insecure, if true, indicates the connection should not use TLS
+ // WARNING: Should not be set to `true` with the URL scheme "ldaps://" as "ldaps://" URLs always
+ // attempt to connect using TLS, even when `insecure` is set to `true`
+ // When `true`, "ldap://" URLs connect insecurely. When `false`, "ldap://" URLs are upgraded to
+ // a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830.
+ Insecure bool `json:"insecure"`
+
+ // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle.
+ // It is used as a trust anchor to validate the TLS certificate presented by the remote server.
+ // The key "ca.crt" is used to locate the data.
+ // If specified and the config map or expected key is not found, the identity provider is not honored.
+ // If the specified ca data is not valid, the identity provider is not honored.
+ // If empty, the default system roots are used.
+ // The namespace for this config map is openshift-config.
+ // +optional
+ CA ConfigMapNameReference `json:"ca"`
+
+ // attributes maps LDAP attributes to identities
+ Attributes LDAPAttributeMapping `json:"attributes"`
+}
+
+// LDAPAttributeMapping maps LDAP attributes to OpenShift identity fields
+type LDAPAttributeMapping struct {
+ // id is the list of attributes whose values should be used as the user ID. Required.
+ // First non-empty attribute is used. At least one attribute is required. If none of the listed
+ // attributes have a value, authentication fails.
+ // LDAP standard identity attribute is "dn"
+ ID []string `json:"id"`
+
+ // preferredUsername is the list of attributes whose values should be used as the preferred username.
+ // LDAP standard login attribute is "uid"
+ // +optional
+ PreferredUsername []string `json:"preferredUsername,omitempty"`
+
+ // name is the list of attributes whose values should be used as the display name. Optional.
+ // If unspecified, no display name is set for the identity
+ // LDAP standard display name attribute is "cn"
+ // +optional
+ Name []string `json:"name,omitempty"`
+
+ // email is the list of attributes whose values should be used as the email address. Optional.
+ // If unspecified, no email is set for the identity
+ // +optional
+ Email []string `json:"email,omitempty"`
+}
+
+// KeystoneIdentityProvider provides identities for users authenticating using keystone password credentials
+type KeystoneIdentityProvider struct {
+ // OAuthRemoteConnectionInfo contains information about how to connect to the keystone server
+ OAuthRemoteConnectionInfo `json:",inline"`
+
+ // domainName is required for keystone v3
+ DomainName string `json:"domainName"`
+
+ // TODO if we ever add support for 3.11 to 4.0 upgrades, add this configuration
+ // useUsernameIdentity indicates that users should be authenticated by username, not keystone ID
+ // DEPRECATED - only use this option for legacy systems to ensure backwards compatibility
+ // +optional
+ // UseUsernameIdentity bool `json:"useUsernameIdentity"`
+}
+
+// RequestHeaderIdentityProvider provides identities for users authenticating using request header credentials
+type RequestHeaderIdentityProvider struct {
+ // loginURL is a URL to redirect unauthenticated /authorize requests to
+ // Unauthenticated requests from OAuth clients which expect interactive logins will be redirected here
+ // ${url} is replaced with the current URL, escaped to be safe in a query parameter
+ // https://www.example.com/sso-login?then=${url}
+ // ${query} is replaced with the current query string
+ // https://www.example.com/auth-proxy/oauth/authorize?${query}
+ // Required when login is set to true.
+ LoginURL string `json:"loginURL"`
+
+ // challengeURL is a URL to redirect unauthenticated /authorize requests to
+ // Unauthenticated requests from OAuth clients which expect WWW-Authenticate challenges will be
+ // redirected here.
+ // ${url} is replaced with the current URL, escaped to be safe in a query parameter
+ // https://www.example.com/sso-login?then=${url}
+ // ${query} is replaced with the current query string
+ // https://www.example.com/auth-proxy/oauth/authorize?${query}
+ // Required when challenge is set to true.
+ ChallengeURL string `json:"challengeURL"`
+
+ // ca is a required reference to a config map by name containing the PEM-encoded CA bundle.
+ // It is used as a trust anchor to validate the TLS certificate presented by the remote server.
+ // Specifically, it allows verification of incoming requests to prevent header spoofing.
+ // The key "ca.crt" is used to locate the data.
+ // If the config map or expected key is not found, the identity provider is not honored.
+ // If the specified ca data is not valid, the identity provider is not honored.
+ // The namespace for this config map is openshift-config.
+ ClientCA ConfigMapNameReference `json:"ca"`
+
+ // clientCommonNames is an optional list of common names to require a match from. If empty, any
+ // client certificate validated against the clientCA bundle is considered authoritative.
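+ // (Illustrative: clientCommonNames: ["auth-proxy.internal.example.com"]
+ // would additionally require the proxy's client certificate CN to match
+ // that hypothetical name.)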
+ // +optional
+ ClientCommonNames []string `json:"clientCommonNames,omitempty"`
+
+ // headers is the set of headers to check for identity information
+ Headers []string `json:"headers"`
+
+ // preferredUsernameHeaders is the set of headers to check for the preferred username
+ PreferredUsernameHeaders []string `json:"preferredUsernameHeaders"`
+
+ // nameHeaders is the set of headers to check for the display name
+ NameHeaders []string `json:"nameHeaders"`
+
+ // emailHeaders is the set of headers to check for the email address
+ EmailHeaders []string `json:"emailHeaders"`
+}
+
+// GitHubIdentityProvider provides identities for users authenticating using GitHub credentials
+type GitHubIdentityProvider struct {
+ // clientID is the oauth client ID
+ ClientID string `json:"clientID"`
+
+ // clientSecret is a required reference to the secret by name containing the oauth client secret.
+ // The key "clientSecret" is used to locate the data.
+ // If the secret or expected key is not found, the identity provider is not honored.
+ // The namespace for this secret is openshift-config.
+ ClientSecret SecretNameReference `json:"clientSecret"`
+
+ // organizations optionally restricts which organizations are allowed to log in
+ // +optional
+ Organizations []string `json:"organizations,omitempty"`
+
+ // teams optionally restricts which teams are allowed to log in. Format is <org>/<team>.
+ // +optional
+ Teams []string `json:"teams,omitempty"`
+
+ // hostname is the optional domain (e.g. "mycompany.com") for use with a hosted instance of
+ // GitHub Enterprise.
+ // It must match the GitHub Enterprise settings value configured at /setup/settings#hostname.
+ // +optional
+ Hostname string `json:"hostname"`
+
+ // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle.
+ // It is used as a trust anchor to validate the TLS certificate presented by the remote server.
+ // The key "ca.crt" is used to locate the data.
+ // If specified and the config map or expected key is not found, the identity provider is not honored.
+ // If the specified ca data is not valid, the identity provider is not honored.
+ // If empty, the default system roots are used.
+ // This can only be configured when hostname is set to a non-empty value.
+ // The namespace for this config map is openshift-config.
+ // +optional
+ CA ConfigMapNameReference `json:"ca"`
+}
+
+// GitLabIdentityProvider provides identities for users authenticating using GitLab credentials
+type GitLabIdentityProvider struct {
+ // clientID is the oauth client ID
+ ClientID string `json:"clientID"`
+
+ // clientSecret is a required reference to the secret by name containing the oauth client secret.
+ // The key "clientSecret" is used to locate the data.
+ // If the secret or expected key is not found, the identity provider is not honored.
+ // The namespace for this secret is openshift-config.
+ ClientSecret SecretNameReference `json:"clientSecret"`
+
+ // url is the oauth server base URL
+ URL string `json:"url"`
+
+ // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle.
+ // It is used as a trust anchor to validate the TLS certificate presented by the remote server.
+ // The key "ca.crt" is used to locate the data.
+ // If specified and the config map or expected key is not found, the identity provider is not honored.
+ // If the specified ca data is not valid, the identity provider is not honored.
+ // If empty, the default system roots are used.
+ // The namespace for this config map is openshift-config. + // +optional + CA ConfigMapNameReference `json:"ca"` +} + +// GoogleIdentityProvider provides identities for users authenticating using Google credentials +type GoogleIdentityProvider struct { + // clientID is the oauth client ID + ClientID string `json:"clientID"` + + // clientSecret is a required reference to the secret by name containing the oauth client secret. + // The key "clientSecret" is used to locate the data. + // If the secret or expected key is not found, the identity provider is not honored. + // The namespace for this secret is openshift-config. + ClientSecret SecretNameReference `json:"clientSecret"` + + // hostedDomain is the optional Google App domain (e.g. "mycompany.com") to restrict logins to + // +optional + HostedDomain string `json:"hostedDomain"` +} + +// OpenIDIdentityProvider provides identities for users authenticating using OpenID credentials +type OpenIDIdentityProvider struct { + // clientID is the oauth client ID + ClientID string `json:"clientID"` + + // clientSecret is a required reference to the secret by name containing the oauth client secret. + // The key "clientSecret" is used to locate the data. + // If the secret or expected key is not found, the identity provider is not honored. + // The namespace for this secret is openshift-config. + ClientSecret SecretNameReference `json:"clientSecret"` + + // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. + // It is used as a trust anchor to validate the TLS certificate presented by the remote server. + // The key "ca.crt" is used to locate the data. + // If specified and the config map or expected key is not found, the identity provider is not honored. + // If the specified ca data is not valid, the identity provider is not honored. + // If empty, the default system roots are used. + // The namespace for this config map is openshift-config. + // +optional + CA ConfigMapNameReference `json:"ca"` + + // extraScopes are any scopes to request in addition to the standard "openid" scope. + // +optional + ExtraScopes []string `json:"extraScopes,omitempty"` + + // extraAuthorizeParameters are any custom parameters to add to the authorize request. + // +optional + ExtraAuthorizeParameters map[string]string `json:"extraAuthorizeParameters,omitempty"` + + // issuer is the URL that the OpenID Provider asserts as its Issuer Identifier. + // It must use the https scheme with no query or fragment component. + Issuer string `json:"issuer"` + + // claims mappings + Claims OpenIDClaims `json:"claims"` +} + +// UserIDClaim is the claim used to provide a stable identifier for OIDC identities. +// Per http://openid.net/specs/openid-connect-core-1_0.html#ClaimStability +// "The sub (subject) and iss (issuer) Claims, used together, are the only Claims that an RP can +// rely upon as a stable identifier for the End-User, since the sub Claim MUST be locally unique +// and never reassigned within the Issuer for a particular End-User, as described in Section 2. +// Therefore, the only guaranteed unique identifier for a given End-User is the combination of the +// iss Claim and the sub Claim." +const UserIDClaim = "sub" + +// OpenIDClaims contains a list of OpenID claims to use when authenticating with an OpenID identity provider +type OpenIDClaims struct { + // preferredUsername is the list of claims whose values should be used as the preferred username. 
+ // If unspecified, the preferred username is determined from the value of the sub claim + // +optional + PreferredUsername []string `json:"preferredUsername,omitempty"` + + // name is the list of claims whose values should be used as the display name. Optional. + // If unspecified, no display name is set for the identity + // +optional + Name []string `json:"name,omitempty"` + + // email is the list of claims whose values should be used as the email address. Optional. + // If unspecified, no email is set for the identity + // +optional + Email []string `json:"email,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type OAuthList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []OAuth `json:"items"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_operatorhub.go b/vendor/github.com/openshift/api/config/v1/types_operatorhub.go new file mode 100644 index 0000000000..1b2b7f82e9 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_operatorhub.go @@ -0,0 +1,79 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// OperatorHubSpec defines the desired state of OperatorHub +type OperatorHubSpec struct { + // disableAllDefaultSources allows you to disable all the default hub + // sources. If this is true, a specific entry in sources can be used to + // enable a default source. If this is false, a specific entry in + // sources can be used to disable or enable a default source. + // +optional + DisableAllDefaultSources bool `json:"disableAllDefaultSources,omitempty"` + // sources is the list of default hub sources and their configuration. + // If the list is empty, it implies that the default hub sources are + // enabled on the cluster unless disableAllDefaultSources is true. + // If disableAllDefaultSources is true and sources is not empty, + // the configuration present in sources will take precedence. The list of + // default hub sources and their current state will always be reflected in + // the status block. + // +optional + Sources []HubSource `json:"sources,omitempty"` +} + +// OperatorHubStatus defines the observed state of OperatorHub. The current +// state of the default hub sources will always be reflected here. +type OperatorHubStatus struct { + // sources encapsulates the result of applying the configuration for each + // hub source + Sources []HubSourceStatus `json:"sources,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OperatorHub is the Schema for the operatorhubs API. It can be used to change +// the state of the default hub sources for OperatorHub on the cluster from +// enabled to disabled and vice versa. 
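+//
+// For illustration (the source name below is one common default, but the
+// set of defaults may vary by cluster), disabling everything except a
+// single source could look like this (yaml):
+//
+//   apiVersion: config.openshift.io/v1
+//   kind: OperatorHub
+//   metadata:
+//     name: cluster
+//   spec:
+//     disableAllDefaultSources: true
+//     sources:
+//     - name: community-operators
+//       disabled: false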
+// +kubebuilder:subresource:status
+// +genclient
+// +genclient:nonNamespaced
+type OperatorHub struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata"`
+
+ Spec OperatorHubSpec `json:"spec"`
+ Status OperatorHubStatus `json:"status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OperatorHubList contains a list of OperatorHub
+type OperatorHubList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+ Items []OperatorHub `json:"items"`
+}
+
+// HubSource is used to specify the hub source and its configuration
+type HubSource struct {
+ // name is the name of one of the default hub sources
+ // +kubebuilder:validation:MaxLength=253
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:Required
+ Name string `json:"name"`
+ // disabled is used to disable a default hub source on cluster
+ // +kubebuilder:validation:Required
+ Disabled bool `json:"disabled"`
+}
+
+// HubSourceStatus is used to reflect the current state of applying the
+// configuration to a default source
+type HubSourceStatus struct {
+ HubSource `json:",omitempty"`
+ // status indicates success or failure in applying the configuration
+ Status string `json:"status,omitempty"`
+ // message provides more information regarding failures
+ Message string `json:"message,omitempty"`
+}
diff --git a/vendor/github.com/openshift/api/config/v1/types_project.go b/vendor/github.com/openshift/api/config/v1/types_project.go
new file mode 100644
index 0000000000..244ce3ef8f
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_project.go
@@ -0,0 +1,54 @@
+package v1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Project holds cluster-wide information about Project. The canonical name is `cluster`.
+type Project struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec holds user settable values for configuration
+ // +kubebuilder:validation:Required
+ // +required
+ Spec ProjectSpec `json:"spec"`
+ // status holds observed values from the cluster. They may not be overridden.
+ // +optional
+ Status ProjectStatus `json:"status"`
+}
+
+// TemplateReference references a template in a specific namespace.
+// The namespace must be specified at the point of use.
+type TemplateReference struct {
+ // name is the metadata.name of the referenced project request template
+ Name string `json:"name"`
+}
+
+// ProjectSpec holds the project creation configuration.
+type ProjectSpec struct {
+ // projectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest API endpoint
+ // +optional
+ ProjectRequestMessage string `json:"projectRequestMessage"`
+
+ // projectRequestTemplate is the template to use for creating projects in response to projectrequest.
+ // This must point to a template in 'openshift-config' namespace. It is optional.
+ // If it is not specified, a default template is used.
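+ // (Illustrative: projectRequestTemplate: { name: project-request } would
+ // reference a Template object named "project-request" in the
+ // openshift-config namespace; the name shown is hypothetical.)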
+ // + // +optional + ProjectRequestTemplate TemplateReference `json:"projectRequestTemplate"` +} + +type ProjectStatus struct { +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type ProjectList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Project `json:"items"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_proxy.go b/vendor/github.com/openshift/api/config/v1/types_proxy.go new file mode 100644 index 0000000000..211e501e08 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_proxy.go @@ -0,0 +1,94 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Proxy holds cluster-wide information on how to configure default proxies for the cluster. The canonical name is `cluster` +type Proxy struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec holds user-settable values for the proxy configuration + // +kubebuilder:validation:Required + // +required + Spec ProxySpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status ProxyStatus `json:"status"` +} + +// ProxySpec contains cluster proxy creation configuration. +type ProxySpec struct { + // httpProxy is the URL of the proxy for HTTP requests. Empty means unset and will not result in an env var. + // +optional + HTTPProxy string `json:"httpProxy,omitempty"` + + // httpsProxy is the URL of the proxy for HTTPS requests. Empty means unset and will not result in an env var. + // +optional + HTTPSProxy string `json:"httpsProxy,omitempty"` + + // noProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used. + // Empty means unset and will not result in an env var. + // +optional + NoProxy string `json:"noProxy,omitempty"` + + // readinessEndpoints is a list of endpoints used to verify readiness of the proxy. + // +optional + ReadinessEndpoints []string `json:"readinessEndpoints,omitempty"` + + // trustedCA is a reference to a ConfigMap containing a CA certificate bundle. + // The trustedCA field should only be consumed by a proxy validator. The + // validator is responsible for reading the certificate bundle from the required + // key "ca-bundle.crt", merging it with the system default trust bundle, + // and writing the merged trust bundle to a ConfigMap named "trusted-ca-bundle" + // in the "openshift-config-managed" namespace. Clients that expect to make + // proxy connections must use the trusted-ca-bundle for all HTTPS requests to + // the proxy, and may use the trusted-ca-bundle for non-proxy HTTPS requests as + // well. + // + // The namespace for the ConfigMap referenced by trustedCA is + // "openshift-config". Here is an example ConfigMap (in yaml): + // + // apiVersion: v1 + // kind: ConfigMap + // metadata: + // name: user-ca-bundle + // namespace: openshift-config + // data: + // ca-bundle.crt: | + // -----BEGIN CERTIFICATE----- + // Custom CA certificate bundle. + // -----END CERTIFICATE----- + // + // +optional + TrustedCA ConfigMapNameReference `json:"trustedCA,omitempty"` +} + +// ProxyStatus shows current known state of the cluster proxy. +type ProxyStatus struct { + // httpProxy is the URL of the proxy for HTTP requests. 
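+ // For example (illustrative value): "http://proxy.example.com:3128".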
+ // +optional
+ HTTPProxy string `json:"httpProxy,omitempty"`
+
+ // httpsProxy is the URL of the proxy for HTTPS requests.
+ // +optional
+ HTTPSProxy string `json:"httpsProxy,omitempty"`
+
+ // noProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used.
+ // +optional
+ NoProxy string `json:"noProxy,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+type ProxyList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ Items []Proxy `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/config/v1/types_scheduling.go b/vendor/github.com/openshift/api/config/v1/types_scheduling.go
new file mode 100644
index 0000000000..570f8affce
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_scheduling.go
@@ -0,0 +1,100 @@
+package v1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Scheduler holds cluster-wide config information to run the Kubernetes Scheduler
+// and influence its placement decisions. The canonical name for this config is `cluster`.
+type Scheduler struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec holds user settable values for configuration
+ // +kubebuilder:validation:Required
+ // +required
+ Spec SchedulerSpec `json:"spec"`
+ // status holds observed values from the cluster. They may not be overridden.
+ // +optional
+ Status SchedulerStatus `json:"status"`
+}
+
+type SchedulerSpec struct {
+ // DEPRECATED: the scheduler Policy API has been deprecated and will be removed in a future release.
+ // policy is a reference to a ConfigMap containing scheduler policy which has
+ // user specified predicates and priorities. If this ConfigMap is not available,
+ // the scheduler will default to using the DefaultAlgorithmProvider.
+ // The namespace for this configmap is openshift-config.
+ // +optional
+ Policy ConfigMapNameReference `json:"policy,omitempty"`
+ // profile sets which scheduling profile should be set in order to configure scheduling
+ // decisions for new pods.
+ //
+ // Valid values are "LowNodeUtilization", "HighNodeUtilization", "NoScoring"
+ // Defaults to "LowNodeUtilization"
+ // +optional
+ Profile SchedulerProfile `json:"profile,omitempty"`
+ // defaultNodeSelector helps set the cluster-wide default node selector to
+ // restrict pod placement to specific nodes. This is applied to the pods
+ // created in all namespaces and creates an intersection with any existing
+ // nodeSelectors already set on a pod, additionally constraining that pod's selector.
+ // For example,
+ // defaultNodeSelector: "type=user-node,region=east" would set nodeSelector
+ // field in pod spec to "type=user-node,region=east" to all pods created
+ // in all namespaces. Namespaces having project-wide node selectors won't be
+ // impacted even if this field is set. This adds an annotation section to
+ // the namespace.
+ // For example, if a new namespace is created with
+ // node-selector='type=user-node,region=east',
+ // the annotation openshift.io/node-selector: type=user-node,region=east
+ // gets added to the project. When the openshift.io/node-selector annotation
+ // is set on the project the value is used in preference to the value we are setting
+ // for defaultNodeSelector field.
+ // For instance,
+ // openshift.io/node-selector: "type=user-node,region=west" means
+ // that the default of "type=user-node,region=east" set in defaultNodeSelector
+ // would not be applied.
+ // +optional
+ DefaultNodeSelector string `json:"defaultNodeSelector,omitempty"`
+ // MastersSchedulable allows master nodes to be schedulable. When this flag is
+ // turned on, all the master nodes in the cluster will be made schedulable,
+ // so that workload pods can run on them. The default value for this field is false,
+ // meaning none of the master nodes are schedulable.
+ // Important Note: Once the workload pods start running on the master nodes,
+ // extreme care must be taken to ensure that cluster-critical control plane components
+ // are not impacted.
+ // Please turn on this field after doing due diligence.
+ // +optional
+ MastersSchedulable bool `json:"mastersSchedulable"`
+}
+
+// +kubebuilder:validation:Enum="";LowNodeUtilization;HighNodeUtilization;NoScoring
+type SchedulerProfile string
+
+var (
+ // LowNodeUtilization is the default, and defines a scheduling profile which prefers to
+ // spread pods evenly among nodes targeting low resource consumption on each node.
+ LowNodeUtilization SchedulerProfile = "LowNodeUtilization"
+
+ // HighNodeUtilization defines a scheduling profile which packs as many pods as possible onto
+ // as few nodes as possible targeting a small node count but high resource usage on each node.
+ HighNodeUtilization SchedulerProfile = "HighNodeUtilization"
+
+ // NoScoring defines a scheduling profile which tries to provide lower-latency scheduling
+ // at the expense of potentially less optimal pod placement decisions.
+ NoScoring SchedulerProfile = "NoScoring"
+)
+
+type SchedulerStatus struct {
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+type SchedulerList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ Items []Scheduler `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go b/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go
new file mode 100644
index 0000000000..9dbacb9966
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go
@@ -0,0 +1,262 @@
+package v1
+
+// TLSSecurityProfile defines the schema for a TLS security profile. This object
+// is used by operators to apply TLS security settings to operands.
+// +union
+type TLSSecurityProfile struct {
+ // type is one of Old, Intermediate, Modern or Custom. Custom provides
+ // the ability to specify individual TLS security profile parameters.
+ // Old, Intermediate and Modern are TLS security profiles based on:
+ //
+ // https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations
+ //
+ // The profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers
+ // are found to be insecure. Depending on precisely which ciphers are available to a process, the list may be
+ // reduced.
+ //
+ // Note that the Modern profile is currently not supported because it is not
+ // yet well adopted by common software libraries.
+ // + // +unionDiscriminator + // +optional + Type TLSProfileType `json:"type"` + // old is a TLS security profile based on: + // + // https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility + // + // and looks like this (yaml): + // + // ciphers: + // - TLS_AES_128_GCM_SHA256 + // - TLS_AES_256_GCM_SHA384 + // - TLS_CHACHA20_POLY1305_SHA256 + // - ECDHE-ECDSA-AES128-GCM-SHA256 + // - ECDHE-RSA-AES128-GCM-SHA256 + // - ECDHE-ECDSA-AES256-GCM-SHA384 + // - ECDHE-RSA-AES256-GCM-SHA384 + // - ECDHE-ECDSA-CHACHA20-POLY1305 + // - ECDHE-RSA-CHACHA20-POLY1305 + // - DHE-RSA-AES128-GCM-SHA256 + // - DHE-RSA-AES256-GCM-SHA384 + // - DHE-RSA-CHACHA20-POLY1305 + // - ECDHE-ECDSA-AES128-SHA256 + // - ECDHE-RSA-AES128-SHA256 + // - ECDHE-ECDSA-AES128-SHA + // - ECDHE-RSA-AES128-SHA + // - ECDHE-ECDSA-AES256-SHA384 + // - ECDHE-RSA-AES256-SHA384 + // - ECDHE-ECDSA-AES256-SHA + // - ECDHE-RSA-AES256-SHA + // - DHE-RSA-AES128-SHA256 + // - DHE-RSA-AES256-SHA256 + // - AES128-GCM-SHA256 + // - AES256-GCM-SHA384 + // - AES128-SHA256 + // - AES256-SHA256 + // - AES128-SHA + // - AES256-SHA + // - DES-CBC3-SHA + // minTLSVersion: TLSv1.0 + // + // +optional + // +nullable + Old *OldTLSProfile `json:"old,omitempty"` + // intermediate is a TLS security profile based on: + // + // https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29 + // + // and looks like this (yaml): + // + // ciphers: + // - TLS_AES_128_GCM_SHA256 + // - TLS_AES_256_GCM_SHA384 + // - TLS_CHACHA20_POLY1305_SHA256 + // - ECDHE-ECDSA-AES128-GCM-SHA256 + // - ECDHE-RSA-AES128-GCM-SHA256 + // - ECDHE-ECDSA-AES256-GCM-SHA384 + // - ECDHE-RSA-AES256-GCM-SHA384 + // - ECDHE-ECDSA-CHACHA20-POLY1305 + // - ECDHE-RSA-CHACHA20-POLY1305 + // - DHE-RSA-AES128-GCM-SHA256 + // - DHE-RSA-AES256-GCM-SHA384 + // minTLSVersion: TLSv1.2 + // + // +optional + // +nullable + Intermediate *IntermediateTLSProfile `json:"intermediate,omitempty"` + // modern is a TLS security profile based on: + // + // https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility + // + // and looks like this (yaml): + // + // ciphers: + // - TLS_AES_128_GCM_SHA256 + // - TLS_AES_256_GCM_SHA384 + // - TLS_CHACHA20_POLY1305_SHA256 + // minTLSVersion: TLSv1.3 + // + // NOTE: Currently unsupported. + // + // +optional + // +nullable + Modern *ModernTLSProfile `json:"modern,omitempty"` + // custom is a user-defined TLS security profile. Be extremely careful using a custom + // profile as invalid configurations can be catastrophic. An example custom profile + // looks like this: + // + // ciphers: + // - ECDHE-ECDSA-CHACHA20-POLY1305 + // - ECDHE-RSA-CHACHA20-POLY1305 + // - ECDHE-RSA-AES128-GCM-SHA256 + // - ECDHE-ECDSA-AES128-GCM-SHA256 + // minTLSVersion: TLSv1.1 + // + // +optional + // +nullable + Custom *CustomTLSProfile `json:"custom,omitempty"` +} + +// OldTLSProfile is a TLS security profile based on: +// https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility +type OldTLSProfile struct{} + +// IntermediateTLSProfile is a TLS security profile based on: +// https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28default.29 +type IntermediateTLSProfile struct{} + +// ModernTLSProfile is a TLS security profile based on: +// https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility +type ModernTLSProfile struct{} + +// CustomTLSProfile is a user-defined TLS security profile. 
Be extremely careful
+// using a custom TLS profile as invalid configurations can be catastrophic.
+type CustomTLSProfile struct {
+ TLSProfileSpec `json:",inline"`
+}
+
+// TLSProfileType defines a TLS security profile type.
+// +kubebuilder:validation:Enum=Old;Intermediate;Modern;Custom
+type TLSProfileType string
+
+const (
+ // TLSProfileOldType is a TLS security profile based on:
+ // https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility
+ TLSProfileOldType TLSProfileType = "Old"
+ // TLSProfileIntermediateType is a TLS security profile based on:
+ // https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28default.29
+ TLSProfileIntermediateType TLSProfileType = "Intermediate"
+ // TLSProfileModernType is a TLS security profile based on:
+ // https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility
+ TLSProfileModernType TLSProfileType = "Modern"
+ // TLSProfileCustomType is a TLS security profile that allows for user-defined parameters.
+ TLSProfileCustomType TLSProfileType = "Custom"
+)
+
+// TLSProfileSpec is the desired behavior of a TLSSecurityProfile.
+type TLSProfileSpec struct {
+ // ciphers is used to specify the cipher algorithms that are negotiated
+ // during the TLS handshake. Operators may remove entries their operands
+ // do not support. For example, to use DES-CBC3-SHA (yaml):
+ //
+ // ciphers:
+ // - DES-CBC3-SHA
+ //
+ Ciphers []string `json:"ciphers"`
+ // minTLSVersion is used to specify the minimal version of the TLS protocol
+ // that is negotiated during the TLS handshake. For example, to use TLS
+ // versions 1.1, 1.2 and 1.3 (yaml):
+ //
+ // minTLSVersion: TLSv1.1
+ //
+ // NOTE: currently the highest minTLSVersion allowed is VersionTLS12
+ //
+ MinTLSVersion TLSProtocolVersion `json:"minTLSVersion"`
+}
+
+// TLSProtocolVersion is a way to specify the protocol version used for TLS connections.
+// Protocol versions are based on the following most common TLS configurations:
+//
+// https://ssl-config.mozilla.org/
+//
+// Note that SSLv3.0 is not a supported protocol version due to well known
+// vulnerabilities such as POODLE: https://en.wikipedia.org/wiki/POODLE
+// +kubebuilder:validation:Enum=VersionTLS10;VersionTLS11;VersionTLS12;VersionTLS13
+type TLSProtocolVersion string
+
+const (
+ // VersionTLS10 is version 1.0 of the TLS security protocol.
+ VersionTLS10 TLSProtocolVersion = "VersionTLS10"
+ // VersionTLS11 is version 1.1 of the TLS security protocol.
+ VersionTLS11 TLSProtocolVersion = "VersionTLS11"
+ // VersionTLS12 is version 1.2 of the TLS security protocol.
+ VersionTLS12 TLSProtocolVersion = "VersionTLS12"
+ // VersionTLS13 is version 1.3 of the TLS security protocol.
+ VersionTLS13 TLSProtocolVersion = "VersionTLS13"
+)
+
+// TLSProfiles contains a map of TLSProfileType names to TLSProfileSpec.
+//
+// NOTE: The caller needs to make sure to check that these constants are valid for their binary. Not all
+// entries map to values for all binaries. In the case of ties, the kube-apiserver wins. Do not fail,
+// just be sure to whitelist only and everything will be ok.
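+//
+// A consumer might resolve a profile to concrete settings roughly like this
+// (an illustrative sketch only; `serverConfig` is a hypothetical target):
+//
+//   spec := TLSProfiles[TLSProfileIntermediateType]
+//   serverConfig.CipherSuites = spec.Ciphers
+//   serverConfig.MinTLSVersion = string(spec.MinTLSVersion)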
+var TLSProfiles = map[TLSProfileType]*TLSProfileSpec{ + TLSProfileOldType: { + Ciphers: []string{ + "TLS_AES_128_GCM_SHA256", + "TLS_AES_256_GCM_SHA384", + "TLS_CHACHA20_POLY1305_SHA256", + "ECDHE-ECDSA-AES128-GCM-SHA256", + "ECDHE-RSA-AES128-GCM-SHA256", + "ECDHE-ECDSA-AES256-GCM-SHA384", + "ECDHE-RSA-AES256-GCM-SHA384", + "ECDHE-ECDSA-CHACHA20-POLY1305", + "ECDHE-RSA-CHACHA20-POLY1305", + "DHE-RSA-AES128-GCM-SHA256", + "DHE-RSA-AES256-GCM-SHA384", + "DHE-RSA-CHACHA20-POLY1305", + "ECDHE-ECDSA-AES128-SHA256", + "ECDHE-RSA-AES128-SHA256", + "ECDHE-ECDSA-AES128-SHA", + "ECDHE-RSA-AES128-SHA", + "ECDHE-ECDSA-AES256-SHA384", + "ECDHE-RSA-AES256-SHA384", + "ECDHE-ECDSA-AES256-SHA", + "ECDHE-RSA-AES256-SHA", + "DHE-RSA-AES128-SHA256", + "DHE-RSA-AES256-SHA256", + "AES128-GCM-SHA256", + "AES256-GCM-SHA384", + "AES128-SHA256", + "AES256-SHA256", + "AES128-SHA", + "AES256-SHA", + "DES-CBC3-SHA", + }, + MinTLSVersion: VersionTLS10, + }, + TLSProfileIntermediateType: { + Ciphers: []string{ + "TLS_AES_128_GCM_SHA256", + "TLS_AES_256_GCM_SHA384", + "TLS_CHACHA20_POLY1305_SHA256", + "ECDHE-ECDSA-AES128-GCM-SHA256", + "ECDHE-RSA-AES128-GCM-SHA256", + "ECDHE-ECDSA-AES256-GCM-SHA384", + "ECDHE-RSA-AES256-GCM-SHA384", + "ECDHE-ECDSA-CHACHA20-POLY1305", + "ECDHE-RSA-CHACHA20-POLY1305", + "DHE-RSA-AES128-GCM-SHA256", + "DHE-RSA-AES256-GCM-SHA384", + }, + MinTLSVersion: VersionTLS12, + }, + TLSProfileModernType: { + Ciphers: []string{ + "TLS_AES_128_GCM_SHA256", + "TLS_AES_256_GCM_SHA384", + "TLS_CHACHA20_POLY1305_SHA256", + }, + MinTLSVersion: VersionTLS13, + }, +} diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..cb933dac08 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go @@ -0,0 +1,3889 @@ +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIServer) DeepCopyInto(out *APIServer) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServer. +func (in *APIServer) DeepCopy() *APIServer { + if in == nil { + return nil + } + out := new(APIServer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *APIServer) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIServerEncryption) DeepCopyInto(out *APIServerEncryption) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerEncryption. +func (in *APIServerEncryption) DeepCopy() *APIServerEncryption { + if in == nil { + return nil + } + out := new(APIServerEncryption) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *APIServerList) DeepCopyInto(out *APIServerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]APIServer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerList. +func (in *APIServerList) DeepCopy() *APIServerList { + if in == nil { + return nil + } + out := new(APIServerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *APIServerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIServerNamedServingCert) DeepCopyInto(out *APIServerNamedServingCert) { + *out = *in + if in.Names != nil { + in, out := &in.Names, &out.Names + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.ServingCertificate = in.ServingCertificate + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerNamedServingCert. +func (in *APIServerNamedServingCert) DeepCopy() *APIServerNamedServingCert { + if in == nil { + return nil + } + out := new(APIServerNamedServingCert) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIServerServingCerts) DeepCopyInto(out *APIServerServingCerts) { + *out = *in + if in.NamedCertificates != nil { + in, out := &in.NamedCertificates, &out.NamedCertificates + *out = make([]APIServerNamedServingCert, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerServingCerts. +func (in *APIServerServingCerts) DeepCopy() *APIServerServingCerts { + if in == nil { + return nil + } + out := new(APIServerServingCerts) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIServerSpec) DeepCopyInto(out *APIServerSpec) { + *out = *in + in.ServingCerts.DeepCopyInto(&out.ServingCerts) + out.ClientCA = in.ClientCA + if in.AdditionalCORSAllowedOrigins != nil { + in, out := &in.AdditionalCORSAllowedOrigins, &out.AdditionalCORSAllowedOrigins + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.Encryption = in.Encryption + if in.TLSSecurityProfile != nil { + in, out := &in.TLSSecurityProfile, &out.TLSSecurityProfile + *out = new(TLSSecurityProfile) + (*in).DeepCopyInto(*out) + } + out.Audit = in.Audit + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerSpec. +func (in *APIServerSpec) DeepCopy() *APIServerSpec { + if in == nil { + return nil + } + out := new(APIServerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIServerStatus) DeepCopyInto(out *APIServerStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerStatus. 
+func (in *APIServerStatus) DeepCopy() *APIServerStatus { + if in == nil { + return nil + } + out := new(APIServerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSPlatformSpec) DeepCopyInto(out *AWSPlatformSpec) { + *out = *in + if in.ServiceEndpoints != nil { + in, out := &in.ServiceEndpoints, &out.ServiceEndpoints + *out = make([]AWSServiceEndpoint, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSPlatformSpec. +func (in *AWSPlatformSpec) DeepCopy() *AWSPlatformSpec { + if in == nil { + return nil + } + out := new(AWSPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSPlatformStatus) DeepCopyInto(out *AWSPlatformStatus) { + *out = *in + if in.ServiceEndpoints != nil { + in, out := &in.ServiceEndpoints, &out.ServiceEndpoints + *out = make([]AWSServiceEndpoint, len(*in)) + copy(*out, *in) + } + if in.ResourceTags != nil { + in, out := &in.ResourceTags, &out.ResourceTags + *out = make([]AWSResourceTag, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSPlatformStatus. +func (in *AWSPlatformStatus) DeepCopy() *AWSPlatformStatus { + if in == nil { + return nil + } + out := new(AWSPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSResourceTag) DeepCopyInto(out *AWSResourceTag) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSResourceTag. +func (in *AWSResourceTag) DeepCopy() *AWSResourceTag { + if in == nil { + return nil + } + out := new(AWSResourceTag) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSServiceEndpoint) DeepCopyInto(out *AWSServiceEndpoint) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSServiceEndpoint. +func (in *AWSServiceEndpoint) DeepCopy() *AWSServiceEndpoint { + if in == nil { + return nil + } + out := new(AWSServiceEndpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdmissionConfig) DeepCopyInto(out *AdmissionConfig) { + *out = *in + if in.PluginConfig != nil { + in, out := &in.PluginConfig, &out.PluginConfig + *out = make(map[string]AdmissionPluginConfig, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + if in.EnabledAdmissionPlugins != nil { + in, out := &in.EnabledAdmissionPlugins, &out.EnabledAdmissionPlugins + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DisabledAdmissionPlugins != nil { + in, out := &in.DisabledAdmissionPlugins, &out.DisabledAdmissionPlugins + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionConfig. 
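[Editor's note, not part of the patch] Everything in zz_generated.deepcopy.go follows one deepcopy-gen pattern: DeepCopyInto does the field-by-field work, DeepCopy allocates and delegates, and DeepCopyObject (emitted only for top-level API objects) satisfies runtime.Object so clients and informers can clone them. Note also why some slices use the builtin copy (value-only elements, e.g. AWSServiceEndpoint) while others loop with DeepCopyInto (elements carrying maps/slices/pointers). A hand-written sketch of the same pattern, with hypothetical types:

// Illustrative sketch only: the deepcopy-gen pattern written by hand.
package main

import "fmt"

type Child struct {
	Tags map[string]string // reference type: needs a real deep copy
}

func (in *Child) DeepCopyInto(out *Child) {
	*out = *in
	if in.Tags != nil {
		out.Tags = make(map[string]string, len(in.Tags))
		for k, v := range in.Tags {
			out.Tags[k] = v
		}
	}
}

type Parent struct {
	Names    []string // value elements: builtin copy suffices
	Children []Child  // reference-bearing elements: copy elementwise
}

func (in *Parent) DeepCopyInto(out *Parent) {
	*out = *in
	if in.Names != nil {
		out.Names = make([]string, len(in.Names))
		copy(out.Names, in.Names)
	}
	if in.Children != nil {
		out.Children = make([]Child, len(in.Children))
		for i := range in.Children {
			in.Children[i].DeepCopyInto(&out.Children[i])
		}
	}
}

func (in *Parent) DeepCopy() *Parent {
	if in == nil {
		return nil
	}
	out := new(Parent)
	in.DeepCopyInto(out)
	return out
}

func main() {
	p := &Parent{Children: []Child{{Tags: map[string]string{"a": "1"}}}}
	q := p.DeepCopy()
	q.Children[0].Tags["a"] = "2"
	fmt.Println(p.Children[0].Tags["a"]) // still "1": the copy is deep
}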
+func (in *AdmissionConfig) DeepCopy() *AdmissionConfig { + if in == nil { + return nil + } + out := new(AdmissionConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdmissionPluginConfig) DeepCopyInto(out *AdmissionPluginConfig) { + *out = *in + in.Configuration.DeepCopyInto(&out.Configuration) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionPluginConfig. +func (in *AdmissionPluginConfig) DeepCopy() *AdmissionPluginConfig { + if in == nil { + return nil + } + out := new(AdmissionPluginConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Audit) DeepCopyInto(out *Audit) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Audit. +func (in *Audit) DeepCopy() *Audit { + if in == nil { + return nil + } + out := new(Audit) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuditConfig) DeepCopyInto(out *AuditConfig) { + *out = *in + in.PolicyConfiguration.DeepCopyInto(&out.PolicyConfiguration) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditConfig. +func (in *AuditConfig) DeepCopy() *AuditConfig { + if in == nil { + return nil + } + out := new(AuditConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Authentication) DeepCopyInto(out *Authentication) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Authentication. +func (in *Authentication) DeepCopy() *Authentication { + if in == nil { + return nil + } + out := new(Authentication) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Authentication) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationList) DeepCopyInto(out *AuthenticationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Authentication, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationList. +func (in *AuthenticationList) DeepCopy() *AuthenticationList { + if in == nil { + return nil + } + out := new(AuthenticationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *AuthenticationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationSpec) DeepCopyInto(out *AuthenticationSpec) { + *out = *in + out.OAuthMetadata = in.OAuthMetadata + if in.WebhookTokenAuthenticators != nil { + in, out := &in.WebhookTokenAuthenticators, &out.WebhookTokenAuthenticators + *out = make([]DeprecatedWebhookTokenAuthenticator, len(*in)) + copy(*out, *in) + } + if in.WebhookTokenAuthenticator != nil { + in, out := &in.WebhookTokenAuthenticator, &out.WebhookTokenAuthenticator + *out = new(WebhookTokenAuthenticator) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationSpec. +func (in *AuthenticationSpec) DeepCopy() *AuthenticationSpec { + if in == nil { + return nil + } + out := new(AuthenticationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationStatus) DeepCopyInto(out *AuthenticationStatus) { + *out = *in + out.IntegratedOAuthMetadata = in.IntegratedOAuthMetadata + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationStatus. +func (in *AuthenticationStatus) DeepCopy() *AuthenticationStatus { + if in == nil { + return nil + } + out := new(AuthenticationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzurePlatformSpec) DeepCopyInto(out *AzurePlatformSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzurePlatformSpec. +func (in *AzurePlatformSpec) DeepCopy() *AzurePlatformSpec { + if in == nil { + return nil + } + out := new(AzurePlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzurePlatformStatus) DeepCopyInto(out *AzurePlatformStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzurePlatformStatus. +func (in *AzurePlatformStatus) DeepCopy() *AzurePlatformStatus { + if in == nil { + return nil + } + out := new(AzurePlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BareMetalPlatformSpec) DeepCopyInto(out *BareMetalPlatformSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BareMetalPlatformSpec. +func (in *BareMetalPlatformSpec) DeepCopy() *BareMetalPlatformSpec { + if in == nil { + return nil + } + out := new(BareMetalPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BareMetalPlatformStatus) DeepCopyInto(out *BareMetalPlatformStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BareMetalPlatformStatus. 
+func (in *BareMetalPlatformStatus) DeepCopy() *BareMetalPlatformStatus { + if in == nil { + return nil + } + out := new(BareMetalPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BasicAuthIdentityProvider) DeepCopyInto(out *BasicAuthIdentityProvider) { + *out = *in + out.OAuthRemoteConnectionInfo = in.OAuthRemoteConnectionInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicAuthIdentityProvider. +func (in *BasicAuthIdentityProvider) DeepCopy() *BasicAuthIdentityProvider { + if in == nil { + return nil + } + out := new(BasicAuthIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Build) DeepCopyInto(out *Build) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Build. +func (in *Build) DeepCopy() *Build { + if in == nil { + return nil + } + out := new(Build) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Build) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildDefaults) DeepCopyInto(out *BuildDefaults) { + *out = *in + if in.DefaultProxy != nil { + in, out := &in.DefaultProxy, &out.DefaultProxy + *out = new(ProxySpec) + (*in).DeepCopyInto(*out) + } + if in.GitProxy != nil { + in, out := &in.GitProxy, &out.GitProxy + *out = new(ProxySpec) + (*in).DeepCopyInto(*out) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ImageLabels != nil { + in, out := &in.ImageLabels, &out.ImageLabels + *out = make([]ImageLabel, len(*in)) + copy(*out, *in) + } + in.Resources.DeepCopyInto(&out.Resources) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildDefaults. +func (in *BuildDefaults) DeepCopy() *BuildDefaults { + if in == nil { + return nil + } + out := new(BuildDefaults) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildList) DeepCopyInto(out *BuildList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Build, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildList. +func (in *BuildList) DeepCopy() *BuildList { + if in == nil { + return nil + } + out := new(BuildList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *BuildList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildOverrides) DeepCopyInto(out *BuildOverrides) { + *out = *in + if in.ImageLabels != nil { + in, out := &in.ImageLabels, &out.ImageLabels + *out = make([]ImageLabel, len(*in)) + copy(*out, *in) + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ForcePull != nil { + in, out := &in.ForcePull, &out.ForcePull + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildOverrides. +func (in *BuildOverrides) DeepCopy() *BuildOverrides { + if in == nil { + return nil + } + out := new(BuildOverrides) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildSpec) DeepCopyInto(out *BuildSpec) { + *out = *in + out.AdditionalTrustedCA = in.AdditionalTrustedCA + in.BuildDefaults.DeepCopyInto(&out.BuildDefaults) + in.BuildOverrides.DeepCopyInto(&out.BuildOverrides) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildSpec. +func (in *BuildSpec) DeepCopy() *BuildSpec { + if in == nil { + return nil + } + out := new(BuildSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertInfo) DeepCopyInto(out *CertInfo) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertInfo. +func (in *CertInfo) DeepCopy() *CertInfo { + if in == nil { + return nil + } + out := new(CertInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientConnectionOverrides) DeepCopyInto(out *ClientConnectionOverrides) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientConnectionOverrides. +func (in *ClientConnectionOverrides) DeepCopy() *ClientConnectionOverrides { + if in == nil { + return nil + } + out := new(ClientConnectionOverrides) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterNetworkEntry) DeepCopyInto(out *ClusterNetworkEntry) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworkEntry. +func (in *ClusterNetworkEntry) DeepCopy() *ClusterNetworkEntry { + if in == nil { + return nil + } + out := new(ClusterNetworkEntry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterOperator) DeepCopyInto(out *ClusterOperator) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperator. +func (in *ClusterOperator) DeepCopy() *ClusterOperator { + if in == nil { + return nil + } + out := new(ClusterOperator) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterOperator) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterOperatorList) DeepCopyInto(out *ClusterOperatorList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterOperator, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperatorList. +func (in *ClusterOperatorList) DeepCopy() *ClusterOperatorList { + if in == nil { + return nil + } + out := new(ClusterOperatorList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterOperatorList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterOperatorSpec) DeepCopyInto(out *ClusterOperatorSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperatorSpec. +func (in *ClusterOperatorSpec) DeepCopy() *ClusterOperatorSpec { + if in == nil { + return nil + } + out := new(ClusterOperatorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterOperatorStatus) DeepCopyInto(out *ClusterOperatorStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ClusterOperatorStatusCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]OperandVersion, len(*in)) + copy(*out, *in) + } + if in.RelatedObjects != nil { + in, out := &in.RelatedObjects, &out.RelatedObjects + *out = make([]ObjectReference, len(*in)) + copy(*out, *in) + } + in.Extension.DeepCopyInto(&out.Extension) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperatorStatus. +func (in *ClusterOperatorStatus) DeepCopy() *ClusterOperatorStatus { + if in == nil { + return nil + } + out := new(ClusterOperatorStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterOperatorStatusCondition) DeepCopyInto(out *ClusterOperatorStatusCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperatorStatusCondition. +func (in *ClusterOperatorStatusCondition) DeepCopy() *ClusterOperatorStatusCondition { + if in == nil { + return nil + } + out := new(ClusterOperatorStatusCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterVersion) DeepCopyInto(out *ClusterVersion) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersion. +func (in *ClusterVersion) DeepCopy() *ClusterVersion { + if in == nil { + return nil + } + out := new(ClusterVersion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterVersion) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterVersionList) DeepCopyInto(out *ClusterVersionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterVersion, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionList. +func (in *ClusterVersionList) DeepCopy() *ClusterVersionList { + if in == nil { + return nil + } + out := new(ClusterVersionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterVersionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterVersionSpec) DeepCopyInto(out *ClusterVersionSpec) { + *out = *in + if in.DesiredUpdate != nil { + in, out := &in.DesiredUpdate, &out.DesiredUpdate + *out = new(Update) + **out = **in + } + if in.Overrides != nil { + in, out := &in.Overrides, &out.Overrides + *out = make([]ComponentOverride, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionSpec. +func (in *ClusterVersionSpec) DeepCopy() *ClusterVersionSpec { + if in == nil { + return nil + } + out := new(ClusterVersionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
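[Editor's note, not part of the patch] The generator also distinguishes two pointer-copy forms, both visible in ClusterVersionSpec above: for a pointer to a struct with no reference-typed fields (DesiredUpdate *Update) it emits a plain dereferencing assignment, **out = **in; for a pointer to a struct that does contain slices or maps (e.g. TLSSecurityProfile earlier in the file) it must recurse via DeepCopyInto. A minimal sketch of the difference, with hypothetical types:

// Illustrative sketch only: the two pointer-copy forms deepcopy-gen emits.
package main

import "fmt"

type Flat struct{ Name string }    // no reference fields
type Deep struct{ Items []string } // contains a slice

func (in *Deep) DeepCopyInto(out *Deep) {
	*out = *in
	if in.Items != nil {
		out.Items = make([]string, len(in.Items))
		copy(out.Items, in.Items)
	}
}

type Spec struct {
	Flat *Flat
	Deep *Deep
}

func (in *Spec) DeepCopyInto(out *Spec) {
	*out = *in
	if in.Flat != nil {
		out.Flat = new(Flat)
		*out.Flat = *in.Flat // value copy is already deep here
	}
	if in.Deep != nil {
		out.Deep = new(Deep)
		in.Deep.DeepCopyInto(out.Deep) // must recurse: slice inside
	}
}

func main() {
	s := Spec{Flat: &Flat{"a"}, Deep: &Deep{Items: []string{"x"}}}
	var t Spec
	s.DeepCopyInto(&t)
	t.Deep.Items[0] = "y"
	fmt.Println(s.Deep.Items[0]) // "x": no shared backing array
}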
+func (in *ClusterVersionStatus) DeepCopyInto(out *ClusterVersionStatus) { + *out = *in + in.Desired.DeepCopyInto(&out.Desired) + if in.History != nil { + in, out := &in.History, &out.History + *out = make([]UpdateHistory, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ClusterOperatorStatusCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AvailableUpdates != nil { + in, out := &in.AvailableUpdates, &out.AvailableUpdates + *out = make([]Release, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionStatus. +func (in *ClusterVersionStatus) DeepCopy() *ClusterVersionStatus { + if in == nil { + return nil + } + out := new(ClusterVersionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComponentOverride) DeepCopyInto(out *ComponentOverride) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentOverride. +func (in *ComponentOverride) DeepCopy() *ComponentOverride { + if in == nil { + return nil + } + out := new(ComponentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComponentRouteSpec) DeepCopyInto(out *ComponentRouteSpec) { + *out = *in + out.ServingCertKeyPairSecret = in.ServingCertKeyPairSecret + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentRouteSpec. +func (in *ComponentRouteSpec) DeepCopy() *ComponentRouteSpec { + if in == nil { + return nil + } + out := new(ComponentRouteSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComponentRouteStatus) DeepCopyInto(out *ComponentRouteStatus) { + *out = *in + if in.ConsumingUsers != nil { + in, out := &in.ConsumingUsers, &out.ConsumingUsers + *out = make([]ConsumingUser, len(*in)) + copy(*out, *in) + } + if in.CurrentHostnames != nil { + in, out := &in.CurrentHostnames, &out.CurrentHostnames + *out = make([]Hostname, len(*in)) + copy(*out, *in) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RelatedObjects != nil { + in, out := &in.RelatedObjects, &out.RelatedObjects + *out = make([]ObjectReference, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentRouteStatus. +func (in *ComponentRouteStatus) DeepCopy() *ComponentRouteStatus { + if in == nil { + return nil + } + out := new(ComponentRouteStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapFileReference) DeepCopyInto(out *ConfigMapFileReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapFileReference. 
+func (in *ConfigMapFileReference) DeepCopy() *ConfigMapFileReference { + if in == nil { + return nil + } + out := new(ConfigMapFileReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapNameReference) DeepCopyInto(out *ConfigMapNameReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapNameReference. +func (in *ConfigMapNameReference) DeepCopy() *ConfigMapNameReference { + if in == nil { + return nil + } + out := new(ConfigMapNameReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Console) DeepCopyInto(out *Console) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Console. +func (in *Console) DeepCopy() *Console { + if in == nil { + return nil + } + out := new(Console) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Console) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleAuthentication) DeepCopyInto(out *ConsoleAuthentication) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleAuthentication. +func (in *ConsoleAuthentication) DeepCopy() *ConsoleAuthentication { + if in == nil { + return nil + } + out := new(ConsoleAuthentication) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleList) DeepCopyInto(out *ConsoleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Console, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleList. +func (in *ConsoleList) DeepCopy() *ConsoleList { + if in == nil { + return nil + } + out := new(ConsoleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConsoleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleSpec) DeepCopyInto(out *ConsoleSpec) { + *out = *in + out.Authentication = in.Authentication + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleSpec. +func (in *ConsoleSpec) DeepCopy() *ConsoleSpec { + if in == nil { + return nil + } + out := new(ConsoleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConsoleStatus) DeepCopyInto(out *ConsoleStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleStatus. +func (in *ConsoleStatus) DeepCopy() *ConsoleStatus { + if in == nil { + return nil + } + out := new(ConsoleStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomFeatureGates) DeepCopyInto(out *CustomFeatureGates) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomFeatureGates. +func (in *CustomFeatureGates) DeepCopy() *CustomFeatureGates { + if in == nil { + return nil + } + out := new(CustomFeatureGates) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomTLSProfile) DeepCopyInto(out *CustomTLSProfile) { + *out = *in + in.TLSProfileSpec.DeepCopyInto(&out.TLSProfileSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomTLSProfile. +func (in *CustomTLSProfile) DeepCopy() *CustomTLSProfile { + if in == nil { + return nil + } + out := new(CustomTLSProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNS) DeepCopyInto(out *DNS) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNS. +func (in *DNS) DeepCopy() *DNS { + if in == nil { + return nil + } + out := new(DNS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DNS) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSList) DeepCopyInto(out *DNSList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DNS, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSList. +func (in *DNSList) DeepCopy() *DNSList { + if in == nil { + return nil + } + out := new(DNSList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DNSList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DNSSpec) DeepCopyInto(out *DNSSpec) { + *out = *in + if in.PublicZone != nil { + in, out := &in.PublicZone, &out.PublicZone + *out = new(DNSZone) + (*in).DeepCopyInto(*out) + } + if in.PrivateZone != nil { + in, out := &in.PrivateZone, &out.PrivateZone + *out = new(DNSZone) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSSpec. +func (in *DNSSpec) DeepCopy() *DNSSpec { + if in == nil { + return nil + } + out := new(DNSSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSStatus) DeepCopyInto(out *DNSStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSStatus. +func (in *DNSStatus) DeepCopy() *DNSStatus { + if in == nil { + return nil + } + out := new(DNSStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSZone) DeepCopyInto(out *DNSZone) { + *out = *in + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSZone. +func (in *DNSZone) DeepCopy() *DNSZone { + if in == nil { + return nil + } + out := new(DNSZone) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DelegatedAuthentication) DeepCopyInto(out *DelegatedAuthentication) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DelegatedAuthentication. +func (in *DelegatedAuthentication) DeepCopy() *DelegatedAuthentication { + if in == nil { + return nil + } + out := new(DelegatedAuthentication) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DelegatedAuthorization) DeepCopyInto(out *DelegatedAuthorization) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DelegatedAuthorization. +func (in *DelegatedAuthorization) DeepCopy() *DelegatedAuthorization { + if in == nil { + return nil + } + out := new(DelegatedAuthorization) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeprecatedWebhookTokenAuthenticator) DeepCopyInto(out *DeprecatedWebhookTokenAuthenticator) { + *out = *in + out.KubeConfig = in.KubeConfig + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeprecatedWebhookTokenAuthenticator. +func (in *DeprecatedWebhookTokenAuthenticator) DeepCopy() *DeprecatedWebhookTokenAuthenticator { + if in == nil { + return nil + } + out := new(DeprecatedWebhookTokenAuthenticator) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EquinixMetalPlatformSpec) DeepCopyInto(out *EquinixMetalPlatformSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EquinixMetalPlatformSpec. +func (in *EquinixMetalPlatformSpec) DeepCopy() *EquinixMetalPlatformSpec { + if in == nil { + return nil + } + out := new(EquinixMetalPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EquinixMetalPlatformStatus) DeepCopyInto(out *EquinixMetalPlatformStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EquinixMetalPlatformStatus. +func (in *EquinixMetalPlatformStatus) DeepCopy() *EquinixMetalPlatformStatus { + if in == nil { + return nil + } + out := new(EquinixMetalPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EtcdConnectionInfo) DeepCopyInto(out *EtcdConnectionInfo) { + *out = *in + if in.URLs != nil { + in, out := &in.URLs, &out.URLs + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.CertInfo = in.CertInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdConnectionInfo. +func (in *EtcdConnectionInfo) DeepCopy() *EtcdConnectionInfo { + if in == nil { + return nil + } + out := new(EtcdConnectionInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EtcdStorageConfig) DeepCopyInto(out *EtcdStorageConfig) { + *out = *in + in.EtcdConnectionInfo.DeepCopyInto(&out.EtcdConnectionInfo) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdStorageConfig. +func (in *EtcdStorageConfig) DeepCopy() *EtcdStorageConfig { + if in == nil { + return nil + } + out := new(EtcdStorageConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalIPConfig) DeepCopyInto(out *ExternalIPConfig) { + *out = *in + if in.Policy != nil { + in, out := &in.Policy, &out.Policy + *out = new(ExternalIPPolicy) + (*in).DeepCopyInto(*out) + } + if in.AutoAssignCIDRs != nil { + in, out := &in.AutoAssignCIDRs, &out.AutoAssignCIDRs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalIPConfig. +func (in *ExternalIPConfig) DeepCopy() *ExternalIPConfig { + if in == nil { + return nil + } + out := new(ExternalIPConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalIPPolicy) DeepCopyInto(out *ExternalIPPolicy) { + *out = *in + if in.AllowedCIDRs != nil { + in, out := &in.AllowedCIDRs, &out.AllowedCIDRs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.RejectedCIDRs != nil { + in, out := &in.RejectedCIDRs, &out.RejectedCIDRs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalIPPolicy. 
+func (in *ExternalIPPolicy) DeepCopy() *ExternalIPPolicy { + if in == nil { + return nil + } + out := new(ExternalIPPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureGate) DeepCopyInto(out *FeatureGate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGate. +func (in *FeatureGate) DeepCopy() *FeatureGate { + if in == nil { + return nil + } + out := new(FeatureGate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FeatureGate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureGateEnabledDisabled) DeepCopyInto(out *FeatureGateEnabledDisabled) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateEnabledDisabled. +func (in *FeatureGateEnabledDisabled) DeepCopy() *FeatureGateEnabledDisabled { + if in == nil { + return nil + } + out := new(FeatureGateEnabledDisabled) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureGateList) DeepCopyInto(out *FeatureGateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FeatureGate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateList. +func (in *FeatureGateList) DeepCopy() *FeatureGateList { + if in == nil { + return nil + } + out := new(FeatureGateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FeatureGateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureGateSelection) DeepCopyInto(out *FeatureGateSelection) { + *out = *in + if in.CustomNoUpgrade != nil { + in, out := &in.CustomNoUpgrade, &out.CustomNoUpgrade + *out = new(CustomFeatureGates) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateSelection. +func (in *FeatureGateSelection) DeepCopy() *FeatureGateSelection { + if in == nil { + return nil + } + out := new(FeatureGateSelection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FeatureGateSpec) DeepCopyInto(out *FeatureGateSpec) { + *out = *in + in.FeatureGateSelection.DeepCopyInto(&out.FeatureGateSelection) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateSpec. +func (in *FeatureGateSpec) DeepCopy() *FeatureGateSpec { + if in == nil { + return nil + } + out := new(FeatureGateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureGateStatus) DeepCopyInto(out *FeatureGateStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateStatus. +func (in *FeatureGateStatus) DeepCopy() *FeatureGateStatus { + if in == nil { + return nil + } + out := new(FeatureGateStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPPlatformSpec) DeepCopyInto(out *GCPPlatformSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPPlatformSpec. +func (in *GCPPlatformSpec) DeepCopy() *GCPPlatformSpec { + if in == nil { + return nil + } + out := new(GCPPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPPlatformStatus) DeepCopyInto(out *GCPPlatformStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPPlatformStatus. +func (in *GCPPlatformStatus) DeepCopy() *GCPPlatformStatus { + if in == nil { + return nil + } + out := new(GCPPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GenericAPIServerConfig) DeepCopyInto(out *GenericAPIServerConfig) { + *out = *in + in.ServingInfo.DeepCopyInto(&out.ServingInfo) + if in.CORSAllowedOrigins != nil { + in, out := &in.CORSAllowedOrigins, &out.CORSAllowedOrigins + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.AuditConfig.DeepCopyInto(&out.AuditConfig) + in.StorageConfig.DeepCopyInto(&out.StorageConfig) + in.AdmissionConfig.DeepCopyInto(&out.AdmissionConfig) + out.KubeClientConfig = in.KubeClientConfig + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericAPIServerConfig. +func (in *GenericAPIServerConfig) DeepCopy() *GenericAPIServerConfig { + if in == nil { + return nil + } + out := new(GenericAPIServerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GenericControllerConfig) DeepCopyInto(out *GenericControllerConfig) { + *out = *in + in.ServingInfo.DeepCopyInto(&out.ServingInfo) + out.LeaderElection = in.LeaderElection + out.Authentication = in.Authentication + out.Authorization = in.Authorization + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericControllerConfig. 
+func (in *GenericControllerConfig) DeepCopy() *GenericControllerConfig { + if in == nil { + return nil + } + out := new(GenericControllerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitHubIdentityProvider) DeepCopyInto(out *GitHubIdentityProvider) { + *out = *in + out.ClientSecret = in.ClientSecret + if in.Organizations != nil { + in, out := &in.Organizations, &out.Organizations + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Teams != nil { + in, out := &in.Teams, &out.Teams + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.CA = in.CA + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitHubIdentityProvider. +func (in *GitHubIdentityProvider) DeepCopy() *GitHubIdentityProvider { + if in == nil { + return nil + } + out := new(GitHubIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitLabIdentityProvider) DeepCopyInto(out *GitLabIdentityProvider) { + *out = *in + out.ClientSecret = in.ClientSecret + out.CA = in.CA + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitLabIdentityProvider. +func (in *GitLabIdentityProvider) DeepCopy() *GitLabIdentityProvider { + if in == nil { + return nil + } + out := new(GitLabIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GoogleIdentityProvider) DeepCopyInto(out *GoogleIdentityProvider) { + *out = *in + out.ClientSecret = in.ClientSecret + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GoogleIdentityProvider. +func (in *GoogleIdentityProvider) DeepCopy() *GoogleIdentityProvider { + if in == nil { + return nil + } + out := new(GoogleIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTPasswdIdentityProvider) DeepCopyInto(out *HTPasswdIdentityProvider) { + *out = *in + out.FileData = in.FileData + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTPasswdIdentityProvider. +func (in *HTPasswdIdentityProvider) DeepCopy() *HTPasswdIdentityProvider { + if in == nil { + return nil + } + out := new(HTPasswdIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPServingInfo) DeepCopyInto(out *HTTPServingInfo) { + *out = *in + in.ServingInfo.DeepCopyInto(&out.ServingInfo) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPServingInfo. +func (in *HTTPServingInfo) DeepCopy() *HTTPServingInfo { + if in == nil { + return nil + } + out := new(HTTPServingInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HubSource) DeepCopyInto(out *HubSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HubSource. 
+func (in *HubSource) DeepCopy() *HubSource { + if in == nil { + return nil + } + out := new(HubSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HubSourceStatus) DeepCopyInto(out *HubSourceStatus) { + *out = *in + out.HubSource = in.HubSource + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HubSourceStatus. +func (in *HubSourceStatus) DeepCopy() *HubSourceStatus { + if in == nil { + return nil + } + out := new(HubSourceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IBMCloudPlatformSpec) DeepCopyInto(out *IBMCloudPlatformSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMCloudPlatformSpec. +func (in *IBMCloudPlatformSpec) DeepCopy() *IBMCloudPlatformSpec { + if in == nil { + return nil + } + out := new(IBMCloudPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IBMCloudPlatformStatus) DeepCopyInto(out *IBMCloudPlatformStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMCloudPlatformStatus. +func (in *IBMCloudPlatformStatus) DeepCopy() *IBMCloudPlatformStatus { + if in == nil { + return nil + } + out := new(IBMCloudPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityProvider) DeepCopyInto(out *IdentityProvider) { + *out = *in + in.IdentityProviderConfig.DeepCopyInto(&out.IdentityProviderConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityProvider. +func (in *IdentityProvider) DeepCopy() *IdentityProvider { + if in == nil { + return nil + } + out := new(IdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityProviderConfig) DeepCopyInto(out *IdentityProviderConfig) { + *out = *in + if in.BasicAuth != nil { + in, out := &in.BasicAuth, &out.BasicAuth + *out = new(BasicAuthIdentityProvider) + **out = **in + } + if in.GitHub != nil { + in, out := &in.GitHub, &out.GitHub + *out = new(GitHubIdentityProvider) + (*in).DeepCopyInto(*out) + } + if in.GitLab != nil { + in, out := &in.GitLab, &out.GitLab + *out = new(GitLabIdentityProvider) + **out = **in + } + if in.Google != nil { + in, out := &in.Google, &out.Google + *out = new(GoogleIdentityProvider) + **out = **in + } + if in.HTPasswd != nil { + in, out := &in.HTPasswd, &out.HTPasswd + *out = new(HTPasswdIdentityProvider) + **out = **in + } + if in.Keystone != nil { + in, out := &in.Keystone, &out.Keystone + *out = new(KeystoneIdentityProvider) + **out = **in + } + if in.LDAP != nil { + in, out := &in.LDAP, &out.LDAP + *out = new(LDAPIdentityProvider) + (*in).DeepCopyInto(*out) + } + if in.OpenID != nil { + in, out := &in.OpenID, &out.OpenID + *out = new(OpenIDIdentityProvider) + (*in).DeepCopyInto(*out) + } + if in.RequestHeader != nil { + in, out := &in.RequestHeader, &out.RequestHeader + *out = new(RequestHeaderIdentityProvider) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityProviderConfig. +func (in *IdentityProviderConfig) DeepCopy() *IdentityProviderConfig { + if in == nil { + return nil + } + out := new(IdentityProviderConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Image) DeepCopyInto(out *Image) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Image. +func (in *Image) DeepCopy() *Image { + if in == nil { + return nil + } + out := new(Image) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Image) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageLabel) DeepCopyInto(out *ImageLabel) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageLabel. +func (in *ImageLabel) DeepCopy() *ImageLabel { + if in == nil { + return nil + } + out := new(ImageLabel) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageList) DeepCopyInto(out *ImageList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Image, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageList. 
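[Editor's note, not part of the patch] IdentityProviderConfig above is a Kubernetes-style union: one optional pointer per provider variant, with the discriminator carried by a Type field in the real API. A consumer typically dispatches on whichever pointer is set, as in this sketch; the providerKind helper and the two variant structs are hypothetical, for illustration only:

// Illustrative sketch only: dispatching on a union-style config struct.
package main

import "fmt"

type BasicAuth struct{ URL string }
type GitHub struct{ Hostname string }

type IdentityProviderConfig struct {
	BasicAuth *BasicAuth
	GitHub    *GitHub
}

// providerKind reports which variant is populated (hypothetical helper).
func providerKind(c IdentityProviderConfig) string {
	switch {
	case c.BasicAuth != nil:
		return "BasicAuth"
	case c.GitHub != nil:
		return "GitHub"
	default:
		return "unset"
	}
}

func main() {
	cfg := IdentityProviderConfig{GitHub: &GitHub{Hostname: "github.example.com"}}
	fmt.Println(providerKind(cfg)) // GitHub
}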
+func (in *ImageList) DeepCopy() *ImageList { + if in == nil { + return nil + } + out := new(ImageList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageSpec) DeepCopyInto(out *ImageSpec) { + *out = *in + if in.AllowedRegistriesForImport != nil { + in, out := &in.AllowedRegistriesForImport, &out.AllowedRegistriesForImport + *out = make([]RegistryLocation, len(*in)) + copy(*out, *in) + } + if in.ExternalRegistryHostnames != nil { + in, out := &in.ExternalRegistryHostnames, &out.ExternalRegistryHostnames + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.AdditionalTrustedCA = in.AdditionalTrustedCA + in.RegistrySources.DeepCopyInto(&out.RegistrySources) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSpec. +func (in *ImageSpec) DeepCopy() *ImageSpec { + if in == nil { + return nil + } + out := new(ImageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStatus) DeepCopyInto(out *ImageStatus) { + *out = *in + if in.ExternalRegistryHostnames != nil { + in, out := &in.ExternalRegistryHostnames, &out.ExternalRegistryHostnames + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStatus. +func (in *ImageStatus) DeepCopy() *ImageStatus { + if in == nil { + return nil + } + out := new(ImageStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Infrastructure) DeepCopyInto(out *Infrastructure) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Infrastructure. +func (in *Infrastructure) DeepCopy() *Infrastructure { + if in == nil { + return nil + } + out := new(Infrastructure) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Infrastructure) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InfrastructureList) DeepCopyInto(out *InfrastructureList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Infrastructure, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfrastructureList. 
+func (in *InfrastructureList) DeepCopy() *InfrastructureList { + if in == nil { + return nil + } + out := new(InfrastructureList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InfrastructureList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InfrastructureSpec) DeepCopyInto(out *InfrastructureSpec) { + *out = *in + out.CloudConfig = in.CloudConfig + in.PlatformSpec.DeepCopyInto(&out.PlatformSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfrastructureSpec. +func (in *InfrastructureSpec) DeepCopy() *InfrastructureSpec { + if in == nil { + return nil + } + out := new(InfrastructureSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InfrastructureStatus) DeepCopyInto(out *InfrastructureStatus) { + *out = *in + if in.PlatformStatus != nil { + in, out := &in.PlatformStatus, &out.PlatformStatus + *out = new(PlatformStatus) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfrastructureStatus. +func (in *InfrastructureStatus) DeepCopy() *InfrastructureStatus { + if in == nil { + return nil + } + out := new(InfrastructureStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Ingress) DeepCopyInto(out *Ingress) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ingress. +func (in *Ingress) DeepCopy() *Ingress { + if in == nil { + return nil + } + out := new(Ingress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Ingress) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressList) DeepCopyInto(out *IngressList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Ingress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressList. +func (in *IngressList) DeepCopy() *IngressList { + if in == nil { + return nil + } + out := new(IngressList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IngressList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IngressSpec) DeepCopyInto(out *IngressSpec) { + *out = *in + if in.ComponentRoutes != nil { + in, out := &in.ComponentRoutes, &out.ComponentRoutes + *out = make([]ComponentRouteSpec, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressSpec. +func (in *IngressSpec) DeepCopy() *IngressSpec { + if in == nil { + return nil + } + out := new(IngressSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressStatus) DeepCopyInto(out *IngressStatus) { + *out = *in + if in.ComponentRoutes != nil { + in, out := &in.ComponentRoutes, &out.ComponentRoutes + *out = make([]ComponentRouteStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressStatus. +func (in *IngressStatus) DeepCopy() *IngressStatus { + if in == nil { + return nil + } + out := new(IngressStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntermediateTLSProfile) DeepCopyInto(out *IntermediateTLSProfile) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntermediateTLSProfile. +func (in *IntermediateTLSProfile) DeepCopy() *IntermediateTLSProfile { + if in == nil { + return nil + } + out := new(IntermediateTLSProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KeystoneIdentityProvider) DeepCopyInto(out *KeystoneIdentityProvider) { + *out = *in + out.OAuthRemoteConnectionInfo = in.OAuthRemoteConnectionInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeystoneIdentityProvider. +func (in *KeystoneIdentityProvider) DeepCopy() *KeystoneIdentityProvider { + if in == nil { + return nil + } + out := new(KeystoneIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeClientConfig) DeepCopyInto(out *KubeClientConfig) { + *out = *in + out.ConnectionOverrides = in.ConnectionOverrides + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeClientConfig. +func (in *KubeClientConfig) DeepCopy() *KubeClientConfig { + if in == nil { + return nil + } + out := new(KubeClientConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubevirtPlatformSpec) DeepCopyInto(out *KubevirtPlatformSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtPlatformSpec. +func (in *KubevirtPlatformSpec) DeepCopy() *KubevirtPlatformSpec { + if in == nil { + return nil + } + out := new(KubevirtPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubevirtPlatformStatus) DeepCopyInto(out *KubevirtPlatformStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtPlatformStatus. +func (in *KubevirtPlatformStatus) DeepCopy() *KubevirtPlatformStatus { + if in == nil { + return nil + } + out := new(KubevirtPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LDAPAttributeMapping) DeepCopyInto(out *LDAPAttributeMapping) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PreferredUsername != nil { + in, out := &in.PreferredUsername, &out.PreferredUsername + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Email != nil { + in, out := &in.Email, &out.Email + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LDAPAttributeMapping. +func (in *LDAPAttributeMapping) DeepCopy() *LDAPAttributeMapping { + if in == nil { + return nil + } + out := new(LDAPAttributeMapping) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LDAPIdentityProvider) DeepCopyInto(out *LDAPIdentityProvider) { + *out = *in + out.BindPassword = in.BindPassword + out.CA = in.CA + in.Attributes.DeepCopyInto(&out.Attributes) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LDAPIdentityProvider. +func (in *LDAPIdentityProvider) DeepCopy() *LDAPIdentityProvider { + if in == nil { + return nil + } + out := new(LDAPIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LeaderElection) DeepCopyInto(out *LeaderElection) { + *out = *in + out.LeaseDuration = in.LeaseDuration + out.RenewDeadline = in.RenewDeadline + out.RetryPeriod = in.RetryPeriod + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaderElection. +func (in *LeaderElection) DeepCopy() *LeaderElection { + if in == nil { + return nil + } + out := new(LeaderElection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ModernTLSProfile) DeepCopyInto(out *ModernTLSProfile) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModernTLSProfile. +func (in *ModernTLSProfile) DeepCopy() *ModernTLSProfile { + if in == nil { + return nil + } + out := new(ModernTLSProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamedCertificate) DeepCopyInto(out *NamedCertificate) { + *out = *in + if in.Names != nil { + in, out := &in.Names, &out.Names + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.CertInfo = in.CertInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedCertificate. 
+func (in *NamedCertificate) DeepCopy() *NamedCertificate { + if in == nil { + return nil + } + out := new(NamedCertificate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Network) DeepCopyInto(out *Network) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Network. +func (in *Network) DeepCopy() *Network { + if in == nil { + return nil + } + out := new(Network) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Network) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkList) DeepCopyInto(out *NetworkList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Network, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkList. +func (in *NetworkList) DeepCopy() *NetworkList { + if in == nil { + return nil + } + out := new(NetworkList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NetworkList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkMigration) DeepCopyInto(out *NetworkMigration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkMigration. +func (in *NetworkMigration) DeepCopy() *NetworkMigration { + if in == nil { + return nil + } + out := new(NetworkMigration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) { + *out = *in + if in.ClusterNetwork != nil { + in, out := &in.ClusterNetwork, &out.ClusterNetwork + *out = make([]ClusterNetworkEntry, len(*in)) + copy(*out, *in) + } + if in.ServiceNetwork != nil { + in, out := &in.ServiceNetwork, &out.ServiceNetwork + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExternalIP != nil { + in, out := &in.ExternalIP, &out.ExternalIP + *out = new(ExternalIPConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSpec. +func (in *NetworkSpec) DeepCopy() *NetworkSpec { + if in == nil { + return nil + } + out := new(NetworkSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkStatus) DeepCopyInto(out *NetworkStatus) { + *out = *in + if in.ClusterNetwork != nil { + in, out := &in.ClusterNetwork, &out.ClusterNetwork + *out = make([]ClusterNetworkEntry, len(*in)) + copy(*out, *in) + } + if in.ServiceNetwork != nil { + in, out := &in.ServiceNetwork, &out.ServiceNetwork + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Migration != nil { + in, out := &in.Migration, &out.Migration + *out = new(NetworkMigration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkStatus. +func (in *NetworkStatus) DeepCopy() *NetworkStatus { + if in == nil { + return nil + } + out := new(NetworkStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuth) DeepCopyInto(out *OAuth) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuth. +func (in *OAuth) DeepCopy() *OAuth { + if in == nil { + return nil + } + out := new(OAuth) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OAuth) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthList) DeepCopyInto(out *OAuthList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OAuth, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthList. +func (in *OAuthList) DeepCopy() *OAuthList { + if in == nil { + return nil + } + out := new(OAuthList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OAuthList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthRemoteConnectionInfo) DeepCopyInto(out *OAuthRemoteConnectionInfo) { + *out = *in + out.CA = in.CA + out.TLSClientCert = in.TLSClientCert + out.TLSClientKey = in.TLSClientKey + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthRemoteConnectionInfo. +func (in *OAuthRemoteConnectionInfo) DeepCopy() *OAuthRemoteConnectionInfo { + if in == nil { + return nil + } + out := new(OAuthRemoteConnectionInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OAuthSpec) DeepCopyInto(out *OAuthSpec) { + *out = *in + if in.IdentityProviders != nil { + in, out := &in.IdentityProviders, &out.IdentityProviders + *out = make([]IdentityProvider, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.TokenConfig.DeepCopyInto(&out.TokenConfig) + out.Templates = in.Templates + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthSpec. +func (in *OAuthSpec) DeepCopy() *OAuthSpec { + if in == nil { + return nil + } + out := new(OAuthSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthStatus) DeepCopyInto(out *OAuthStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthStatus. +func (in *OAuthStatus) DeepCopy() *OAuthStatus { + if in == nil { + return nil + } + out := new(OAuthStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthTemplates) DeepCopyInto(out *OAuthTemplates) { + *out = *in + out.Login = in.Login + out.ProviderSelection = in.ProviderSelection + out.Error = in.Error + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthTemplates. +func (in *OAuthTemplates) DeepCopy() *OAuthTemplates { + if in == nil { + return nil + } + out := new(OAuthTemplates) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectReference) DeepCopyInto(out *ObjectReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectReference. +func (in *ObjectReference) DeepCopy() *ObjectReference { + if in == nil { + return nil + } + out := new(ObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OldTLSProfile) DeepCopyInto(out *OldTLSProfile) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OldTLSProfile. +func (in *OldTLSProfile) DeepCopy() *OldTLSProfile { + if in == nil { + return nil + } + out := new(OldTLSProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenIDClaims) DeepCopyInto(out *OpenIDClaims) { + *out = *in + if in.PreferredUsername != nil { + in, out := &in.PreferredUsername, &out.PreferredUsername + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Email != nil { + in, out := &in.Email, &out.Email + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDClaims. +func (in *OpenIDClaims) DeepCopy() *OpenIDClaims { + if in == nil { + return nil + } + out := new(OpenIDClaims) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpenIDIdentityProvider) DeepCopyInto(out *OpenIDIdentityProvider) { + *out = *in + out.ClientSecret = in.ClientSecret + out.CA = in.CA + if in.ExtraScopes != nil { + in, out := &in.ExtraScopes, &out.ExtraScopes + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExtraAuthorizeParameters != nil { + in, out := &in.ExtraAuthorizeParameters, &out.ExtraAuthorizeParameters + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.Claims.DeepCopyInto(&out.Claims) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDIdentityProvider. +func (in *OpenIDIdentityProvider) DeepCopy() *OpenIDIdentityProvider { + if in == nil { + return nil + } + out := new(OpenIDIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenStackPlatformSpec) DeepCopyInto(out *OpenStackPlatformSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackPlatformSpec. +func (in *OpenStackPlatformSpec) DeepCopy() *OpenStackPlatformSpec { + if in == nil { + return nil + } + out := new(OpenStackPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenStackPlatformStatus) DeepCopyInto(out *OpenStackPlatformStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackPlatformStatus. +func (in *OpenStackPlatformStatus) DeepCopy() *OpenStackPlatformStatus { + if in == nil { + return nil + } + out := new(OpenStackPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperandVersion) DeepCopyInto(out *OperandVersion) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperandVersion. +func (in *OperandVersion) DeepCopy() *OperandVersion { + if in == nil { + return nil + } + out := new(OperandVersion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatorHub) DeepCopyInto(out *OperatorHub) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorHub. +func (in *OperatorHub) DeepCopy() *OperatorHub { + if in == nil { + return nil + } + out := new(OperatorHub) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OperatorHub) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OperatorHubList) DeepCopyInto(out *OperatorHubList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OperatorHub, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorHubList. +func (in *OperatorHubList) DeepCopy() *OperatorHubList { + if in == nil { + return nil + } + out := new(OperatorHubList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OperatorHubList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatorHubSpec) DeepCopyInto(out *OperatorHubSpec) { + *out = *in + if in.Sources != nil { + in, out := &in.Sources, &out.Sources + *out = make([]HubSource, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorHubSpec. +func (in *OperatorHubSpec) DeepCopy() *OperatorHubSpec { + if in == nil { + return nil + } + out := new(OperatorHubSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatorHubStatus) DeepCopyInto(out *OperatorHubStatus) { + *out = *in + if in.Sources != nil { + in, out := &in.Sources, &out.Sources + *out = make([]HubSourceStatus, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorHubStatus. +func (in *OperatorHubStatus) DeepCopy() *OperatorHubStatus { + if in == nil { + return nil + } + out := new(OperatorHubStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OvirtPlatformSpec) DeepCopyInto(out *OvirtPlatformSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OvirtPlatformSpec. +func (in *OvirtPlatformSpec) DeepCopy() *OvirtPlatformSpec { + if in == nil { + return nil + } + out := new(OvirtPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OvirtPlatformStatus) DeepCopyInto(out *OvirtPlatformStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OvirtPlatformStatus. +func (in *OvirtPlatformStatus) DeepCopy() *OvirtPlatformStatus { + if in == nil { + return nil + } + out := new(OvirtPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PlatformSpec) DeepCopyInto(out *PlatformSpec) { + *out = *in + if in.AWS != nil { + in, out := &in.AWS, &out.AWS + *out = new(AWSPlatformSpec) + (*in).DeepCopyInto(*out) + } + if in.Azure != nil { + in, out := &in.Azure, &out.Azure + *out = new(AzurePlatformSpec) + **out = **in + } + if in.GCP != nil { + in, out := &in.GCP, &out.GCP + *out = new(GCPPlatformSpec) + **out = **in + } + if in.BareMetal != nil { + in, out := &in.BareMetal, &out.BareMetal + *out = new(BareMetalPlatformSpec) + **out = **in + } + if in.OpenStack != nil { + in, out := &in.OpenStack, &out.OpenStack + *out = new(OpenStackPlatformSpec) + **out = **in + } + if in.Ovirt != nil { + in, out := &in.Ovirt, &out.Ovirt + *out = new(OvirtPlatformSpec) + **out = **in + } + if in.VSphere != nil { + in, out := &in.VSphere, &out.VSphere + *out = new(VSpherePlatformSpec) + **out = **in + } + if in.IBMCloud != nil { + in, out := &in.IBMCloud, &out.IBMCloud + *out = new(IBMCloudPlatformSpec) + **out = **in + } + if in.Kubevirt != nil { + in, out := &in.Kubevirt, &out.Kubevirt + *out = new(KubevirtPlatformSpec) + **out = **in + } + if in.EquinixMetal != nil { + in, out := &in.EquinixMetal, &out.EquinixMetal + *out = new(EquinixMetalPlatformSpec) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlatformSpec. +func (in *PlatformSpec) DeepCopy() *PlatformSpec { + if in == nil { + return nil + } + out := new(PlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlatformStatus) DeepCopyInto(out *PlatformStatus) { + *out = *in + if in.AWS != nil { + in, out := &in.AWS, &out.AWS + *out = new(AWSPlatformStatus) + (*in).DeepCopyInto(*out) + } + if in.Azure != nil { + in, out := &in.Azure, &out.Azure + *out = new(AzurePlatformStatus) + **out = **in + } + if in.GCP != nil { + in, out := &in.GCP, &out.GCP + *out = new(GCPPlatformStatus) + **out = **in + } + if in.BareMetal != nil { + in, out := &in.BareMetal, &out.BareMetal + *out = new(BareMetalPlatformStatus) + **out = **in + } + if in.OpenStack != nil { + in, out := &in.OpenStack, &out.OpenStack + *out = new(OpenStackPlatformStatus) + **out = **in + } + if in.Ovirt != nil { + in, out := &in.Ovirt, &out.Ovirt + *out = new(OvirtPlatformStatus) + **out = **in + } + if in.VSphere != nil { + in, out := &in.VSphere, &out.VSphere + *out = new(VSpherePlatformStatus) + **out = **in + } + if in.IBMCloud != nil { + in, out := &in.IBMCloud, &out.IBMCloud + *out = new(IBMCloudPlatformStatus) + **out = **in + } + if in.Kubevirt != nil { + in, out := &in.Kubevirt, &out.Kubevirt + *out = new(KubevirtPlatformStatus) + **out = **in + } + if in.EquinixMetal != nil { + in, out := &in.EquinixMetal, &out.EquinixMetal + *out = new(EquinixMetalPlatformStatus) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlatformStatus. +func (in *PlatformStatus) DeepCopy() *PlatformStatus { + if in == nil { + return nil + } + out := new(PlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Project) DeepCopyInto(out *Project) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Project. +func (in *Project) DeepCopy() *Project { + if in == nil { + return nil + } + out := new(Project) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Project) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectList) DeepCopyInto(out *ProjectList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Project, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectList. +func (in *ProjectList) DeepCopy() *ProjectList { + if in == nil { + return nil + } + out := new(ProjectList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProjectList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectSpec) DeepCopyInto(out *ProjectSpec) { + *out = *in + out.ProjectRequestTemplate = in.ProjectRequestTemplate + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectSpec. +func (in *ProjectSpec) DeepCopy() *ProjectSpec { + if in == nil { + return nil + } + out := new(ProjectSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectStatus) DeepCopyInto(out *ProjectStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectStatus. +func (in *ProjectStatus) DeepCopy() *ProjectStatus { + if in == nil { + return nil + } + out := new(ProjectStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Proxy) DeepCopyInto(out *Proxy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Proxy. +func (in *Proxy) DeepCopy() *Proxy { + if in == nil { + return nil + } + out := new(Proxy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Proxy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProxyList) DeepCopyInto(out *ProxyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Proxy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyList. +func (in *ProxyList) DeepCopy() *ProxyList { + if in == nil { + return nil + } + out := new(ProxyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProxyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxySpec) DeepCopyInto(out *ProxySpec) { + *out = *in + if in.ReadinessEndpoints != nil { + in, out := &in.ReadinessEndpoints, &out.ReadinessEndpoints + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.TrustedCA = in.TrustedCA + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxySpec. +func (in *ProxySpec) DeepCopy() *ProxySpec { + if in == nil { + return nil + } + out := new(ProxySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxyStatus) DeepCopyInto(out *ProxyStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyStatus. +func (in *ProxyStatus) DeepCopy() *ProxyStatus { + if in == nil { + return nil + } + out := new(ProxyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegistryLocation) DeepCopyInto(out *RegistryLocation) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryLocation. +func (in *RegistryLocation) DeepCopy() *RegistryLocation { + if in == nil { + return nil + } + out := new(RegistryLocation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegistrySources) DeepCopyInto(out *RegistrySources) { + *out = *in + if in.InsecureRegistries != nil { + in, out := &in.InsecureRegistries, &out.InsecureRegistries + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.BlockedRegistries != nil { + in, out := &in.BlockedRegistries, &out.BlockedRegistries + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AllowedRegistries != nil { + in, out := &in.AllowedRegistries, &out.AllowedRegistries + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ContainerRuntimeSearchRegistries != nil { + in, out := &in.ContainerRuntimeSearchRegistries, &out.ContainerRuntimeSearchRegistries + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistrySources. 
+func (in *RegistrySources) DeepCopy() *RegistrySources { + if in == nil { + return nil + } + out := new(RegistrySources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Release) DeepCopyInto(out *Release) { + *out = *in + if in.Channels != nil { + in, out := &in.Channels, &out.Channels + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Release. +func (in *Release) DeepCopy() *Release { + if in == nil { + return nil + } + out := new(Release) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RemoteConnectionInfo) DeepCopyInto(out *RemoteConnectionInfo) { + *out = *in + out.CertInfo = in.CertInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteConnectionInfo. +func (in *RemoteConnectionInfo) DeepCopy() *RemoteConnectionInfo { + if in == nil { + return nil + } + out := new(RemoteConnectionInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequestHeaderIdentityProvider) DeepCopyInto(out *RequestHeaderIdentityProvider) { + *out = *in + out.ClientCA = in.ClientCA + if in.ClientCommonNames != nil { + in, out := &in.ClientCommonNames, &out.ClientCommonNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PreferredUsernameHeaders != nil { + in, out := &in.PreferredUsernameHeaders, &out.PreferredUsernameHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NameHeaders != nil { + in, out := &in.NameHeaders, &out.NameHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.EmailHeaders != nil { + in, out := &in.EmailHeaders, &out.EmailHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestHeaderIdentityProvider. +func (in *RequestHeaderIdentityProvider) DeepCopy() *RequestHeaderIdentityProvider { + if in == nil { + return nil + } + out := new(RequestHeaderIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Scheduler) DeepCopyInto(out *Scheduler) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scheduler. +func (in *Scheduler) DeepCopy() *Scheduler { + if in == nil { + return nil + } + out := new(Scheduler) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Scheduler) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SchedulerList) DeepCopyInto(out *SchedulerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Scheduler, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerList. +func (in *SchedulerList) DeepCopy() *SchedulerList { + if in == nil { + return nil + } + out := new(SchedulerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SchedulerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchedulerSpec) DeepCopyInto(out *SchedulerSpec) { + *out = *in + out.Policy = in.Policy + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerSpec. +func (in *SchedulerSpec) DeepCopy() *SchedulerSpec { + if in == nil { + return nil + } + out := new(SchedulerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchedulerStatus) DeepCopyInto(out *SchedulerStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerStatus. +func (in *SchedulerStatus) DeepCopy() *SchedulerStatus { + if in == nil { + return nil + } + out := new(SchedulerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretNameReference) DeepCopyInto(out *SecretNameReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretNameReference. +func (in *SecretNameReference) DeepCopy() *SecretNameReference { + if in == nil { + return nil + } + out := new(SecretNameReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServingInfo) DeepCopyInto(out *ServingInfo) { + *out = *in + out.CertInfo = in.CertInfo + if in.NamedCertificates != nil { + in, out := &in.NamedCertificates, &out.NamedCertificates + *out = make([]NamedCertificate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CipherSuites != nil { + in, out := &in.CipherSuites, &out.CipherSuites + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServingInfo. +func (in *ServingInfo) DeepCopy() *ServingInfo { + if in == nil { + return nil + } + out := new(ServingInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StringSource) DeepCopyInto(out *StringSource) { + *out = *in + out.StringSourceSpec = in.StringSourceSpec + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringSource. 
+func (in *StringSource) DeepCopy() *StringSource { + if in == nil { + return nil + } + out := new(StringSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StringSourceSpec) DeepCopyInto(out *StringSourceSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringSourceSpec. +func (in *StringSourceSpec) DeepCopy() *StringSourceSpec { + if in == nil { + return nil + } + out := new(StringSourceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSProfileSpec) DeepCopyInto(out *TLSProfileSpec) { + *out = *in + if in.Ciphers != nil { + in, out := &in.Ciphers, &out.Ciphers + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSProfileSpec. +func (in *TLSProfileSpec) DeepCopy() *TLSProfileSpec { + if in == nil { + return nil + } + out := new(TLSProfileSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSSecurityProfile) DeepCopyInto(out *TLSSecurityProfile) { + *out = *in + if in.Old != nil { + in, out := &in.Old, &out.Old + *out = new(OldTLSProfile) + **out = **in + } + if in.Intermediate != nil { + in, out := &in.Intermediate, &out.Intermediate + *out = new(IntermediateTLSProfile) + **out = **in + } + if in.Modern != nil { + in, out := &in.Modern, &out.Modern + *out = new(ModernTLSProfile) + **out = **in + } + if in.Custom != nil { + in, out := &in.Custom, &out.Custom + *out = new(CustomTLSProfile) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSSecurityProfile. +func (in *TLSSecurityProfile) DeepCopy() *TLSSecurityProfile { + if in == nil { + return nil + } + out := new(TLSSecurityProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateReference) DeepCopyInto(out *TemplateReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateReference. +func (in *TemplateReference) DeepCopy() *TemplateReference { + if in == nil { + return nil + } + out := new(TemplateReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenConfig) DeepCopyInto(out *TokenConfig) { + *out = *in + if in.AccessTokenInactivityTimeout != nil { + in, out := &in.AccessTokenInactivityTimeout, &out.AccessTokenInactivityTimeout + *out = new(metav1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenConfig. +func (in *TokenConfig) DeepCopy() *TokenConfig { + if in == nil { + return nil + } + out := new(TokenConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Update) DeepCopyInto(out *Update) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Update.
+func (in *Update) DeepCopy() *Update {
+	if in == nil {
+		return nil
+	}
+	out := new(Update)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UpdateHistory) DeepCopyInto(out *UpdateHistory) {
+	*out = *in
+	in.StartedTime.DeepCopyInto(&out.StartedTime)
+	if in.CompletionTime != nil {
+		in, out := &in.CompletionTime, &out.CompletionTime
+		*out = (*in).DeepCopy()
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdateHistory.
+func (in *UpdateHistory) DeepCopy() *UpdateHistory {
+	if in == nil {
+		return nil
+	}
+	out := new(UpdateHistory)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VSpherePlatformSpec) DeepCopyInto(out *VSpherePlatformSpec) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSpherePlatformSpec.
+func (in *VSpherePlatformSpec) DeepCopy() *VSpherePlatformSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(VSpherePlatformSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VSpherePlatformStatus) DeepCopyInto(out *VSpherePlatformStatus) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSpherePlatformStatus.
+func (in *VSpherePlatformStatus) DeepCopy() *VSpherePlatformStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(VSpherePlatformStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WebhookTokenAuthenticator) DeepCopyInto(out *WebhookTokenAuthenticator) {
+	*out = *in
+	out.KubeConfig = in.KubeConfig
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookTokenAuthenticator.
+func (in *WebhookTokenAuthenticator) DeepCopy() *WebhookTokenAuthenticator {
+	if in == nil {
+		return nil
+	}
+	out := new(WebhookTokenAuthenticator)
+	in.DeepCopyInto(out)
+	return out
+}
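All of the generated helpers above share one pattern: DeepCopyInto writes into caller-supplied memory, DeepCopy allocates and returns a fresh object, and DeepCopyObject satisfies runtime.Object for the top-level API types. A minimal sketch of the usual reason callers need them, assuming only that the vendored package imports as configv1 (the URLs and values are illustrative):

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	// Objects handed out by informer caches are shared; mutating them in
	// place corrupts the cache. DeepCopy returns a fully independent clone,
	// recursively copying nested structs, slices, maps, and pointers.
	original := &configv1.Infrastructure{
		Status: configv1.InfrastructureStatus{APIServerURL: "https://api.original.example.com:6443"},
	}
	clone := original.DeepCopy()
	clone.Status.APIServerURL = "https://api.changed.example.com:6443"

	fmt.Println(original.Status.APIServerURL) // unchanged: the clone owns its own memory
}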
diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go
new file mode 100644
index 0000000000..22de664b22
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go
@@ -0,0 +1,1552 @@
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_AdmissionConfig = map[string]string{
+	"enabledPlugins":  "enabledPlugins is a list of admission plugins that must be on in addition to the default list. Some admission plugins are disabled by default, but certain configurations require them. This is fairly uncommon and can result in performance penalties and unexpected behavior.",
+	"disabledPlugins": "disabledPlugins is a list of admission plugins that must be off. Putting something in this list is almost always a mistake and likely to result in cluster instability.",
+}
+
+func (AdmissionConfig) SwaggerDoc() map[string]string {
+	return map_AdmissionConfig
+}
+
+var map_AdmissionPluginConfig = map[string]string{
+	"":              "AdmissionPluginConfig holds the necessary configuration options for admission plugins",
+	"location":      "Location is the path to a configuration file that contains the plugin's configuration",
+	"configuration": "Configuration is an embedded configuration object to be used as the plugin's configuration. If present, it will be used instead of the path to the configuration file.",
+}
+
+func (AdmissionPluginConfig) SwaggerDoc() map[string]string {
+	return map_AdmissionPluginConfig
+}
+
+var map_AuditConfig = map[string]string{
+	"":                         "AuditConfig holds configuration for the audit capabilities",
+	"enabled":                  "If this flag is set, the audit log will be printed in the logs. The log contains the method, user, and requested URL.",
+	"auditFilePath":            "All requests coming to the apiserver will be logged to this file.",
+	"maximumFileRetentionDays": "Maximum number of days to retain old log files based on the timestamp encoded in their filename.",
+	"maximumRetainedFiles":     "Maximum number of old log files to retain.",
+	"maximumFileSizeMegabytes": "Maximum size in megabytes of the log file before it gets rotated. Defaults to 100MB.",
+	"policyFile":               "PolicyFile is a path to the file that defines the audit policy configuration.",
+	"policyConfiguration":      "PolicyConfiguration is an embedded policy configuration object to be used as the audit policy configuration. If present, it will be used instead of the path to the policy file.",
+	"logFormat":                "Format of saved audits (legacy or json).",
+	"webHookKubeConfig":        "Path to a .kubeconfig formatted file that defines the audit webhook configuration.",
+	"webHookMode":              "Strategy for sending audit events (block or batch).",
+}
+
+func (AuditConfig) SwaggerDoc() map[string]string {
+	return map_AuditConfig
+}
+
+var map_CertInfo = map[string]string{
+	"":         "CertInfo relates a certificate with a private key",
+	"certFile": "CertFile is a file containing a PEM-encoded certificate",
+	"keyFile":  "KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile",
+}
+
+func (CertInfo) SwaggerDoc() map[string]string {
+	return map_CertInfo
+}
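Each SwaggerDoc method simply returns its package-level map: the empty key carries the type-level description and the remaining keys are JSON field names. A minimal sketch of a consumer, using only the CertInfo type documented above:

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	// SwaggerDoc has a value receiver, so a zero value is enough to look up
	// the documentation strings that go-restful/OpenAPI tooling consumes.
	docs := configv1.CertInfo{}.SwaggerDoc()
	fmt.Println(docs[""])         // type-level description
	fmt.Println(docs["certFile"]) // per-field description
}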
This field will control all connections to the server used by a particular client.", + "contentType": "contentType is the content type used when sending data to the server from this client.", + "qps": "qps controls the number of queries per second allowed for this connection.", + "burst": "burst allows extra queries to accumulate when a client is exceeding its rate.", +} + +func (ClientConnectionOverrides) SwaggerDoc() map[string]string { + return map_ClientConnectionOverrides +} + +var map_ConfigMapFileReference = map[string]string{ + "": "ConfigMapFileReference references a config map in a specific namespace. The namespace must be specified at the point of use.", + "key": "Key allows pointing to a specific key/value inside of the configmap. This is useful for logical file references.", +} + +func (ConfigMapFileReference) SwaggerDoc() map[string]string { + return map_ConfigMapFileReference +} + +var map_ConfigMapNameReference = map[string]string{ + "": "ConfigMapNameReference references a config map in a specific namespace. The namespace must be specified at the point of use.", + "name": "name is the metadata.name of the referenced config map", +} + +func (ConfigMapNameReference) SwaggerDoc() map[string]string { + return map_ConfigMapNameReference +} + +var map_DelegatedAuthentication = map[string]string{ + "": "DelegatedAuthentication allows authentication to be disabled.", + "disabled": "disabled indicates that authentication should be disabled. By default it will use delegated authentication.", +} + +func (DelegatedAuthentication) SwaggerDoc() map[string]string { + return map_DelegatedAuthentication +} + +var map_DelegatedAuthorization = map[string]string{ + "": "DelegatedAuthorization allows authorization to be disabled.", + "disabled": "disabled indicates that authorization should be disabled. By default it will use delegated authorization.", +} + +func (DelegatedAuthorization) SwaggerDoc() map[string]string { + return map_DelegatedAuthorization +} + +var map_EtcdConnectionInfo = map[string]string{ + "": "EtcdConnectionInfo holds information necessary for connecting to an etcd server", + "urls": "URLs are the URLs for etcd", + "ca": "CA is a file containing trusted roots for the etcd server certificates", +} + +func (EtcdConnectionInfo) SwaggerDoc() map[string]string { + return map_EtcdConnectionInfo +} + +var map_EtcdStorageConfig = map[string]string{ + "storagePrefix": "StoragePrefix is the path within etcd that the OpenShift resources will be rooted under. 
This value, if changed, will mean existing objects in etcd will no longer be located.", +} + +func (EtcdStorageConfig) SwaggerDoc() map[string]string { + return map_EtcdStorageConfig +} + +var map_GenericAPIServerConfig = map[string]string{ + "": "GenericAPIServerConfig is an inline-able struct for aggregated apiservers that need to store data in etcd", + "servingInfo": "servingInfo describes how to start serving", + "corsAllowedOrigins": "corsAllowedOrigins", + "auditConfig": "auditConfig describes how to configure audit information", + "storageConfig": "storageConfig contains information about how to use", + "admission": "admissionConfig holds information about how to configure admission.", +} + +func (GenericAPIServerConfig) SwaggerDoc() map[string]string { + return map_GenericAPIServerConfig +} + +var map_GenericControllerConfig = map[string]string{ + "": "GenericControllerConfig provides information to configure a controller", + "servingInfo": "ServingInfo is the HTTP serving information for the controller's endpoints", + "leaderElection": "leaderElection provides information to elect a leader. Only override this if you have a specific need", + "authentication": "authentication allows configuration of authentication for the endpoints", + "authorization": "authorization allows configuration of authentication for the endpoints", +} + +func (GenericControllerConfig) SwaggerDoc() map[string]string { + return map_GenericControllerConfig +} + +var map_HTTPServingInfo = map[string]string{ + "": "HTTPServingInfo holds configuration for serving HTTP", + "maxRequestsInFlight": "MaxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit.", + "requestTimeoutSeconds": "RequestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes, if -1 there is no limit on requests.", +} + +func (HTTPServingInfo) SwaggerDoc() map[string]string { + return map_HTTPServingInfo +} + +var map_KubeClientConfig = map[string]string{ + "kubeConfig": "kubeConfig is a .kubeconfig filename for going to the owning kube-apiserver. Empty uses an in-cluster-config", + "connectionOverrides": "connectionOverrides specifies client overrides for system components to loop back to this master.", +} + +func (KubeClientConfig) SwaggerDoc() map[string]string { + return map_KubeClientConfig +} + +var map_LeaderElection = map[string]string{ + "": "LeaderElection provides information to elect a leader", + "disable": "disable allows leader election to be suspended while allowing a fully defaulted \"normal\" startup case.", + "namespace": "namespace indicates which namespace the resource is in", + "name": "name indicates what name to use for the resource", + "leaseDuration": "leaseDuration is the duration that non-leader candidates will wait after observing a leadership renewal until attempting to acquire leadership of a led but unrenewed leader slot. This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate. This is only applicable if leader election is enabled.", + "renewDeadline": "renewDeadline is the interval between attempts by the acting master to renew a leadership slot before it stops leading. This must be less than or equal to the lease duration. This is only applicable if leader election is enabled.", + "retryPeriod": "retryPeriod is the duration the clients should wait between attempting acquisition and renewal of a leadership. 
This is only applicable if leader election is enabled.", +} + +func (LeaderElection) SwaggerDoc() map[string]string { + return map_LeaderElection +} + +var map_NamedCertificate = map[string]string{ + "": "NamedCertificate specifies a certificate/key, and the names it should be served for", + "names": "Names is a list of DNS names this certificate should be used to secure A name can be a normal DNS name, or can contain leading wildcard segments.", +} + +func (NamedCertificate) SwaggerDoc() map[string]string { + return map_NamedCertificate +} + +var map_RemoteConnectionInfo = map[string]string{ + "": "RemoteConnectionInfo holds information necessary for establishing a remote connection", + "url": "URL is the remote URL to connect to", + "ca": "CA is the CA for verifying TLS connections", +} + +func (RemoteConnectionInfo) SwaggerDoc() map[string]string { + return map_RemoteConnectionInfo +} + +var map_SecretNameReference = map[string]string{ + "": "SecretNameReference references a secret in a specific namespace. The namespace must be specified at the point of use.", + "name": "name is the metadata.name of the referenced secret", +} + +func (SecretNameReference) SwaggerDoc() map[string]string { + return map_SecretNameReference +} + +var map_ServingInfo = map[string]string{ + "": "ServingInfo holds information about serving web pages", + "bindAddress": "BindAddress is the ip:port to serve on", + "bindNetwork": "BindNetwork is the type of network to bind to - defaults to \"tcp4\", accepts \"tcp\", \"tcp4\", and \"tcp6\"", + "clientCA": "ClientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates", + "namedCertificates": "NamedCertificates is a list of certificates to use to secure requests to specific hostnames", + "minTLSVersion": "MinTLSVersion is the minimum TLS version supported. Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants", + "cipherSuites": "CipherSuites contains an overridden list of ciphers for the server to support. Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants", +} + +func (ServingInfo) SwaggerDoc() map[string]string { + return map_ServingInfo +} + +var map_StringSource = map[string]string{ + "": "StringSource allows specifying a string inline, or externally via env var or file. When it contains only a string value, it marshals to a simple JSON string.", +} + +func (StringSource) SwaggerDoc() map[string]string { + return map_StringSource +} + +var map_StringSourceSpec = map[string]string{ + "": "StringSourceSpec specifies a string value, or external location", + "value": "Value specifies the cleartext value, or an encrypted value if keyFile is specified.", + "env": "Env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified.", + "file": "File references a file containing the cleartext value, or an encrypted value if a keyFile is specified.", + "keyFile": "KeyFile references a file containing the key to use to decrypt the value.", +} + +func (StringSourceSpec) SwaggerDoc() map[string]string { + return map_StringSourceSpec +} + +var map_APIServer = map[string]string{ + "": "APIServer holds configuration (like serving certificates, client CA and CORS domains) shared by all API servers in the system, among them especially kube-apiserver and openshift-apiserver. 
The canonical name of an instance is 'cluster'.", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (APIServer) SwaggerDoc() map[string]string { + return map_APIServer +} + +var map_APIServerEncryption = map[string]string{ + "type": "type defines what encryption type should be used to encrypt resources at the datastore layer. When this field is unset (i.e. when it is set to the empty string), identity is implied. The behavior of unset can and will change over time. Even if encryption is enabled by default, the meaning of unset may change to a different encryption type based on changes in best practices.\n\nWhen encryption is enabled, all sensitive resources shipped with the platform are encrypted. This list of sensitive resources can and will change over time. The current authoritative list is:\n\n 1. secrets\n 2. configmaps\n 3. routes.route.openshift.io\n 4. oauthaccesstokens.oauth.openshift.io\n 5. oauthauthorizetokens.oauth.openshift.io", +} + +func (APIServerEncryption) SwaggerDoc() map[string]string { + return map_APIServerEncryption +} + +var map_APIServerNamedServingCert = map[string]string{ + "": "APIServerNamedServingCert maps a server DNS name, as understood by a client, to a certificate.", + "names": "names is a optional list of explicit DNS names (leading wildcards allowed) that should use this certificate to serve secure traffic. If no names are provided, the implicit names will be extracted from the certificates. Exact names trump over wildcard names. Explicit names defined here trump over extracted implicit names.", + "servingCertificate": "servingCertificate references a kubernetes.io/tls type secret containing the TLS cert info for serving secure traffic. The secret must exist in the openshift-config namespace and contain the following required fields: - Secret.Data[\"tls.key\"] - TLS private key. - Secret.Data[\"tls.crt\"] - TLS certificate.", +} + +func (APIServerNamedServingCert) SwaggerDoc() map[string]string { + return map_APIServerNamedServingCert +} + +var map_APIServerServingCerts = map[string]string{ + "namedCertificates": "namedCertificates references secrets containing the TLS cert info for serving secure traffic to specific hostnames. If no named certificates are provided, or no named certificates match the server name as understood by a client, the defaultServingCertificate will be used.", +} + +func (APIServerServingCerts) SwaggerDoc() map[string]string { + return map_APIServerServingCerts +} + +var map_APIServerSpec = map[string]string{ + "servingCerts": "servingCert is the TLS cert info for serving secure traffic. If not specified, operator managed certificates will be used for serving secure traffic.", + "clientCA": "clientCA references a ConfigMap containing a certificate bundle for the signers that will be recognized for incoming client certificates in addition to the operator managed signers. If this is empty, then only operator managed signers are valid. You usually only have to set this if you have your own PKI you wish to honor client certificates from. The ConfigMap must exist in the openshift-config namespace and contain the following required fields: - ConfigMap.Data[\"ca-bundle.crt\"] - CA bundle.", + "additionalCORSAllowedOrigins": "additionalCORSAllowedOrigins lists additional, user-defined regular expressions describing hosts for which the API server allows access using the CORS headers. 
This may be needed to access the API and the integrated OAuth server from JavaScript applications. The values are regular expressions that correspond to the Golang regular expression language.", + "encryption": "encryption allows the configuration of encryption of resources at the datastore layer.", + "tlsSecurityProfile": "tlsSecurityProfile specifies settings for TLS connections for externally exposed servers.\n\nIf unset, a default (which may change between releases) is chosen. Note that only Old and Intermediate profiles are currently supported, and the maximum available MinTLSVersions is VersionTLS12.", + "audit": "audit specifies the settings for audit configuration to be applied to all OpenShift-provided API servers in the cluster.", +} + +func (APIServerSpec) SwaggerDoc() map[string]string { + return map_APIServerSpec +} + +var map_Audit = map[string]string{ + "profile": "profile specifies the name of the desired audit policy configuration to be deployed to all OpenShift-provided API servers in the cluster.\n\nThe following profiles are provided: - Default: the existing default policy. - WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for write requests (create, update, patch). - AllRequestBodies: like 'WriteRequestBodies', but also logs request and response HTTP payloads for read requests (get, list).\n\nIf unset, the 'Default' profile is used as the default.", +} + +func (Audit) SwaggerDoc() map[string]string { + return map_Audit +} + +var map_Authentication = map[string]string{ + "": "Authentication specifies cluster-wide settings for authentication (like OAuth and webhook token authenticators). The canonical name of an instance is `cluster`.", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (Authentication) SwaggerDoc() map[string]string { + return map_Authentication +} + +var map_AuthenticationSpec = map[string]string{ + "type": "type identifies the cluster managed, user facing authentication mode in use. Specifically, it manages the component that responds to login attempts. The default is IntegratedOAuth.", + "oauthMetadata": "oauthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for an external OAuth server. This discovery document can be viewed from its served location: oc get --raw '/.well-known/oauth-authorization-server' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 If oauthMetadata.name is non-empty, this value has precedence over any metadata reference stored in status. The key \"oauthMetadata\" is used to locate the data. If specified and the config map or expected key is not found, no metadata is served. If the specified metadata is not valid, no metadata is served. The namespace for this config map is openshift-config.", + "webhookTokenAuthenticators": "webhookTokenAuthenticators is DEPRECATED, setting it has no effect.", + "webhookTokenAuthenticator": "webhookTokenAuthenticator configures a remote token reviewer. These remote authentication webhooks can be used to verify bearer tokens via the tokenreviews.authentication.k8s.io REST API. This is required to honor bearer tokens that are provisioned by an external authentication service.", + "serviceAccountIssuer": "serviceAccountIssuer is the identifier of the bound service account token issuer. 
The default is https://kubernetes.default.svc WARNING: Updating this field will result in the invalidation of all bound tokens with the previous issuer value. Unless the holder of a bound token has explicit support for a change in issuer, they will not request a new bound token until pod restart or until their existing token exceeds 80% of its duration.", +} + +func (AuthenticationSpec) SwaggerDoc() map[string]string { + return map_AuthenticationSpec +} + +var map_AuthenticationStatus = map[string]string{ + "integratedOAuthMetadata": "integratedOAuthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for the in-cluster integrated OAuth server. This discovery document can be viewed from its served location: oc get --raw '/.well-known/oauth-authorization-server' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 This contains the observed value based on cluster state. An explicitly set value in spec.oauthMetadata has precedence over this field. This field has no meaning if authentication spec.type is not set to IntegratedOAuth. The key \"oauthMetadata\" is used to locate the data. If the config map or expected key is not found, no metadata is served. If the specified metadata is not valid, no metadata is served. The namespace for this config map is openshift-config-managed.", +} + +func (AuthenticationStatus) SwaggerDoc() map[string]string { + return map_AuthenticationStatus +} + +var map_DeprecatedWebhookTokenAuthenticator = map[string]string{ + "": "deprecatedWebhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator. It's the same as WebhookTokenAuthenticator but it's missing the 'required' validation on KubeConfig field.", + "kubeConfig": "kubeConfig contains kube config file data which describes how to access the remote webhook service. For further details, see: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication The key \"kubeConfig\" is used to locate the data. If the secret or expected key is not found, the webhook is not honored. If the specified kube config data is not valid, the webhook is not honored. The namespace for this secret is determined by the point of use.", +} + +func (DeprecatedWebhookTokenAuthenticator) SwaggerDoc() map[string]string { + return map_DeprecatedWebhookTokenAuthenticator +} + +var map_WebhookTokenAuthenticator = map[string]string{ + "": "webhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator", + "kubeConfig": "kubeConfig references a secret that contains kube config file data which describes how to access the remote webhook service. The namespace for the referenced secret is openshift-config.\n\nFor further details, see:\n\nhttps://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication\n\nThe key \"kubeConfig\" is used to locate the data. If the secret or expected key is not found, the webhook is not honored. If the specified kube config data is not valid, the webhook is not honored.", +} + +func (WebhookTokenAuthenticator) SwaggerDoc() map[string]string { + return map_WebhookTokenAuthenticator +} + +var map_Build = map[string]string{ + "": "Build configures the behavior of OpenShift builds for the entire cluster. 
This includes default settings that can be overridden in BuildConfig objects, and overrides which are applied to all builds.\n\nThe canonical name is \"cluster\"", + "spec": "Spec holds user-settable values for the build controller configuration", +} + +func (Build) SwaggerDoc() map[string]string { + return map_Build +} + +var map_BuildDefaults = map[string]string{ + "defaultProxy": "DefaultProxy contains the default proxy settings for all build operations, including image pull/push and source download.\n\nValues can be overrode by setting the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables in the build config's strategy.", + "gitProxy": "GitProxy contains the proxy settings for git operations only. If set, this will override any Proxy settings for all git commands, such as git clone.\n\nValues that are not set here will be inherited from DefaultProxy.", + "env": "Env is a set of default environment variables that will be applied to the build if the specified variables do not exist on the build", + "imageLabels": "ImageLabels is a list of docker labels that are applied to the resulting image. User can override a default label by providing a label with the same name in their Build/BuildConfig.", + "resources": "Resources defines resource requirements to execute the build.", +} + +func (BuildDefaults) SwaggerDoc() map[string]string { + return map_BuildDefaults +} + +var map_BuildOverrides = map[string]string{ + "imageLabels": "ImageLabels is a list of docker labels that are applied to the resulting image. If user provided a label in their Build/BuildConfig with the same name as one in this list, the user's label will be overwritten.", + "nodeSelector": "NodeSelector is a selector which must be true for the build pod to fit on a node", + "tolerations": "Tolerations is a list of Tolerations that will override any existing tolerations set on a build pod.", + "forcePull": "ForcePull overrides, if set, the equivalent value in the builds, i.e. false disables force pull for all builds, true enables force pull for all builds, independently of what each build specifies itself", +} + +func (BuildOverrides) SwaggerDoc() map[string]string { + return map_BuildOverrides +} + +var map_BuildSpec = map[string]string{ + "additionalTrustedCA": "AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted for image pushes and pulls during builds. The namespace for this config map is openshift-config.\n\nDEPRECATED: Additional CAs for image pull and push should be set on image.config.openshift.io/cluster instead.", + "buildDefaults": "BuildDefaults controls the default information for Builds", + "buildOverrides": "BuildOverrides controls override settings for builds", +} + +func (BuildSpec) SwaggerDoc() map[string]string { + return map_BuildSpec +} + +var map_ImageLabel = map[string]string{ + "name": "Name defines the name of the label. It must have non-zero length.", + "value": "Value defines the literal value of the label.", +} + +func (ImageLabel) SwaggerDoc() map[string]string { + return map_ImageLabel +} + +var map_ClusterOperator = map[string]string{ + "": "ClusterOperator is the Custom Resource object which holds the current state of an operator. This object is used by operators to convey their state to the rest of the cluster.", + "spec": "spec holds configuration that could apply to any operator.", + "status": "status holds the information about the state of an operator. 
It is consistent with status information across the Kubernetes ecosystem.", +} + +func (ClusterOperator) SwaggerDoc() map[string]string { + return map_ClusterOperator +} + +var map_ClusterOperatorList = map[string]string{ + "": "ClusterOperatorList is a list of OperatorStatus resources.", +} + +func (ClusterOperatorList) SwaggerDoc() map[string]string { + return map_ClusterOperatorList +} + +var map_ClusterOperatorSpec = map[string]string{ + "": "ClusterOperatorSpec is empty for now, but you could imagine holding information like \"pause\".", +} + +func (ClusterOperatorSpec) SwaggerDoc() map[string]string { + return map_ClusterOperatorSpec +} + +var map_ClusterOperatorStatus = map[string]string{ + "": "ClusterOperatorStatus provides information about the status of the operator.", + "conditions": "conditions describes the state of the operator's managed and monitored components.", + "versions": "versions is a slice of operator and operand version tuples. Operators which manage multiple operands will have multiple operand entries in the array. Available operators must report the version of the operator itself with the name \"operator\". An operator reports a new \"operator\" version when it has rolled out the new version to all of its operands.", + "relatedObjects": "relatedObjects is a list of objects that are \"interesting\" or related to this operator. Common uses are: 1. the detailed resource driving the operator 2. operator namespaces 3. operand namespaces", + "extension": "extension contains any additional status information specific to the operator which owns this status object.", +} + +func (ClusterOperatorStatus) SwaggerDoc() map[string]string { + return map_ClusterOperatorStatus +} + +var map_ClusterOperatorStatusCondition = map[string]string{ + "": "ClusterOperatorStatusCondition represents the state of the operator's managed and monitored components.", + "type": "type specifies the aspect reported by this condition.", + "status": "status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "lastTransitionTime is the time of the last update to the current status property.", + "reason": "reason is the CamelCase reason for the condition's current status.", + "message": "message provides additional information about the current condition. This is only to be consumed by humans. It may contain Line Feed characters (U+000A), which should be rendered as new lines.", +} + +func (ClusterOperatorStatusCondition) SwaggerDoc() map[string]string { + return map_ClusterOperatorStatusCondition +} + +var map_ObjectReference = map[string]string{ + "": "ObjectReference contains enough information to let you inspect or modify the referred object.", + "group": "group of the referent.", + "resource": "resource of the referent.", + "namespace": "namespace of the referent.", + "name": "name of the referent.", +} + +func (ObjectReference) SwaggerDoc() map[string]string { + return map_ObjectReference +} + +var map_OperandVersion = map[string]string{ + "name": "name is the name of the particular operand this version is for. It usually matches container images, not operators.", + "version": "version indicates which version of a particular operand is currently being managed. It must always match the Available operand. 
If 1.0.0 is Available, then this must indicate 1.0.0 even if the operator is trying to rollout 1.1.0", +} + +func (OperandVersion) SwaggerDoc() map[string]string { + return map_OperandVersion +} + +var map_ClusterVersion = map[string]string{ + "": "ClusterVersion is the configuration for the ClusterVersionOperator. This is where parameters related to automatic updates can be set.", + "spec": "spec is the desired state of the cluster version - the operator will work to ensure that the desired version is applied to the cluster.", + "status": "status contains information about the available updates and any in-progress updates.", +} + +func (ClusterVersion) SwaggerDoc() map[string]string { + return map_ClusterVersion +} + +var map_ClusterVersionList = map[string]string{ + "": "ClusterVersionList is a list of ClusterVersion resources.", +} + +func (ClusterVersionList) SwaggerDoc() map[string]string { + return map_ClusterVersionList +} + +var map_ClusterVersionSpec = map[string]string{ + "": "ClusterVersionSpec is the desired version state of the cluster. It includes the version the cluster should be at, how the cluster is identified, and where the cluster should look for version updates.", + "clusterID": "clusterID uniquely identifies this cluster. This is expected to be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in hexadecimal values). This is a required field.", + "desiredUpdate": "desiredUpdate is an optional field that indicates the desired value of the cluster version. Setting this value will trigger an upgrade (if the current version does not match the desired version). The set of recommended update values is listed as part of available updates in status, and setting values outside that range may cause the upgrade to fail. You may specify the version field without setting image if an update exists with that version in the availableUpdates or history.\n\nIf an upgrade fails the operator will halt and report status about the failing component. Setting the desired update value back to the previous version will cause a rollback to be attempted. Not all rollbacks will succeed.", + "upstream": "upstream may be used to specify the preferred update server. By default it will use the appropriate update server for the cluster and region.", + "channel": "channel is an identifier for explicitly requesting that a non-default set of updates be applied to this cluster. The default channel will be contain stable updates that are appropriate for production clusters.", + "overrides": "overrides is list of overides for components that are managed by cluster version operator. Marking a component unmanaged will prevent the operator from creating or updating the object.", +} + +func (ClusterVersionSpec) SwaggerDoc() map[string]string { + return map_ClusterVersionSpec +} + +var map_ClusterVersionStatus = map[string]string{ + "": "ClusterVersionStatus reports the status of the cluster versioning, including any upgrades that are in progress. The current field will be set to whichever version the cluster is reconciling to, and the conditions array will report whether the update succeeded, is in progress, or is failing.", + "desired": "desired is the version that the cluster is reconciling towards. If the cluster is not yet fully initialized desired will be set with the information available, which may be an image or a tag.", + "history": "history contains a list of the most recent versions applied to the cluster. 
This value may be empty during cluster startup, and then will be updated when a new update is being applied. The newest update is first in the list and it is ordered by recency. Updates in the history have state Completed if the rollout completed - if an update was failing or halfway applied the state will be Partial. Only a limited amount of update history is preserved.", + "observedGeneration": "observedGeneration reports which version of the spec is being synced. If this value is not equal to metadata.generation, then the desired and conditions fields may represent a previous version.", + "versionHash": "versionHash is a fingerprint of the content that the cluster will be updated with. It is used by the operator to avoid unnecessary work and is for internal use only.", + "conditions": "conditions provides information about the cluster version. The condition \"Available\" is set to true if the desiredUpdate has been reached. The condition \"Progressing\" is set to true if an update is being applied. The condition \"Degraded\" is set to true if an update is currently blocked by a temporary or permanent error. Conditions are only valid for the current desiredUpdate when metadata.generation is equal to status.generation.", + "availableUpdates": "availableUpdates contains the list of updates that are appropriate for this cluster. This list may be empty if no updates are recommended, if the update service is unavailable, or if an invalid channel has been specified.", +} + +func (ClusterVersionStatus) SwaggerDoc() map[string]string { + return map_ClusterVersionStatus +} + +var map_ComponentOverride = map[string]string{ + "": "ComponentOverride allows overriding cluster version operator's behavior for a component.", + "kind": "kind indentifies which object to override.", + "group": "group identifies the API group that the kind is in.", + "namespace": "namespace is the component's namespace. If the resource is cluster scoped, the namespace should be empty.", + "name": "name is the component's name.", + "unmanaged": "unmanaged controls if cluster version operator should stop managing the resources in this cluster. Default: false", +} + +func (ComponentOverride) SwaggerDoc() map[string]string { + return map_ComponentOverride +} + +var map_Release = map[string]string{ + "": "Release represents an OpenShift release image and associated metadata.", + "version": "version is a semantic versioning identifying the update version. When this field is part of spec, version is optional if image is specified.", + "image": "image is a container image location that contains the update. When this field is part of spec, image is optional if version is specified and the availableUpdates field contains a matching version.", + "url": "url contains information about this release. This URL is set by the 'url' metadata property on a release or the metadata returned by the update API and should be displayed as a link in user interfaces. The URL field may not be set for test or nightly releases.", + "channels": "channels is the set of Cincinnati channels to which the release currently belongs.", +} + +func (Release) SwaggerDoc() map[string]string { + return map_Release +} + +var map_Update = map[string]string{ + "": "Update represents an administrator update request.", + "version": "version is a semantic versioning identifying the update version. When this field is part of spec, version is optional if image is specified.", + "image": "image is a container image location that contains the update. 
When this field is part of spec, image is optional if version is specified and the availableUpdates field contains a matching version.", + "force": "force allows an administrator to update to an image that has failed verification, does not appear in the availableUpdates list, or otherwise would be blocked by normal protections on update. This option should only be used when the authenticity of the provided image has been verified out of band because the provided image will run with full administrative access to the cluster. Do not use this flag with images that comes from unknown or potentially malicious sources.\n\nThis flag does not override other forms of consistency checking that are required before a new update is deployed.", +} + +func (Update) SwaggerDoc() map[string]string { + return map_Update +} + +var map_UpdateHistory = map[string]string{ + "": "UpdateHistory is a single attempted update to the cluster.", + "state": "state reflects whether the update was fully applied. The Partial state indicates the update is not fully applied, while the Completed state indicates the update was successfully rolled out at least once (all parts of the update successfully applied).", + "startedTime": "startedTime is the time at which the update was started.", + "completionTime": "completionTime, if set, is when the update was fully applied. The update that is currently being applied will have a null completion time. Completion time will always be set for entries that are not the current update (usually to the started time of the next update).", + "version": "version is a semantic versioning identifying the update version. If the requested image does not define a version, or if a failure occurs retrieving the image, this value may be empty.", + "image": "image is a container image location that contains the update. This value is always populated.", + "verified": "verified indicates whether the provided update was properly verified before it was installed. If this is false the cluster may not be trusted.", +} + +func (UpdateHistory) SwaggerDoc() map[string]string { + return map_UpdateHistory +} + +var map_Console = map[string]string{ + "": "Console holds cluster-wide configuration for the web console, including the logout URL, and reports the public URL of the console. The canonical name is `cluster`.", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (Console) SwaggerDoc() map[string]string { + return map_Console +} + +var map_ConsoleAuthentication = map[string]string{ + "": "ConsoleAuthentication defines a list of optional configuration for console authentication.", + "logoutRedirect": "An optional, absolute URL to redirect web browsers to after logging out of the console. If not specified, it will redirect to the default login page. This is required when using an identity provider that supports single sign-on (SSO) such as: - OpenID (Keycloak, Azure) - RequestHeader (GSSAPI, SSPI, SAML) - OAuth (GitHub, GitLab, Google) Logging out of the console will destroy the user's token. 
The logoutRedirect provides the user the option to perform single logout (SLO) through the identity provider to destroy their single sign-on session.", +} + +func (ConsoleAuthentication) SwaggerDoc() map[string]string { + return map_ConsoleAuthentication +} + +var map_ConsoleSpec = map[string]string{ + "": "ConsoleSpec is the specification of the desired behavior of the Console.", +} + +func (ConsoleSpec) SwaggerDoc() map[string]string { + return map_ConsoleSpec +} + +var map_ConsoleStatus = map[string]string{ + "": "ConsoleStatus defines the observed status of the Console.", + "consoleURL": "The URL for the console. This will be derived from the host for the route that is created for the console.", +} + +func (ConsoleStatus) SwaggerDoc() map[string]string { + return map_ConsoleStatus +} + +var map_DNS = map[string]string{ + "": "DNS holds cluster-wide information about DNS. The canonical name is `cluster`", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (DNS) SwaggerDoc() map[string]string { + return map_DNS +} + +var map_DNSSpec = map[string]string{ + "baseDomain": "baseDomain is the base domain of the cluster. All managed DNS records will be sub-domains of this base.\n\nFor example, given the base domain `openshift.example.com`, an API server DNS record may be created for `cluster-api.openshift.example.com`.\n\nOnce set, this field cannot be changed.", + "publicZone": "publicZone is the location where all the DNS records that are publicly accessible to the internet exist.\n\nIf this field is nil, no public records should be created.\n\nOnce set, this field cannot be changed.", + "privateZone": "privateZone is the location where all the DNS records that are only available internally to the cluster exist.\n\nIf this field is nil, no private records should be created.\n\nOnce set, this field cannot be changed.", +} + +func (DNSSpec) SwaggerDoc() map[string]string { + return map_DNSSpec +} + +var map_DNSZone = map[string]string{ + "": "DNSZone is used to define a DNS hosted zone. A zone can be identified by an ID or tags.", + "id": "id is the identifier that can be used to find the DNS hosted zone.\n\non AWS zone can be fetched using `ID` as id in [1] on Azure zone can be fetched using `ID` as a pre-determined name in [2], on GCP zone can be fetched using `ID` as a pre-determined name in [3].\n\n[1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get", + "tags": "tags can be used to query the DNS hosted zone.\n\non AWS, resourcegroupstaggingapi [1] can be used to fetch a zone using `Tags` as tag-filters,\n\n[1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options", +} + +func (DNSZone) SwaggerDoc() map[string]string { + return map_DNSZone +} + +var map_CustomFeatureGates = map[string]string{ + "enabled": "enabled is a list of all feature gates that you want to force on", + "disabled": "disabled is a list of all feature gates that you want to force off", +} + +func (CustomFeatureGates) SwaggerDoc() map[string]string { + return map_CustomFeatureGates +} + +var map_FeatureGate = map[string]string{ + "": "Feature holds cluster-wide information about feature gates. 
The canonical name is `cluster`", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (FeatureGate) SwaggerDoc() map[string]string { + return map_FeatureGate +} + +var map_FeatureGateSelection = map[string]string{ + "featureSet": "featureSet changes the list of features in the cluster. The default is empty. Be very careful adjusting this setting. Turning on or off features may cause irreversible changes in your cluster which cannot be undone.", + "customNoUpgrade": "customNoUpgrade allows the enabling or disabling of any feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE, and PREVENTS UPGRADES. Because of its nature, this setting cannot be validated. If you have any typos or accidentally apply invalid combinations your cluster may fail in an unrecoverable way. featureSet must equal \"CustomNoUpgrade\" must be set to use this field.", +} + +func (FeatureGateSelection) SwaggerDoc() map[string]string { + return map_FeatureGateSelection +} + +var map_Image = map[string]string{ + "": "Image governs policies related to imagestream imports and runtime configuration for external registries. It allows cluster admins to configure which registries OpenShift is allowed to import images from, extra CA trust bundles for external registries, and policies to block or allow registry hostnames. When exposing OpenShift's image registry to the public, this also lets cluster admins specify the external hostname.", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (Image) SwaggerDoc() map[string]string { + return map_Image +} + +var map_ImageSpec = map[string]string{ + "allowedRegistriesForImport": "allowedRegistriesForImport limits the container image registries that normal users may import images from. Set this list to the registries that you trust to contain valid Docker images and that you want applications to be able to import from. Users with permission to create Images or ImageStreamMappings via the API are not affected by this policy - typically only administrators or system integrations will have those permissions.", + "externalRegistryHostnames": "externalRegistryHostnames provides the hostnames for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The first value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in \"hostname[:port]\" format.", + "additionalTrustedCA": "additionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted during imagestream import, pod image pull, build image pull, and imageregistry pullthrough. The namespace for this config map is openshift-config.", + "registrySources": "registrySources contains configuration that determines how the container runtime should treat individual registries when accessing images for builds+pods. (e.g. whether or not to allow insecure access). It does not contain configuration for the internal cluster registry.", +} + +func (ImageSpec) SwaggerDoc() map[string]string { + return map_ImageSpec +} + +var map_ImageStatus = map[string]string{ + "internalRegistryHostname": "internalRegistryHostname sets the hostname for the default internal image registry. The value must be in \"hostname[:port]\" format. 
This value is set by the image registry operator which controls the internal registry hostname. For backward compatibility, users can still use OPENSHIFT_DEFAULT_REGISTRY environment variable but this setting overrides the environment variable.", + "externalRegistryHostnames": "externalRegistryHostnames provides the hostnames for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The first value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in \"hostname[:port]\" format.", +} + +func (ImageStatus) SwaggerDoc() map[string]string { + return map_ImageStatus +} + +var map_RegistryLocation = map[string]string{ + "": "RegistryLocation contains a location of the registry specified by the registry domain name. The domain name might include wildcards, like '*' or '??'.", + "domainName": "domainName specifies a domain name for the registry In case the registry use non-standard (80 or 443) port, the port should be included in the domain name as well.", + "insecure": "insecure indicates whether the registry is secure (https) or insecure (http) By default (if not specified) the registry is assumed as secure.", +} + +func (RegistryLocation) SwaggerDoc() map[string]string { + return map_RegistryLocation +} + +var map_RegistrySources = map[string]string{ + "": "RegistrySources holds cluster-wide information about how to handle the registries config.", + "insecureRegistries": "insecureRegistries are registries which do not have a valid TLS certificates or only support HTTP connections.", + "blockedRegistries": "blockedRegistries cannot be used for image pull and push actions. All other registries are permitted.\n\nOnly one of BlockedRegistries or AllowedRegistries may be set.", + "allowedRegistries": "allowedRegistries are the only registries permitted for image pull and push actions. All other registries are denied.\n\nOnly one of BlockedRegistries or AllowedRegistries may be set.", + "containerRuntimeSearchRegistries": "containerRuntimeSearchRegistries are registries that will be searched when pulling images that do not have fully qualified domains in their pull specs. Registries will be searched in the order provided in the list. Note: this search list only works with the container runtime, i.e CRI-O. Will NOT work with builds or imagestream imports.", +} + +func (RegistrySources) SwaggerDoc() map[string]string { + return map_RegistrySources +} + +var map_AWSPlatformSpec = map[string]string{ + "": "AWSPlatformSpec holds the desired state of the Amazon Web Services infrastructure provider. This only includes fields that can be modified in the cluster.", + "serviceEndpoints": "serviceEndpoints list contains custom endpoints which will override default service endpoint of AWS Services. There must be only one ServiceEndpoint for a service.", +} + +func (AWSPlatformSpec) SwaggerDoc() map[string]string { + return map_AWSPlatformSpec +} + +var map_AWSPlatformStatus = map[string]string{ + "": "AWSPlatformStatus holds the current status of the Amazon Web Services infrastructure provider.", + "region": "region holds the default AWS region for new AWS resources created by the cluster.", + "serviceEndpoints": "ServiceEndpoints list contains custom endpoints which will override default service endpoint of AWS Services. There must be only one ServiceEndpoint for a service.", + "resourceTags": "resourceTags is a list of additional tags to apply to AWS resources created for the cluster. 
See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html for information on tagging AWS resources. AWS supports a maximum of 50 tags per resource. OpenShift reserves 25 tags for its use, leaving 25 tags available for the user.", +} + +func (AWSPlatformStatus) SwaggerDoc() map[string]string { + return map_AWSPlatformStatus +} + +var map_AWSResourceTag = map[string]string{ + "": "AWSResourceTag is a tag to apply to AWS resources created for the cluster.", + "key": "key is the key of the tag", + "value": "value is the value of the tag. Some AWS service do not support empty values. Since tags are added to resources in many services, the length of the tag value must meet the requirements of all services.", +} + +func (AWSResourceTag) SwaggerDoc() map[string]string { + return map_AWSResourceTag +} + +var map_AWSServiceEndpoint = map[string]string{ + "": "AWSServiceEndpoint store the configuration of a custom url to override existing defaults of AWS Services.", + "name": "name is the name of the AWS service. The list of all the service names can be found at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html This must be provided and cannot be empty.", + "url": "url is fully qualified URI with scheme https, that overrides the default generated endpoint for a client. This must be provided and cannot be empty.", +} + +func (AWSServiceEndpoint) SwaggerDoc() map[string]string { + return map_AWSServiceEndpoint +} + +var map_AzurePlatformSpec = map[string]string{ + "": "AzurePlatformSpec holds the desired state of the Azure infrastructure provider. This only includes fields that can be modified in the cluster.", +} + +func (AzurePlatformSpec) SwaggerDoc() map[string]string { + return map_AzurePlatformSpec +} + +var map_AzurePlatformStatus = map[string]string{ + "": "AzurePlatformStatus holds the current status of the Azure infrastructure provider.", + "resourceGroupName": "resourceGroupName is the Resource Group for new Azure resources created for the cluster.", + "networkResourceGroupName": "networkResourceGroupName is the Resource Group for network resources like the Virtual Network and Subnets used by the cluster. If empty, the value is same as ResourceGroupName.", + "cloudName": "cloudName is the name of the Azure cloud environment which can be used to configure the Azure SDK with the appropriate Azure API endpoints. If empty, the value is equal to `AzurePublicCloud`.", +} + +func (AzurePlatformStatus) SwaggerDoc() map[string]string { + return map_AzurePlatformStatus +} + +var map_BareMetalPlatformSpec = map[string]string{ + "": "BareMetalPlatformSpec holds the desired state of the BareMetal infrastructure provider. This only includes fields that can be modified in the cluster.", +} + +func (BareMetalPlatformSpec) SwaggerDoc() map[string]string { + return map_BareMetalPlatformSpec +} + +var map_BareMetalPlatformStatus = map[string]string{ + "": "BareMetalPlatformStatus holds the current status of the BareMetal infrastructure provider. For more information about the network architecture used with the BareMetal platform type, see: https://github.com/openshift/installer/blob/master/docs/design/baremetal/networking-infrastructure.md", + "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. 
It is the IP for a self-hosted load balancer in front of the API servers.", + "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.", + "nodeDNSIP": "nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for BareMetal deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster.", +} + +func (BareMetalPlatformStatus) SwaggerDoc() map[string]string { + return map_BareMetalPlatformStatus +} + +var map_EquinixMetalPlatformSpec = map[string]string{ + "": "EquinixMetalPlatformSpec holds the desired state of the Equinix Metal infrastructure provider. This only includes fields that can be modified in the cluster.", +} + +func (EquinixMetalPlatformSpec) SwaggerDoc() map[string]string { + return map_EquinixMetalPlatformSpec +} + +var map_EquinixMetalPlatformStatus = map[string]string{ + "": "EquinixMetalPlatformStatus holds the current status of the Equinix Metal infrastructure provider.", + "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.", + "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.", +} + +func (EquinixMetalPlatformStatus) SwaggerDoc() map[string]string { + return map_EquinixMetalPlatformStatus +} + +var map_GCPPlatformSpec = map[string]string{ + "": "GCPPlatformSpec holds the desired state of the Google Cloud Platform infrastructure provider. This only includes fields that can be modified in the cluster.", +} + +func (GCPPlatformSpec) SwaggerDoc() map[string]string { + return map_GCPPlatformSpec +} + +var map_GCPPlatformStatus = map[string]string{ + "": "GCPPlatformStatus holds the current status of the Google Cloud Platform infrastructure provider.", + "projectID": "resourceGroupName is the Project ID for new GCP resources created for the cluster.", + "region": "region holds the region for new GCP resources created for the cluster.", +} + +func (GCPPlatformStatus) SwaggerDoc() map[string]string { + return map_GCPPlatformStatus +} + +var map_IBMCloudPlatformSpec = map[string]string{ + "": "IBMCloudPlatformSpec holds the desired state of the IBMCloud infrastructure provider. 
This only includes fields that can be modified in the cluster.", +} + +func (IBMCloudPlatformSpec) SwaggerDoc() map[string]string { + return map_IBMCloudPlatformSpec +} + +var map_IBMCloudPlatformStatus = map[string]string{ + "": "IBMCloudPlatformStatus holds the current status of the IBMCloud infrastructure provider.", + "location": "Location is where the cluster has been deployed", + "resourceGroupName": "ResourceGroupName is the Resource Group for new IBMCloud resources created for the cluster.", + "providerType": "ProviderType indicates the type of cluster that was created", +} + +func (IBMCloudPlatformStatus) SwaggerDoc() map[string]string { + return map_IBMCloudPlatformStatus +} + +var map_Infrastructure = map[string]string{ + "": "Infrastructure holds cluster-wide information about Infrastructure. The canonical name is `cluster`", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (Infrastructure) SwaggerDoc() map[string]string { + return map_Infrastructure +} + +var map_InfrastructureList = map[string]string{ + "": "InfrastructureList is", +} + +func (InfrastructureList) SwaggerDoc() map[string]string { + return map_InfrastructureList +} + +var map_InfrastructureSpec = map[string]string{ + "": "InfrastructureSpec contains settings that apply to the cluster infrastructure.", + "cloudConfig": "cloudConfig is a reference to a ConfigMap containing the cloud provider configuration file. This configuration file is used to configure the Kubernetes cloud provider integration when using the built-in cloud provider integration or the external cloud controller manager. The namespace for this config map is openshift-config.\n\ncloudConfig should only be consumed by the kube_cloud_config controller. The controller is responsible for using the user configuration in the spec for various platforms and combining that with the user provided ConfigMap in this field to create a stitched kube cloud config. The controller generates a ConfigMap `kube-cloud-config` in `openshift-config-managed` namespace with the kube cloud config is stored in `cloud.conf` key. All the clients are expected to use the generated ConfigMap only.", + "platformSpec": "platformSpec holds desired information specific to the underlying infrastructure provider.", +} + +func (InfrastructureSpec) SwaggerDoc() map[string]string { + return map_InfrastructureSpec +} + +var map_InfrastructureStatus = map[string]string{ + "": "InfrastructureStatus describes the infrastructure the cluster is leveraging.", + "infrastructureName": "infrastructureName uniquely identifies a cluster with a human friendly name. Once set it should not be changed. Must be of max length 27 and must have only alphanumeric or hyphen characters.", + "platform": "platform is the underlying infrastructure provider for the cluster.\n\nDeprecated: Use platformStatus.type instead.", + "platformStatus": "platformStatus holds status information specific to the underlying infrastructure provider.", + "etcdDiscoveryDomain": "etcdDiscoveryDomain is the domain used to fetch the SRV records for discovering etcd servers and clients. For more info: https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery deprecated: as of 4.7, this field is no longer set or honored. 
+var map_InfrastructureStatus = map[string]string{ + "": "InfrastructureStatus describes the infrastructure the cluster is leveraging.", + "infrastructureName": "infrastructureName uniquely identifies a cluster with a human friendly name. Once set, it should not be changed. Must be of max length 27 and must have only alphanumeric or hyphen characters.", + "platform": "platform is the underlying infrastructure provider for the cluster.\n\nDeprecated: Use platformStatus.type instead.", + "platformStatus": "platformStatus holds status information specific to the underlying infrastructure provider.", + "etcdDiscoveryDomain": "etcdDiscoveryDomain is the domain used to fetch the SRV records for discovering etcd servers and clients. For more info: https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery Deprecated: as of 4.7, this field is no longer set or honored. It will be removed in a future release.", + "apiServerURL": "apiServerURL is a valid URI with scheme 'https', address and optionally a port (defaulting to 443). apiServerURL can be used by components like the web console to tell users where to find the Kubernetes API.", + "apiServerInternalURI": "apiServerInternalURI is a valid URI with scheme 'https', address and optionally a port (defaulting to 443). apiServerInternalURI can be used by components like kubelets, to contact the Kubernetes API server using the infrastructure provider rather than Kubernetes networking.", + "controlPlaneTopology": "controlPlaneTopology expresses the expectations for operands that normally run on control nodes. The default is 'HighlyAvailable', which represents the behavior operators have in a \"normal\" cluster. The 'SingleReplica' mode will be used in single-node deployments and the operators should not configure the operand for highly-available operation.", + "infrastructureTopology": "infrastructureTopology expresses the expectations for infrastructure services that do not run on control plane nodes, usually indicated by a node selector for a `role` value other than `master`. The default is 'HighlyAvailable', which represents the behavior operators have in a \"normal\" cluster. The 'SingleReplica' mode will be used in single-node deployments and the operators should not configure the operand for highly-available operation.", +} + +func (InfrastructureStatus) SwaggerDoc() map[string]string { + return map_InfrastructureStatus +} + +var map_KubevirtPlatformSpec = map[string]string{ + "": "KubevirtPlatformSpec holds the desired state of the kubevirt infrastructure provider. This only includes fields that can be modified in the cluster.", +} + +func (KubevirtPlatformSpec) SwaggerDoc() map[string]string { + return map_KubevirtPlatformSpec +} + +var map_KubevirtPlatformStatus = map[string]string{ + "": "KubevirtPlatformStatus holds the current status of the kubevirt infrastructure provider.", + "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.", + "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.", +} + +func (KubevirtPlatformStatus) SwaggerDoc() map[string]string { + return map_KubevirtPlatformStatus +} + +var map_OpenStackPlatformSpec = map[string]string{ + "": "OpenStackPlatformSpec holds the desired state of the OpenStack infrastructure provider. This only includes fields that can be modified in the cluster.", +} + +func (OpenStackPlatformSpec) SwaggerDoc() map[string]string { + return map_OpenStackPlatformSpec +} + +var map_OpenStackPlatformStatus = map[string]string{ + "": "OpenStackPlatformStatus holds the current status of the OpenStack infrastructure provider.", + "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to.
It is the IP for a self-hosted load balancer in front of the API servers.", + "cloudName": "cloudName is the name of the desired OpenStack cloud in the client configuration file (`clouds.yaml`).", + "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.", + "nodeDNSIP": "nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for OpenStack deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster.", +} + +func (OpenStackPlatformStatus) SwaggerDoc() map[string]string { + return map_OpenStackPlatformStatus +} + +var map_OvirtPlatformSpec = map[string]string{ + "": "OvirtPlatformSpec holds the desired state of the oVirt infrastructure provider. This only includes fields that can be modified in the cluster.", +} + +func (OvirtPlatformSpec) SwaggerDoc() map[string]string { + return map_OvirtPlatformSpec +} + +var map_OvirtPlatformStatus = map[string]string{ + "": "OvirtPlatformStatus holds the current status of the oVirt infrastructure provider.", + "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.", + "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.", + "nodeDNSIP": "Deprecated: as of 4.6, this field is no longer set or honored. It will be removed in a future release.", +} + +func (OvirtPlatformStatus) SwaggerDoc() map[string]string { + return map_OvirtPlatformStatus +}
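As the PlatformSpec and PlatformStatus entries below spell out, `type` names the provider and at most one matching platform struct is expected to be set. A minimal hedged sketch of the corresponding stanza on the Infrastructure `cluster` resource follows; the BareMetal choice is illustrative, and the empty struct assumes the platform spec currently carries no user-settable fields.

apiVersion: config.openshift.io/v1
kind: Infrastructure
metadata:
  name: cluster
spec:
  platformSpec:
    type: BareMetal   # selects the provider
    baremetal: {}     # only the struct matching "type" is set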
+var map_PlatformSpec = map[string]string{ + "": "PlatformSpec holds the desired state specific to the underlying infrastructure provider of the current cluster. Since these are used at spec-level for the underlying cluster, it is expected that only one of the spec structs is set.", + "type": "type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", \"oVirt\", \"KubeVirt\", \"EquinixMetal\", and \"None\". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform.", + "aws": "AWS contains settings specific to the Amazon Web Services infrastructure provider.", + "azure": "Azure contains settings specific to the Azure infrastructure provider.", + "gcp": "GCP contains settings specific to the Google Cloud Platform infrastructure provider.", + "baremetal": "BareMetal contains settings specific to the BareMetal platform.", + "openstack": "OpenStack contains settings specific to the OpenStack infrastructure provider.", + "ovirt": "Ovirt contains settings specific to the oVirt infrastructure provider.", + "vsphere": "VSphere contains settings specific to the VSphere infrastructure provider.", + "ibmcloud": "IBMCloud contains settings specific to the IBMCloud infrastructure provider.", + "kubevirt": "Kubevirt contains settings specific to the kubevirt infrastructure provider.", + "equinixMetal": "EquinixMetal contains settings specific to the Equinix Metal infrastructure provider.", +} + +func (PlatformSpec) SwaggerDoc() map[string]string { + return map_PlatformSpec +} + +var map_PlatformStatus = map[string]string{ + "": "PlatformStatus holds the current status specific to the underlying infrastructure provider of the current cluster. Since these are used at status-level for the underlying cluster, it is expected that only one of the status structs is set.", + "type": "type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", \"oVirt\", \"EquinixMetal\", and \"None\". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform.\n\nThis value will be synced to the `status.platform` and `status.platformStatus.type`. Currently this value cannot be changed once set.", + "aws": "AWS contains settings specific to the Amazon Web Services infrastructure provider.", + "azure": "Azure contains settings specific to the Azure infrastructure provider.", + "gcp": "GCP contains settings specific to the Google Cloud Platform infrastructure provider.", + "baremetal": "BareMetal contains settings specific to the BareMetal platform.", + "openstack": "OpenStack contains settings specific to the OpenStack infrastructure provider.", + "ovirt": "Ovirt contains settings specific to the oVirt infrastructure provider.", + "vsphere": "VSphere contains settings specific to the VSphere infrastructure provider.", + "ibmcloud": "IBMCloud contains settings specific to the IBMCloud infrastructure provider.", + "kubevirt": "Kubevirt contains settings specific to the kubevirt infrastructure provider.", + "equinixMetal": "EquinixMetal contains settings specific to the Equinix Metal infrastructure provider.", +} + +func (PlatformStatus) SwaggerDoc() map[string]string { + return map_PlatformStatus +} + +var map_VSpherePlatformSpec = map[string]string{ + "": "VSpherePlatformSpec holds the desired state of the vSphere infrastructure provider.
This only includes fields that can be modified in the cluster.", +} + +func (VSpherePlatformSpec) SwaggerDoc() map[string]string { + return map_VSpherePlatformSpec +} + +var map_VSpherePlatformStatus = map[string]string{ + "": "VSpherePlatformStatus holds the current status of the vSphere infrastructure provider.", + "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.", + "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.", + "nodeDNSIP": "nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for vSphere deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster.", +} + +func (VSpherePlatformStatus) SwaggerDoc() map[string]string { + return map_VSpherePlatformStatus +} + +var map_ComponentRouteSpec = map[string]string{ + "": "ComponentRouteSpec allows for configuration of a route's hostname and serving certificate.", + "namespace": "namespace is the namespace of the route to customize.\n\nThe namespace and name of this componentRoute must match a corresponding entry in the list of status.componentRoutes if the route is to be customized.", + "name": "name is the logical name of the route to customize.\n\nThe namespace and name of this componentRoute must match a corresponding entry in the list of status.componentRoutes if the route is to be customized.", + "hostname": "hostname is the hostname that should be used by the route.", + "servingCertKeyPairSecret": "servingCertKeyPairSecret is a reference to a secret of type `kubernetes.io/tls` in the openshift-config namespace. The serving cert/key pair must match and will be used by the operator to fulfill the intent of serving with this name. If the custom hostname uses the default routing suffix of the cluster, the Secret specification for a serving certificate will not be needed.", +} + +func (ComponentRouteSpec) SwaggerDoc() map[string]string { + return map_ComponentRouteSpec +} + +var map_ComponentRouteStatus = map[string]string{ + "": "ComponentRouteStatus contains information allowing configuration of a route's hostname and serving certificate.", + "namespace": "namespace is the namespace of the route to customize. It must be a real namespace. Using an actual namespace ensures that no two components will conflict and the same component can be installed multiple times.\n\nThe namespace and name of this componentRoute must match a corresponding entry in the list of spec.componentRoutes if the route is to be customized.", + "name": "name is the logical name of the route to customize. 
It does not have to be the actual name of a route resource but it cannot be renamed.\n\nThe namespace and name of this componentRoute must match a corresponding entry in the list of spec.componentRoutes if the route is to be customized.", + "defaultHostname": "defaultHostname is the hostname of this route prior to customization.", + "consumingUsers": "consumingUsers is a slice of ServiceAccounts that need to have read permission on the servingCertKeyPairSecret secret.", + "currentHostnames": "currentHostnames is the list of current names used by the route. Typically, this list should consist of a single hostname, but if multiple hostnames are supported by the route the operator may write multiple entries to this list.", + "conditions": "conditions are used to communicate the state of the componentRoutes entry.\n\nSupported conditions include Available, Degraded and Progressing.\n\nIf Available is true, the content served by the route can be accessed by users. This includes cases where a default may continue to serve content while the customized route specified by the cluster-admin is being configured.\n\nIf Degraded is true, that means something has gone wrong trying to handle the componentRoutes entry. The currentHostnames field may or may not be in effect.\n\nIf Progressing is true, that means the component is taking some action related to the componentRoutes entry.", + "relatedObjects": "relatedObjects is a list of resources which are useful when debugging or inspecting how spec.componentRoutes is applied.", +} + +func (ComponentRouteStatus) SwaggerDoc() map[string]string { + return map_ComponentRouteStatus +} + +var map_Ingress = map[string]string{ + "": "Ingress holds cluster-wide information about ingress, including the default ingress domain used for routes. The canonical name is `cluster`.", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (Ingress) SwaggerDoc() map[string]string { + return map_Ingress +} + +var map_IngressSpec = map[string]string{ + "domain": "domain is used to generate a default host name for a route when the route's host name is empty. The generated host name will follow this pattern: \"<route-name>.<route-namespace>.<domain>\".\n\nIt is also used as the default wildcard domain suffix for ingress. The default ingresscontroller domain will follow this pattern: \"*.<domain>\".\n\nOnce set, changing domain is not currently supported.", + "appsDomain": "appsDomain is an optional domain to use instead of the one specified in the domain field when a Route is created without specifying an explicit host. If appsDomain is nonempty, this value is used to generate default host values for Route. Unlike domain, appsDomain may be modified after installation. This assumes a new ingresscontroller has been set up with a wildcard certificate.", + "componentRoutes": "componentRoutes is an optional list of routes that are managed by OpenShift components that a cluster-admin is able to configure the hostname and serving certificate for.
The namespace and name of each route in this list should match an existing entry in the status.componentRoutes list.\n\nTo determine the set of configurable Routes, look at namespace and name of entries in the .status.componentRoutes list, where participating operators write the status of configurable routes.", +} + +func (IngressSpec) SwaggerDoc() map[string]string { + return map_IngressSpec +} + +var map_IngressStatus = map[string]string{ + "componentRoutes": "componentRoutes is where participating operators place the current route status for routes whose hostnames and serving certificates can be customized by the cluster-admin.", +} + +func (IngressStatus) SwaggerDoc() map[string]string { + return map_IngressStatus +}
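Pulling the ComponentRouteSpec and IngressSpec fields above together, here is a hedged sketch of customizing a single route. The console route, hostname and secret name are illustrative assumptions, and the namespace/name pair must match an entry a participating operator has already written to status.componentRoutes.

apiVersion: config.openshift.io/v1
kind: Ingress
metadata:
  name: cluster
spec:
  componentRoutes:
  - namespace: openshift-console       # must match a status.componentRoutes entry
    name: console
    hostname: console.apps.example.com
    servingCertKeyPairSecret:
      name: console-custom-tls         # kubernetes.io/tls secret in openshift-config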
+var map_ClusterNetworkEntry = map[string]string{ + "": "ClusterNetworkEntry is a contiguous block of IP addresses from which pod IPs are allocated.", + "cidr": "The complete block for pod IPs.", + "hostPrefix": "The size (prefix) of block to allocate to each node. If this field is not used by the plugin, it can be left unset.", +} + +func (ClusterNetworkEntry) SwaggerDoc() map[string]string { + return map_ClusterNetworkEntry +} + +var map_ExternalIPConfig = map[string]string{ + "": "ExternalIPConfig specifies some IP blocks relevant for the ExternalIP field of a Service resource.", + "policy": "policy is a set of restrictions applied to the ExternalIP field. If nil or empty, then ExternalIP is not allowed to be set.", + "autoAssignCIDRs": "autoAssignCIDRs is a list of CIDRs from which to automatically assign Service.ExternalIP. These are assigned when the service is of type LoadBalancer. In general, this is only useful for bare-metal clusters. In OpenShift 3.x, this was misleadingly called \"IngressIPs\". Automatically assigned External IPs are not affected by any ExternalIPPolicy rules. Currently, only one entry may be provided.", +} + +func (ExternalIPConfig) SwaggerDoc() map[string]string { + return map_ExternalIPConfig +} + +var map_ExternalIPPolicy = map[string]string{ + "": "ExternalIPPolicy configures exactly which IPs are allowed for the ExternalIP field in a Service. If the zero struct is supplied, then none are permitted. The policy controller always allows automatically assigned external IPs.", + "allowedCIDRs": "allowedCIDRs is the list of allowed CIDRs.", + "rejectedCIDRs": "rejectedCIDRs is the list of disallowed CIDRs. These take precedence over allowedCIDRs.", +} + +func (ExternalIPPolicy) SwaggerDoc() map[string]string { + return map_ExternalIPPolicy +} + +var map_Network = map[string]string{ + "": "Network holds cluster-wide information about Network. The canonical name is `cluster`. It is used to configure the desired network configuration, such as: IP address pools for services/pod IPs, network plugin, etc. Please view network.spec for an explanation on what applies when configuring this resource.", + "spec": "spec holds user settable values for configuration. As a general rule, this SHOULD NOT be read directly. Instead, you should consume the NetworkStatus, as it indicates the currently deployed configuration. Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each.", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (Network) SwaggerDoc() map[string]string { + return map_Network +} + +var map_NetworkMigration = map[string]string{ + "": "NetworkMigration represents the cluster network migration configuration.", + "networkType": "NetworkType is the target plugin that is to be deployed. Currently supported values are: OpenShiftSDN, OVNKubernetes", +} + +func (NetworkMigration) SwaggerDoc() map[string]string { + return map_NetworkMigration +} + +var map_NetworkSpec = map[string]string{ + "": "NetworkSpec is the desired network configuration. As a general rule, this SHOULD NOT be read directly. Instead, you should consume the NetworkStatus, as it indicates the currently deployed configuration. Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each.", + "clusterNetwork": "IP address pool to use for pod IPs. This field is immutable after installation.", + "serviceNetwork": "IP address pool for services. Currently, we only support a single entry here. This field is immutable after installation.", + "networkType": "NetworkType is the plugin that is to be deployed (e.g. OpenShiftSDN). This should match a value that the cluster-network-operator understands, or else no networking will be installed. Currently supported values are: OpenShiftSDN. This field is immutable after installation.", + "externalIP": "externalIP defines configuration for controllers that affect Service.ExternalIP. If nil, then ExternalIP is not allowed to be set.", + "serviceNodePortRange": "The port range allowed for Services of type NodePort. If not specified, the default of 30000-32767 will be used. Such Services without a NodePort specified will have one automatically allocated from this range. This parameter can be updated after the cluster is installed.", +} + +func (NetworkSpec) SwaggerDoc() map[string]string { + return map_NetworkSpec +} + +var map_NetworkStatus = map[string]string{ + "": "NetworkStatus is the current network configuration.", + "clusterNetwork": "IP address pool to use for pod IPs.", + "serviceNetwork": "IP address pool for services. Currently, we only support a single entry here.", + "networkType": "NetworkType is the plugin that is deployed (e.g. OpenShiftSDN).", + "clusterNetworkMTU": "ClusterNetworkMTU is the MTU for inter-pod networking.", + "migration": "Migration contains the cluster network migration configuration.", +} + +func (NetworkStatus) SwaggerDoc() map[string]string { + return map_NetworkStatus +}
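A hedged sketch of the NetworkSpec fields documented above; the CIDR values are illustrative, and per the field descriptions, clusterNetwork, serviceNetwork and networkType are immutable after installation.

apiVersion: config.openshift.io/v1
kind: Network
metadata:
  name: cluster
spec:
  clusterNetwork:              # pod IP pool; hostPrefix sizes each node's block
  - cidr: 10.128.0.0/14
    hostPrefix: 23
  serviceNetwork:              # only a single entry is currently supported
  - 172.30.0.0/16
  networkType: OpenShiftSDN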
+var map_BasicAuthIdentityProvider = map[string]string{ + "": "BasicAuthPasswordIdentityProvider provides identities for users authenticating using HTTP basic auth credentials", +} + +func (BasicAuthIdentityProvider) SwaggerDoc() map[string]string { + return map_BasicAuthIdentityProvider +} + +var map_GitHubIdentityProvider = map[string]string{ + "": "GitHubIdentityProvider provides identities for users authenticating using GitHub credentials", + "clientID": "clientID is the oauth client ID", + "clientSecret": "clientSecret is a required reference to the secret by name containing the oauth client secret. The key \"clientSecret\" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config.", + "organizations": "organizations optionally restricts which organizations are allowed to log in", + "teams": "teams optionally restricts which teams are allowed to log in. Format is <org>/<team>.", + "hostname": "hostname is the optional domain (e.g. \"mycompany.com\") for use with a hosted instance of GitHub Enterprise. It must match the GitHub Enterprise settings value configured at /setup/settings#hostname.", + "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. This can only be configured when hostname is set to a non-empty value. The namespace for this config map is openshift-config.", +} + +func (GitHubIdentityProvider) SwaggerDoc() map[string]string { + return map_GitHubIdentityProvider +} + +var map_GitLabIdentityProvider = map[string]string{ + "": "GitLabIdentityProvider provides identities for users authenticating using GitLab credentials", + "clientID": "clientID is the oauth client ID", + "clientSecret": "clientSecret is a required reference to the secret by name containing the oauth client secret. The key \"clientSecret\" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config.", + "url": "url is the oauth server base URL", + "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config.", +} + +func (GitLabIdentityProvider) SwaggerDoc() map[string]string { + return map_GitLabIdentityProvider +} + +var map_GoogleIdentityProvider = map[string]string{ + "": "GoogleIdentityProvider provides identities for users authenticating using Google credentials", + "clientID": "clientID is the oauth client ID", + "clientSecret": "clientSecret is a required reference to the secret by name containing the oauth client secret. The key \"clientSecret\" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config.", + "hostedDomain": "hostedDomain is the optional Google App domain (e.g. \"mycompany.com\") to restrict logins to", +} + +func (GoogleIdentityProvider) SwaggerDoc() map[string]string { + return map_GoogleIdentityProvider +} + +var map_HTPasswdIdentityProvider = map[string]string{ + "": "HTPasswdPasswordIdentityProvider provides identities for users authenticating using htpasswd credentials", + "fileData": "fileData is a required reference to a secret by name containing the data to use as the htpasswd file. The key \"htpasswd\" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. If the specified htpasswd data is not valid, the identity provider is not honored.
The namespace for this secret is openshift-config.", +} + +func (HTPasswdIdentityProvider) SwaggerDoc() map[string]string { + return map_HTPasswdIdentityProvider +} + +var map_IdentityProvider = map[string]string{ + "": "IdentityProvider provides identities for users authenticating using credentials", + "name": "name is used to qualify the identities returned by this provider. - It MUST be unique and not shared by any other identity provider used - It MUST be a valid path segment: name cannot equal \".\" or \"..\" or contain \"/\" or \"%\" or \":\"\n Ref: https://godoc.org/github.com/openshift/origin/pkg/user/apis/user/validation#ValidateIdentityProviderName", + "mappingMethod": "mappingMethod determines how identities from this provider are mapped to users. Defaults to \"claim\".", +} + +func (IdentityProvider) SwaggerDoc() map[string]string { + return map_IdentityProvider +} + +var map_IdentityProviderConfig = map[string]string{ + "": "IdentityProviderConfig contains configuration for using a specific identity provider", + "type": "type identifies the identity provider type for this entry.", + "basicAuth": "basicAuth contains configuration options for the BasicAuth IdP", + "github": "github enables user authentication using GitHub credentials", + "gitlab": "gitlab enables user authentication using GitLab credentials", + "google": "google enables user authentication using Google credentials", + "htpasswd": "htpasswd enables user authentication using an HTPasswd file to validate credentials", + "keystone": "keystone enables user authentication using keystone password credentials", + "ldap": "ldap enables user authentication using LDAP credentials", + "openID": "openID enables user authentication using OpenID credentials", + "requestHeader": "requestHeader enables user authentication using request header credentials", +} + +func (IdentityProviderConfig) SwaggerDoc() map[string]string { + return map_IdentityProviderConfig +} + +var map_KeystoneIdentityProvider = map[string]string{ + "": "KeystonePasswordIdentityProvider provides identities for users authenticating using keystone password credentials", + "domainName": "domainName is required for keystone v3", +} + +func (KeystoneIdentityProvider) SwaggerDoc() map[string]string { + return map_KeystoneIdentityProvider +}
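Combining the IdentityProvider, IdentityProviderConfig and HTPasswd fields above, a minimal hedged sketch of one identity provider entry; the provider and secret names are assumptions, not values from the API.

apiVersion: config.openshift.io/v1
kind: OAuth
metadata:
  name: cluster
spec:
  identityProviders:
  - name: local-users          # must be unique and a valid path segment
    mappingMethod: claim       # the documented default
    type: HTPasswd
    htpasswd:
      fileData:
        name: htpass-secret    # secret in openshift-config; key "htpasswd" holds the file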
+var map_LDAPAttributeMapping = map[string]string{ + "": "LDAPAttributeMapping maps LDAP attributes to OpenShift identity fields", + "id": "id is the list of attributes whose values should be used as the user ID. Required. First non-empty attribute is used. At least one attribute is required. If none of the listed attributes have a value, authentication fails. LDAP standard identity attribute is \"dn\"", + "preferredUsername": "preferredUsername is the list of attributes whose values should be used as the preferred username. LDAP standard login attribute is \"uid\"", + "name": "name is the list of attributes whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity. LDAP standard display name attribute is \"cn\"", + "email": "email is the list of attributes whose values should be used as the email address. Optional. If unspecified, no email is set for the identity", +} + +func (LDAPAttributeMapping) SwaggerDoc() map[string]string { + return map_LDAPAttributeMapping +} + +var map_LDAPIdentityProvider = map[string]string{ + "": "LDAPPasswordIdentityProvider provides identities for users authenticating using LDAP credentials", + "url": "url is an RFC 2255 URL which specifies the LDAP search parameters to use. The syntax of the URL is: ldap://host:port/basedn?attribute?scope?filter", + "bindDN": "bindDN is an optional DN to bind with during the search phase.", + "bindPassword": "bindPassword is an optional reference to a secret by name containing a password to bind with during the search phase. The key \"bindPassword\" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config.", + "insecure": "insecure, if true, indicates the connection should not use TLS. WARNING: Should not be set to `true` with the URL scheme \"ldaps://\" as \"ldaps://\" URLs always\n attempt to connect using TLS, even when `insecure` is set to `true`\nWhen `true`, \"ldap://\" URLs connect insecurely. When `false`, \"ldap://\" URLs are upgraded to a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830.", + "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config.", + "attributes": "attributes maps LDAP attributes to identities", +} + +func (LDAPIdentityProvider) SwaggerDoc() map[string]string { + return map_LDAPIdentityProvider +} + +var map_OAuth = map[string]string{ + "": "OAuth holds cluster-wide information about OAuth. The canonical name is `cluster`. It is used to configure the integrated OAuth server. This configuration is only honored when the top level Authentication config has type set to IntegratedOAuth.", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (OAuth) SwaggerDoc() map[string]string { + return map_OAuth +} + +var map_OAuthRemoteConnectionInfo = map[string]string{ + "": "OAuthRemoteConnectionInfo holds information necessary for establishing a remote connection", + "url": "url is the remote URL to connect to", + "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config.", + "tlsClientCert": "tlsClientCert is an optional reference to a secret by name that contains the PEM-encoded TLS client certificate to present when connecting to the server. The key \"tls.crt\" is used to locate the data.
If specified and the secret or expected key is not found, the identity provider is not honored. If the specified certificate data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config.", + "tlsClientKey": "tlsClientKey is an optional reference to a secret by name that contains the PEM-encoded TLS private key for the client certificate referenced in tlsClientCert. The key \"tls.key\" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. If the specified certificate data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config.", +} + +func (OAuthRemoteConnectionInfo) SwaggerDoc() map[string]string { + return map_OAuthRemoteConnectionInfo +} + +var map_OAuthSpec = map[string]string{ + "": "OAuthSpec contains desired cluster auth configuration", + "identityProviders": "identityProviders is an ordered list of ways for a user to identify themselves. When this list is empty, no identities are provisioned for users.", + "tokenConfig": "tokenConfig contains options for authorization and access tokens", + "templates": "templates allow you to customize pages like the login page.", +} + +func (OAuthSpec) SwaggerDoc() map[string]string { + return map_OAuthSpec +} + +var map_OAuthStatus = map[string]string{ + "": "OAuthStatus shows current known state of OAuth server in the cluster", +} + +func (OAuthStatus) SwaggerDoc() map[string]string { + return map_OAuthStatus +} + +var map_OAuthTemplates = map[string]string{ + "": "OAuthTemplates allow for customization of pages like the login page", + "login": "login is the name of a secret that specifies a go template to use to render the login page. The key \"login.html\" is used to locate the template data. If specified and the secret or expected key is not found, the default login page is used. If the specified template is not valid, the default login page is used. If unspecified, the default login page is used. The namespace for this secret is openshift-config.", + "providerSelection": "providerSelection is the name of a secret that specifies a go template to use to render the provider selection page. The key \"providers.html\" is used to locate the template data. If specified and the secret or expected key is not found, the default provider selection page is used. If the specified template is not valid, the default provider selection page is used. If unspecified, the default provider selection page is used. The namespace for this secret is openshift-config.", + "error": "error is the name of a secret that specifies a go template to use to render error pages during the authentication or grant flow. The key \"errors.html\" is used to locate the template data. If specified and the secret or expected key is not found, the default error page is used. If the specified template is not valid, the default error page is used. If unspecified, the default error page is used. The namespace for this secret is openshift-config.", +} + +func (OAuthTemplates) SwaggerDoc() map[string]string { + return map_OAuthTemplates +} + +var map_OpenIDClaims = map[string]string{ + "": "OpenIDClaims contains a list of OpenID claims to use when authenticating with an OpenID identity provider", + "preferredUsername": "preferredUsername is the list of claims whose values should be used as the preferred username. 
If unspecified, the preferred username is determined from the value of the sub claim", + "name": "name is the list of claims whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity", + "email": "email is the list of claims whose values should be used as the email address. Optional. If unspecified, no email is set for the identity", +} + +func (OpenIDClaims) SwaggerDoc() map[string]string { + return map_OpenIDClaims +} + +var map_OpenIDIdentityProvider = map[string]string{ + "": "OpenIDIdentityProvider provides identities for users authenticating using OpenID credentials", + "clientID": "clientID is the oauth client ID", + "clientSecret": "clientSecret is a required reference to the secret by name containing the oauth client secret. The key \"clientSecret\" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config.", + "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config.", + "extraScopes": "extraScopes are any scopes to request in addition to the standard \"openid\" scope.", + "extraAuthorizeParameters": "extraAuthorizeParameters are any custom parameters to add to the authorize request.", + "issuer": "issuer is the URL that the OpenID Provider asserts as its Issuer Identifier. It must use the https scheme with no query or fragment component.", + "claims": "claims mappings", +} + +func (OpenIDIdentityProvider) SwaggerDoc() map[string]string { + return map_OpenIDIdentityProvider +} + +var map_RequestHeaderIdentityProvider = map[string]string{ + "": "RequestHeaderIdentityProvider provides identities for users authenticating using request header credentials", + "loginURL": "loginURL is a URL to redirect unauthenticated /authorize requests to Unauthenticated requests from OAuth clients which expect interactive logins will be redirected here ${url} is replaced with the current URL, escaped to be safe in a query parameter\n https://www.example.com/sso-login?then=${url}\n${query} is replaced with the current query string\n https://www.example.com/auth-proxy/oauth/authorize?${query}\nRequired when login is set to true.", + "challengeURL": "challengeURL is a URL to redirect unauthenticated /authorize requests to Unauthenticated requests from OAuth clients which expect WWW-Authenticate challenges will be redirected here. ${url} is replaced with the current URL, escaped to be safe in a query parameter\n https://www.example.com/sso-login?then=${url}\n${query} is replaced with the current query string\n https://www.example.com/auth-proxy/oauth/authorize?${query}\nRequired when challenge is set to true.", + "ca": "ca is a required reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. Specifically, it allows verification of incoming requests to prevent header spoofing. The key \"ca.crt\" is used to locate the data. 
If the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. The namespace for this config map is openshift-config.", + "clientCommonNames": "clientCommonNames is an optional list of common names to require a match from. If empty, any client certificate validated against the clientCA bundle is considered authoritative.", + "headers": "headers is the set of headers to check for identity information", + "preferredUsernameHeaders": "preferredUsernameHeaders is the set of headers to check for the preferred username", + "nameHeaders": "nameHeaders is the set of headers to check for the display name", + "emailHeaders": "emailHeaders is the set of headers to check for the email address", +} + +func (RequestHeaderIdentityProvider) SwaggerDoc() map[string]string { + return map_RequestHeaderIdentityProvider +} + +var map_TokenConfig = map[string]string{ + "": "TokenConfig holds the necessary configuration options for authorization and access tokens", + "accessTokenMaxAgeSeconds": "accessTokenMaxAgeSeconds defines the maximum age of access tokens", + "accessTokenInactivityTimeoutSeconds": "accessTokenInactivityTimeoutSeconds - DEPRECATED: setting this field has no effect.", + "accessTokenInactivityTimeout": "accessTokenInactivityTimeout defines the token inactivity timeout for tokens granted by any client. The value represents the maximum amount of time that can occur between consecutive uses of the token. Tokens become invalid if they are not used within this temporal window. The user will need to acquire a new token to regain access once a token times out. Takes a valid time duration string such as \"5m\", \"1.5h\" or \"2h45m\". The minimum allowed value for duration is 300s (5 minutes). If the timeout is configured per client, then that value takes precedence. If the timeout value is not specified and the client does not override the value, then tokens remain valid for their full lifetime.", +} + +func (TokenConfig) SwaggerDoc() map[string]string { + return map_TokenConfig +}
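A hedged sketch of the TokenConfig described above, as it would appear on the OAuth `cluster` resource; both values are illustrative (300s/"5m" is the documented minimum for the inactivity timeout).

apiVersion: config.openshift.io/v1
kind: OAuth
metadata:
  name: cluster
spec:
  tokenConfig:
    accessTokenMaxAgeSeconds: 86400    # tokens expire after 24h regardless of activity
    accessTokenInactivityTimeout: 5m   # and after 5 minutes without use (documented minimum)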
+var map_HubSource = map[string]string{ + "": "HubSource is used to specify the hub source and its configuration", + "name": "name is the name of one of the default hub sources", + "disabled": "disabled is used to disable a default hub source on cluster", +} + +func (HubSource) SwaggerDoc() map[string]string { + return map_HubSource +} + +var map_HubSourceStatus = map[string]string{ + "": "HubSourceStatus is used to reflect the current state of applying the configuration to a default source", + "status": "status indicates success or failure in applying the configuration", + "message": "message provides more information regarding failures", +} + +func (HubSourceStatus) SwaggerDoc() map[string]string { + return map_HubSourceStatus +} + +var map_OperatorHub = map[string]string{ + "": "OperatorHub is the Schema for the operatorhubs API. It can be used to change the state of the default hub sources for OperatorHub on the cluster from enabled to disabled and vice versa.", +} + +func (OperatorHub) SwaggerDoc() map[string]string { + return map_OperatorHub +} + +var map_OperatorHubList = map[string]string{ + "": "OperatorHubList contains a list of OperatorHub resources.", +} + +func (OperatorHubList) SwaggerDoc() map[string]string { + return map_OperatorHubList +} + +var map_OperatorHubSpec = map[string]string{ + "": "OperatorHubSpec defines the desired state of OperatorHub", + "disableAllDefaultSources": "disableAllDefaultSources allows you to disable all the default hub sources. If this is true, a specific entry in sources can be used to enable a default source. If this is false, a specific entry in sources can be used to disable or enable a default source.", + "sources": "sources is the list of default hub sources and their configuration. If the list is empty, it implies that the default hub sources are enabled on the cluster unless disableAllDefaultSources is true. If disableAllDefaultSources is true and sources is not empty, the configuration present in sources will take precedence. The list of default hub sources and their current state will always be reflected in the status block.", +} + +func (OperatorHubSpec) SwaggerDoc() map[string]string { + return map_OperatorHubSpec +} + +var map_OperatorHubStatus = map[string]string{ + "": "OperatorHubStatus defines the observed state of OperatorHub. The current state of the default hub sources will always be reflected here.", + "sources": "sources encapsulates the result of applying the configuration for each hub source", +} + +func (OperatorHubStatus) SwaggerDoc() map[string]string { + return map_OperatorHubStatus +}
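Per the OperatorHubSpec semantics above (disable everything, then selectively re-enable), a minimal hedged sketch; `community-operators` is assumed to be one of the default hub source names.

apiVersion: config.openshift.io/v1
kind: OperatorHub
metadata:
  name: cluster
spec:
  disableAllDefaultSources: true
  sources:
  - name: community-operators   # assumed default source name
    disabled: false             # re-enables just this source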
+var map_Project = map[string]string{ + "": "Project holds cluster-wide information about Project. The canonical name is `cluster`", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (Project) SwaggerDoc() map[string]string { + return map_Project +} + +var map_ProjectSpec = map[string]string{ + "": "ProjectSpec holds the project creation configuration.", + "projectRequestMessage": "projectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest api endpoint.", + "projectRequestTemplate": "projectRequestTemplate is the template to use for creating projects in response to projectrequest. This must point to a template in 'openshift-config' namespace. It is optional. If it is not specified, a default template is used.", +} + +func (ProjectSpec) SwaggerDoc() map[string]string { + return map_ProjectSpec +} + +var map_TemplateReference = map[string]string{ + "": "TemplateReference references a template in a specific namespace. The namespace must be specified at the point of use.", + "name": "name is the metadata.name of the referenced project request template", +} + +func (TemplateReference) SwaggerDoc() map[string]string { + return map_TemplateReference +} + +var map_Proxy = map[string]string{ + "": "Proxy holds cluster-wide information on how to configure default proxies for the cluster. The canonical name is `cluster`", + "spec": "spec holds user-settable values for the proxy configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (Proxy) SwaggerDoc() map[string]string { + return map_Proxy +} + +var map_ProxySpec = map[string]string{ + "": "ProxySpec contains cluster proxy creation configuration.", + "httpProxy": "httpProxy is the URL of the proxy for HTTP requests. Empty means unset and will not result in an env var.", + "httpsProxy": "httpsProxy is the URL of the proxy for HTTPS requests. Empty means unset and will not result in an env var.", + "noProxy": "noProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used. Empty means unset and will not result in an env var.", + "readinessEndpoints": "readinessEndpoints is a list of endpoints used to verify readiness of the proxy.", + "trustedCA": "trustedCA is a reference to a ConfigMap containing a CA certificate bundle. The trustedCA field should only be consumed by a proxy validator. The validator is responsible for reading the certificate bundle from the required key \"ca-bundle.crt\", merging it with the system default trust bundle, and writing the merged trust bundle to a ConfigMap named \"trusted-ca-bundle\" in the \"openshift-config-managed\" namespace. Clients that expect to make proxy connections must use the trusted-ca-bundle for all HTTPS requests to the proxy, and may use the trusted-ca-bundle for non-proxy HTTPS requests as well.\n\nThe namespace for the ConfigMap referenced by trustedCA is \"openshift-config\". Here is an example ConfigMap (in yaml):\n\napiVersion: v1 kind: ConfigMap metadata:\n name: user-ca-bundle\n namespace: openshift-config\n data:\n ca-bundle.crt: |", +} + +func (ProxySpec) SwaggerDoc() map[string]string { + return map_ProxySpec +} + +var map_ProxyStatus = map[string]string{ + "": "ProxyStatus shows current known state of the cluster proxy.", + "httpProxy": "httpProxy is the URL of the proxy for HTTP requests.", + "httpsProxy": "httpsProxy is the URL of the proxy for HTTPS requests.", + "noProxy": "noProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used.", +} + +func (ProxyStatus) SwaggerDoc() map[string]string { + return map_ProxyStatus +}
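A hedged sketch of the ProxySpec fields above; the proxy URLs and noProxy entries are illustrative, and `user-ca-bundle` follows the example ConfigMap embedded in the trustedCA description.

apiVersion: config.openshift.io/v1
kind: Proxy
metadata:
  name: cluster
spec:
  httpProxy: http://proxy.example.com:3128
  httpsProxy: http://proxy.example.com:3128
  noProxy: .cluster.local,.svc,10.0.0.0/16
  trustedCA:
    name: user-ca-bundle   # ConfigMap in openshift-config with key ca-bundle.crt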
+var map_Scheduler = map[string]string{ + "": "Scheduler holds cluster-wide config information to run the Kubernetes Scheduler and influence its placement decisions. The canonical name for this config is `cluster`.", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (Scheduler) SwaggerDoc() map[string]string { + return map_Scheduler +} + +var map_SchedulerSpec = map[string]string{ + "policy": "DEPRECATED: the scheduler Policy API has been deprecated and will be removed in a future release. policy is a reference to a ConfigMap containing scheduler policy which has user specified predicates and priorities. If this ConfigMap is not available, the scheduler will default to using DefaultAlgorithmProvider. The namespace for this configmap is openshift-config.", + "profile": "profile sets the scheduling profile to use when configuring scheduling decisions for new pods.\n\nValid values are \"LowNodeUtilization\", \"HighNodeUtilization\", \"NoScoring\". Defaults to \"LowNodeUtilization\".", + "defaultNodeSelector": "defaultNodeSelector helps set the cluster-wide default node selector to restrict pod placement to specific nodes. This is applied to the pods created in all namespaces and creates an intersection with any existing nodeSelectors already set on a pod, additionally constraining that pod's selector. For example, defaultNodeSelector: \"type=user-node,region=east\" would set the nodeSelector field in the pod spec to \"type=user-node,region=east\" for all pods created in all namespaces. Namespaces having project-wide node selectors won't be impacted even if this field is set. This adds an annotation section to the namespace. For example, if a new namespace is created with node-selector='type=user-node,region=east', the annotation openshift.io/node-selector: type=user-node,region=east gets added to the project. When the openshift.io/node-selector annotation is set on the project the value is used in preference to the value we are setting for defaultNodeSelector field. For instance, openshift.io/node-selector: \"type=user-node,region=west\" means that the default of \"type=user-node,region=east\" set in defaultNodeSelector would not be applied.", + "mastersSchedulable": "MastersSchedulable allows master nodes to be schedulable. When this flag is turned on, all the master nodes in the cluster will be made schedulable, so that workload pods can run on them. The default value for this field is false, meaning none of the master nodes are schedulable. Important Note: Once the workload pods start running on the master nodes, extreme care must be taken to ensure that cluster-critical control plane components are not impacted. Please turn on this field after doing due diligence.", +} + +func (SchedulerSpec) SwaggerDoc() map[string]string { + return map_SchedulerSpec +} + +var map_CustomTLSProfile = map[string]string{ + "": "CustomTLSProfile is a user-defined TLS security profile. Be extremely careful using a custom TLS profile as invalid configurations can be catastrophic.", +} + +func (CustomTLSProfile) SwaggerDoc() map[string]string { + return map_CustomTLSProfile +} + +var map_IntermediateTLSProfile = map[string]string{ + "": "IntermediateTLSProfile is a TLS security profile based on: https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28default.29", +} + +func (IntermediateTLSProfile) SwaggerDoc() map[string]string { + return map_IntermediateTLSProfile +} + +var map_ModernTLSProfile = map[string]string{ + "": "ModernTLSProfile is a TLS security profile based on: https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility", +} + +func (ModernTLSProfile) SwaggerDoc() map[string]string { + return map_ModernTLSProfile +} + +var map_OldTLSProfile = map[string]string{ + "": "OldTLSProfile is a TLS security profile based on: https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility", +} + +func (OldTLSProfile) SwaggerDoc() map[string]string { + return map_OldTLSProfile +} + +var map_TLSProfileSpec = map[string]string{ + "": "TLSProfileSpec is the desired behavior of a TLSSecurityProfile.", + "ciphers": "ciphers is used to specify the cipher algorithms that are negotiated during the TLS handshake. Operators may remove entries their operands do not support. For example, to use DES-CBC3-SHA (yaml):\n\n ciphers:\n - DES-CBC3-SHA", + "minTLSVersion": "minTLSVersion is used to specify the minimal version of the TLS protocol that is negotiated during the TLS handshake.
For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml):\n\n minTLSVersion: TLSv1.1\n\nNOTE: currently the highest minTLSVersion allowed is VersionTLS12", +} + +func (TLSProfileSpec) SwaggerDoc() map[string]string { + return map_TLSProfileSpec +} + +var map_TLSSecurityProfile = map[string]string{ + "": "TLSSecurityProfile defines the schema for a TLS security profile. This object is used by operators to apply TLS security settings to operands.", + "type": "type is one of Old, Intermediate, Modern or Custom. Custom provides the ability to specify individual TLS security profile parameters. Old, Intermediate and Modern are TLS security profiles based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations\n\nThe profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers are found to be insecure. Depending on precisely which ciphers are available to a process, the list may be reduced.\n\nNote that the Modern profile is currently not supported because it is not yet well adopted by common software libraries.", + "old": "old is a TLS security profile based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility\n\nand looks like this (yaml):\n\n ciphers:\n - TLS_AES_128_GCM_SHA256\n - TLS_AES_256_GCM_SHA384\n - TLS_CHACHA20_POLY1305_SHA256\n - ECDHE-ECDSA-AES128-GCM-SHA256\n - ECDHE-RSA-AES128-GCM-SHA256\n - ECDHE-ECDSA-AES256-GCM-SHA384\n - ECDHE-RSA-AES256-GCM-SHA384\n - ECDHE-ECDSA-CHACHA20-POLY1305\n - ECDHE-RSA-CHACHA20-POLY1305\n - DHE-RSA-AES128-GCM-SHA256\n - DHE-RSA-AES256-GCM-SHA384\n - DHE-RSA-CHACHA20-POLY1305\n - ECDHE-ECDSA-AES128-SHA256\n - ECDHE-RSA-AES128-SHA256\n - ECDHE-ECDSA-AES128-SHA\n - ECDHE-RSA-AES128-SHA\n - ECDHE-ECDSA-AES256-SHA384\n - ECDHE-RSA-AES256-SHA384\n - ECDHE-ECDSA-AES256-SHA\n - ECDHE-RSA-AES256-SHA\n - DHE-RSA-AES128-SHA256\n - DHE-RSA-AES256-SHA256\n - AES128-GCM-SHA256\n - AES256-GCM-SHA384\n - AES128-SHA256\n - AES256-SHA256\n - AES128-SHA\n - AES256-SHA\n - DES-CBC3-SHA\n minTLSVersion: TLSv1.0", + "intermediate": "intermediate is a TLS security profile based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29\n\nand looks like this (yaml):\n\n ciphers:\n - TLS_AES_128_GCM_SHA256\n - TLS_AES_256_GCM_SHA384\n - TLS_CHACHA20_POLY1305_SHA256\n - ECDHE-ECDSA-AES128-GCM-SHA256\n - ECDHE-RSA-AES128-GCM-SHA256\n - ECDHE-ECDSA-AES256-GCM-SHA384\n - ECDHE-RSA-AES256-GCM-SHA384\n - ECDHE-ECDSA-CHACHA20-POLY1305\n - ECDHE-RSA-CHACHA20-POLY1305\n - DHE-RSA-AES128-GCM-SHA256\n - DHE-RSA-AES256-GCM-SHA384\n minTLSVersion: TLSv1.2", + "modern": "modern is a TLS security profile based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility\n\nand looks like this (yaml):\n\n ciphers:\n - TLS_AES_128_GCM_SHA256\n - TLS_AES_256_GCM_SHA384\n - TLS_CHACHA20_POLY1305_SHA256\n minTLSVersion: TLSv1.3\n\nNOTE: Currently unsupported.", + "custom": "custom is a user-defined TLS security profile. Be extremely careful using a custom profile as invalid configurations can be catastrophic. 
An example custom profile looks like this:\n\n ciphers:\n - ECDHE-ECDSA-CHACHA20-POLY1305\n - ECDHE-RSA-CHACHA20-POLY1305\n - ECDHE-RSA-AES128-GCM-SHA256\n - ECDHE-ECDSA-AES128-GCM-SHA256\n minTLSVersion: TLSv1.1", +} + +func (TLSSecurityProfile) SwaggerDoc() map[string]string { + return map_TLSSecurityProfile +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/operator/v1/0000_10_config-operator_01_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_10_config-operator_01_config.crd.yaml new file mode 100644 index 0000000000..195e3e065d --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_10_config-operator_01_config.crd.yaml @@ -0,0 +1,165 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/612 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: configs.operator.openshift.io +spec: + group: operator.openshift.io + names: + categories: + - coreoperators + kind: Config + plural: configs + singular: config + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Config provides information to configure the config operator. + It handles installation, migration or synchronization of cloud-based cluster + configurations like AWS or Azure. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the specification of the desired behavior of the + Config Operator. + type: object + properties: + logLevel: + description: "logLevel is an intent based logging for an overall component. + \ It does not give fine grained control, but it is a simple way + to manage coarse grained logging choices that operators have to + interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", + \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + managementState: + description: managementState indicates whether and how the operator + should manage the component + type: string + pattern: ^(Managed|Unmanaged|Force|Removed)$ + observedConfig: + description: observedConfig holds a sparse config that the controller + has observed from the cluster state. It exists in spec because + it is an input to the level for the operator + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + description: "operatorLogLevel is an intent based logging for the + operator itself. It does not give fine grained control, but it + is a simple way to manage coarse grained logging choices that operators + have to interpret for themselves.
\n Valid values are: \"Normal\", + \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + unsupportedConfigOverrides: + description: 'unsupportedConfigOverrides holds a sparse config that + will override any previously set options. It only needs to be the + fields to override it will end up overlaying in the following order: + 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + status: + description: status defines the observed status of the Config Operator. + type: object + properties: + conditions: + description: conditions is a list of conditions and their status + type: array + items: + description: OperatorCondition is just the standard condition fields. + type: object + properties: + lastTransitionTime: + type: string + format: date-time + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + generations: + description: generations are used to determine when an item needs + to be reconciled or has changed in a way that needs a reaction. + type: array + items: + description: GenerationStatus keeps track of the generation for + a given resource so that decisions about forced updates can be + made. + type: object + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without + generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload + controller involved + type: integer + format: int64 + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're + tracking + type: string + observedGeneration: + description: observedGeneration is the last generation change you've + dealt with + type: integer + format: int64 + readyReplicas: + description: readyReplicas indicates how many replicas are ready and + at the desired state + type: integer + format: int32 + version: + description: version is the level this availability applies to + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_12_etcd-operator_01_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_12_etcd-operator_01_config.crd.yaml new file mode 100644 index 0000000000..565ac24401 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_12_etcd-operator_01_config.crd.yaml @@ -0,0 +1,231 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/752 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: etcds.operator.openshift.io +spec: + group: operator.openshift.io + names: + categories: + - coreoperators + kind: Etcd + plural: etcds + singular: etcd + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Etcd provides information to configure an operator to manage + etcd. 
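For orientation while reading the schema that follows, an illustrative Etcd resource might look like this (yaml; example only, not part of the vendored file; operator singletons are conventionally named "cluster"):

  apiVersion: operator.openshift.io/v1
  kind: Etcd
  metadata:
    name: cluster    # cluster-scoped singleton
  spec:
    managementState: Managed
    logLevel: Normal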
+ type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + type: object + properties: + failedRevisionLimit: + description: failedRevisionLimit is the number of failed static pod + installer revisions to keep on disk and in the api -1 = unlimited, + 0 or unset = 5 (default) + type: integer + format: int32 + forceRedeploymentReason: + description: forceRedeploymentReason can be used to force the redeployment + of the operand by providing a unique string. This provides a mechanism + to kick a previously failed deployment and provide a reason why + you think it will work this time instead of failing again on the + same config. + type: string + logLevel: + description: "logLevel is an intent based logging for an overall component. + \ It does not give fine grained control, but it is a simple way + to manage coarse grained logging choices that operators have to + interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", + \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + managementState: + description: managementState indicates whether and how the operator + should manage the component + type: string + pattern: ^(Managed|Unmanaged|Force|Removed)$ + observedConfig: + description: observedConfig holds a sparse config that controller + has observed from the cluster state. It exists in spec because + it is an input to the level for the operator + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + description: "operatorLogLevel is an intent based logging for the + operator itself. It does not give fine grained control, but it + is a simple way to manage coarse grained logging choices that operators + have to interpret for themselves. \n Valid values are: \"Normal\", + \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + succeededRevisionLimit: + description: succeededRevisionLimit is the number of successful static + pod installer revisions to keep on disk and in the api -1 = unlimited, + 0 or unset = 5 (default) + type: integer + format: int32 + unsupportedConfigOverrides: + description: 'unsupportedConfigOverrides holds a sparse config that + will override any previously set options. It only needs to be the + fields to override it will end up overlaying in the following order: + 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + status: + type: object + properties: + conditions: + description: conditions is a list of conditions and their status + type: array + items: + description: OperatorCondition is just the standard condition fields. 
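For illustration, a populated condition entry matching the standard condition fields described above might look like this (yaml; the values are examples only):

  conditions:
  - type: Available
    status: "True"
    reason: AsExpected
    message: ""
    lastTransitionTime: "2021-06-10T15:38:55Z"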
+ type: object + properties: + lastTransitionTime: + type: string + format: date-time + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + generations: + description: generations are used to determine when an item needs + to be reconciled or has changed in a way that needs a reaction. + type: array + items: + description: GenerationStatus keeps track of the generation for + a given resource so that decisions about forced updates can be + made. + type: object + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without + generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload + controller involved + type: integer + format: int64 + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're + tracking + type: string + latestAvailableRevision: + description: latestAvailableRevision is the deploymentID of the most + recent deployment + type: integer + format: int32 + latestAvailableRevisionReason: + description: latestAvailableRevisionReason describe the detailed reason + for the most recent deployment + type: string + nodeStatuses: + description: nodeStatuses track the deployment values and errors across + individual nodes + type: array + items: + description: NodeStatus provides information about the current state + of a particular node managed by this operator. + type: object + properties: + currentRevision: + description: currentRevision is the generation of the most recently + successful deployment + type: integer + format: int32 + lastFailedCount: + description: lastFailedCount is how often the last failed revision + failed. + type: integer + lastFailedRevision: + description: lastFailedRevision is the generation of the deployment + we tried and failed to deploy. + type: integer + format: int32 + lastFailedRevisionErrors: + description: lastFailedRevisionErrors is a list of the errors + during the failed deployment referenced in lastFailedRevision + type: array + items: + type: string + lastFailedTime: + description: lastFailedTime is the time the last failed revision + failed the last time. 
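A sketch of a nodeStatuses entry using the node status fields described in this section (yaml; the node name and revision numbers are hypothetical):

  nodeStatuses:
  - nodeName: master-0
    currentRevision: 7    # most recently successful revision on this node
    targetRevision: 8     # revision currently being applied
    lastFailedRevision: 6
    lastFailedCount: 2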
+ type: string + format: date-time + nodeName: + description: nodeName is the name of the node + type: string + targetRevision: + description: targetRevision is the generation of the deployment + we're trying to apply + type: integer + format: int32 + observedGeneration: + description: observedGeneration is the last generation change you've + dealt with + type: integer + format: int64 + readyReplicas: + description: readyReplicas indicates how many replicas are ready and + at the desired state + type: integer + format: int32 + version: + description: version is the level this availability applies to + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_20_kube-apiserver-operator_01_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_20_kube-apiserver-operator_01_config.crd.yaml new file mode 100644 index 0000000000..63ba8cdded --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_20_kube-apiserver-operator_01_config.crd.yaml @@ -0,0 +1,233 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/475 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: kubeapiservers.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: KubeAPIServer + plural: kubeapiservers + singular: kubeapiserver + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: KubeAPIServer provides information to configure an operator to + manage kube-apiserver. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the specification of the desired behavior of the + Kubernetes API Server + properties: + failedRevisionLimit: + description: failedRevisionLimit is the number of failed static pod + installer revisions to keep on disk and in the api -1 = unlimited, + 0 or unset = 5 (default) + format: int32 + type: integer + forceRedeploymentReason: + description: forceRedeploymentReason can be used to force the redeployment + of the operand by providing a unique string. This provides a mechanism + to kick a previously failed deployment and provide a reason why + you think it will work this time instead of failing again on the + same config. + type: string + logLevel: + default: Normal + description: "logLevel is an intent based logging for an overall component. + \ It does not give fine grained control, but it is a simple way + to manage coarse grained logging choices that operators have to + interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", + \"Trace\", \"TraceAll\". Defaults to \"Normal\"." 
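For illustration, a KubeAPIServer resource exercising the spec fields described here might look like this (yaml; example only; the redeployment reason is an arbitrary user-chosen string):

  apiVersion: operator.openshift.io/v1
  kind: KubeAPIServer
  metadata:
    name: cluster
  spec:
    managementState: Managed
    logLevel: Debug
    forceRedeploymentReason: "retry-2021-06-10"    # any unique string triggers a redeploy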
+ enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + type: string + managementState: + description: managementState indicates whether and how the operator + should manage the component + pattern: ^(Managed|Force)$ + type: string + observedConfig: + description: observedConfig holds a sparse config that controller + has observed from the cluster state. It exists in spec because + it is an input to the level for the operator + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + default: Normal + description: "operatorLogLevel is an intent based logging for the + operator itself. It does not give fine grained control, but it + is a simple way to manage coarse grained logging choices that operators + have to interpret for themselves. \n Valid values are: \"Normal\", + \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + type: string + succeededRevisionLimit: + description: succeededRevisionLimit is the number of successful static + pod installer revisions to keep on disk and in the api -1 = unlimited, + 0 or unset = 5 (default) + format: int32 + type: integer + unsupportedConfigOverrides: + description: 'unsupportedConfigOverrides holds a sparse config that + will override any previously set options. It only needs to be the + fields to override it will end up overlaying in the following order: + 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + status: + description: status is the most recently observed status of the Kubernetes + API Server + properties: + conditions: + description: conditions is a list of conditions and their status + items: + description: OperatorCondition is just the standard condition fields. + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + type: object + type: array + generations: + description: generations are used to determine when an item needs + to be reconciled or has changed in a way that needs a reaction. + items: + description: GenerationStatus keeps track of the generation for + a given resource so that decisions about forced updates can be + made. 
+ properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without + generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload + controller involved + format: int64 + type: integer + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're + tracking + type: string + type: object + type: array + latestAvailableRevision: + description: latestAvailableRevision is the deploymentID of the most + recent deployment + format: int32 + type: integer + latestAvailableRevisionReason: + description: latestAvailableRevisionReason describe the detailed reason + for the most recent deployment + type: string + nodeStatuses: + description: nodeStatuses track the deployment values and errors across + individual nodes + items: + description: NodeStatus provides information about the current state + of a particular node managed by this operator. + properties: + currentRevision: + description: currentRevision is the generation of the most recently + successful deployment + format: int32 + type: integer + lastFailedCount: + description: lastFailedCount is how often the last failed revision + failed. + type: integer + lastFailedRevision: + description: lastFailedRevision is the generation of the deployment + we tried and failed to deploy. + format: int32 + type: integer + lastFailedRevisionErrors: + description: lastFailedRevisionErrors is a list of the errors + during the failed deployment referenced in lastFailedRevision + items: + type: string + type: array + lastFailedTime: + description: lastFailedTime is the time the last failed revision + failed the last time. 
+ format: date-time + type: string + nodeName: + description: nodeName is the name of the node + type: string + targetRevision: + description: targetRevision is the generation of the deployment + we're trying to apply + format: int32 + type: integer + type: object + type: array + observedGeneration: + description: observedGeneration is the last generation change you've + dealt with + format: int64 + type: integer + readyReplicas: + description: readyReplicas indicates how many replicas are ready and + at the desired state + format: int32 + type: integer + version: + description: version is the level this availability applies to + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_20_kube-apiserver-operator_01_config.crd.yaml-patch b/vendor/github.com/openshift/api/operator/v1/0000_20_kube-apiserver-operator_01_config.crd.yaml-patch new file mode 100644 index 0000000000..8145f00c49 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_20_kube-apiserver-operator_01_config.crd.yaml-patch @@ -0,0 +1,3 @@ +- op: replace + path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/managementState/pattern + value: "^(Managed|Force)$" diff --git a/vendor/github.com/openshift/api/operator/v1/0000_25_kube-controller-manager-operator_01_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_25_kube-controller-manager-operator_01_config.crd.yaml new file mode 100644 index 0000000000..94b2820bf8 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_25_kube-controller-manager-operator_01_config.crd.yaml @@ -0,0 +1,235 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/475 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: kubecontrollermanagers.operator.openshift.io +spec: + group: operator.openshift.io + names: + categories: + - coreoperators + kind: KubeControllerManager + plural: kubecontrollermanagers + singular: kubecontrollermanager + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: KubeControllerManager provides information to configure an operator + to manage kube-controller-manager. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the specification of the desired behavior of the + Kubernetes Controller Manager + properties: + failedRevisionLimit: + description: failedRevisionLimit is the number of failed static pod + installer revisions to keep on disk and in the api -1 = unlimited, + 0 or unset = 5 (default) + format: int32 + type: integer + forceRedeploymentReason: + description: forceRedeploymentReason can be used to force the redeployment + of the operand by providing a unique string. This provides a mechanism + to kick a previously failed deployment and provide a reason why + you think it will work this time instead of failing again on the + same config. + type: string + logLevel: + default: Normal + description: "logLevel is an intent based logging for an overall component. + \ It does not give fine grained control, but it is a simple way + to manage coarse grained logging choices that operators have to + interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", + \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + type: string + managementState: + description: managementState indicates whether and how the operator + should manage the component + pattern: ^(Managed|Force)$ + type: string + observedConfig: + description: observedConfig holds a sparse config that controller + has observed from the cluster state. It exists in spec because + it is an input to the level for the operator + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + default: Normal + description: "operatorLogLevel is an intent based logging for the + operator itself. It does not give fine grained control, but it + is a simple way to manage coarse grained logging choices that operators + have to interpret for themselves. \n Valid values are: \"Normal\", + \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + type: string + succeededRevisionLimit: + description: succeededRevisionLimit is the number of successful static + pod installer revisions to keep on disk and in the api -1 = unlimited, + 0 or unset = 5 (default) + format: int32 + type: integer + unsupportedConfigOverrides: + description: 'unsupportedConfigOverrides holds a sparse config that + will override any previously set options. It only needs to be the + fields to override it will end up overlaying in the following order: + 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + status: + description: status is the most recently observed status of the Kubernetes + Controller Manager + properties: + conditions: + description: conditions is a list of conditions and their status + items: + description: OperatorCondition is just the standard condition fields. + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + type: object + type: array + generations: + description: generations are used to determine when an item needs + to be reconciled or has changed in a way that needs a reaction. 
+ items: + description: GenerationStatus keeps track of the generation for + a given resource so that decisions about forced updates can be + made. + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without + generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload + controller involved + format: int64 + type: integer + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're + tracking + type: string + type: object + type: array + latestAvailableRevision: + description: latestAvailableRevision is the deploymentID of the most + recent deployment + format: int32 + type: integer + latestAvailableRevisionReason: + description: latestAvailableRevisionReason describe the detailed reason + for the most recent deployment + type: string + nodeStatuses: + description: nodeStatuses track the deployment values and errors across + individual nodes + items: + description: NodeStatus provides information about the current state + of a particular node managed by this operator. + properties: + currentRevision: + description: currentRevision is the generation of the most recently + successful deployment + format: int32 + type: integer + lastFailedCount: + description: lastFailedCount is how often the last failed revision + failed. + type: integer + lastFailedRevision: + description: lastFailedRevision is the generation of the deployment + we tried and failed to deploy. + format: int32 + type: integer + lastFailedRevisionErrors: + description: lastFailedRevisionErrors is a list of the errors + during the failed deployment referenced in lastFailedRevision + items: + type: string + type: array + lastFailedTime: + description: lastFailedTime is the time the last failed revision + failed the last time. 
+ format: date-time + type: string + nodeName: + description: nodeName is the name of the node + type: string + targetRevision: + description: targetRevision is the generation of the deployment + we're trying to apply + format: int32 + type: integer + type: object + type: array + observedGeneration: + description: observedGeneration is the last generation change you've + dealt with + format: int64 + type: integer + readyReplicas: + description: readyReplicas indicates how many replicas are ready and + at the desired state + format: int32 + type: integer + version: + description: version is the level this availability applies to + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_25_kube-controller-manager-operator_01_config.crd.yaml-patch b/vendor/github.com/openshift/api/operator/v1/0000_25_kube-controller-manager-operator_01_config.crd.yaml-patch new file mode 100644 index 0000000000..8145f00c49 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_25_kube-controller-manager-operator_01_config.crd.yaml-patch @@ -0,0 +1,3 @@ +- op: replace + path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/managementState/pattern + value: "^(Managed|Force)$" diff --git a/vendor/github.com/openshift/api/operator/v1/0000_25_kube-scheduler-operator_01_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_25_kube-scheduler-operator_01_config.crd.yaml new file mode 100644 index 0000000000..ea2329342b --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_25_kube-scheduler-operator_01_config.crd.yaml @@ -0,0 +1,235 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/475 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: kubeschedulers.operator.openshift.io +spec: + group: operator.openshift.io + names: + categories: + - coreoperators + kind: KubeScheduler + plural: kubeschedulers + singular: kubescheduler + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: KubeScheduler provides information to configure an operator to + manage scheduler. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the specification of the desired behavior of the + Kubernetes Scheduler + properties: + failedRevisionLimit: + description: failedRevisionLimit is the number of failed static pod + installer revisions to keep on disk and in the api -1 = unlimited, + 0 or unset = 5 (default) + format: int32 + type: integer + forceRedeploymentReason: + description: forceRedeploymentReason can be used to force the redeployment + of the operand by providing a unique string. This provides a mechanism + to kick a previously failed deployment and provide a reason why + you think it will work this time instead of failing again on the + same config. + type: string + logLevel: + default: Normal + description: "logLevel is an intent based logging for an overall component. + \ It does not give fine grained control, but it is a simple way + to manage coarse grained logging choices that operators have to + interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", + \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + type: string + managementState: + description: managementState indicates whether and how the operator + should manage the component + pattern: ^(Managed|Force)$ + type: string + observedConfig: + description: observedConfig holds a sparse config that controller + has observed from the cluster state. It exists in spec because + it is an input to the level for the operator + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + default: Normal + description: "operatorLogLevel is an intent based logging for the + operator itself. It does not give fine grained control, but it + is a simple way to manage coarse grained logging choices that operators + have to interpret for themselves. \n Valid values are: \"Normal\", + \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + type: string + succeededRevisionLimit: + description: succeededRevisionLimit is the number of successful static + pod installer revisions to keep on disk and in the api -1 = unlimited, + 0 or unset = 5 (default) + format: int32 + type: integer + unsupportedConfigOverrides: + description: 'unsupportedConfigOverrides holds a sparse config that + will override any previously set options. It only needs to be the + fields to override it will end up overlaying in the following order: + 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + status: + description: status is the most recently observed status of the Kubernetes + Scheduler + properties: + conditions: + description: conditions is a list of conditions and their status + items: + description: OperatorCondition is just the standard condition fields. + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + type: object + type: array + generations: + description: generations are used to determine when an item needs + to be reconciled or has changed in a way that needs a reaction. 
+ items: + description: GenerationStatus keeps track of the generation for + a given resource so that decisions about forced updates can be + made. + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without + generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload + controller involved + format: int64 + type: integer + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're + tracking + type: string + type: object + type: array + latestAvailableRevision: + description: latestAvailableRevision is the deploymentID of the most + recent deployment + format: int32 + type: integer + latestAvailableRevisionReason: + description: latestAvailableRevisionReason describe the detailed reason + for the most recent deployment + type: string + nodeStatuses: + description: nodeStatuses track the deployment values and errors across + individual nodes + items: + description: NodeStatus provides information about the current state + of a particular node managed by this operator. + properties: + currentRevision: + description: currentRevision is the generation of the most recently + successful deployment + format: int32 + type: integer + lastFailedCount: + description: lastFailedCount is how often the last failed revision + failed. + type: integer + lastFailedRevision: + description: lastFailedRevision is the generation of the deployment + we tried and failed to deploy. + format: int32 + type: integer + lastFailedRevisionErrors: + description: lastFailedRevisionErrors is a list of the errors + during the failed deployment referenced in lastFailedRevision + items: + type: string + type: array + lastFailedTime: + description: lastFailedTime is the time the last failed revision + failed the last time. 
+ format: date-time + type: string + nodeName: + description: nodeName is the name of the node + type: string + targetRevision: + description: targetRevision is the generation of the deployment + we're trying to apply + format: int32 + type: integer + type: object + type: array + observedGeneration: + description: observedGeneration is the last generation change you've + dealt with + format: int64 + type: integer + readyReplicas: + description: readyReplicas indicates how many replicas are ready and + at the desired state + format: int32 + type: integer + version: + description: version is the level this availability applies to + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_25_kube-scheduler-operator_01_config.crd.yaml-patch b/vendor/github.com/openshift/api/operator/v1/0000_25_kube-scheduler-operator_01_config.crd.yaml-patch new file mode 100644 index 0000000000..8145f00c49 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_25_kube-scheduler-operator_01_config.crd.yaml-patch @@ -0,0 +1,3 @@ +- op: replace + path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/managementState/pattern + value: "^(Managed|Force)$" diff --git a/vendor/github.com/openshift/api/operator/v1/0000_30_openshift-apiserver-operator_01_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_30_openshift-apiserver-operator_01_config.crd.yaml new file mode 100644 index 0000000000..6826d2c6a3 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_30_openshift-apiserver-operator_01_config.crd.yaml @@ -0,0 +1,171 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/475 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: openshiftapiservers.operator.openshift.io +spec: + group: operator.openshift.io + names: + categories: + - coreoperators + kind: OpenShiftAPIServer + plural: openshiftapiservers + singular: openshiftapiserver + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: OpenShiftAPIServer provides information to configure an operator + to manage openshift-apiserver. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the specification of the desired behavior of the + OpenShift API Server. + type: object + properties: + logLevel: + description: "logLevel is an intent based logging for an overall component. 
+ \ It does not give fine grained control, but it is a simple way + to manage coarse grained logging choices that operators have to + interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", + \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + managementState: + description: managementState indicates whether and how the operator + should manage the component + type: string + pattern: ^(Managed|Unmanaged|Force|Removed)$ + observedConfig: + description: observedConfig holds a sparse config that controller + has observed from the cluster state. It exists in spec because + it is an input to the level for the operator + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + description: "operatorLogLevel is an intent based logging for the + operator itself. It does not give fine grained control, but it + is a simple way to manage coarse grained logging choices that operators + have to interpret for themselves. \n Valid values are: \"Normal\", + \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + unsupportedConfigOverrides: + description: 'unsupportedConfigOverrides holds a sparse config that + will override any previously set options. It only needs to be the + fields to override it will end up overlaying in the following order: + 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + status: + description: status defines the observed status of the OpenShift API Server. + type: object + properties: + conditions: + description: conditions is a list of conditions and their status + type: array + items: + description: OperatorCondition is just the standard condition fields. + type: object + properties: + lastTransitionTime: + type: string + format: date-time + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + generations: + description: generations are used to determine when an item needs + to be reconciled or has changed in a way that needs a reaction. + type: array + items: + description: GenerationStatus keeps track of the generation for + a given resource so that decisions about forced updates can be + made. + type: object + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without + generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload + controller involved + type: integer + format: int64 + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're + tracking + type: string + latestAvailableRevision: + description: latestAvailableRevision is the latest revision used as + suffix of revisioned secrets like encryption-config. A new revision + causes a new deployment of pods. 
+ type: integer + format: int32 + minimum: 0 + observedGeneration: + description: observedGeneration is the last generation change you've + dealt with + type: integer + format: int64 + readyReplicas: + description: readyReplicas indicates how many replicas are ready and + at the desired state + type: integer + format: int32 + version: + description: version is the level this availability applies to + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_40_cloud-credential-operator_00_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_40_cloud-credential-operator_00_config.crd.yaml new file mode 100644 index 0000000000..d7c932b6f7 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_40_cloud-credential-operator_00_config.crd.yaml @@ -0,0 +1,179 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/692 + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: cloudcredentials.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: CloudCredential + listKind: CloudCredentialList + plural: cloudcredentials + singular: cloudcredential + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: CloudCredential provides a means to configure an operator to + manage CredentialsRequests. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: CloudCredentialSpec is the specification of the desired behavior + of the cloud-credential-operator. + type: object + properties: + credentialsMode: + description: 'CredentialsMode allows informing CCO that it should + not attempt to dynamically determine the root cloud credentials + capabilities, and it should just run in the specified mode. It also + allows putting the operator into "manual" mode if desired. Leaving + the field in default mode runs CCO so that the cluster''s cloud + credentials will be dynamically probed for capabilities (on supported + clouds/platforms). Supported modes: AWS/Azure/GCP: "" (Default), + "Mint", "Passthrough", "Manual" Others: Do not set value as other + platforms only support running in "Passthrough"' + type: string + enum: + - "" + - Manual + - Mint + - Passthrough + logLevel: + description: "logLevel is an intent based logging for an overall component. + \ It does not give fine grained control, but it is a simple way + to manage coarse grained logging choices that operators have to + interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", + \"Trace\", \"TraceAll\". Defaults to \"Normal\"." 
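An illustrative CloudCredential resource using the credentialsMode field described above (yaml; example only):

  apiVersion: operator.openshift.io/v1
  kind: CloudCredential
  metadata:
    name: cluster
  spec:
    credentialsMode: Manual    # opt out of dynamic capability probing
    logLevel: Normal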
+ type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + managementState: + description: managementState indicates whether and how the operator + should manage the component + type: string + pattern: ^(Managed|Unmanaged|Force|Removed)$ + observedConfig: + description: observedConfig holds a sparse config that controller + has observed from the cluster state. It exists in spec because + it is an input to the level for the operator + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + description: "operatorLogLevel is an intent based logging for the + operator itself. It does not give fine grained control, but it + is a simple way to manage coarse grained logging choices that operators + have to interpret for themselves. \n Valid values are: \"Normal\", + \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + unsupportedConfigOverrides: + description: 'unsupportedConfigOverrides holds a sparse config that + will override any previously set options. It only needs to be the + fields to override it will end up overlaying in the following order: + 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + status: + description: CloudCredentialStatus defines the observed status of the + cloud-credential-operator. + type: object + properties: + conditions: + description: conditions is a list of conditions and their status + type: array + items: + description: OperatorCondition is just the standard condition fields. + type: object + properties: + lastTransitionTime: + type: string + format: date-time + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + generations: + description: generations are used to determine when an item needs + to be reconciled or has changed in a way that needs a reaction. + type: array + items: + description: GenerationStatus keeps track of the generation for + a given resource so that decisions about forced updates can be + made. 
+ type: object + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without + generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload + controller involved + type: integer + format: int64 + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're + tracking + type: string + observedGeneration: + description: observedGeneration is the last generation change you've + dealt with + type: integer + format: int64 + readyReplicas: + description: readyReplicas indicates how many replicas are ready and + at the desired state + type: integer + format: int32 + version: + description: version is the level this availability applies to + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_40_kube-storage-version-migrator-operator_00_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_40_kube-storage-version-migrator-operator_00_config.crd.yaml new file mode 100644 index 0000000000..f6cb9db1b0 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_40_kube-storage-version-migrator-operator_00_config.crd.yaml @@ -0,0 +1,160 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/503 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: kubestorageversionmigrators.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: KubeStorageVersionMigrator + listKind: KubeStorageVersionMigratorList + plural: kubestorageversionmigrators + singular: kubestorageversionmigrator + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: KubeStorageVersionMigrator provides information to configure + an operator to manage kube-storage-version-migrator. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + type: object + properties: + logLevel: + description: "logLevel is an intent based logging for an overall component. + \ It does not give fine grained control, but it is a simple way + to manage coarse grained logging choices that operators have to + interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", + \"Trace\", \"TraceAll\". Defaults to \"Normal\"." 
+ type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + managementState: + description: managementState indicates whether and how the operator + should manage the component + type: string + pattern: ^(Managed|Unmanaged|Force|Removed)$ + observedConfig: + description: observedConfig holds a sparse config that controller + has observed from the cluster state. It exists in spec because + it is an input to the level for the operator + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + description: "operatorLogLevel is an intent based logging for the + operator itself. It does not give fine grained control, but it + is a simple way to manage coarse grained logging choices that operators + have to interpret for themselves. \n Valid values are: \"Normal\", + \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + unsupportedConfigOverrides: + description: 'unsupportedConfigOverrides holds a sparse config that + will override any previously set options. It only needs to be the + fields to override it will end up overlaying in the following order: + 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + status: + type: object + properties: + conditions: + description: conditions is a list of conditions and their status + type: array + items: + description: OperatorCondition is just the standard condition fields. + type: object + properties: + lastTransitionTime: + type: string + format: date-time + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + generations: + description: generations are used to determine when an item needs + to be reconciled or has changed in a way that needs a reaction. + type: array + items: + description: GenerationStatus keeps track of the generation for + a given resource so that decisions about forced updates can be + made. 
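A sketch of a generations entry built from the GenerationStatus fields described above (yaml; the group, resource, namespace and name values are hypothetical):

  generations:
  - group: apps
    resource: deployments
    namespace: openshift-kube-storage-version-migrator
    name: migrator
    lastGeneration: 2    # last workload generation the operator reacted to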
+ type: object + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without + generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload + controller involved + type: integer + format: int64 + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're + tracking + type: string + observedGeneration: + description: observedGeneration is the last generation change you've + dealt with + type: integer + format: int64 + readyReplicas: + description: readyReplicas indicates how many replicas are ready and + at the desired state + type: integer + format: int32 + version: + description: version is the level this availability applies to + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_50_cluster-authentication-operator_01_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_50_cluster-authentication-operator_01_config.crd.yaml new file mode 100644 index 0000000000..c15d59480f --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_50_cluster-authentication-operator_01_config.crd.yaml @@ -0,0 +1,169 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/475 + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: authentications.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: Authentication + plural: authentications + singular: authentication + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Authentication provides information to configure an operator + to manage authentication. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + type: object + properties: + logLevel: + description: "logLevel is an intent based logging for an overall component. + \ It does not give fine grained control, but it is a simple way + to manage coarse grained logging choices that operators have to + interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", + \"Trace\", \"TraceAll\". Defaults to \"Normal\"." 
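For illustration, an Authentication resource showing the sparse-overlay shape of unsupportedConfigOverrides described in this schema (yaml; the override keys below are hypothetical and shown only to illustrate how a sparse config overlays observedConfig):

  apiVersion: operator.openshift.io/v1
  kind: Authentication
  metadata:
    name: cluster
  spec:
    managementState: Managed
    unsupportedConfigOverrides:
      oauthConfig:              # hypothetical sparse override
        tokenConfig:
          accessTokenMaxAgeSeconds: 86400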
+ type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + managementState: + description: managementState indicates whether and how the operator + should manage the component + type: string + pattern: ^(Managed|Unmanaged|Force|Removed)$ + observedConfig: + description: observedConfig holds a sparse config that controller + has observed from the cluster state. It exists in spec because + it is an input to the level for the operator + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + description: "operatorLogLevel is an intent based logging for the + operator itself. It does not give fine grained control, but it + is a simple way to manage coarse grained logging choices that operators + have to interpret for themselves. \n Valid values are: \"Normal\", + \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + unsupportedConfigOverrides: + description: 'unsupportedConfigOverrides holds a sparse config that + will override any previously set options. It only needs to be the + fields to override it will end up overlaying in the following order: + 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + status: + type: object + properties: + conditions: + description: conditions is a list of conditions and their status + type: array + items: + description: OperatorCondition is just the standard condition fields. + type: object + properties: + lastTransitionTime: + type: string + format: date-time + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + generations: + description: generations are used to determine when an item needs + to be reconciled or has changed in a way that needs a reaction. + type: array + items: + description: GenerationStatus keeps track of the generation for + a given resource so that decisions about forced updates can be + made. + type: object + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without + generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload + controller involved + type: integer + format: int64 + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're + tracking + type: string + oauthAPIServer: + description: OAuthAPIServer holds status specific only to oauth-apiserver + type: object + properties: + latestAvailableRevision: + description: LatestAvailableRevision is the latest revision used + as suffix of revisioned secrets like encryption-config. A new + revision causes a new deployment of pods. 
+ type: integer + format: int32 + minimum: 0 + observedGeneration: + description: observedGeneration is the last generation change you've + dealt with + type: integer + format: int64 + readyReplicas: + description: readyReplicas indicates how many replicas are ready and + at the desired state + type: integer + format: int32 + version: + description: version is the level this availability applies to + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_50_cluster-openshift-controller-manager-operator_02_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_50_cluster-openshift-controller-manager-operator_02_config.crd.yaml new file mode 100644 index 0000000000..0994b896bc --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_50_cluster-openshift-controller-manager-operator_02_config.crd.yaml @@ -0,0 +1,161 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/475 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: openshiftcontrollermanagers.operator.openshift.io +spec: + group: operator.openshift.io + names: + categories: + - coreoperators + kind: OpenShiftControllerManager + plural: openshiftcontrollermanagers + singular: openshiftcontrollermanager + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: OpenShiftControllerManager provides information to configure + an operator to manage openshift-controller-manager. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + type: object + properties: + logLevel: + description: "logLevel is an intent based logging for an overall component. + \ It does not give fine grained control, but it is a simple way + to manage coarse grained logging choices that operators have to + interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", + \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + managementState: + description: managementState indicates whether and how the operator + should manage the component + type: string + pattern: ^(Managed|Unmanaged|Force|Removed)$ + observedConfig: + description: observedConfig holds a sparse config that controller + has observed from the cluster state. It exists in spec because + it is an input to the level for the operator + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + description: "operatorLogLevel is an intent based logging for the + operator itself. 
It does not give fine grained control, but it + is a simple way to manage coarse grained logging choices that operators + have to interpret for themselves. \n Valid values are: \"Normal\", + \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + unsupportedConfigOverrides: + description: 'unsupportedConfigOverrides holds a sparse config that + will override any previously set options. It only needs to be the + fields to override it will end up overlaying in the following order: + 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + status: + type: object + properties: + conditions: + description: conditions is a list of conditions and their status + type: array + items: + description: OperatorCondition is just the standard condition fields. + type: object + properties: + lastTransitionTime: + type: string + format: date-time + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + generations: + description: generations are used to determine when an item needs + to be reconciled or has changed in a way that needs a reaction. + type: array + items: + description: GenerationStatus keeps track of the generation for + a given resource so that decisions about forced updates can be + made. + type: object + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without + generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload + controller involved + type: integer + format: int64 + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're + tracking + type: string + observedGeneration: + description: observedGeneration is the last generation change you've + dealt with + type: integer + format: int64 + readyReplicas: + description: readyReplicas indicates how many replicas are ready and + at the desired state + type: integer + format: int32 + version: + description: version is the level this availability applies to + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_50_cluster_storage_operator_01_crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_50_cluster_storage_operator_01_crd.yaml new file mode 100644 index 0000000000..7ae9984e4f --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_50_cluster_storage_operator_01_crd.yaml @@ -0,0 +1,162 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/670 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: storages.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: Storage + plural: storages + singular: storage + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Storage provides a means to 
configure an operator to manage the + cluster storage operator. `cluster` is the canonical name. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + logLevel: + description: "logLevel is an intent based logging for an overall component. + \ It does not give fine grained control, but it is a simple way + to manage coarse grained logging choices that operators have to + interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", + \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + managementState: + description: managementState indicates whether and how the operator + should manage the component + type: string + pattern: ^(Managed|Unmanaged|Force|Removed)$ + observedConfig: + description: observedConfig holds a sparse config that controller + has observed from the cluster state. It exists in spec because + it is an input to the level for the operator + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + description: "operatorLogLevel is an intent based logging for the + operator itself. It does not give fine grained control, but it + is a simple way to manage coarse grained logging choices that operators + have to interpret for themselves. \n Valid values are: \"Normal\", + \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + unsupportedConfigOverrides: + description: 'unsupportedConfigOverrides holds a sparse config that + will override any previously set options. It only needs to be the + fields to override it will end up overlaying in the following order: + 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + status: + description: status holds observed values from the cluster. They may not + be overridden. + type: object + properties: + conditions: + description: conditions is a list of conditions and their status + type: array + items: + description: OperatorCondition is just the standard condition fields. + type: object + properties: + lastTransitionTime: + type: string + format: date-time + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + generations: + description: generations are used to determine when an item needs + to be reconciled or has changed in a way that needs a reaction. + type: array + items: + description: GenerationStatus keeps track of the generation for + a given resource so that decisions about forced updates can be + made. 
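Since `cluster` is called out above as the canonical name, a minimal Storage resource might look like the following sketch; the spec values are illustrative:

    apiVersion: operator.openshift.io/v1
    kind: Storage
    metadata:
      name: cluster          # canonical name per the description above
    spec:
      managementState: Managed
      logLevel: Normal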
+ type: object + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without + generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload + controller involved + type: integer + format: int64 + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're + tracking + type: string + observedGeneration: + description: observedGeneration is the last generation change you've + dealt with + type: integer + format: int64 + readyReplicas: + description: readyReplicas indicates how many replicas are ready and + at the desired state + type: integer + format: int32 + version: + description: version is the level this availability applies to + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_50_ingress-operator_00-ingresscontroller.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_50_ingress-operator_00-ingresscontroller.crd.yaml new file mode 100644 index 0000000000..a34f345249 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_50_ingress-operator_00-ingresscontroller.crd.yaml @@ -0,0 +1,1288 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/616 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: ingresscontrollers.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: IngressController + listKind: IngressControllerList + plural: ingresscontrollers + singular: ingresscontroller + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "IngressController describes a managed ingress controller for + the cluster. The controller can service OpenShift Route and Kubernetes Ingress + resources. \n When an IngressController is created, a new ingress controller + deployment is created to allow external traffic to reach the services that + expose Ingress or Route resources. Updating this resource may lead to disruption + for public facing network connections as a new ingress controller revision + may be rolled out. \n https://kubernetes.io/docs/concepts/services-networking/ingress-controllers + \n Whenever possible, sensible defaults for the platform are used. See each + field for more details." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the specification of the desired behavior of the + IngressController. + properties: + defaultCertificate: + description: "defaultCertificate is a reference to a secret containing + the default certificate served by the ingress controller. When Routes + don't specify their own certificate, defaultCertificate is used. + \n The secret must contain the following keys and data: \n tls.crt: + certificate file contents tls.key: key file contents \n If unset, + a wildcard certificate is automatically generated and used. The + certificate is valid for the ingress controller domain (and subdomains) + and the generated certificate's CA will be automatically integrated + with the cluster's trust store. \n If a wildcard certificate is + used and shared by multiple HTTP/2 enabled routes (which implies + ALPN) then clients (i.e., notably browsers) are at liberty to reuse + open connections. This means a client can reuse a connection to + another route and that is likely to fail. This behaviour is generally + known as connection coalescing. \n The in-use certificate (whether + generated or user-specified) will be automatically integrated with + OpenShift's built-in OAuth server." + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + domain: + description: "domain is a DNS name serviced by the ingress controller + and is used to configure multiple features: \n * For the LoadBalancerService + endpoint publishing strategy, domain is used to configure DNS + records. See endpointPublishingStrategy. \n * When using a generated + default certificate, the certificate will be valid for domain + and its subdomains. See defaultCertificate. \n * The value is published + to individual Route statuses so that end-users know where to target + external DNS records. \n domain must be unique among all IngressControllers, + and cannot be updated. \n If empty, defaults to ingress.config.openshift.io/cluster + .spec.domain." + type: string + endpointPublishingStrategy: + description: "endpointPublishingStrategy is used to publish the ingress + controller endpoints to other networks, enable load balancer integrations, + etc. \n If unset, the default is based on infrastructure.config.openshift.io/cluster + .status.platform: \n AWS: LoadBalancerService (with External + scope) Azure: LoadBalancerService (with External scope) GCP: + \ LoadBalancerService (with External scope) IBMCloud: LoadBalancerService + (with External scope) Libvirt: HostNetwork \n Any other platform + types (including None) default to HostNetwork. \n endpointPublishingStrategy + cannot be updated." + properties: + hostNetwork: + description: hostNetwork holds parameters for the HostNetwork + endpoint publishing strategy. Present only if type is HostNetwork. + properties: + protocol: + description: "protocol specifies whether the IngressController + expects incoming connections to use plain TCP or whether + the IngressController expects PROXY protocol. \n PROXY protocol + can be used with load balancers that support it to communicate + the source addresses of client connections when forwarding + those connections to the IngressController. 
Using PROXY + protocol enables the IngressController to report those source + addresses instead of reporting the load balancer's address + in HTTP headers and logs. Note that enabling PROXY protocol + on the IngressController will cause connections to fail + if you are not using a load balancer that uses PROXY protocol + to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt + for information about PROXY protocol. \n The following values + are valid for this field: \n * The empty string. * \"TCP\". + * \"PROXY\". \n The empty string specifies the default, + which is TCP without PROXY protocol. Note that the default + is subject to change." + enum: + - "" + - TCP + - PROXY + type: string + type: object + loadBalancer: + description: loadBalancer holds parameters for the load balancer. + Present only if type is LoadBalancerService. + properties: + providerParameters: + description: "providerParameters holds desired load balancer + information specific to the underlying infrastructure provider. + \n If empty, defaults will be applied. See specific providerParameters + fields for details about their defaults." + properties: + aws: + description: "aws provides configuration settings that + are specific to AWS load balancers. \n If empty, defaults + will be applied. See specific aws fields for details + about their defaults." + properties: + classicLoadBalancer: + description: classicLoadBalancerParameters holds configuration + parameters for an AWS classic load balancer. Present + only if type is Classic. + type: object + networkLoadBalancer: + description: networkLoadBalancerParameters holds configuration + parameters for an AWS network load balancer. Present + only if type is NLB. + type: object + type: + description: "type is the type of AWS load balancer + to instantiate for an ingresscontroller. \n Valid + values are: \n * \"Classic\": A Classic Load Balancer + that makes routing decisions at either the transport + layer (TCP/SSL) or the application layer (HTTP/HTTPS). + See the following for additional details: \n https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#clb + \n * \"NLB\": A Network Load Balancer that makes + routing decisions at the transport layer (TCP/SSL). + See the following for additional details: \n https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#nlb" + enum: + - Classic + - NLB + type: string + required: + - type + type: object + gcp: + description: "gcp provides configuration settings that + are specific to GCP load balancers. \n If empty, defaults + will be applied. See specific gcp fields for details + about their defaults." + properties: + clientAccess: + description: "clientAccess describes how client access + is restricted for internal load balancers. \n Valid + values are: * \"Global\": Specifying an internal + load balancer with Global client access allows + clients from any region within the VPC to communicate + with the load balancer. \n https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing#global_access + \n * \"Local\": Specifying an internal load balancer + with Local client access means only clients within + the same region (and VPC) as the GCP load balancer + \ can communicate with the load balancer. Note + that this is the default behavior. 
\n https://cloud.google.com/load-balancing/docs/internal#client_access" + enum: + - Global + - Local + type: string + type: object + type: + description: type is the underlying infrastructure provider + for the load balancer. Allowed values are "AWS", "Azure", + "BareMetal", "GCP", "OpenStack", and "VSphere". + enum: + - AWS + - Azure + - BareMetal + - GCP + - OpenStack + - VSphere + - IBM + type: string + required: + - type + type: object + scope: + description: scope indicates the scope at which the load balancer + is exposed. Possible values are "External" and "Internal". + enum: + - Internal + - External + type: string + required: + - scope + type: object + nodePort: + description: nodePort holds parameters for the NodePortService + endpoint publishing strategy. Present only if type is NodePortService. + properties: + protocol: + description: "protocol specifies whether the IngressController + expects incoming connections to use plain TCP or whether + the IngressController expects PROXY protocol. \n PROXY protocol + can be used with load balancers that support it to communicate + the source addresses of client connections when forwarding + those connections to the IngressController. Using PROXY + protocol enables the IngressController to report those source + addresses instead of reporting the load balancer's address + in HTTP headers and logs. Note that enabling PROXY protocol + on the IngressController will cause connections to fail + if you are not using a load balancer that uses PROXY protocol + to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt + for information about PROXY protocol. \n The following values + are valid for this field: \n * The empty string. * \"TCP\". + * \"PROXY\". \n The empty string specifies the default, + which is TCP without PROXY protocol. Note that the default + is subject to change." + enum: + - "" + - TCP + - PROXY + type: string + type: object + private: + description: private holds parameters for the Private endpoint + publishing strategy. Present only if type is Private. + type: object + type: + description: "type is the publishing strategy to use. Valid values + are: \n * LoadBalancerService \n Publishes the ingress controller + using a Kubernetes LoadBalancer Service. \n In this configuration, + the ingress controller deployment uses container networking. + A LoadBalancer Service is created to publish the deployment. + \n See: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer + \n If domain is set, a wildcard DNS record will be managed to + point at the LoadBalancer Service's external name. DNS records + are managed only in DNS zones defined by dns.config.openshift.io/cluster + .spec.publicZone and .spec.privateZone. \n Wildcard DNS management + is currently supported only on the AWS, Azure, and GCP platforms. + \n * HostNetwork \n Publishes the ingress controller on node + ports where the ingress controller is deployed. \n In this configuration, + the ingress controller deployment uses host networking, bound + to node ports 80 and 443. The user is responsible for configuring + an external load balancer to publish the ingress controller + via the node ports. \n * Private \n Does not publish the ingress + controller. \n In this configuration, the ingress controller + deployment uses container networking, and is not explicitly + published. The user must manually publish the ingress controller. 
+ \n * NodePortService \n Publishes the ingress controller using
+ a Kubernetes NodePort Service. \n In this configuration, the
+ ingress controller deployment uses container networking. A NodePort
+ Service is created to publish the deployment. The specific node
+ ports are dynamically allocated by OpenShift; however, to support
+ static port allocations, user changes to the node port field
+ of the managed NodePort Service will be preserved."
+ enum:
+ - LoadBalancerService
+ - HostNetwork
+ - Private
+ - NodePortService
+ type: string
+ required:
+ - type
+ type: object
+ httpErrorCodePages:
+ description: httpErrorCodePages specifies a configmap with custom
+ error pages. The administrator must create this configmap in the
+ openshift-config namespace. This configmap should have keys in the
+ format "error-page-<error code>.http", where <error code> is an
+ HTTP error code. For example, "error-page-503.http" defines an error
+ page for HTTP 503 responses. Currently only error pages for 503
+ and 404 responses can be customized. Each value in the configmap
+ should be the full response, including HTTP headers, e.g., https://raw.githubusercontent.com/openshift/router/fadab45747a9b30cc3f0a4b41ad2871f95827a93/images/router/haproxy/conf/error-page-503.http
+ If this field is empty, the ingress controller uses the default
+ error pages.
+ properties:
+ name:
+ description: name is the metadata.name of the referenced config
+ map
+ type: string
+ required:
+ - name
+ type: object
+ httpHeaders:
+ description: "httpHeaders defines policy for HTTP headers. \n If this
+ field is empty, the default values are used."
+ properties:
+ forwardedHeaderPolicy:
+ description: "forwardedHeaderPolicy specifies when and how the
+ IngressController sets the Forwarded, X-Forwarded-For, X-Forwarded-Host,
+ X-Forwarded-Port, X-Forwarded-Proto, and X-Forwarded-Proto-Version
+ HTTP headers. The value may be one of the following: \n * \"Append\",
+ which specifies that the IngressController appends the headers,
+ preserving existing headers. \n * \"Replace\", which specifies
+ that the IngressController sets the headers, replacing any
+ existing Forwarded or X-Forwarded-* headers. \n * \"IfNone\",
+ which specifies that the IngressController sets the headers
+ if they are not already set. \n * \"Never\", which specifies
+ that the IngressController never sets the headers, preserving
+ any existing headers. \n By default, the policy is \"Append\"."
+ enum:
+ - Append
+ - Replace
+ - IfNone
+ - Never
+ type: string
+ headerNameCaseAdjustments:
+ description: "headerNameCaseAdjustments specifies case adjustments
+ that can be applied to HTTP header names. Each adjustment is
+ specified as an HTTP header name with the desired capitalization.
+ \ For example, specifying \"X-Forwarded-For\" indicates that
+ the \"x-forwarded-for\" HTTP header should be adjusted to have
+ the specified capitalization. \n These adjustments are only
+ applied to cleartext, edge-terminated, and re-encrypt routes,
+ and only when using HTTP/1. \n For request headers, these adjustments
+ are applied only for routes that have the haproxy.router.openshift.io/h1-adjust-case=true
+ annotation. For response headers, these adjustments are applied
+ to all HTTP responses. \n If this field is empty, no request
+ headers are adjusted."
+ items:
+ description: IngressControllerHTTPHeaderNameCaseAdjustment is
+ the name of an HTTP header (for example, "X-Forwarded-For")
+ in the desired capitalization. The value must be a valid
+ HTTP header name as defined in RFC 2616 section 4.2.
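Tying the httpHeaders fields above together, a hypothetical IngressController spec fragment might combine them like this; the name `default` and the namespace are assumptions, not requirements of the schema:

    apiVersion: operator.openshift.io/v1
    kind: IngressController
    metadata:
      name: default                          # assumed name
      namespace: openshift-ingress-operator  # assumed operator namespace
    spec:
      httpHeaders:
        forwardedHeaderPolicy: IfNone
        headerNameCaseAdjustments:
        - X-Forwarded-For   # re-capitalize x-forwarded-for on HTTP/1 routes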
+ maxLength: 1024 + minLength: 0 + pattern: ^$|^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$ + type: string + nullable: true + type: array + uniqueId: + description: "uniqueId describes configuration for a custom HTTP + header that the ingress controller should inject into incoming + HTTP requests. Typically, this header is configured to have + a value that is unique to the HTTP request. The header can + be used by applications or included in access logs to facilitate + tracing individual HTTP requests. \n If this field is empty, + no such header is injected into requests." + properties: + format: + description: 'format specifies the format for the injected + HTTP header''s value. This field has no effect unless name + is specified. For the HAProxy-based ingress controller + implementation, this format uses the same syntax as the + HTTP log format. If the field is empty, the default value + is "%{+X}o\\ %ci:%cp_%fi:%fp_%Ts_%rt:%pid"; see the corresponding + HAProxy documentation: http://cbonte.github.io/haproxy-dconv/2.0/configuration.html#8.2.3' + maxLength: 1024 + minLength: 0 + pattern: ^(%(%|(\{[-+]?[QXE](,[-+]?[QXE])*\})?([A-Za-z]+|\[[.0-9A-Z_a-z]+(\([^)]+\))?(,[.0-9A-Z_a-z]+(\([^)]+\))?)*\]))|[^%[:cntrl:]])*$ + type: string + name: + description: name specifies the name of the HTTP header (for + example, "unique-id") that the ingress controller should + inject into HTTP requests. The field's value must be a + valid HTTP header name as defined in RFC 2616 section 4.2. If + the field is empty, no header is injected. + maxLength: 1024 + minLength: 0 + pattern: ^$|^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$ + type: string + type: object + type: object + logging: + description: logging defines parameters for what should be logged + where. If this field is empty, operational logs are enabled but + access logs are disabled. + properties: + access: + description: "access describes how the client requests should + be logged. \n If this field is empty, access logging is disabled." + properties: + destination: + description: destination is where access logs go. + properties: + container: + description: container holds parameters for the Container + logging destination. Present only if type is Container. + type: object + syslog: + description: syslog holds parameters for a syslog endpoint. Present + only if type is Syslog. + oneOf: + - properties: + address: + format: ipv4 + - properties: + address: + format: ipv6 + properties: + address: + description: address is the IP address of the syslog + endpoint that receives log messages. + type: string + facility: + description: "facility specifies the syslog facility + of log messages. \n If this field is empty, the + facility is \"local1\"." + enum: + - kern + - user + - mail + - daemon + - auth + - syslog + - lpr + - news + - uucp + - cron + - auth2 + - ftp + - ntp + - audit + - alert + - cron2 + - local0 + - local1 + - local2 + - local3 + - local4 + - local5 + - local6 + - local7 + type: string + port: + description: port is the UDP port number of the syslog + endpoint that receives log messages. + format: int32 + maximum: 65535 + minimum: 1 + type: integer + required: + - address + - port + type: object + type: + description: "type is the type of destination for logs. + \ It must be one of the following: \n * Container \n + The ingress operator configures the sidecar container + named \"logs\" on the ingress controller pod and configures + the ingress controller to write logs to the sidecar. + \ The logs are then available as container logs. 
The + expectation is that the administrator configures a custom + logging solution that reads logs from this sidecar. + \ Note that using container logs means that logs may + be dropped if the rate of logs exceeds the container + runtime's or the custom logging solution's capacity. + \n * Syslog \n Logs are sent to a syslog endpoint. The + administrator must specify an endpoint that can receive + syslog messages. The expectation is that the administrator + has configured a custom syslog instance." + enum: + - Container + - Syslog + type: string + required: + - type + type: object + httpCaptureCookies: + description: httpCaptureCookies specifies HTTP cookies that + should be captured in access logs. If this field is empty, + no cookies are captured. + items: + description: IngressControllerCaptureHTTPCookie describes + an HTTP cookie that should be captured. + properties: + matchType: + description: matchType specifies the type of match to + be performed on the cookie name. Allowed values are + "Exact" for an exact string match and "Prefix" for + a string prefix match. If "Exact" is specified, a + name must be specified in the name field. If "Prefix" + is provided, a prefix must be specified in the namePrefix + field. For example, specifying matchType "Prefix" + and namePrefix "foo" will capture a cookie named "foo" + or "foobar" but not one named "bar". The first matching + cookie is captured. + enum: + - Exact + - Prefix + type: string + maxLength: + description: maxLength specifies a maximum length of + the string that will be logged, which includes the + cookie name, cookie value, and one-character delimiter. If + the log entry exceeds this length, the value will + be truncated in the log message. Note that the ingress + controller may impose a separate bound on the total + length of HTTP headers in a request. + maximum: 1024 + minimum: 1 + type: integer + name: + description: name specifies a cookie name. Its value + must be a valid HTTP cookie name as defined in RFC + 6265 section 4.1. + maxLength: 1024 + minLength: 0 + pattern: ^[-!#$%&'*+.0-9A-Z^_`a-z|~]*$ + type: string + namePrefix: + description: namePrefix specifies a cookie name prefix. Its + value must be a valid HTTP cookie name as defined + in RFC 6265 section 4.1. + maxLength: 1024 + minLength: 0 + pattern: ^[-!#$%&'*+.0-9A-Z^_`a-z|~]*$ + type: string + required: + - matchType + - maxLength + type: object + maxItems: 1 + nullable: true + type: array + httpCaptureHeaders: + description: "httpCaptureHeaders defines HTTP headers that + should be captured in access logs. If this field is empty, + no headers are captured. \n Note that this option only applies + to cleartext HTTP connections and to secure HTTP connections + for which the ingress controller terminates encryption (that + is, edge-terminated or reencrypt connections). Headers + cannot be captured for TLS passthrough connections." + properties: + request: + description: "request specifies which HTTP request headers + to capture. \n If this field is empty, no request headers + are captured." + items: + description: IngressControllerCaptureHTTPHeader describes + an HTTP header that should be captured. + properties: + maxLength: + description: maxLength specifies a maximum length + for the header value. If a header value exceeds + this length, the value will be truncated in the + log message. Note that the ingress controller + may impose a separate bound on the total length + of HTTP headers in a request. 
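As a sketch of the access-logging fields described above, the following hypothetical spec fragment sends access logs to a syslog collector; the address and port are placeholders, and the facility restates the documented default:

    spec:
      logging:
        access:
          destination:
            type: Syslog
            syslog:
              address: 192.0.2.10   # placeholder collector IP (TEST-NET-1)
              port: 10514           # placeholder UDP port
              facility: local1      # matches the documented default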
+ minimum: 1 + type: integer + name: + description: name specifies a header name. Its + value must be a valid HTTP header name as defined + in RFC 2616 section 4.2. + pattern: ^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$ + type: string + required: + - maxLength + - name + type: object + nullable: true + type: array + response: + description: "response specifies which HTTP response headers + to capture. \n If this field is empty, no response headers + are captured." + items: + description: IngressControllerCaptureHTTPHeader describes + an HTTP header that should be captured. + properties: + maxLength: + description: maxLength specifies a maximum length + for the header value. If a header value exceeds + this length, the value will be truncated in the + log message. Note that the ingress controller + may impose a separate bound on the total length + of HTTP headers in a request. + minimum: 1 + type: integer + name: + description: name specifies a header name. Its + value must be a valid HTTP header name as defined + in RFC 2616 section 4.2. + pattern: ^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$ + type: string + required: + - maxLength + - name + type: object + nullable: true + type: array + type: object + httpLogFormat: + description: "httpLogFormat specifies the format of the log + message for an HTTP request. \n If this field is empty, + log messages use the implementation's default HTTP log format. + \ For HAProxy's default HTTP log format, see the HAProxy + documentation: http://cbonte.github.io/haproxy-dconv/2.0/configuration.html#8.2.3 + \n Note that this format only applies to cleartext HTTP + connections and to secure HTTP connections for which the + ingress controller terminates encryption (that is, edge-terminated + or reencrypt connections). It does not affect the log format + for TLS passthrough connections." + type: string + required: + - destination + type: object + type: object + namespaceSelector: + description: "namespaceSelector is used to filter the set of namespaces + serviced by the ingress controller. This is useful for implementing + shards. \n If unset, the default is no filtering." + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the key + and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to + a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + nodePlacement: + description: "nodePlacement enables explicit control over the scheduling + of the ingress controller. \n If unset, defaults are used. 
See NodePlacement
+ for more details."
+ properties:
+ nodeSelector:
+ description: "nodeSelector is the node selector applied to ingress
+ controller deployments. \n If unset, the default is: \n kubernetes.io/os:
+ linux node-role.kubernetes.io/worker: '' \n If set, the specified
+ selector is used and replaces the default."
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector
+ that contains values, a key, and an operator that relates
+ the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship
+ to a set of values. Valid operators are In, NotIn,
+ Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If
+ the operator is In or NotIn, the values array must
+ be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced
+ during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A
+ single {key,value} in the matchLabels map is equivalent
+ to an element of matchExpressions, whose key field is "key",
+ the operator is "In", and the values array contains only
+ "value". The requirements are ANDed.
+ type: object
+ type: object
+ tolerations:
+ description: "tolerations is a list of tolerations applied to
+ ingress controller deployments. \n The default is an empty list.
+ \n See https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/"
+ items:
+ description: The pod this Toleration is attached to tolerates
+ any taint that matches the triple <key,value,effect> using
+ the matching operator <operator>.
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match.
+ Empty means match all taint effects. When specified, allowed
+ values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration applies
+ to. Empty means match all taint keys. If the key is empty,
+ operator must be Exists; this combination means to match
+ all values and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship to
+ the value. Valid operators are Exists and Equal. Defaults
+ to Equal. Exists is equivalent to wildcard for value,
+ so that a pod can tolerate all taints of a particular
+ category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period of
+ time the toleration (which must be of effect NoExecute,
+ otherwise this field is ignored) tolerates the taint.
+ By default, it is not set, which means tolerate the taint
+ forever (do not evict). Zero and negative values will
+ be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: Value is the taint value the toleration matches
+ to. If the operator is Exists, the value should be empty,
+ otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ type: object
+ replicas:
+ description: replicas is the desired number of ingress controller
+ replicas. If unset, defaults to 2.
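Putting the scheduling fields above together, a hypothetical spec fragment could pin ingress controller pods to worker nodes and tolerate a custom taint; the taint key `infra` is an assumption for illustration only:

    spec:
      replicas: 2            # matches the documented default
      nodePlacement:
        nodeSelector:
          matchLabels:
            node-role.kubernetes.io/worker: ""
        tolerations:
        - key: infra         # hypothetical taint key
          operator: Exists
          effect: NoSchedule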
+ format: int32 + type: integer + routeAdmission: + description: "routeAdmission defines a policy for handling new route + claims (for example, to allow or deny claims across namespaces). + \n If empty, defaults will be applied. See specific routeAdmission + fields for details about their defaults." + properties: + namespaceOwnership: + description: "namespaceOwnership describes how host name claims + across namespaces should be handled. \n Value must be one of: + \n - Strict: Do not allow routes in different namespaces to + claim the same host. \n - InterNamespaceAllowed: Allow routes + to claim different paths of the same host name across namespaces. + \n If empty, the default is Strict." + enum: + - InterNamespaceAllowed + - Strict + type: string + wildcardPolicy: + description: "wildcardPolicy describes how routes with wildcard + policies should be handled for the ingress controller. WildcardPolicy + controls use of routes [1] exposed by the ingress controller + based on the route's wildcard policy. \n [1] https://github.com/openshift/api/blob/master/route/v1/types.go + \n Note: Updating WildcardPolicy from WildcardsAllowed to WildcardsDisallowed + will cause admitted routes with a wildcard policy of Subdomain + to stop working. These routes must be updated to a wildcard + policy of None to be readmitted by the ingress controller. \n + WildcardPolicy supports WildcardsAllowed and WildcardsDisallowed + values. \n If empty, defaults to \"WildcardsDisallowed\"." + enum: + - WildcardsAllowed + - WildcardsDisallowed + type: string + type: object + routeSelector: + description: "routeSelector is used to filter the set of Routes serviced + by the ingress controller. This is useful for implementing shards. + \n If unset, the default is no filtering." + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the key + and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to + a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + tlsSecurityProfile: + description: "tlsSecurityProfile specifies settings for TLS connections + for ingresscontrollers. \n If unset, the default is based on the + apiservers.config.openshift.io/cluster resource. \n Note that when + using the Old, Intermediate, and Modern profile types, the effective + profile configuration is subject to change between releases. 
For + example, given a specification to use the Intermediate profile deployed + on release X.Y.Z, an upgrade to release X.Y.Z+1 may cause a new + profile configuration to be applied to the ingress controller, resulting + in a rollout. \n Note that the minimum TLS version for ingress controllers + is 1.1, and the maximum TLS version is 1.2. An implication of this + restriction is that the Modern TLS profile type cannot be used because + it requires TLS 1.3." + properties: + custom: + description: "custom is a user-defined TLS security profile. Be + extremely careful using a custom profile as invalid configurations + can be catastrophic. An example custom profile looks like this: + \n ciphers: - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 + \ - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 + \ minTLSVersion: TLSv1.1" + nullable: true + properties: + ciphers: + description: "ciphers is used to specify the cipher algorithms + that are negotiated during the TLS handshake. Operators + may remove entries their operands do not support. For example, + to use DES-CBC3-SHA (yaml): \n ciphers: - DES-CBC3-SHA" + items: + type: string + type: array + minTLSVersion: + description: "minTLSVersion is used to specify the minimal + version of the TLS protocol that is negotiated during the + TLS handshake. For example, to use TLS versions 1.1, 1.2 + and 1.3 (yaml): \n minTLSVersion: TLSv1.1 \n NOTE: currently + the highest minTLSVersion allowed is VersionTLS12" + enum: + - VersionTLS10 + - VersionTLS11 + - VersionTLS12 + - VersionTLS13 + type: string + type: object + intermediate: + description: "intermediate is a TLS security profile based on: + \n https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29 + \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 + \ - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 + \ - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 + \ - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 + \ - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 + \ - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 + \ minTLSVersion: TLSv1.2" + nullable: true + type: object + modern: + description: "modern is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility + \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 + \ - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 + \ minTLSVersion: TLSv1.3 \n NOTE: Currently unsupported." 
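The custom profile example given in the description above would be expressed in a spec like the following sketch; note that the schema's enum spells the minimum version VersionTLS11 rather than the TLSv1.1 shorthand used in the prose:

    spec:
      tlsSecurityProfile:
        type: Custom
        custom:
          ciphers:
          - ECDHE-ECDSA-CHACHA20-POLY1305
          - ECDHE-RSA-CHACHA20-POLY1305
          - ECDHE-RSA-AES128-GCM-SHA256
          - ECDHE-ECDSA-AES128-GCM-SHA256
          minTLSVersion: VersionTLS11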
+ nullable: true + type: object + old: + description: "old is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility + \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 + \ - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 + \ - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 + \ - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 + \ - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 + \ - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 + \ - DHE-RSA-CHACHA20-POLY1305 - ECDHE-ECDSA-AES128-SHA256 + \ - ECDHE-RSA-AES128-SHA256 - ECDHE-ECDSA-AES128-SHA + \ - ECDHE-RSA-AES128-SHA - ECDHE-ECDSA-AES256-SHA384 + \ - ECDHE-RSA-AES256-SHA384 - ECDHE-ECDSA-AES256-SHA + \ - ECDHE-RSA-AES256-SHA - DHE-RSA-AES128-SHA256 - + DHE-RSA-AES256-SHA256 - AES128-GCM-SHA256 - AES256-GCM-SHA384 + \ - AES128-SHA256 - AES256-SHA256 - AES128-SHA - + AES256-SHA - DES-CBC3-SHA minTLSVersion: TLSv1.0" + nullable: true + type: object + type: + description: "type is one of Old, Intermediate, Modern or Custom. + Custom provides the ability to specify individual TLS security + profile parameters. Old, Intermediate and Modern are TLS security + profiles based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations + \n The profiles are intent based, so they may change over time + as new ciphers are developed and existing ciphers are found + to be insecure. Depending on precisely which ciphers are available + to a process, the list may be reduced. \n Note that the Modern + profile is currently not supported because it is not yet well + adopted by common software libraries." + enum: + - Old + - Intermediate + - Modern + - Custom + type: string + type: object + tuningOptions: + description: "tuningOptions defines parameters for adjusting the performance + of ingress controller pods. All fields are optional and will use + their respective defaults if not set. See specific tuningOptions + fields for more details. \n Setting fields within tuningOptions + is generally not recommended. The default values are suitable for + most configurations." + properties: + headerBufferBytes: + description: "headerBufferBytes describes how much memory should + be reserved (in bytes) for IngressController connection sessions. + Note that this value must be at least 16384 if HTTP/2 is enabled + for the IngressController (https://tools.ietf.org/html/rfc7540). + If this field is empty, the IngressController will use a default + value of 32768 bytes. \n Setting this field is generally not + recommended as headerBufferBytes values that are too small may + break the IngressController and headerBufferBytes values that + are too large could cause the IngressController to use significantly + more memory than necessary." + format: int32 + minimum: 16384 + type: integer + headerBufferMaxRewriteBytes: + description: "headerBufferMaxRewriteBytes describes how much memory + should be reserved (in bytes) from headerBufferBytes for HTTP + header rewriting and appending for IngressController connection + sessions. Note that incoming HTTP requests will be limited to + (headerBufferBytes - headerBufferMaxRewriteBytes) bytes, meaning + headerBufferBytes must be greater than headerBufferMaxRewriteBytes. + If this field is empty, the IngressController will use a default + value of 8192 bytes. 
\n Setting this field is generally not
+ recommended as headerBufferMaxRewriteBytes values that are too
+ small may break the IngressController and headerBufferMaxRewriteBytes
+ values that are too large could cause the IngressController
+ to use significantly more memory than necessary."
+ format: int32
+ minimum: 4096
+ type: integer
+ threadCount:
+ description: "threadCount defines the number of threads created
+ per HAProxy process. Creating more threads allows each ingress
+ controller pod to handle more connections, at the cost of more
+ system resources being used. HAProxy currently supports up to
+ 64 threads. If this field is empty, the IngressController will
+ use the default value. The current default is 4 threads, but
+ this may change in future releases. \n Setting this field is
+ generally not recommended. Increasing the number of HAProxy
+ threads allows ingress controller pods to utilize more CPU time
+ under load, potentially starving other pods if set too high.
+ Reducing the number of threads may cause the ingress controller
+ to perform poorly."
+ format: int32
+ maximum: 64
+ minimum: 1
+ type: integer
+ type: object
+ unsupportedConfigOverrides:
+ description: unsupportedConfigOverrides allows specifying unsupported
+ configuration options. Its use is unsupported.
+ nullable: true
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ status:
+ description: status is the most recently observed status of the IngressController.
+ properties:
+ availableReplicas:
+ description: availableReplicas is the number of observed available
+ replicas according to the ingress controller deployment.
+ format: int32
+ type: integer
+ conditions:
+ description: "conditions is a list of conditions and their status.
+ \n Available means the ingress controller deployment is available
+ and servicing route and ingress resources (i.e., .status.availableReplicas
+ equals .spec.replicas) \n There are additional conditions which
+ indicate the status of other ingress controller features and capabilities.
+ \n * LoadBalancerManaged - True if the following conditions
+ are met: * The endpoint publishing strategy requires a service
+ load balancer. - False if any of those conditions are unsatisfied.
+ \n * LoadBalancerReady - True if the following conditions are
+ met: * A load balancer is managed. * The load balancer is
+ ready. - False if any of those conditions are unsatisfied. \n
+ \ * DNSManaged - True if the following conditions are met: *
+ The endpoint publishing strategy and platform support DNS. *
+ The ingress controller domain is set. * dns.config.openshift.io/cluster
+ configures DNS zones. - False if any of those conditions are unsatisfied.
+ \n * DNSReady - True if the following conditions are met: *
+ DNS is managed. * DNS records have been successfully created.
+ \ - False if any of those conditions are unsatisfied."
+ items:
+ description: OperatorCondition is just the standard condition fields.
+ properties:
+ lastTransitionTime:
+ format: date-time
+ type: string
+ message:
+ type: string
+ reason:
+ type: string
+ status:
+ type: string
+ type:
+ type: string
+ type: object
+ type: array
+ domain:
+ description: domain is the actual domain in use.
+ type: string
+ endpointPublishingStrategy:
+ description: endpointPublishingStrategy is the actual strategy in
+ use.
+ properties:
+ hostNetwork:
+ description: hostNetwork holds parameters for the HostNetwork
+ endpoint publishing strategy. Present only if type is HostNetwork.
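Looking back at the tuningOptions fields documented earlier in this spec, a fragment that explicitly restates the documented defaults might read as follows; as the descriptions note, deviating from these values is generally not recommended:

    spec:
      tuningOptions:
        headerBufferBytes: 32768           # documented default
        headerBufferMaxRewriteBytes: 8192  # documented default; must stay below headerBufferBytes
        threadCount: 4                     # current documented default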
+ properties: + protocol: + description: "protocol specifies whether the IngressController + expects incoming connections to use plain TCP or whether + the IngressController expects PROXY protocol. \n PROXY protocol + can be used with load balancers that support it to communicate + the source addresses of client connections when forwarding + those connections to the IngressController. Using PROXY + protocol enables the IngressController to report those source + addresses instead of reporting the load balancer's address + in HTTP headers and logs. Note that enabling PROXY protocol + on the IngressController will cause connections to fail + if you are not using a load balancer that uses PROXY protocol + to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt + for information about PROXY protocol. \n The following values + are valid for this field: \n * The empty string. * \"TCP\". + * \"PROXY\". \n The empty string specifies the default, + which is TCP without PROXY protocol. Note that the default + is subject to change." + enum: + - "" + - TCP + - PROXY + type: string + type: object + loadBalancer: + description: loadBalancer holds parameters for the load balancer. + Present only if type is LoadBalancerService. + properties: + providerParameters: + description: "providerParameters holds desired load balancer + information specific to the underlying infrastructure provider. + \n If empty, defaults will be applied. See specific providerParameters + fields for details about their defaults." + properties: + aws: + description: "aws provides configuration settings that + are specific to AWS load balancers. \n If empty, defaults + will be applied. See specific aws fields for details + about their defaults." + properties: + classicLoadBalancer: + description: classicLoadBalancerParameters holds configuration + parameters for an AWS classic load balancer. Present + only if type is Classic. + type: object + networkLoadBalancer: + description: networkLoadBalancerParameters holds configuration + parameters for an AWS network load balancer. Present + only if type is NLB. + type: object + type: + description: "type is the type of AWS load balancer + to instantiate for an ingresscontroller. \n Valid + values are: \n * \"Classic\": A Classic Load Balancer + that makes routing decisions at either the transport + layer (TCP/SSL) or the application layer (HTTP/HTTPS). + See the following for additional details: \n https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#clb + \n * \"NLB\": A Network Load Balancer that makes + routing decisions at the transport layer (TCP/SSL). + See the following for additional details: \n https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#nlb" + enum: + - Classic + - NLB + type: string + required: + - type + type: object + gcp: + description: "gcp provides configuration settings that + are specific to GCP load balancers. \n If empty, defaults + will be applied. See specific gcp fields for details + about their defaults." + properties: + clientAccess: + description: "clientAccess describes how client access + is restricted for internal load balancers. \n Valid + values are: * \"Global\": Specifying an internal + load balancer with Global client access allows + clients from any region within the VPC to communicate + with the load balancer. 
\n https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing#global_access + \n * \"Local\": Specifying an internal load balancer + with Local client access means only clients within + the same region (and VPC) as the GCP load balancer + \ can communicate with the load balancer. Note + that this is the default behavior. \n https://cloud.google.com/load-balancing/docs/internal#client_access" + enum: + - Global + - Local + type: string + type: object + type: + description: type is the underlying infrastructure provider + for the load balancer. Allowed values are "AWS", "Azure", + "BareMetal", "GCP", "OpenStack", and "VSphere". + enum: + - AWS + - Azure + - BareMetal + - GCP + - OpenStack + - VSphere + - IBM + type: string + required: + - type + type: object + scope: + description: scope indicates the scope at which the load balancer + is exposed. Possible values are "External" and "Internal". + enum: + - Internal + - External + type: string + required: + - scope + type: object + nodePort: + description: nodePort holds parameters for the NodePortService + endpoint publishing strategy. Present only if type is NodePortService. + properties: + protocol: + description: "protocol specifies whether the IngressController + expects incoming connections to use plain TCP or whether + the IngressController expects PROXY protocol. \n PROXY protocol + can be used with load balancers that support it to communicate + the source addresses of client connections when forwarding + those connections to the IngressController. Using PROXY + protocol enables the IngressController to report those source + addresses instead of reporting the load balancer's address + in HTTP headers and logs. Note that enabling PROXY protocol + on the IngressController will cause connections to fail + if you are not using a load balancer that uses PROXY protocol + to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt + for information about PROXY protocol. \n The following values + are valid for this field: \n * The empty string. * \"TCP\". + * \"PROXY\". \n The empty string specifies the default, + which is TCP without PROXY protocol. Note that the default + is subject to change." + enum: + - "" + - TCP + - PROXY + type: string + type: object + private: + description: private holds parameters for the Private endpoint + publishing strategy. Present only if type is Private. + type: object + type: + description: "type is the publishing strategy to use. Valid values + are: \n * LoadBalancerService \n Publishes the ingress controller + using a Kubernetes LoadBalancer Service. \n In this configuration, + the ingress controller deployment uses container networking. + A LoadBalancer Service is created to publish the deployment. + \n See: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer + \n If domain is set, a wildcard DNS record will be managed to + point at the LoadBalancer Service's external name. DNS records + are managed only in DNS zones defined by dns.config.openshift.io/cluster + .spec.publicZone and .spec.privateZone. \n Wildcard DNS management + is currently supported only on the AWS, Azure, and GCP platforms. + \n * HostNetwork \n Publishes the ingress controller on node + ports where the ingress controller is deployed. \n In this configuration, + the ingress controller deployment uses host networking, bound + to node ports 80 and 443. 
The user is responsible for configuring + an external load balancer to publish the ingress controller + via the node ports. \n * Private \n Does not publish the ingress + controller. \n In this configuration, the ingress controller + deployment uses container networking, and is not explicitly + published. The user must manually publish the ingress controller. + \n * NodePortService \n Publishes the ingress controller using + a Kubernetes NodePort Service. \n In this configuration, the + ingress controller deployment uses container networking. A NodePort + Service is created to publish the deployment. The specific node + ports are dynamically allocated by OpenShift; however, to support + static port allocations, user changes to the node port field + of the managed NodePort Service will be preserved." + enum: + - LoadBalancerService + - HostNetwork + - Private + - NodePortService + type: string + required: + - type + type: object + observedGeneration: + description: observedGeneration is the most recent generation observed. + format: int64 + type: integer + selector: + description: selector is a label selector, in string format, for ingress + controller pods corresponding to the IngressController. The number + of matching pods should equal the value of availableReplicas. + type: string + tlsProfile: + description: tlsProfile is the TLS connection configuration that is + in effect. + properties: + ciphers: + description: "ciphers is used to specify the cipher algorithms + that are negotiated during the TLS handshake. Operators may + remove entries their operands do not support. For example, + to use DES-CBC3-SHA (yaml): \n ciphers: - DES-CBC3-SHA" + items: + type: string + type: array + minTLSVersion: + description: "minTLSVersion is used to specify the minimal version + of the TLS protocol that is negotiated during the TLS handshake.
+ For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml): \n + \ minTLSVersion: TLSv1.1 \n NOTE: currently the highest minTLSVersion + allowed is VersionTLS12" + enum: + - VersionTLS10 + - VersionTLS11 + - VersionTLS12 + - VersionTLS13 + type: string + type: object + type: object + type: object + served: true + storage: true + subresources: + scale: + labelSelectorPath: .status.selector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.availableReplicas + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_50_ingress-operator_00-ingresscontroller.crd.yaml-patch b/vendor/github.com/openshift/api/operator/v1/0000_50_ingress-operator_00-ingresscontroller.crd.yaml-patch new file mode 100644 index 0000000000..6076c3a31b --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_50_ingress-operator_00-ingresscontroller.crd.yaml-patch @@ -0,0 +1,9 @@ +- op: add + path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/logging/properties/access/properties/destination/properties/syslog/oneOf + value: + - properties: + address: + format: ipv4 + - properties: + address: + format: ipv6 diff --git a/vendor/github.com/openshift/api/operator/v1/0000_50_service-ca-operator_02_crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_50_service-ca-operator_02_crd.yaml new file mode 100644 index 0000000000..c7951a7c57 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_50_service-ca-operator_02_crd.yaml @@ -0,0 +1,163 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/475 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: servicecas.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: ServiceCA + listKind: ServiceCAList + plural: servicecas + singular: serviceca + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: ServiceCA provides information to configure an operator to manage + the service cert controllers + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + logLevel: + description: "logLevel is an intent based logging for an overall component. + \ It does not give fine grained control, but it is a simple way + to manage coarse grained logging choices that operators have to + interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", + \"Trace\", \"TraceAll\". Defaults to \"Normal\"." 
+ type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + managementState: + description: managementState indicates whether and how the operator + should manage the component + type: string + pattern: ^(Managed|Unmanaged|Force|Removed)$ + observedConfig: + description: observedConfig holds a sparse config that controller + has observed from the cluster state. It exists in spec because + it is an input to the level for the operator + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + description: "operatorLogLevel is an intent based logging for the + operator itself. It does not give fine grained control, but it + is a simple way to manage coarse grained logging choices that operators + have to interpret for themselves. \n Valid values are: \"Normal\", + \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + unsupportedConfigOverrides: + description: 'unsupportedConfigOverrides holds a sparse config that + will override any previously set options. It only needs to be the + fields to override it will end up overlaying in the following order: + 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + status: + description: status holds observed values from the cluster. They may not + be overridden. + type: object + properties: + conditions: + description: conditions is a list of conditions and their status + type: array + items: + description: OperatorCondition is just the standard condition fields. + type: object + properties: + lastTransitionTime: + type: string + format: date-time + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + generations: + description: generations are used to determine when an item needs + to be reconciled or has changed in a way that needs a reaction. + type: array + items: + description: GenerationStatus keeps track of the generation for + a given resource so that decisions about forced updates can be + made. 
+ type: object + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without + generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload + controller involved + type: integer + format: int64 + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're + tracking + type: string + observedGeneration: + description: observedGeneration is the last generation change you've + dealt with + type: integer + format: int64 + readyReplicas: + description: readyReplicas indicates how many replicas are ready and + at the desired state + type: integer + format: int32 + version: + description: version is the level this availability applies to + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_70_cluster-network-operator_01_crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_70_cluster-network-operator_01_crd.yaml new file mode 100644 index 0000000000..77b49efe26 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_70_cluster-network-operator_01_crd.yaml @@ -0,0 +1,626 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/475 + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: networks.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: Network + listKind: NetworkList + plural: networks + singular: network + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Network describes the cluster's desired network configuration. + It is consumed by the cluster-network-operator. + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: NetworkSpec is the top-level network configuration object. + type: object + properties: + additionalNetworks: + description: additionalNetworks is a list of extra networks to make + available to pods when multiple networks are enabled. + type: array + items: + description: AdditionalNetworkDefinition configures an extra network + that is available but not created by default. Instead, pods must + request them by name. type must be specified, along with exactly + one "Config" that matches the type. + type: object + properties: + name: + description: name is the name of the network. 
This will be populated + in the resulting CRD. This must be unique. + type: string + namespace: + description: namespace is the namespace of the network. This + will be populated in the resulting CRD. If not given, the network + will be created in the default namespace. + type: string + rawCNIConfig: + description: rawCNIConfig is the raw CNI configuration JSON + to create in the NetworkAttachmentDefinition CRD + type: string + simpleMacvlanConfig: + description: SimpleMacvlanConfig configures the macvlan interface + in case of type:NetworkTypeSimpleMacvlan + type: object + properties: + ipamConfig: + description: IPAMConfig configures the IPAM module to be used + for IP Address Management (IPAM). + type: object + properties: + staticIPAMConfig: + description: StaticIPAMConfig configures the static + IP address in case of type:IPAMTypeStatic + type: object + properties: + addresses: + description: Addresses configures IP address for + the interface + type: array + items: + description: StaticIPAMAddresses provides IP address + and Gateway for static IPAM addresses + type: object + properties: + address: + description: Address is the IP address in + CIDR format + type: string + gateway: + description: Gateway is an IP inside the subnet + to designate as the gateway + type: string + dns: + description: DNS configures DNS for the interface + type: object + properties: + domain: + description: Domain configures the local domain + name used for short hostname lookups + type: string + nameservers: + description: Nameservers points to the DNS servers + used for IP lookup + type: array + items: + type: string + search: + description: Search configures priority ordered + search domains for short hostname lookups + type: array + items: + type: string + routes: + description: Routes configures IP routes for the + interface + type: array + items: + description: StaticIPAMRoutes provides Destination/Gateway + pairs for static IPAM routes + type: object + properties: + destination: + description: Destination specifies the IP route + destination + type: string + gateway: + description: Gateway is the route's next-hop + IP address. If unset, a default gateway is + assumed (as determined by the CNI plugin). + type: string + type: + description: Type is the type of IPAM module that will be + used for IP Address Management (IPAM). The supported + values are IPAMTypeDHCP, IPAMTypeStatic + type: string + master: + description: master is the host interface to create the + macvlan interface from. If not specified, it will be the default + route interface + type: string + mode: + description: 'mode is the macvlan mode: bridge, private, + vepa, passthru. The default is bridge' + type: string + mtu: + description: mtu is the mtu to use for the macvlan interface. + If unset, the host's kernel will select the value. + type: integer + format: int32 + minimum: 0 + type: + description: type is the type of network. The supported values + are NetworkTypeRaw, NetworkTypeSimpleMacvlan + type: string + clusterNetwork: + description: clusterNetwork is the IP address pool to use for pod + IPs. Some network providers, e.g. OpenShift SDN, support multiple + ClusterNetworks. Others only support one. This is equivalent to + the cluster-cidr. + type: array + items: + description: ClusterNetworkEntry is a subnet from which to allocate + PodIPs. A network of size HostPrefix (in CIDR notation) will be + allocated when nodes join the cluster. If the HostPrefix field + is not used by the plugin, it can be left unset.
Not all network + providers support multiple ClusterNetworks + type: object + properties: + cidr: + type: string + hostPrefix: + type: integer + format: int32 + minimum: 0 + defaultNetwork: + description: defaultNetwork is the "default" network that all pods + will receive + type: object + properties: + kuryrConfig: + description: KuryrConfig configures the kuryr plugin + type: object + properties: + controllerProbesPort: + description: The port on which kuryr-controller will listen for readiness + and liveness requests. + type: integer + format: int32 + minimum: 0 + daemonProbesPort: + description: The port on which kuryr-daemon will listen for readiness + and liveness requests. + type: integer + format: int32 + minimum: 0 + enablePortPoolsPrepopulation: + description: enablePortPoolsPrepopulation when true will make + Kuryr prepopulate each newly created port pool with a minimum + number of ports. Kuryr uses Neutron port pooling to fight + the fact that it takes a significant amount of time to create + one. Instead of creating it when a pod is being deployed, + Kuryr keeps a number of ports ready to be attached to pods. + By default, port prepopulation is disabled. + type: boolean + mtu: + description: mtu is the MTU that Kuryr should use when creating + pod networks in Neutron. The value has to be lower or equal + to the MTU of the nodes network and Neutron has to allow + creation of tenant networks with such MTU. If unset, Pod + networks will be created with the same MTU as the nodes + network has. + type: integer + format: int32 + minimum: 0 + openStackServiceNetwork: + description: openStackServiceNetwork contains the CIDR of + network from which to allocate IPs for OpenStack Octavia's + Amphora VMs. Please note that with the Amphora driver Octavia + uses two IPs from that network for each loadbalancer - one + given by OpenShift and a second for VRRP connections. As the + first one is managed by OpenShift's and second by Neutron's + IPAMs, those need to come from different pools. Therefore + `openStackServiceNetwork` needs to be at least twice the + size of `serviceNetwork`, and whole `serviceNetwork` must + be overlapping with `openStackServiceNetwork`. cluster-network-operator + will then make sure VRRP IPs are taken from the ranges inside + `openStackServiceNetwork` that are not overlapping with + `serviceNetwork`, effectively preventing conflicts. If not + set, cluster-network-operator will use `serviceNetwork` expanded + by decrementing the prefix size by 1. + type: string + poolBatchPorts: + description: poolBatchPorts sets a number of ports that should + be created in a single batch request to extend the port + pool. The default is 3. For more information about port + pools see enablePortPoolsPrepopulation setting. + type: integer + minimum: 0 + poolMaxPorts: + description: poolMaxPorts sets a maximum number of free ports + that are being kept in a port pool. If the number of ports + exceeds this setting, free ports will get deleted. Setting + 0 will disable this upper bound, effectively preventing + pools from shrinking and this is the default value. For + more information about port pools see enablePortPoolsPrepopulation + setting. + type: integer + minimum: 0 + poolMinPorts: + description: poolMinPorts sets a minimum number of free ports + that should be kept in a port pool. If the number of ports + is lower than this setting, new ports will get created and + added to the pool. The default is 1. For more information about + port pools see enablePortPoolsPrepopulation setting.
+ type: integer + minimum: 1 + openshiftSDNConfig: + description: openshiftSDNConfig configures the openshift-sdn plugin + type: object + properties: + enableUnidling: + description: enableUnidling controls whether or not the service + proxy will support idling and unidling of services. By default, + unidling is enabled. + type: boolean + mode: + description: mode is one of "Multitenant", "Subnet", or "NetworkPolicy" + type: string + mtu: + description: mtu is the mtu to use for the tunnel interface. + Defaults to 1450 if unset. This must be 50 bytes smaller + than the machine's uplink. + type: integer + format: int32 + minimum: 0 + useExternalOpenvswitch: + description: useExternalOpenvswitch tells the operator not + to install openvswitch, because it will be provided separately. + If set, you must provide it yourself. + type: boolean + vxlanPort: + description: vxlanPort is the port to use for all vxlan packets. + The default is 4789. + type: integer + format: int32 + minimum: 0 + ovnKubernetesConfig: + description: ovnKubernetesConfig configures the ovn-kubernetes + plugin. This is currently not implemented. + type: object + properties: + genevePort: + description: genevePort is the UDP port to be used by Geneve + encapsulation. Default is 6081 + type: integer + format: int32 + minimum: 1 + hybridOverlayConfig: + description: HybridOverlayConfig configures an additional + overlay network for peers that are not using OVN. + type: object + properties: + hybridClusterNetwork: + description: HybridClusterNetwork defines a network space + given to nodes on an additional overlay network. + type: array + items: + description: ClusterNetworkEntry is a subnet from which + to allocate PodIPs. A network of size HostPrefix (in + CIDR notation) will be allocated when nodes join the + cluster. If the HostPrefix field is not used by the + plugin, it can be left unset. Not all network providers + support multiple ClusterNetworks + type: object + properties: + cidr: + type: string + hostPrefix: + type: integer + format: int32 + minimum: 0 + hybridOverlayVXLANPort: + description: HybridOverlayVXLANPort defines the VXLAN + port number to be used by the additional overlay network. + Default is 4789 + type: integer + format: int32 + ipsecConfig: + description: ipsecConfig enables and configures IPsec for + pods on the pod network within the cluster. + type: object + mtu: + description: mtu is the MTU to use for the tunnel interface. + This must be 100 bytes smaller than the uplink mtu. Default + is 1400 + type: integer + format: int32 + minimum: 0 + policyAuditConfig: + description: policyAuditConfig is the configuration for network + policy audit events. If unset, reported defaults are used. + type: object + properties: + destination: + description: 'destination is the location for policy log + messages. Regardless of this config, persistent logs + will always be dumped to the host at /var/log/ovn/. + Additionally, syslog output may be configured as follows.
+ Valid values are: - "libc" -> to use the libc syslog() + function of the host node''s journald process - "udp:host:port" + -> for sending syslog over UDP - "unix:file" -> for + using the UNIX domain socket directly - "null" -> to + discard all messages logged to syslog. The default is + "null"' + type: string + default: "null" + maxFileSize: + description: maxFileSize is the max size an ACL_audit + log file is allowed to reach before rotation occurs. + Units are in MB and the default is 50MB + type: integer + format: int32 + default: 50 + minimum: 1 + rateLimit: + description: rateLimit is the approximate maximum number + of messages to generate per-second per-node. If unset, + the default of 20 msg/sec is used. + type: integer + format: int32 + default: 20 + minimum: 1 + syslogFacility: + description: syslogFacility is the RFC5424 facility for generated + messages, e.g. "kern". Default is "local0" + type: string + default: local0 + type: + description: type is the type of network. All NetworkTypes are + supported except for NetworkTypeRaw + type: string + deployKubeProxy: + description: deployKubeProxy specifies whether or not a standalone + kube-proxy should be deployed by the operator. Some network providers + include kube-proxy or similar functionality. If unset, the plugin + will attempt to select the correct value, which is false when OpenShift + SDN and ovn-kubernetes are used and true otherwise. + type: boolean + disableMultiNetwork: + description: disableMultiNetwork specifies whether or not multiple + pod network support should be disabled. If unset, this property + defaults to 'false' and multiple network support is enabled. + type: boolean + disableNetworkDiagnostics: + description: disableNetworkDiagnostics specifies whether or not PodNetworkConnectivityCheck + CRs from a test pod to every node, apiserver and LB should be disabled. + If unset, this property defaults to 'false' and network + diagnostics is enabled. Setting this to 'true' would reduce the + additional load of the pods performing the checks. + type: boolean + default: false + exportNetworkFlows: + description: exportNetworkFlows enables and configures the export + of network flow metadata from the pod network by using protocols + NetFlow, SFlow or IPFIX. Currently only supported on the OVN-Kubernetes + plugin. If unset, flows will not be exported to any collector. + type: object + properties: + ipfix: + description: ipfix defines IPFIX configuration. + type: object + properties: + collectors: + description: ipfixCollectors is a list of strings formatted + as ip:port, with a maximum of ten items + type: array + maxItems: 10 + minItems: 1 + items: + type: string + pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]):[0-9]+$ + netFlow: + description: netFlow defines the NetFlow configuration. + type: object + properties: + collectors: + description: netFlow defines the NetFlow collectors that will + consume the flow data exported from OVS. It is a list of + strings formatted as ip:port, with a maximum of ten items + type: array + maxItems: 10 + minItems: 1 + items: + type: string + pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]):[0-9]+$ + sFlow: + description: sFlow defines the SFlow configuration.
+ type: object + properties: + collectors: + description: sFlowCollectors is a list of strings formatted + as ip:port, with a maximum of ten items + type: array + maxItems: 10 + minItems: 1 + items: + type: string + pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]):[0-9]+$ + kubeProxyConfig: + description: kubeProxyConfig lets us configure the desired proxy configuration. + If not specified, sensible defaults will be chosen by OpenShift + directly. Not consumed by all network providers - currently only + openshift-sdn. + type: object + properties: + bindAddress: + description: The address to "bind" on. Defaults to 0.0.0.0 + type: string + iptablesSyncPeriod: + description: 'An internal kube-proxy parameter. In older releases + of OCP, this sometimes needed to be adjusted in large clusters + for performance reasons, but this is no longer necessary, and + there is no reason to change this from the default value. Default: + 30s' + type: string + proxyArguments: + description: Any additional arguments to pass to the kubeproxy + process + type: object + additionalProperties: + description: ProxyArgumentList is a list of arguments to pass + to the kubeproxy process + type: array + items: + type: string + logLevel: + description: "logLevel is an intent based logging for an overall component. + \ It does not give fine grained control, but it is a simple way + to manage coarse grained logging choices that operators have to + interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", + \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + managementState: + description: managementState indicates whether and how the operator + should manage the component + type: string + pattern: ^(Managed|Unmanaged|Force|Removed)$ + migration: + description: migration enables and configures the cluster network + migration. Set this to the target network type to allow changing + the default network. If unset, the operation of changing the cluster + default network plugin will be rejected. + type: object + properties: + networkType: + description: networkType is the target type of network migration. + The supported values are OpenShiftSDN, OVNKubernetes + type: string + observedConfig: + description: observedConfig holds a sparse config that controller + has observed from the cluster state. It exists in spec because + it is an input to the level for the operator + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + description: "operatorLogLevel is an intent based logging for the + operator itself. It does not give fine grained control, but it + is a simple way to manage coarse grained logging choices that operators + have to interpret for themselves. \n Valid values are: \"Normal\", + \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + serviceNetwork: + description: serviceNetwork is the IP address pool to use for Service + IPs. Currently, all existing network providers only support a single + value here, but this is an array to allow for growth. + type: array + items: + type: string + unsupportedConfigOverrides: + description: 'unsupportedConfigOverrides holds a sparse config that + will override any previously set options.
It only needs to be the + fields to override it will end up overlaying in the following order: + 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + useMultiNetworkPolicy: + description: useMultiNetworkPolicy enables a controller which allows + for MultiNetworkPolicy objects to be used on additional networks + as created by Multus CNI. MultiNetworkPolicy are similar to NetworkPolicy + objects, but NetworkPolicy objects only apply to the primary interface. + With MultiNetworkPolicy, you can control the traffic that a pod + can receive over the secondary interfaces. If unset, this property + defaults to 'false' and MultiNetworkPolicy objects are ignored. + If 'disableMultiNetwork' is 'true' then the value of this field + is ignored. + type: boolean + status: + description: NetworkStatus is detailed operator status, which is distilled + up to the Network clusteroperator object. + type: object + properties: + conditions: + description: conditions is a list of conditions and their status + type: array + items: + description: OperatorCondition is just the standard condition fields. + type: object + properties: + lastTransitionTime: + type: string + format: date-time + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + generations: + description: generations are used to determine when an item needs + to be reconciled or has changed in a way that needs a reaction. + type: array + items: + description: GenerationStatus keeps track of the generation for + a given resource so that decisions about forced updates can be + made. + type: object + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without + generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload + controller involved + type: integer + format: int64 + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're + tracking + type: string + observedGeneration: + description: observedGeneration is the last generation change you've + dealt with + type: integer + format: int64 + readyReplicas: + description: readyReplicas indicates how many replicas are ready and + at the desired state + type: integer + format: int32 + version: + description: version is the level this availability applies to + type: string + served: true + storage: true diff --git a/vendor/github.com/openshift/api/operator/v1/0000_70_console-operator.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_70_console-operator.crd.yaml new file mode 100644 index 0000000000..4bef02fe28 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_70_console-operator.crd.yaml @@ -0,0 +1,370 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/486 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: consoles.operator.openshift.io +spec: + group: operator.openshift.io + 
names: + kind: Console + listKind: ConsoleList + plural: consoles + singular: console + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Console provides a means to configure an operator to manage the + console. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ConsoleSpec is the specification of the desired behavior + of the Console. + type: object + properties: + customization: + description: customization is used to optionally provide a small set + of customization options to the web console. + type: object + properties: + addPage: + description: addPage allows customizing actions on the Add page + in the developer perspective. + type: object + properties: + disabledActions: + description: disabledActions is a list of actions that are + not shown to users. Each action in the list is represented + by its ID. + type: array + minItems: 1 + items: + type: string + brand: + description: brand is the default branding of the web console, + which can be overridden by providing the brand field. There + is a limited set of specific brand options. This field controls + elements of the console such as the logo. An invalid value will + prevent a console rollout. + type: string + pattern: ^$|^(ocp|origin|okd|dedicated|online|azure)$ + customLogoFile: + description: 'customLogoFile replaces the default OpenShift logo + in the masthead and about dialog. It is a reference to a ConfigMap + in the openshift-config namespace. This can be created with + a command like ''oc create configmap custom-logo --from-file=/path/to/file + -n openshift-config''. Image size must be less than 1 MB due + to constraints on the ConfigMap size. The ConfigMap key should + include a file extension so that the console serves the file + with the correct MIME type. Recommended logo specifications: + Dimensions: Max height of 68px and max width of 200px. SVG format + preferred' + type: object + properties: + key: + description: Key allows pointing to a specific key/value inside + of the configmap. This is useful for logical file references. + type: string + name: + type: string + customProductName: + description: customProductName is the name that will be displayed + in page titles, logo alt text, and the about dialog instead + of the normal OpenShift product name. + type: string + developerCatalog: + description: developerCatalog allows configuring the developer + catalog categories that are shown. + type: object + properties: + categories: + description: categories which are shown in the developer catalog. + type: array + items: + description: DeveloperConsoleCatalogCategory for the developer + console catalog. + type: object + required: + - id + - label + properties: + id: + description: ID is an identifier used in the URL to + enable deep linking in the console.
ID is required and + must have 1-32 URL safe (A-Z, a-z, 0-9, - and _) characters. + type: string + maxLength: 32 + minLength: 1 + pattern: ^[A-Za-z0-9-_]+$ + label: + description: label defines a category display label. + It is required and must have 1-64 characters. + type: string + maxLength: 64 + minLength: 1 + subcategories: + description: subcategories defines a list of child categories. + type: array + items: + description: DeveloperConsoleCatalogCategoryMeta are + the key identifiers of a developer catalog category. + type: object + required: + - id + - label + properties: + id: + description: ID is an identifier used in the URL + to enable deep linking in the console. ID is required + and must have 1-32 URL safe (A-Z, a-z, 0-9, + - and _) characters. + type: string + maxLength: 32 + minLength: 1 + pattern: ^[A-Za-z0-9-_]+$ + label: + description: label defines a category display + label. It is required and must have 1-64 characters. + type: string + maxLength: 64 + minLength: 1 + tags: + description: tags is a list of strings that will + match the category. A selected category shows + all items which have at least one overlapping + tag between category and item. + type: array + items: + type: string + tags: + description: tags is a list of strings that will match + the category. A selected category shows all items which + have at least one overlapping tag between category + and item. + type: array + items: + type: string + documentationBaseURL: + description: documentationBaseURL links to the external documentation + that is shown in various sections of the web console. Providing + documentationBaseURL will override the default documentation + URL. An invalid value will prevent a console rollout. + type: string + pattern: ^$|^((https):\/\/?)[^\s()<>]+(?:\([\w\d]+\)|([^[:punct:]\s]|\/?))\/$ + projectAccess: + description: projectAccess allows customizing the available list + of ClusterRoles in the Developer perspective Project access + page, which can be used by a project admin to specify roles to + other users and restrict access within the project. If set, + the list will replace the default ClusterRole options. + type: object + properties: + availableClusterRoles: + description: availableClusterRoles is the list of ClusterRole + names that are assignable to users through the project access + tab. + type: array + items: + type: string + quickStarts: + description: quickStarts allows customization of available ConsoleQuickStart + resources in the console. + type: object + properties: + disabled: + description: disabled is a list of ConsoleQuickStart resource + names that are not shown to users. + type: array + items: + type: string + logLevel: + description: "logLevel is an intent based logging for an overall component. + \ It does not give fine grained control, but it is a simple way + to manage coarse grained logging choices that operators have to + interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", + \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + managementState: + description: managementState indicates whether and how the operator + should manage the component + type: string + pattern: ^(Managed|Unmanaged|Force|Removed)$ + observedConfig: + description: observedConfig holds a sparse config that controller + has observed from the cluster state.
It exists in spec because + it is an input to the level for the operator + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + description: "operatorLogLevel is an intent based logging for the + operator itself. It does not give fine grained control, but it + is a simple way to manage coarse grained logging choices that operators + have to interpret for themselves. \n Valid values are: \"Normal\", + \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + plugins: + description: plugins defines a list of enabled console plugin names. + type: array + items: + type: string + providers: + description: providers contains configuration for using specific service + providers. + type: object + properties: + statuspage: + description: statuspage contains the ID for the statuspage.io page that + provides status info about the console. + type: object + properties: + pageID: + description: pageID is the unique ID assigned by Statuspage + for your page. This must be a public page. + type: string + route: + description: route contains the hostname and secret reference that contains + the serving certificate. If a custom route is specified, a new route + will be created with the provided hostname, under which the console + will be available. In case the custom hostname uses the default routing + suffix of the cluster, the Secret specification for a serving certificate + will not be needed. In case the custom hostname points to an arbitrary + domain, manual DNS configuration steps are necessary. The default + console route will be maintained to reserve the default hostname + for the console if the custom route is removed. If not specified, the default + route will be used. DEPRECATED + type: object + properties: + hostname: + description: hostname is the desired custom domain under which + the console will be available. + type: string + secret: + description: 'secret points to the secret in the openshift-config + namespace that contains the custom certificate and key and needs + to be created manually by the cluster admin. The referenced Secret + is required to contain the following key/value pairs: - "tls.crt" + - specifies the custom certificate - "tls.key" - specifies the + private key of the custom certificate. If the custom hostname + uses the default routing suffix of the cluster, the Secret specification + for a serving certificate will not be needed.' + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + unsupportedConfigOverrides: + description: 'unsupportedConfigOverrides holds a sparse config that + will override any previously set options. It only needs to be the + fields to override it will end up overlaying in the following order: + 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + status: + description: ConsoleStatus defines the observed status of the Console. + type: object + properties: + conditions: + description: conditions is a list of conditions and their status + type: array + items: + description: OperatorCondition is just the standard condition fields.
+ type: object + properties: + lastTransitionTime: + type: string + format: date-time + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + generations: + description: generations are used to determine when an item needs + to be reconciled or has changed in a way that needs a reaction. + type: array + items: + description: GenerationStatus keeps track of the generation for + a given resource so that decisions about forced updates can be + made. + type: object + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without + generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload + controller involved + type: integer + format: int64 + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're + tracking + type: string + observedGeneration: + description: observedGeneration is the last generation change you've + dealt with + type: integer + format: int64 + readyReplicas: + description: readyReplicas indicates how many replicas are ready and + at the desired state + type: integer + format: int32 + version: + description: version is the level this availability applies to + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_70_dns-operator_00-custom-resource-definition.yaml b/vendor/github.com/openshift/api/operator/v1/0000_70_dns-operator_00-custom-resource-definition.yaml new file mode 100644 index 0000000000..62183b3e35 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_70_dns-operator_00-custom-resource-definition.yaml @@ -0,0 +1,206 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: dnses.operator.openshift.io + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/475 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" +spec: + group: operator.openshift.io + names: + kind: DNS + listKind: DNSList + plural: dnses + singular: dns + scope: Cluster + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + schema: + openAPIV3Schema: + description: "DNS manages the CoreDNS component to provide a name resolution + service for pods and services in the cluster. \n This supports the DNS-based + service discovery specification: https://github.com/kubernetes/dns/blob/master/docs/specification.md + \n More details: https://kubernetes.io/docs/tasks/administer-cluster/coredns" + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. 
Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the specification of the desired behavior of the + DNS. + type: object + properties: + nodePlacement: + description: "nodePlacement provides explicit control over the scheduling + of DNS pods. \n Generally, it is useful to run a DNS pod on every + node so that DNS queries are always handled by a local DNS pod instead + of going over the network to a DNS pod on another node. However, + security policies may require restricting the placement of DNS pods + to specific nodes. For example, if a security policy prohibits pods + on arbitrary nodes from communicating with the API, a node selector + can be specified to restrict DNS pods to nodes that are permitted + to communicate with the API. Conversely, if running DNS pods on + nodes with a particular taint is desired, a toleration can be specified + for that taint. \n If unset, defaults are used. See nodePlacement + for more details." + type: object + properties: + nodeSelector: + description: "nodeSelector is the node selector applied to DNS + pods. \n If empty, the default is used, which is currently the + following: \n kubernetes.io/os: linux \n This default is subject + to change. \n If set, the specified selector is used and replaces + the default." + type: object + additionalProperties: + type: string + tolerations: + description: "tolerations is a list of tolerations applied to + DNS pods. \n The default is an empty list. This default is + subject to change. \n See https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/" + type: array + items: + description: The pod this Toleration is attached to tolerates + any taint that matches the triple <key,value,effect> using + the matching operator <operator>. + type: object + properties: + effect: + description: Effect indicates the taint effect to match. + Empty means match all taint effects. When specified, allowed + values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies + to. Empty means match all taint keys. If the key is empty, + operator must be Exists; this combination means to match + all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to + the value. Valid operators are Exists and Equal. Defaults + to Equal. Exists is equivalent to wildcard for value, + so that a pod can tolerate all taints of a particular + category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of + time the toleration (which must be of effect NoExecute, + otherwise this field is ignored) tolerates the taint. + By default, it is not set, which means tolerate the taint + forever (do not evict). Zero and negative values will + be treated as 0 (evict immediately) by the system. + type: integer + format: int64 + value: + description: Value is the taint value the toleration matches + to. If the operator is Exists, the value should be empty, + otherwise just a regular string. + type: string + servers: + description: "servers is a list of DNS resolvers that provide name + query delegation for one or more subdomains outside the scope of + the cluster domain. If servers consists of more than one Server, + longest suffix match will be used to determine the Server.
\n For + example, if there are two Servers, one for \"foo.com\" and another + for \"a.foo.com\", and the name query is for \"www.a.foo.com\", + it will be routed to the Server with Zone \"a.foo.com\". \n If this + field is nil, no servers are created." + type: array + items: + description: Server defines the schema for a server that runs per + instance of CoreDNS. + type: object + properties: + forwardPlugin: + description: forwardPlugin defines a schema for configuring + CoreDNS to proxy DNS messages to upstream resolvers. + type: object + properties: + upstreams: + description: "upstreams is a list of resolvers to forward + name queries for subdomains of Zones. Upstreams are randomized + when more than 1 upstream is specified. Each instance + of CoreDNS performs health checking of Upstreams. When + a healthy upstream returns an error during the exchange, + another resolver is tried from Upstreams. Each upstream + is represented by an IP address or IP:port if the upstream + listens on a port other than 53. \n A maximum of 15 upstreams + is allowed per ForwardPlugin." + type: array + maxItems: 15 + items: + type: string + name: + description: name is required and specifies a unique name for + the server. Name must comply with the Service Name Syntax + of rfc6335. + type: string + zones: + description: zones is required and specifies the subdomains + that Server is authoritative for. Zones must conform to the + rfc1123 definition of a subdomain. Specifying the cluster + domain (i.e., "cluster.local") is invalid. + type: array + items: + type: string + status: + description: status is the most recently observed status of the DNS. + type: object + required: + - clusterDomain + - clusterIP + properties: + clusterDomain: + description: "clusterDomain is the local cluster DNS domain suffix + for DNS services. This will be a subdomain as defined in RFC 1034, + section 3.5: https://tools.ietf.org/html/rfc1034#section-3.5 Example: + \"cluster.local\" \n More info: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service" + type: string + clusterIP: + description: "clusterIP is the service IP through which this DNS is + made available. \n In the case of the default DNS, this will be + a well known IP that is used as the default nameserver for pods + that are using the default ClusterFirst DNS policy. \n In general, + this IP can be specified in a pod's spec.dnsConfig.nameservers list + or used explicitly when performing name resolution from within the + cluster. Example: dig foo.com @<service IP> \n More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies" + type: string + conditions: + description: "conditions provide information about the state of the + DNS on the cluster. \n These are the supported DNS conditions: \n + \ * Available - True if the following conditions are met: * + DNS controller daemonset is available. - False if any of those + conditions are unsatisfied." + type: array + items: + description: OperatorCondition is just the standard condition fields.
+ type: object + properties: + lastTransitionTime: + type: string + format: date-time + message: + type: string + reason: + type: string + status: + type: string + type: + type: string diff --git a/vendor/github.com/openshift/api/operator/v1/0000_80_csi_snapshot_controller_operator_01_crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_80_csi_snapshot_controller_operator_01_crd.yaml new file mode 100644 index 0000000000..5e4596f018 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_80_csi_snapshot_controller_operator_01_crd.yaml @@ -0,0 +1,162 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/562 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: csisnapshotcontrollers.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: CSISnapshotController + plural: csisnapshotcontrollers + singular: csisnapshotcontroller + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: CSISnapshotController provides a means to configure an operator + to manage the CSI snapshots. `cluster` is the canonical name. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + logLevel: + description: "logLevel is an intent based logging for an overall component. + \ It does not give fine grained control, but it is a simple way + to manage coarse grained logging choices that operators have to + interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", + \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + managementState: + description: managementState indicates whether and how the operator + should manage the component + type: string + pattern: ^(Managed|Unmanaged|Force|Removed)$ + observedConfig: + description: observedConfig holds a sparse config that controller + has observed from the cluster state. It exists in spec because + it is an input to the level for the operator + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + description: "operatorLogLevel is an intent based logging for the + operator itself. It does not give fine grained control, but it + is a simple way to manage coarse grained logging choices that operators + have to interpret for themselves. \n Valid values are: \"Normal\", + \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." 
+ type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + unsupportedConfigOverrides: + description: 'unsupportedConfigOverrides holds a sparse config that + will override any previously set options. It only needs to be the + fields to override it will end up overlaying in the following order: + 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + status: + description: status holds observed values from the cluster. They may not + be overridden. + type: object + properties: + conditions: + description: conditions is a list of conditions and their status + type: array + items: + description: OperatorCondition is just the standard condition fields. + type: object + properties: + lastTransitionTime: + type: string + format: date-time + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + generations: + description: generations are used to determine when an item needs + to be reconciled or has changed in a way that needs a reaction. + type: array + items: + description: GenerationStatus keeps track of the generation for + a given resource so that decisions about forced updates can be + made. + type: object + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without + generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload + controller involved + type: integer + format: int64 + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're + tracking + type: string + observedGeneration: + description: observedGeneration is the last generation change you've + dealt with + type: integer + format: int64 + readyReplicas: + description: readyReplicas indicates how many replicas are ready and + at the desired state + type: integer + format: int32 + version: + description: version is the level this availability applies to + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_90_cluster_csi_driver_01_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_90_cluster_csi_driver_01_config.crd.yaml new file mode 100644 index 0000000000..21c94a0132 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_90_cluster_csi_driver_01_config.crd.yaml @@ -0,0 +1,176 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/701 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: clustercsidrivers.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: ClusterCSIDriver + plural: clustercsidrivers + singular: clustercsidriver + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: ClusterCSIDriver object allows management and configuration of + a CSI driver operator installed by default in OpenShift. 
Name of the object + must be name of the CSI driver it operates. See CSIDriverName type for list + of allowed values. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + properties: + name: + enum: + - ebs.csi.aws.com + - disk.csi.azure.com + - pd.csi.storage.gke.io + - cinder.csi.openstack.org + - csi.vsphere.vmware.com + - manila.csi.openstack.org + - csi.ovirt.org + - csi.kubevirt.io + type: string + type: object + spec: + description: spec holds user settable values for configuration + properties: + logLevel: + default: Normal + description: "logLevel is an intent based logging for an overall component. + \ It does not give fine grained control, but it is a simple way + to manage coarse grained logging choices that operators have to + interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", + \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + type: string + managementState: + description: managementState indicates whether and how the operator + should manage the component + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + observedConfig: + description: observedConfig holds a sparse config that controller + has observed from the cluster state. It exists in spec because + it is an input to the level for the operator + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + default: Normal + description: "operatorLogLevel is an intent based logging for the + operator itself. It does not give fine grained control, but it + is a simple way to manage coarse grained logging choices that operators + have to interpret for themselves. \n Valid values are: \"Normal\", + \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + type: string + unsupportedConfigOverrides: + description: 'unsupportedConfigOverrides holds a sparse config that + will override any previously set options. It only needs to be the + fields to override it will end up overlaying in the following order: + 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + status: + description: status holds observed values from the cluster. They may not + be overridden. + properties: + conditions: + description: conditions is a list of conditions and their status + items: + description: OperatorCondition is just the standard condition fields. + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + type: object + type: array + generations: + description: generations are used to determine when an item needs + to be reconciled or has changed in a way that needs a reaction. 
+ items: + description: GenerationStatus keeps track of the generation for + a given resource so that decisions about forced updates can be + made. + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without + generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload + controller involved + format: int64 + type: integer + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're + tracking + type: string + type: object + type: array + observedGeneration: + description: observedGeneration is the last generation change you've + dealt with + format: int64 + type: integer + readyReplicas: + description: readyReplicas indicates how many replicas are ready and + at the desired state + format: int32 + type: integer + version: + description: version is the level this availability applies to + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_90_cluster_csi_driver_01_config.crd.yaml-patch b/vendor/github.com/openshift/api/operator/v1/0000_90_cluster_csi_driver_01_config.crd.yaml-patch new file mode 100644 index 0000000000..dfcaf8b44e --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_90_cluster_csi_driver_01_config.crd.yaml-patch @@ -0,0 +1,14 @@ +- op: add + path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/metadata/properties + value: + name: + type: string + enum: + - ebs.csi.aws.com + - disk.csi.azure.com + - pd.csi.storage.gke.io + - cinder.csi.openstack.org + - csi.vsphere.vmware.com + - manila.csi.openstack.org + - csi.ovirt.org + - csi.kubevirt.io diff --git a/vendor/github.com/openshift/api/operator/v1/doc.go b/vendor/github.com/openshift/api/operator/v1/doc.go new file mode 100644 index 0000000000..3de961a7fc --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/doc.go @@ -0,0 +1,7 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +kubebuilder:validation:Optional +// +groupName=operator.openshift.io +package v1 diff --git a/vendor/github.com/openshift/api/operator/v1/register.go b/vendor/github.com/openshift/api/operator/v1/register.go new file mode 100644 index 0000000000..71727a824c --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/register.go @@ -0,0 +1,76 @@ +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "operator.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, configv1.Install) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on 
this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func addKnownTypes(scheme *runtime.Scheme) error { + metav1.AddToGroupVersion(scheme, GroupVersion) + + scheme.AddKnownTypes(GroupVersion, + &Authentication{}, + &AuthenticationList{}, + &DNS{}, + &DNSList{}, + &CloudCredential{}, + &CloudCredentialList{}, + &ClusterCSIDriver{}, + &ClusterCSIDriverList{}, + &Console{}, + &ConsoleList{}, + &CSISnapshotController{}, + &CSISnapshotControllerList{}, + &Etcd{}, + &EtcdList{}, + &KubeAPIServer{}, + &KubeAPIServerList{}, + &KubeControllerManager{}, + &KubeControllerManagerList{}, + &KubeScheduler{}, + &KubeSchedulerList{}, + &KubeStorageVersionMigrator{}, + &KubeStorageVersionMigratorList{}, + &Network{}, + &NetworkList{}, + &OpenShiftAPIServer{}, + &OpenShiftAPIServerList{}, + &OpenShiftControllerManager{}, + &OpenShiftControllerManagerList{}, + &ServiceCA{}, + &ServiceCAList{}, + &ServiceCatalogAPIServer{}, + &ServiceCatalogAPIServerList{}, + &ServiceCatalogControllerManager{}, + &ServiceCatalogControllerManagerList{}, + &IngressController{}, + &IngressControllerList{}, + &Storage{}, + &StorageList{}, + ) + + return nil +} diff --git a/vendor/github.com/openshift/api/operator/v1/types.go b/vendor/github.com/openshift/api/operator/v1/types.go new file mode 100644 index 0000000000..c4586ad317 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types.go @@ -0,0 +1,227 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// MyOperatorResource is an example operator configuration type +type MyOperatorResource struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + // +kubebuilder:validation:Required + // +required + Spec MyOperatorResourceSpec `json:"spec"` + Status MyOperatorResourceStatus `json:"status"` +} + +type MyOperatorResourceSpec struct { + OperatorSpec `json:",inline"` +} + +type MyOperatorResourceStatus struct { + OperatorStatus `json:",inline"` +} + +// +kubebuilder:validation:Pattern=`^(Managed|Unmanaged|Force|Removed)$` +type ManagementState string + +var ( + // Force means that the operator is actively managing its resources but will not block an upgrade + // if unmet prereqs exist. This state puts the operator at risk for unsuccessful upgrades + Force ManagementState = "Force" + // Managed means that the operator is actively managing its resources and trying to keep the component active. + // It will only upgrade the component if it is safe to do so + Managed ManagementState = "Managed" + // Unmanaged means that the operator will not take any action related to the component + // Some operators might not support this management state as it might damage the cluster and lead to manual recovery. + Unmanaged ManagementState = "Unmanaged" + // Removed means that the operator is actively managing its resources and trying to remove all traces of the component + // Some operators (like kube-apiserver-operator) might not support this management state as removing the API server will + // brick the cluster. + Removed ManagementState = "Removed" +) + +// OperatorSpec contains common fields operators need. It is intended to be anonymous included +// inside of the Spec struct for your particular operator. 
+type OperatorSpec struct { + // managementState indicates whether and how the operator should manage the component + ManagementState ManagementState `json:"managementState"` + + // logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a + // simple way to manage coarse grained logging choices that operators have to interpret for their operands. + // + // Valid values are: "Normal", "Debug", "Trace", "TraceAll". + // Defaults to "Normal". + // +optional + // +kubebuilder:default=Normal + LogLevel LogLevel `json:"logLevel,omitempty"` + + // operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a + // simple way to manage coarse grained logging choices that operators have to interpret for themselves. + // + // Valid values are: "Normal", "Debug", "Trace", "TraceAll". + // Defaults to "Normal". + // +optional + // +kubebuilder:default=Normal + OperatorLogLevel LogLevel `json:"operatorLogLevel,omitempty"` + + // unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to be the fields to override + // it will end up overlaying in the following order: + // 1. hardcoded defaults + // 2. observedConfig + // 3. unsupportedConfigOverrides + // +optional + // +nullable + // +kubebuilder:pruning:PreserveUnknownFields + UnsupportedConfigOverrides runtime.RawExtension `json:"unsupportedConfigOverrides"` + + // observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because + // it is an input to the level for the operator + // +optional + // +nullable + // +kubebuilder:pruning:PreserveUnknownFields + ObservedConfig runtime.RawExtension `json:"observedConfig"` +} + +// +kubebuilder:validation:Enum="";Normal;Debug;Trace;TraceAll +type LogLevel string + +var ( + // Normal is the default. Normal, working log information, everything is fine, but helpful notices for auditing or common operations. In kube, this is probably glog=2. + Normal LogLevel = "Normal" + + // Debug is used when something went wrong. Even common operations may be logged, and less helpful but more quantity of notices. In kube, this is probably glog=4. + Debug LogLevel = "Debug" + + // Trace is used when something went really badly and even more verbose logs are needed. Logging every function call as part of a common operation, to tracing execution of a query. In kube, this is probably glog=6. + Trace LogLevel = "Trace" + + // TraceAll is used when something is broken at the level of API content/decoding. It will dump complete body content. If you turn this on in a production cluster + // prepare from serious performance issues and massive amounts of logs. In kube, this is probably glog=8. + TraceAll LogLevel = "TraceAll" +) + +type OperatorStatus struct { + // observedGeneration is the last generation change you've dealt with + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // conditions is a list of conditions and their status + // +optional + Conditions []OperatorCondition `json:"conditions,omitempty"` + + // version is the level this availability applies to + // +optional + Version string `json:"version,omitempty"` + + // readyReplicas indicates how many replicas are ready and at the desired state + ReadyReplicas int32 `json:"readyReplicas"` + + // generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. 
+ // +optional + Generations []GenerationStatus `json:"generations,omitempty"` +} + +// GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. +type GenerationStatus struct { + // group is the group of the thing you're tracking + Group string `json:"group"` + // resource is the resource type of the thing you're tracking + Resource string `json:"resource"` + // namespace is where the thing you're tracking is + Namespace string `json:"namespace"` + // name is the name of the thing you're tracking + Name string `json:"name"` + // lastGeneration is the last generation of the workload controller involved + LastGeneration int64 `json:"lastGeneration"` + // hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + Hash string `json:"hash"` +} + +var ( + // Available indicates that the operand is present and accessible in the cluster + OperatorStatusTypeAvailable = "Available" + // Progressing indicates that the operator is trying to transition the operand to a different state + OperatorStatusTypeProgressing = "Progressing" + // Degraded indicates that the operator (not the operand) is unable to fulfill the user intent + OperatorStatusTypeDegraded = "Degraded" + // PrereqsSatisfied indicates that the things this operator depends on are present and at levels compatible with the + // current and desired states. + OperatorStatusTypePrereqsSatisfied = "PrereqsSatisfied" + // Upgradeable indicates that the operator configuration itself (not prereqs) can be auto-upgraded by the CVO + OperatorStatusTypeUpgradeable = "Upgradeable" +) + +// OperatorCondition is just the standard condition fields. +type OperatorCondition struct { + Type string `json:"type"` + Status ConditionStatus `json:"status"` + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` + Reason string `json:"reason,omitempty"` + Message string `json:"message,omitempty"` +} + +type ConditionStatus string + +const ( + ConditionTrue ConditionStatus = "True" + ConditionFalse ConditionStatus = "False" + ConditionUnknown ConditionStatus = "Unknown" +) + +// StaticPodOperatorSpec is spec for controllers that manage static pods. +type StaticPodOperatorSpec struct { + OperatorSpec `json:",inline"` + + // forceRedeploymentReason can be used to force the redeployment of the operand by providing a unique string. + // This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work + // this time instead of failing again on the same config. + ForceRedeploymentReason string `json:"forceRedeploymentReason"` + + // failedRevisionLimit is the number of failed static pod installer revisions to keep on disk and in the api + // -1 = unlimited, 0 or unset = 5 (default) + FailedRevisionLimit int32 `json:"failedRevisionLimit,omitempty"` + // succeededRevisionLimit is the number of successful static pod installer revisions to keep on disk and in the api + // -1 = unlimited, 0 or unset = 5 (default) + SucceededRevisionLimit int32 `json:"succeededRevisionLimit,omitempty"` +} + +// StaticPodOperatorStatus is status for controllers that manage static pods. There are different needs because individual +// node status must be tracked. 
+type StaticPodOperatorStatus struct { + OperatorStatus `json:",inline"` + + // latestAvailableRevision is the deploymentID of the most recent deployment + // +optional + LatestAvailableRevision int32 `json:"latestAvailableRevision,omitEmpty"` + + // latestAvailableRevisionReason describe the detailed reason for the most recent deployment + // +optional + LatestAvailableRevisionReason string `json:"latestAvailableRevisionReason,omitEmpty"` + + // nodeStatuses track the deployment values and errors across individual nodes + // +optional + NodeStatuses []NodeStatus `json:"nodeStatuses,omitempty"` +} + +// NodeStatus provides information about the current state of a particular node managed by this operator. +type NodeStatus struct { + // nodeName is the name of the node + NodeName string `json:"nodeName"` + + // currentRevision is the generation of the most recently successful deployment + CurrentRevision int32 `json:"currentRevision"` + // targetRevision is the generation of the deployment we're trying to apply + TargetRevision int32 `json:"targetRevision,omitempty"` + + // lastFailedRevision is the generation of the deployment we tried and failed to deploy. + LastFailedRevision int32 `json:"lastFailedRevision,omitempty"` + // lastFailedTime is the time the last failed revision failed the last time. + LastFailedTime *metav1.Time `json:"lastFailedTime,omitempty"` + // lastFailedCount is how often the last failed revision failed. + LastFailedCount int `json:"lastFailedCount,omitempty"` + // lastFailedRevisionErrors is a list of the errors during the failed deployment referenced in lastFailedRevision + LastFailedRevisionErrors []string `json:"lastFailedRevisionErrors,omitempty"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_authentication.go b/vendor/github.com/openshift/api/operator/v1/types_authentication.go new file mode 100644 index 0000000000..61c777cf26 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_authentication.go @@ -0,0 +1,51 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Authentication provides information to configure an operator to manage authentication. +type Authentication struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +kubebuilder:validation:Required + // +required + Spec AuthenticationSpec `json:"spec,omitempty"` + // +optional + Status AuthenticationStatus `json:"status,omitempty"` +} + +type AuthenticationSpec struct { + OperatorSpec `json:",inline"` +} + +type AuthenticationStatus struct { + // OAuthAPIServer holds status specific only to oauth-apiserver + // +optional + OAuthAPIServer OAuthAPIServerStatus `json:"oauthAPIServer,omitempty"` + + OperatorStatus `json:",inline"` +} + +type OAuthAPIServerStatus struct { + // LatestAvailableRevision is the latest revision used as suffix of revisioned + // secrets like encryption-config. A new revision causes a new deployment of pods. 
+ // +optional + // +kubebuilder:validation:Minimum=0 + LatestAvailableRevision int32 `json:"latestAvailableRevision,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AuthenticationList is a collection of items +type AuthenticationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Authentication `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_cloudcredential.go b/vendor/github.com/openshift/api/operator/v1/types_cloudcredential.go new file mode 100644 index 0000000000..8d1806cd6c --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_cloudcredential.go @@ -0,0 +1,76 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CloudCredential provides a means to configure an operator to manage CredentialsRequests. +type CloudCredential struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +kubebuilder:validation:Required + // +required + Spec CloudCredentialSpec `json:"spec"` + // +optional + Status CloudCredentialStatus `json:"status"` +} + +// CloudCredentialsMode is the specified mode the cloud-credential-operator +// should reconcile CredentialsRequest with +// +kubebuilder:validation:Enum="";Manual;Mint;Passthrough +type CloudCredentialsMode string + +const ( + // CloudCredentialsModeManual tells cloud-credential-operator to not reconcile any CredentialsRequests + // (primarily used for the disconnected VPC use-cases). + CloudCredentialsModeManual CloudCredentialsMode = "Manual" + + // CloudCredentialsModeMint tells cloud-credential-operator to reconcile all CredentialsRequests + // by minting new users/credentials. + CloudCredentialsModeMint CloudCredentialsMode = "Mint" + + // CloudCredentialsModePassthrough tells cloud-credential-operator to reconcile all CredentialsRequests + // by copying the cloud-specific secret data. + CloudCredentialsModePassthrough CloudCredentialsMode = "Passthrough" + + // CloudCredentialsModeDefault puts CCO into the default mode of operation (per-cloud/platform defaults): + // AWS/Azure/GCP: dynamically determine cluster's cloud credential capabilities to affect + // processing of CredentialsRequests + // All other clouds/platforms (OpenStack, oVirt, vSphere, etc): run in "passthrough" mode + CloudCredentialsModeDefault CloudCredentialsMode = "" +) + +// CloudCredentialSpec is the specification of the desired behavior of the cloud-credential-operator. +type CloudCredentialSpec struct { + OperatorSpec `json:",inline"` + // CredentialsMode allows informing CCO that it should not attempt to dynamically + // determine the root cloud credentials capabilities, and it should just run in + // the specified mode. + // It also allows putting the operator into "manual" mode if desired. + // Leaving the field in default mode runs CCO so that the cluster's cloud credentials + // will be dynamically probed for capabilities (on supported clouds/platforms). + // Supported modes: + // AWS/Azure/GCP: "" (Default), "Mint", "Passthrough", "Manual" + // Others: Do not set value as other platforms only support running in "Passthrough" + // +optional + CredentialsMode CloudCredentialsMode `json:"credentialsMode,omitempty"` +} + +// CloudCredentialStatus defines the observed status of the cloud-credential-operator. 
+type CloudCredentialStatus struct { + OperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type CloudCredentialList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []CloudCredential `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_config.go b/vendor/github.com/openshift/api/operator/v1/types_config.go new file mode 100644 index 0000000000..e073269ffa --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_config.go @@ -0,0 +1,43 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Config provides information to configure the config operator. It handles installation, migration or synchronization of cloud based cluster configurations like AWS or Azure. +type Config struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + // spec is the specification of the desired behavior of the Config Operator. + // +kubebuilder:validation:Required + // +required + Spec ConfigSpec `json:"spec"` + + // status defines the observed status of the Config Operator. + // +optional + Status ConfigStatus `json:"status"` +} + +type ConfigSpec struct { + OperatorSpec `json:",inline"` +} + +type ConfigStatus struct { + OperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ConfigList is a collection of items +type ConfigList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + // Items contains the items + Items []Config `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_console.go b/vendor/github.com/openshift/api/operator/v1/types_console.go new file mode 100644 index 0000000000..2f6443df70 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_console.go @@ -0,0 +1,228 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + configv1 "github.com/openshift/api/config/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Console provides a means to configure an operator to manage the console. +type Console struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +kubebuilder:validation:Required + // +required + Spec ConsoleSpec `json:"spec,omitempty"` + // +optional + Status ConsoleStatus `json:"status,omitempty"` +} + +// ConsoleSpec is the specification of the desired behavior of the Console. +type ConsoleSpec struct { + OperatorSpec `json:",inline"` + // customization is used to optionally provide a small set of + // customization options to the web console. + // +optional + Customization ConsoleCustomization `json:"customization"` + // providers contains configuration for using specific service providers. + Providers ConsoleProviders `json:"providers"` + // route contains hostname and secret reference that contains the serving certificate. + // If a custom route is specified, a new route will be created with the + // provided hostname, under which console will be available. + // In case of custom hostname uses the default routing suffix of the cluster, + // the Secret specification for a serving certificate will not be needed. 
+ // In case of custom hostname points to an arbitrary domain, manual DNS configurations steps are necessary. + // The default console route will be maintained to reserve the default hostname + // for console if the custom route is removed. + // If not specified, default route will be used. + // DEPRECATED + // +optional + Route ConsoleConfigRoute `json:"route"` + // plugins defines a list of enabled console plugin names. + // +optional + Plugins []string `json:"plugins,omitempty"` +} + +// ConsoleConfigRoute holds information on external route access to console. +// DEPRECATED +type ConsoleConfigRoute struct { + // hostname is the desired custom domain under which console will be available. + Hostname string `json:"hostname"` + // secret points to secret in the openshift-config namespace that contains custom + // certificate and key and needs to be created manually by the cluster admin. + // Referenced Secret is required to contain following key value pairs: + // - "tls.crt" - to specifies custom certificate + // - "tls.key" - to specifies private key of the custom certificate + // If the custom hostname uses the default routing suffix of the cluster, + // the Secret specification for a serving certificate will not be needed. + // +optional + Secret configv1.SecretNameReference `json:"secret"` +} + +// ConsoleStatus defines the observed status of the Console. +type ConsoleStatus struct { + OperatorStatus `json:",inline"` +} + +// ConsoleProviders defines a list of optional additional providers of +// functionality to the console. +type ConsoleProviders struct { + // statuspage contains ID for statuspage.io page that provides status info about. + // +optional + Statuspage *StatuspageProvider `json:"statuspage,omitempty"` +} + +// StatuspageProvider provides identity for statuspage account. +type StatuspageProvider struct { + // pageID is the unique ID assigned by Statuspage for your page. This must be a public page. + PageID string `json:"pageID"` +} + +// ConsoleCustomization defines a list of optional configuration for the console UI. +type ConsoleCustomization struct { + // brand is the default branding of the web console which can be overridden by + // providing the brand field. There is a limited set of specific brand options. + // This field controls elements of the console such as the logo. + // Invalid value will prevent a console rollout. + Brand Brand `json:"brand,omitempty"` + // documentationBaseURL links to external documentation are shown in various sections + // of the web console. Providing documentationBaseURL will override the default + // documentation URL. + // Invalid value will prevent a console rollout. + // +kubebuilder:validation:Pattern=`^$|^((https):\/\/?)[^\s()<>]+(?:\([\w\d]+\)|([^[:punct:]\s]|\/?))\/$` + DocumentationBaseURL string `json:"documentationBaseURL,omitempty"` + // customProductName is the name that will be displayed in page titles, logo alt text, and the about dialog + // instead of the normal OpenShift product name. + // +optional + CustomProductName string `json:"customProductName,omitempty"` + // customLogoFile replaces the default OpenShift logo in the masthead and about dialog. It is a reference to a + // ConfigMap in the openshift-config namespace. This can be created with a command like + // 'oc create configmap custom-logo --from-file=/path/to/file -n openshift-config'. + // Image size must be less than 1 MB due to constraints on the ConfigMap size. 
+ // The ConfigMap key should include a file extension so that the console serves the file + // with the correct MIME type. + // Recommended logo specifications: + // Dimensions: Max height of 68px and max width of 200px + // SVG format preferred + // +optional + CustomLogoFile configv1.ConfigMapFileReference `json:"customLogoFile,omitempty"` + // developerCatalog allows to configure the shown developer catalog categories. + // +kubebuilder:validation:Optional + // +optional + DeveloperCatalog DeveloperConsoleCatalogCustomization `json:"developerCatalog,omitempty"` + // projectAccess allows customizing the available list of ClusterRoles in the Developer perspective + // Project access page which can be used by a project admin to specify roles to other users and + // restrict access within the project. If set, the list will replace the default ClusterRole options. + // +kubebuilder:validation:Optional + // +optional + ProjectAccess ProjectAccess `json:"projectAccess,omitempty"` + // quickStarts allows customization of available ConsoleQuickStart resources in console. + // +kubebuilder:validation:Optional + // +optional + QuickStarts QuickStarts `json:"quickStarts,omitempty"` + // addPage allows customizing actions on the Add page in developer perspective. + // +kubebuilder:validation:Optional + // +optional + AddPage AddPage `json:"addPage,omitempty"` +} + +// ProjectAccess contains options for project access roles +type ProjectAccess struct { + // availableClusterRoles is the list of ClusterRole names that are assignable to users + // through the project access tab. + // +kubebuilder:validation:Optional + // +optional + AvailableClusterRoles []string `json:"availableClusterRoles,omitempty"` +} + +// DeveloperConsoleCatalogCustomization allow cluster admin to configure developer catalog. +type DeveloperConsoleCatalogCustomization struct { + // categories which are shown in the developer catalog. + // +kubebuilder:validation:Optional + // +optional + Categories []DeveloperConsoleCatalogCategory `json:"categories,omitempty"` +} + +// DeveloperConsoleCatalogCategoryMeta are the key identifiers of a developer catalog category. +type DeveloperConsoleCatalogCategoryMeta struct { + // ID is an identifier used in the URL to enable deep linking in console. + // ID is required and must have 1-32 URL safe (A-Z, a-z, 0-9, - and _) characters. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=32 + // +kubebuilder:validation:Pattern=`^[A-Za-z0-9-_]+$` + // +required + ID string `json:"id"` + // label defines a category display label. It is required and must have 1-64 characters. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=64 + // +required + Label string `json:"label"` + // tags is a list of strings that will match the category. A selected category + // show all items which has at least one overlapping tag between category and item. + // +kubebuilder:validation:Optional + // +optional + Tags []string `json:"tags,omitempty"` +} + +// DeveloperConsoleCatalogCategory for the developer console catalog. +type DeveloperConsoleCatalogCategory struct { + // defines top level category ID, label and filter tags. + DeveloperConsoleCatalogCategoryMeta `json:",inline"` + // subcategories defines a list of child categories. 
+ // +kubebuilder:validation:Optional + // +optional + Subcategories []DeveloperConsoleCatalogCategoryMeta `json:"subcategories,omitempty"` +} + +// QuickStarts allow cluster admins to customize available ConsoleQuickStart resources. +type QuickStarts struct { + // disabled is a list of ConsoleQuickStart resource names that are not shown to users. + // +kubebuilder:validation:Optional + // +optional + Disabled []string `json:"disabled,omitempty"` +} + +// AddPage allows customizing actions on the Add page in developer perspective. +type AddPage struct { + // disabledActions is a list of actions that are not shown to users. + // Each action in the list is represented by its ID. + // +kubebuilder:validation:Optional + // +kubebuilder:validation:MinItems=1 + // +optional + DisabledActions []string `json:"disabledActions,omitempty"` +} + +// Brand is a specific supported brand within the console. +// +kubebuilder:validation:Pattern=`^$|^(ocp|origin|okd|dedicated|online|azure)$` +type Brand string + +const ( + // Branding for OpenShift + BrandOpenShift Brand = "openshift" + // Branding for The Origin Community Distribution of Kubernetes + BrandOKD Brand = "okd" + // Branding for OpenShift Online + BrandOnline Brand = "online" + // Branding for OpenShift Container Platform + BrandOCP Brand = "ocp" + // Branding for OpenShift Dedicated + BrandDedicated Brand = "dedicated" + // Branding for Azure Red Hat OpenShift + BrandAzure Brand = "azure" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type ConsoleList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Console `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go b/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go new file mode 100644 index 0000000000..09413dc740 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go @@ -0,0 +1,71 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ClusterCSIDriver is used to manage and configure CSI driver installed by default +// in OpenShift. An example configuration may look like: +// apiVersion: operator.openshift.io/v1 +// kind: "ClusterCSIDriver" +// metadata: +// name: "ebs.csi.aws.com" +// spec: +// logLevel: Debug + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterCSIDriver object allows management and configuration of a CSI driver operator +// installed by default in OpenShift. Name of the object must be name of the CSI driver +// it operates. See CSIDriverName type for list of allowed values. +type ClusterCSIDriver struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +kubebuilder:validation:Required + // +required + Spec ClusterCSIDriverSpec `json:"spec"` + + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status ClusterCSIDriverStatus `json:"status"` +} + +// CSIDriverName is the name of the CSI driver +type CSIDriverName string + +// If you are adding a new driver name here, ensure that kubebuilder:validation:Enum is updated above +// and 0000_90_cluster_csi_driver_01_config.crd.yaml-merge-patch file is also updated with new driver name. 
+const ( + AWSEBSCSIDriver CSIDriverName = "ebs.csi.aws.com" + AzureDiskCSIDriver CSIDriverName = "disk.csi.azure.com" + GCPPDCSIDriver CSIDriverName = "pd.csi.storage.gke.io" + CinderCSIDriver CSIDriverName = "cinder.csi.openstack.org" + VSphereCSIDriver CSIDriverName = "csi.vsphere.vmware.com" + ManilaCSIDriver CSIDriverName = "manila.csi.openstack.org" + OvirtCSIDriver CSIDriverName = "csi.ovirt.org" + KubevirtCSIDriver CSIDriverName = "csi.kubevirt.io" +) + +// ClusterCSIDriverSpec is the desired behavior of CSI driver operator +type ClusterCSIDriverSpec struct { + OperatorSpec `json:",inline"` +} + +// ClusterCSIDriverStatus is the observed status of CSI driver operator +type ClusterCSIDriverStatus struct { + OperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true + +// ClusterCSIDriverList contains a list of ClusterCSIDriver +type ClusterCSIDriverList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ClusterCSIDriver `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_csi_snapshot.go b/vendor/github.com/openshift/api/operator/v1/types_csi_snapshot.go new file mode 100644 index 0000000000..5b6c06aaff --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_csi_snapshot.go @@ -0,0 +1,44 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CSISnapshotController provides a means to configure an operator to manage the CSI snapshots. `cluster` is the canonical name. +type CSISnapshotController struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +kubebuilder:validation:Required + // +required + Spec CSISnapshotControllerSpec `json:"spec"` + + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status CSISnapshotControllerStatus `json:"status"` +} + +// CSISnapshotControllerSpec is the specification of the desired behavior of the CSISnapshotController operator. +type CSISnapshotControllerSpec struct { + OperatorSpec `json:",inline"` +} + +// CSISnapshotControllerStatus defines the observed status of the CSISnapshotController operator. +type CSISnapshotControllerStatus struct { + OperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true + +// CSISnapshotControllerList contains a list of CSISnapshotControllers. 
+type CSISnapshotControllerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []CSISnapshotController `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_dns.go b/vendor/github.com/openshift/api/operator/v1/types_dns.go new file mode 100644 index 0000000000..ce4cf32389 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_dns.go @@ -0,0 +1,178 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + corev1 "k8s.io/api/core/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=dnses,scope=Cluster +// +kubebuilder:subresource:status + +// DNS manages the CoreDNS component to provide a name resolution service +// for pods and services in the cluster. +// +// This supports the DNS-based service discovery specification: +// https://github.com/kubernetes/dns/blob/master/docs/specification.md +// +// More details: https://kubernetes.io/docs/tasks/administer-cluster/coredns +type DNS struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec is the specification of the desired behavior of the DNS. + Spec DNSSpec `json:"spec,omitempty"` + // status is the most recently observed status of the DNS. + Status DNSStatus `json:"status,omitempty"` +} + +// DNSSpec is the specification of the desired behavior of the DNS. +type DNSSpec struct { + // servers is a list of DNS resolvers that provide name query delegation for one or + // more subdomains outside the scope of the cluster domain. If servers consists of + // more than one Server, longest suffix match will be used to determine the Server. + // + // For example, if there are two Servers, one for "foo.com" and another for "a.foo.com", + // and the name query is for "www.a.foo.com", it will be routed to the Server with Zone + // "a.foo.com". + // + // If this field is nil, no servers are created. + // + // +optional + Servers []Server `json:"servers,omitempty"` + + // nodePlacement provides explicit control over the scheduling of DNS + // pods. + // + // Generally, it is useful to run a DNS pod on every node so that DNS + // queries are always handled by a local DNS pod instead of going over + // the network to a DNS pod on another node. However, security policies + // may require restricting the placement of DNS pods to specific nodes. + // For example, if a security policy prohibits pods on arbitrary nodes + // from communicating with the API, a node selector can be specified to + // restrict DNS pods to nodes that are permitted to communicate with the + // API. Conversely, if running DNS pods on nodes with a particular + // taint is desired, a toleration can be specified for that taint. + // + // If unset, defaults are used. See nodePlacement for more details. + // + // +optional + NodePlacement DNSNodePlacement `json:"nodePlacement,omitempty"` +} + +// Server defines the schema for a server that runs per instance of CoreDNS. +type Server struct { + // name is required and specifies a unique name for the server. Name must comply + // with the Service Name Syntax of rfc6335. + Name string `json:"name"` + // zones is required and specifies the subdomains that Server is authoritative for. + // Zones must conform to the rfc1123 definition of a subdomain. Specifying the + // cluster domain (i.e., "cluster.local") is invalid. 
+ Zones []string `json:"zones"` + // forwardPlugin defines a schema for configuring CoreDNS to proxy DNS messages + // to upstream resolvers. + ForwardPlugin ForwardPlugin `json:"forwardPlugin"` +} + +// ForwardPlugin defines a schema for configuring the CoreDNS forward plugin. +type ForwardPlugin struct { + // upstreams is a list of resolvers to forward name queries for subdomains of Zones. + // Upstreams are randomized when more than 1 upstream is specified. Each instance of + // CoreDNS performs health checking of Upstreams. When a healthy upstream returns an + // error during the exchange, another resolver is tried from Upstreams. Each upstream + // is represented by an IP address or IP:port if the upstream listens on a port other + // than 53. + // + // A maximum of 15 upstreams is allowed per ForwardPlugin. + // + // +kubebuilder:validation:MaxItems=15 + Upstreams []string `json:"upstreams"` +} + +// DNSNodePlacement describes the node scheduling configuration for DNS pods. +type DNSNodePlacement struct { + // nodeSelector is the node selector applied to DNS pods. + // + // If empty, the default is used, which is currently the following: + // + // kubernetes.io/os: linux + // + // This default is subject to change. + // + // If set, the specified selector is used and replaces the default. + // + // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + + // tolerations is a list of tolerations applied to DNS pods. + // + // The default is an empty list. This default is subject to change. + // + // See https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + // + // +optional + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` +} + +const ( + // Available indicates the DNS controller daemonset is available. + DNSAvailable = "Available" +) + +// DNSStatus defines the observed status of the DNS. +type DNSStatus struct { + // clusterIP is the service IP through which this DNS is made available. + // + // In the case of the default DNS, this will be a well known IP that is used + // as the default nameserver for pods that are using the default ClusterFirst DNS policy. + // + // In general, this IP can be specified in a pod's spec.dnsConfig.nameservers list + // or used explicitly when performing name resolution from within the cluster. + // Example: dig foo.com @ + // + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + // + // +kubebuilder:validation:Required + // +required + ClusterIP string `json:"clusterIP"` + + // clusterDomain is the local cluster DNS domain suffix for DNS services. + // This will be a subdomain as defined in RFC 1034, + // section 3.5: https://tools.ietf.org/html/rfc1034#section-3.5 + // Example: "cluster.local" + // + // More info: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service + // + // +kubebuilder:validation:Required + // +required + ClusterDomain string `json:"clusterDomain"` + + // conditions provide information about the state of the DNS on the cluster. + // + // These are the supported DNS conditions: + // + // * Available + // - True if the following conditions are met: + // * DNS controller daemonset is available. + // - False if any of those conditions are unsatisfied. 
+ // + // +patchMergeKey=type + // +patchStrategy=merge + // +optional + Conditions []OperatorCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true + +// DNSList contains a list of DNS +type DNSList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + + Items []DNS `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_etcd.go b/vendor/github.com/openshift/api/operator/v1/types_etcd.go new file mode 100644 index 0000000000..106c92b813 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_etcd.go @@ -0,0 +1,40 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Etcd provides information to configure an operator to manage etcd. +type Etcd struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + // +kubebuilder:validation:Required + // +required + Spec EtcdSpec `json:"spec"` + // +optional + Status EtcdStatus `json:"status"` +} + +type EtcdSpec struct { + StaticPodOperatorSpec `json:",inline"` +} + +type EtcdStatus struct { + StaticPodOperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KubeAPISOperatorConfigList is a collection of items +type EtcdList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + // Items contains the items + Items []Etcd `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_ingress.go b/vendor/github.com/openshift/api/operator/v1/types_ingress.go new file mode 100644 index 0000000000..126b53cf0d --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_ingress.go @@ -0,0 +1,1170 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + corev1 "k8s.io/api/core/v1" + + configv1 "github.com/openshift/api/config/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.availableReplicas,selectorpath=.status.selector + +// IngressController describes a managed ingress controller for the cluster. The +// controller can service OpenShift Route and Kubernetes Ingress resources. +// +// When an IngressController is created, a new ingress controller deployment is +// created to allow external traffic to reach the services that expose Ingress +// or Route resources. Updating this resource may lead to disruption for public +// facing network connections as a new ingress controller revision may be rolled +// out. +// +// https://kubernetes.io/docs/concepts/services-networking/ingress-controllers +// +// Whenever possible, sensible defaults for the platform are used. See each +// field for more details. +type IngressController struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec is the specification of the desired behavior of the IngressController. + Spec IngressControllerSpec `json:"spec,omitempty"` + // status is the most recently observed status of the IngressController. 
+ Status IngressControllerStatus `json:"status,omitempty"` +} + +// IngressControllerSpec is the specification of the desired behavior of the +// IngressController. +type IngressControllerSpec struct { + // domain is a DNS name serviced by the ingress controller and is used to + // configure multiple features: + // + // * For the LoadBalancerService endpoint publishing strategy, domain is + // used to configure DNS records. See endpointPublishingStrategy. + // + // * When using a generated default certificate, the certificate will be valid + // for domain and its subdomains. See defaultCertificate. + // + // * The value is published to individual Route statuses so that end-users + // know where to target external DNS records. + // + // domain must be unique among all IngressControllers, and cannot be + // updated. + // + // If empty, defaults to ingress.config.openshift.io/cluster .spec.domain. + // + // +optional + Domain string `json:"domain,omitempty"` + + // httpErrorCodePages specifies a configmap with custom error pages. + // The administrator must create this configmap in the openshift-config namespace. + // This configmap should have keys in the format "error-page-.http", + // where is an HTTP error code. + // For example, "error-page-503.http" defines an error page for HTTP 503 responses. + // Currently only error pages for 503 and 404 responses can be customized. + // Each value in the configmap should be the full response, including HTTP headers. + // Eg- https://raw.githubusercontent.com/openshift/router/fadab45747a9b30cc3f0a4b41ad2871f95827a93/images/router/haproxy/conf/error-page-503.http + // If this field is empty, the ingress controller uses the default error pages. + HttpErrorCodePages configv1.ConfigMapNameReference `json:"httpErrorCodePages,omitempty"` + + // replicas is the desired number of ingress controller replicas. If unset, + // defaults to 2. + // + // +optional + Replicas *int32 `json:"replicas,omitempty"` + + // endpointPublishingStrategy is used to publish the ingress controller + // endpoints to other networks, enable load balancer integrations, etc. + // + // If unset, the default is based on + // infrastructure.config.openshift.io/cluster .status.platform: + // + // AWS: LoadBalancerService (with External scope) + // Azure: LoadBalancerService (with External scope) + // GCP: LoadBalancerService (with External scope) + // IBMCloud: LoadBalancerService (with External scope) + // Libvirt: HostNetwork + // + // Any other platform types (including None) default to HostNetwork. + // + // endpointPublishingStrategy cannot be updated. + // + // +optional + EndpointPublishingStrategy *EndpointPublishingStrategy `json:"endpointPublishingStrategy,omitempty"` + + // defaultCertificate is a reference to a secret containing the default + // certificate served by the ingress controller. When Routes don't specify + // their own certificate, defaultCertificate is used. + // + // The secret must contain the following keys and data: + // + // tls.crt: certificate file contents + // tls.key: key file contents + // + // If unset, a wildcard certificate is automatically generated and used. The + // certificate is valid for the ingress controller domain (and subdomains) and + // the generated certificate's CA will be automatically integrated with the + // cluster's trust store. + // + // If a wildcard certificate is used and shared by multiple + // HTTP/2 enabled routes (which implies ALPN) then clients + // (i.e., notably browsers) are at liberty to reuse open + // connections. 
This means a client can reuse a connection to + // another route and that is likely to fail. This behaviour is + // generally known as connection coalescing. + // + // The in-use certificate (whether generated or user-specified) will be + // automatically integrated with OpenShift's built-in OAuth server. + // + // +optional + DefaultCertificate *corev1.LocalObjectReference `json:"defaultCertificate,omitempty"` + + // namespaceSelector is used to filter the set of namespaces serviced by the + // ingress controller. This is useful for implementing shards. + // + // If unset, the default is no filtering. + // + // +optional + NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty"` + + // routeSelector is used to filter the set of Routes serviced by the ingress + // controller. This is useful for implementing shards. + // + // If unset, the default is no filtering. + // + // +optional + RouteSelector *metav1.LabelSelector `json:"routeSelector,omitempty"` + + // nodePlacement enables explicit control over the scheduling of the ingress + // controller. + // + // If unset, defaults are used. See NodePlacement for more details. + // + // +optional + NodePlacement *NodePlacement `json:"nodePlacement,omitempty"` + + // tlsSecurityProfile specifies settings for TLS connections for ingresscontrollers. + // + // If unset, the default is based on the apiservers.config.openshift.io/cluster resource. + // + // Note that when using the Old, Intermediate, and Modern profile types, the effective + // profile configuration is subject to change between releases. For example, given + // a specification to use the Intermediate profile deployed on release X.Y.Z, an upgrade + // to release X.Y.Z+1 may cause a new profile configuration to be applied to the ingress + // controller, resulting in a rollout. + // + // Note that the minimum TLS version for ingress controllers is 1.1, and + // the maximum TLS version is 1.2. An implication of this restriction + // is that the Modern TLS profile type cannot be used because it + // requires TLS 1.3. + // + // +optional + TLSSecurityProfile *configv1.TLSSecurityProfile `json:"tlsSecurityProfile,omitempty"` + + // routeAdmission defines a policy for handling new route claims (for example, + // to allow or deny claims across namespaces). + // + // If empty, defaults will be applied. See specific routeAdmission fields + // for details about their defaults. + // + // +optional + RouteAdmission *RouteAdmissionPolicy `json:"routeAdmission,omitempty"` + + // logging defines parameters for what should be logged where. If this + // field is empty, operational logs are enabled but access logs are + // disabled. + // + // +optional + Logging *IngressControllerLogging `json:"logging,omitempty"` + + // httpHeaders defines policy for HTTP headers. + // + // If this field is empty, the default values are used. + // + // +optional + HTTPHeaders *IngressControllerHTTPHeaders `json:"httpHeaders,omitempty"` + + // tuningOptions defines parameters for adjusting the performance of + // ingress controller pods. All fields are optional and will use their + // respective defaults if not set. See specific tuningOptions fields for + // more details. + // + // Setting fields within tuningOptions is generally not recommended. The + // default values are suitable for most configurations. + // + // +optional + TuningOptions IngressControllerTuningOptions `json:"tuningOptions,omitempty"` + + // unsupportedConfigOverrides allows specifying unsupported + // configuration options. 
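
The domain, replicas, and selector fields above compose naturally into a sharded controller. A minimal sketch, assuming the vendored import path github.com/openshift/api/operator/v1; the domain and label values are hypothetical:

    package sketch

    import (
    	operatorv1 "github.com/openshift/api/operator/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // shardSpec sketches a sharded ingress controller: it serves only Routes
    // labeled type=internal and runs two replicas.
    func shardSpec() operatorv1.IngressControllerSpec {
    	replicas := int32(2)
    	return operatorv1.IngressControllerSpec{
    		// domain must be unique among all IngressControllers.
    		Domain:   "apps-internal.example.com",
    		Replicas: &replicas,
    		RouteSelector: &metav1.LabelSelector{
    			MatchLabels: map[string]string{"type": "internal"},
    		},
    	}
    }
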
Its use is unsupported. + // + // +optional + // +nullable + // +kubebuilder:pruning:PreserveUnknownFields + UnsupportedConfigOverrides runtime.RawExtension `json:"unsupportedConfigOverrides"` +} + +// NodePlacement describes node scheduling configuration for an ingress +// controller. +type NodePlacement struct { + // nodeSelector is the node selector applied to ingress controller + // deployments. + // + // If unset, the default is: + // + // kubernetes.io/os: linux + // node-role.kubernetes.io/worker: '' + // + // If set, the specified selector is used and replaces the default. + // + // +optional + NodeSelector *metav1.LabelSelector `json:"nodeSelector,omitempty"` + + // tolerations is a list of tolerations applied to ingress controller + // deployments. + // + // The default is an empty list. + // + // See https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + // + // +optional + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` +} + +// EndpointPublishingStrategyType is a way to publish ingress controller endpoints. +// +kubebuilder:validation:Enum=LoadBalancerService;HostNetwork;Private;NodePortService +type EndpointPublishingStrategyType string + +const ( + // LoadBalancerService publishes the ingress controller using a Kubernetes + // LoadBalancer Service. + LoadBalancerServiceStrategyType EndpointPublishingStrategyType = "LoadBalancerService" + + // HostNetwork publishes the ingress controller on node ports where the + // ingress controller is deployed. + HostNetworkStrategyType EndpointPublishingStrategyType = "HostNetwork" + + // Private does not publish the ingress controller. + PrivateStrategyType EndpointPublishingStrategyType = "Private" + + // NodePortService publishes the ingress controller using a Kubernetes NodePort Service. + NodePortServiceStrategyType EndpointPublishingStrategyType = "NodePortService" +) + +// LoadBalancerScope is the scope at which a load balancer is exposed. +// +kubebuilder:validation:Enum=Internal;External +type LoadBalancerScope string + +var ( + // InternalLoadBalancer is a load balancer that is exposed only on the + // cluster's private network. + InternalLoadBalancer LoadBalancerScope = "Internal" + + // ExternalLoadBalancer is a load balancer that is exposed on the + // cluster's public network (which is typically on the Internet). + ExternalLoadBalancer LoadBalancerScope = "External" +) + +// LoadBalancerStrategy holds parameters for a load balancer. +type LoadBalancerStrategy struct { + // scope indicates the scope at which the load balancer is exposed. + // Possible values are "External" and "Internal". + // + // +kubebuilder:validation:Required + // +required + Scope LoadBalancerScope `json:"scope"` + + // providerParameters holds desired load balancer information specific to + // the underlying infrastructure provider. + // + // If empty, defaults will be applied. See specific providerParameters + // fields for details about their defaults. + // + // +optional + ProviderParameters *ProviderLoadBalancerParameters `json:"providerParameters,omitempty"` +} + +// ProviderLoadBalancerParameters holds desired load balancer information +// specific to the underlying infrastructure provider. +// +union +type ProviderLoadBalancerParameters struct { + // type is the underlying infrastructure provider for the load balancer. + // Allowed values are "AWS", "Azure", "BareMetal", "GCP", "OpenStack", + // and "VSphere". 
+ // + // +unionDiscriminator + // +kubebuilder:validation:Required + // +required + Type LoadBalancerProviderType `json:"type"` + + // aws provides configuration settings that are specific to AWS + // load balancers. + // + // If empty, defaults will be applied. See specific aws fields for + // details about their defaults. + // + // +optional + AWS *AWSLoadBalancerParameters `json:"aws,omitempty"` + + // gcp provides configuration settings that are specific to GCP + // load balancers. + // + // If empty, defaults will be applied. See specific gcp fields for + // details about their defaults. + // + // +optional + GCP *GCPLoadBalancerParameters `json:"gcp,omitempty"` +} + +// LoadBalancerProviderType is the underlying infrastructure provider for the +// load balancer. Allowed values are "AWS", "Azure", "BareMetal", "GCP", +// "OpenStack", and "VSphere". +// +// +kubebuilder:validation:Enum=AWS;Azure;BareMetal;GCP;OpenStack;VSphere;IBM +type LoadBalancerProviderType string + +const ( + AWSLoadBalancerProvider LoadBalancerProviderType = "AWS" + AzureLoadBalancerProvider LoadBalancerProviderType = "Azure" + GCPLoadBalancerProvider LoadBalancerProviderType = "GCP" + OpenStackLoadBalancerProvider LoadBalancerProviderType = "OpenStack" + VSphereLoadBalancerProvider LoadBalancerProviderType = "VSphere" + IBMLoadBalancerProvider LoadBalancerProviderType = "IBM" + BareMetalLoadBalancerProvider LoadBalancerProviderType = "BareMetal" +) + +// AWSLoadBalancerParameters provides configuration settings that are +// specific to AWS load balancers. +// +union +type AWSLoadBalancerParameters struct { + // type is the type of AWS load balancer to instantiate for an ingresscontroller. + // + // Valid values are: + // + // * "Classic": A Classic Load Balancer that makes routing decisions at either + // the transport layer (TCP/SSL) or the application layer (HTTP/HTTPS). See + // the following for additional details: + // + // https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#clb + // + // * "NLB": A Network Load Balancer that makes routing decisions at the + // transport layer (TCP/SSL). See the following for additional details: + // + // https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#nlb + // + // +unionDiscriminator + // +kubebuilder:validation:Required + // +required + Type AWSLoadBalancerType `json:"type"` + + // classicLoadBalancerParameters holds configuration parameters for an AWS + // classic load balancer. Present only if type is Classic. + // + // +optional + ClassicLoadBalancerParameters *AWSClassicLoadBalancerParameters `json:"classicLoadBalancer,omitempty"` + + // networkLoadBalancerParameters holds configuration parameters for an AWS + // network load balancer. Present only if type is NLB. + // + // +optional + NetworkLoadBalancerParameters *AWSNetworkLoadBalancerParameters `json:"networkLoadBalancer,omitempty"` +} + +// AWSLoadBalancerType is the type of AWS load balancer to instantiate. +// +kubebuilder:validation:Enum=Classic;NLB +type AWSLoadBalancerType string + +const ( + AWSClassicLoadBalancer AWSLoadBalancerType = "Classic" + AWSNetworkLoadBalancer AWSLoadBalancerType = "NLB" +) + +// GCPLoadBalancerParameters provides configuration settings that are +// specific to GCP load balancers. +type GCPLoadBalancerParameters struct { + // clientAccess describes how client access is restricted for internal + // load balancers. 
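
As an illustration of how the scope and provider parameters fit together, a minimal sketch using only the types defined here; the choice of an internal AWS Network Load Balancer is arbitrary:

    package sketch

    import operatorv1 "github.com/openshift/api/operator/v1"

    // internalNLB sketches a LoadBalancerService strategy scoped to the
    // cluster's private network and backed by an AWS NLB. The aws member is
    // populated because the union discriminator (Type) names AWS.
    func internalNLB() *operatorv1.LoadBalancerStrategy {
    	return &operatorv1.LoadBalancerStrategy{
    		Scope: operatorv1.InternalLoadBalancer,
    		ProviderParameters: &operatorv1.ProviderLoadBalancerParameters{
    			Type: operatorv1.AWSLoadBalancerProvider,
    			AWS: &operatorv1.AWSLoadBalancerParameters{
    				Type: operatorv1.AWSNetworkLoadBalancer,
    			},
    		},
    	}
    }
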
+ // + // Valid values are: + // * "Global": Specifying an internal load balancer with Global client access + // allows clients from any region within the VPC to communicate with the load + // balancer. + // + // https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing#global_access + // + // * "Local": Specifying an internal load balancer with Local client access + // means only clients within the same region (and VPC) as the GCP load balancer + // can communicate with the load balancer. Note that this is the default behavior. + // + // https://cloud.google.com/load-balancing/docs/internal#client_access + // + // +optional + ClientAccess GCPClientAccess `json:"clientAccess,omitempty"` +} + +// GCPClientAccess describes how client access is restricted for internal +// load balancers. +// +kubebuilder:validation:Enum=Global;Local +type GCPClientAccess string + +const ( + GCPGlobalAccess GCPClientAccess = "Global" + GCPLocalAccess GCPClientAccess = "Local" +) + +// AWSClassicLoadBalancerParameters holds configuration parameters for an +// AWS Classic load balancer. +type AWSClassicLoadBalancerParameters struct { +} + +// AWSNetworkLoadBalancerParameters holds configuration parameters for an +// AWS Network load balancer. +type AWSNetworkLoadBalancerParameters struct { +} + +// HostNetworkStrategy holds parameters for the HostNetwork endpoint publishing +// strategy. +type HostNetworkStrategy struct { + // protocol specifies whether the IngressController expects incoming + // connections to use plain TCP or whether the IngressController expects + // PROXY protocol. + // + // PROXY protocol can be used with load balancers that support it to + // communicate the source addresses of client connections when + // forwarding those connections to the IngressController. Using PROXY + // protocol enables the IngressController to report those source + // addresses instead of reporting the load balancer's address in HTTP + // headers and logs. Note that enabling PROXY protocol on the + // IngressController will cause connections to fail if you are not using + // a load balancer that uses PROXY protocol to forward connections to + // the IngressController. See + // http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for + // information about PROXY protocol. + // + // The following values are valid for this field: + // + // * The empty string. + // * "TCP". + // * "PROXY". + // + // The empty string specifies the default, which is TCP without PROXY + // protocol. Note that the default is subject to change. + // + // +kubebuilder:validation:Optional + // +optional + Protocol IngressControllerProtocol `json:"protocol,omitempty"` +} + +// PrivateStrategy holds parameters for the Private endpoint publishing +// strategy. +type PrivateStrategy struct { +} + +// NodePortStrategy holds parameters for the NodePortService endpoint publishing strategy. +type NodePortStrategy struct { + // protocol specifies whether the IngressController expects incoming + // connections to use plain TCP or whether the IngressController expects + // PROXY protocol. + // + // PROXY protocol can be used with load balancers that support it to + // communicate the source addresses of client connections when + // forwarding those connections to the IngressController. Using PROXY + // protocol enables the IngressController to report those source + // addresses instead of reporting the load balancer's address in HTTP + // headers and logs. 
Note that enabling PROXY protocol on the
+	// IngressController will cause connections to fail if you are not using
+	// a load balancer that uses PROXY protocol to forward connections to
+	// the IngressController. See
+	// http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for
+	// information about PROXY protocol.
+	//
+	// The following values are valid for this field:
+	//
+	// * The empty string.
+	// * "TCP".
+	// * "PROXY".
+	//
+	// The empty string specifies the default, which is TCP without PROXY
+	// protocol. Note that the default is subject to change.
+	//
+	// +kubebuilder:validation:Optional
+	// +optional
+	Protocol IngressControllerProtocol `json:"protocol,omitempty"`
+}
+
+// IngressControllerProtocol specifies whether PROXY protocol is enabled or not.
+// +kubebuilder:validation:Enum="";TCP;PROXY
+type IngressControllerProtocol string
+
+const (
+	DefaultProtocol IngressControllerProtocol = ""
+	TCPProtocol     IngressControllerProtocol = "TCP"
+	ProxyProtocol   IngressControllerProtocol = "PROXY"
+)
+
+// EndpointPublishingStrategy is a way to publish the endpoints of an
+// IngressController, and represents the type and any additional configuration
+// for a specific type.
+// +union
+type EndpointPublishingStrategy struct {
+	// type is the publishing strategy to use. Valid values are:
+	//
+	// * LoadBalancerService
+	//
+	// Publishes the ingress controller using a Kubernetes LoadBalancer Service.
+	//
+	// In this configuration, the ingress controller deployment uses container
+	// networking. A LoadBalancer Service is created to publish the deployment.
+	//
+	// See: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer
+	//
+	// If domain is set, a wildcard DNS record will be managed to point at the
+	// LoadBalancer Service's external name. DNS records are managed only in DNS
+	// zones defined by dns.config.openshift.io/cluster .spec.publicZone and
+	// .spec.privateZone.
+	//
+	// Wildcard DNS management is currently supported only on the AWS, Azure,
+	// and GCP platforms.
+	//
+	// * HostNetwork
+	//
+	// Publishes the ingress controller on node ports where the ingress controller
+	// is deployed.
+	//
+	// In this configuration, the ingress controller deployment uses host
+	// networking, bound to node ports 80 and 443. The user is responsible for
+	// configuring an external load balancer to publish the ingress controller via
+	// the node ports.
+	//
+	// * Private
+	//
+	// Does not publish the ingress controller.
+	//
+	// In this configuration, the ingress controller deployment uses container
+	// networking, and is not explicitly published. The user must manually publish
+	// the ingress controller.
+	//
+	// * NodePortService
+	//
+	// Publishes the ingress controller using a Kubernetes NodePort Service.
+	//
+	// In this configuration, the ingress controller deployment uses container
+	// networking. A NodePort Service is created to publish the deployment. The
+	// specific node ports are dynamically allocated by OpenShift; however, to
+	// support static port allocations, user changes to the node port
+	// field of the managed NodePort Service will be preserved.
+	//
+	// +unionDiscriminator
+	// +kubebuilder:validation:Required
+	// +required
+	Type EndpointPublishingStrategyType `json:"type"`
+
+	// loadBalancer holds parameters for the load balancer. Present only if
+	// type is LoadBalancerService.
+ // +optional + LoadBalancer *LoadBalancerStrategy `json:"loadBalancer,omitempty"` + + // hostNetwork holds parameters for the HostNetwork endpoint publishing + // strategy. Present only if type is HostNetwork. + // +optional + HostNetwork *HostNetworkStrategy `json:"hostNetwork,omitempty"` + + // private holds parameters for the Private endpoint publishing + // strategy. Present only if type is Private. + // +optional + Private *PrivateStrategy `json:"private,omitempty"` + + // nodePort holds parameters for the NodePortService endpoint publishing strategy. + // Present only if type is NodePortService. + // +optional + NodePort *NodePortStrategy `json:"nodePort,omitempty"` +} + +// RouteAdmissionPolicy is an admission policy for allowing new route claims. +type RouteAdmissionPolicy struct { + // namespaceOwnership describes how host name claims across namespaces should + // be handled. + // + // Value must be one of: + // + // - Strict: Do not allow routes in different namespaces to claim the same host. + // + // - InterNamespaceAllowed: Allow routes to claim different paths of the same + // host name across namespaces. + // + // If empty, the default is Strict. + // +optional + NamespaceOwnership NamespaceOwnershipCheck `json:"namespaceOwnership,omitempty"` + // wildcardPolicy describes how routes with wildcard policies should + // be handled for the ingress controller. WildcardPolicy controls use + // of routes [1] exposed by the ingress controller based on the route's + // wildcard policy. + // + // [1] https://github.com/openshift/api/blob/master/route/v1/types.go + // + // Note: Updating WildcardPolicy from WildcardsAllowed to WildcardsDisallowed + // will cause admitted routes with a wildcard policy of Subdomain to stop + // working. These routes must be updated to a wildcard policy of None to be + // readmitted by the ingress controller. + // + // WildcardPolicy supports WildcardsAllowed and WildcardsDisallowed values. + // + // If empty, defaults to "WildcardsDisallowed". + // + WildcardPolicy WildcardPolicy `json:"wildcardPolicy,omitempty"` +} + +// WildcardPolicy is a route admission policy component that describes how +// routes with a wildcard policy should be handled. +// +kubebuilder:validation:Enum=WildcardsAllowed;WildcardsDisallowed +type WildcardPolicy string + +const ( + // WildcardPolicyAllowed indicates routes with any wildcard policy are + // admitted by the ingress controller. + WildcardPolicyAllowed WildcardPolicy = "WildcardsAllowed" + + // WildcardPolicyDisallowed indicates only routes with a wildcard policy + // of None are admitted by the ingress controller. + WildcardPolicyDisallowed WildcardPolicy = "WildcardsDisallowed" +) + +// NamespaceOwnershipCheck is a route admission policy component that describes +// how host name claims across namespaces should be handled. +// +kubebuilder:validation:Enum=InterNamespaceAllowed;Strict +type NamespaceOwnershipCheck string + +const ( + // InterNamespaceAllowedOwnershipCheck allows routes to claim different paths of the same host name across namespaces. + InterNamespaceAllowedOwnershipCheck NamespaceOwnershipCheck = "InterNamespaceAllowed" + + // StrictNamespaceOwnershipCheck does not allow routes to claim the same host name across namespaces. + StrictNamespaceOwnershipCheck NamespaceOwnershipCheck = "Strict" +) + +// LoggingDestinationType is a type of destination to which to send log +// messages. 
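
A minimal sketch of the union as used in practice, assuming the vendored operator/v1 package; per the discriminated-union convention, only the member named by the type discriminator is set:

    package sketch

    import operatorv1 "github.com/openshift/api/operator/v1"

    // nodePortWithProxy sketches a NodePortService publishing strategy whose
    // fronting load balancer is expected to speak PROXY protocol.
    func nodePortWithProxy() *operatorv1.EndpointPublishingStrategy {
    	return &operatorv1.EndpointPublishingStrategy{
    		Type: operatorv1.NodePortServiceStrategyType,
    		NodePort: &operatorv1.NodePortStrategy{
    			Protocol: operatorv1.ProxyProtocol,
    		},
    	}
    }
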
+// +// +kubebuilder:validation:Enum=Container;Syslog +type LoggingDestinationType string + +const ( + // Container sends log messages to a sidecar container. + ContainerLoggingDestinationType LoggingDestinationType = "Container" + + // Syslog sends log messages to a syslog endpoint. + SyslogLoggingDestinationType LoggingDestinationType = "Syslog" + + // ContainerLoggingSidecarContainerName is the name of the container + // with the log output in an ingress controller pod when container + // logging is used. + ContainerLoggingSidecarContainerName = "logs" +) + +// SyslogLoggingDestinationParameters describes parameters for the Syslog +// logging destination type. +type SyslogLoggingDestinationParameters struct { + // address is the IP address of the syslog endpoint that receives log + // messages. + // + // +kubebuilder:validation:Required + // +required + Address string `json:"address"` + + // port is the UDP port number of the syslog endpoint that receives log + // messages. + // + // +kubebuilder:validation:Required + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 + // +required + Port uint32 `json:"port"` + + // facility specifies the syslog facility of log messages. + // + // If this field is empty, the facility is "local1". + // + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Enum=kern;user;mail;daemon;auth;syslog;lpr;news;uucp;cron;auth2;ftp;ntp;audit;alert;cron2;local0;local1;local2;local3;local4;local5;local6;local7 + // +optional + Facility string `json:"facility,omitempty"` +} + +// ContainerLoggingDestinationParameters describes parameters for the Container +// logging destination type. +type ContainerLoggingDestinationParameters struct { +} + +// LoggingDestination describes a destination for log messages. +// +union +type LoggingDestination struct { + // type is the type of destination for logs. It must be one of the + // following: + // + // * Container + // + // The ingress operator configures the sidecar container named "logs" on + // the ingress controller pod and configures the ingress controller to + // write logs to the sidecar. The logs are then available as container + // logs. The expectation is that the administrator configures a custom + // logging solution that reads logs from this sidecar. Note that using + // container logs means that logs may be dropped if the rate of logs + // exceeds the container runtime's or the custom logging solution's + // capacity. + // + // * Syslog + // + // Logs are sent to a syslog endpoint. The administrator must specify + // an endpoint that can receive syslog messages. The expectation is + // that the administrator has configured a custom syslog instance. + // + // +unionDiscriminator + // +kubebuilder:validation:Required + // +required + Type LoggingDestinationType `json:"type"` + + // syslog holds parameters for a syslog endpoint. Present only if + // type is Syslog. + // + // +optional + Syslog *SyslogLoggingDestinationParameters `json:"syslog,omitempty"` + + // container holds parameters for the Container logging destination. + // Present only if type is Container. + // + // +optional + Container *ContainerLoggingDestinationParameters `json:"container,omitempty"` +} + +// IngressControllerCaptureHTTPHeader describes an HTTP header that should be +// captured. +type IngressControllerCaptureHTTPHeader struct { + // name specifies a header name. Its value must be a valid HTTP header + // name as defined in RFC 2616 section 4.2. 
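
A minimal sketch of a Syslog destination built from these types; the endpoint address and port are hypothetical, and "local1" is the documented default facility:

    package sketch

    import operatorv1 "github.com/openshift/api/operator/v1"

    // syslogDestination sketches a LoggingDestination of type Syslog; the
    // syslog member is populated because the discriminator names Syslog.
    func syslogDestination() operatorv1.LoggingDestination {
    	return operatorv1.LoggingDestination{
    		Type: operatorv1.SyslogLoggingDestinationType,
    		Syslog: &operatorv1.SyslogLoggingDestinationParameters{
    			Address:  "10.0.0.10",
    			Port:     514,
    			Facility: "local1",
    		},
    	}
    }
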
+ // + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern="^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$" + // +required + Name string `json:"name"` + + // maxLength specifies a maximum length for the header value. If a + // header value exceeds this length, the value will be truncated in the + // log message. Note that the ingress controller may impose a separate + // bound on the total length of HTTP headers in a request. + // + // +kubebuilder:validation:Required + // +kubebuilder:validation:Minimum=1 + // +required + MaxLength int `json:"maxLength"` +} + +// IngressControllerCaptureHTTPHeaders specifies which HTTP headers the +// IngressController captures. +type IngressControllerCaptureHTTPHeaders struct { + // request specifies which HTTP request headers to capture. + // + // If this field is empty, no request headers are captured. + // + // +nullable + // +optional + Request []IngressControllerCaptureHTTPHeader `json:"request,omitempty"` + + // response specifies which HTTP response headers to capture. + // + // If this field is empty, no response headers are captured. + // + // +nullable + // +optional + Response []IngressControllerCaptureHTTPHeader `json:"response,omitempty"` +} + +// CookieMatchType indicates the type of matching used against cookie names to +// select a cookie for capture. +// +kubebuilder:validation:Enum=Exact;Prefix +type CookieMatchType string + +const ( + // CookieMatchTypeExact indicates that an exact string match should be + // performed. + CookieMatchTypeExact CookieMatchType = "Exact" + // CookieMatchTypePrefix indicates that a string prefix match should be + // performed. + CookieMatchTypePrefix CookieMatchType = "Prefix" +) + +// IngressControllerCaptureHTTPCookie describes an HTTP cookie that should be +// captured. +type IngressControllerCaptureHTTPCookie struct { + IngressControllerCaptureHTTPCookieUnion `json:",inline"` + + // maxLength specifies a maximum length of the string that will be + // logged, which includes the cookie name, cookie value, and + // one-character delimiter. If the log entry exceeds this length, the + // value will be truncated in the log message. Note that the ingress + // controller may impose a separate bound on the total length of HTTP + // headers in a request. + // + // +kubebuilder:validation:Required + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=1024 + // +required + MaxLength int `json:"maxLength"` +} + +// IngressControllerCaptureHTTPCookieUnion describes optional fields of an HTTP cookie that should be captured. +// +union +type IngressControllerCaptureHTTPCookieUnion struct { + // matchType specifies the type of match to be performed on the cookie + // name. Allowed values are "Exact" for an exact string match and + // "Prefix" for a string prefix match. If "Exact" is specified, a name + // must be specified in the name field. If "Prefix" is provided, a + // prefix must be specified in the namePrefix field. For example, + // specifying matchType "Prefix" and namePrefix "foo" will capture a + // cookie named "foo" or "foobar" but not one named "bar". The first + // matching cookie is captured. + // + // +unionDiscriminator + // +kubebuilder:validation:Required + // +required + MatchType CookieMatchType `json:"matchType,omitempty"` + + // name specifies a cookie name. Its value must be a valid HTTP cookie + // name as defined in RFC 6265 section 4.1. 
+ // + // +kubebuilder:validation:Pattern="^[-!#$%&'*+.0-9A-Z^_`a-z|~]*$" + // +kubebuilder:validation:MinLength=0 + // +kubebuilder:validation:MaxLength=1024 + // +optional + Name string `json:"name"` + + // namePrefix specifies a cookie name prefix. Its value must be a valid + // HTTP cookie name as defined in RFC 6265 section 4.1. + // + // +kubebuilder:validation:Pattern="^[-!#$%&'*+.0-9A-Z^_`a-z|~]*$" + // +kubebuilder:validation:MinLength=0 + // +kubebuilder:validation:MaxLength=1024 + // +optional + NamePrefix string `json:"namePrefix"` +} + +// AccessLogging describes how client requests should be logged. +type AccessLogging struct { + // destination is where access logs go. + // + // +kubebuilder:validation:Required + // +required + Destination LoggingDestination `json:"destination"` + + // httpLogFormat specifies the format of the log message for an HTTP + // request. + // + // If this field is empty, log messages use the implementation's default + // HTTP log format. For HAProxy's default HTTP log format, see the + // HAProxy documentation: + // http://cbonte.github.io/haproxy-dconv/2.0/configuration.html#8.2.3 + // + // Note that this format only applies to cleartext HTTP connections + // and to secure HTTP connections for which the ingress controller + // terminates encryption (that is, edge-terminated or reencrypt + // connections). It does not affect the log format for TLS passthrough + // connections. + // + // +optional + HttpLogFormat string `json:"httpLogFormat,omitempty"` + + // httpCaptureHeaders defines HTTP headers that should be captured in + // access logs. If this field is empty, no headers are captured. + // + // Note that this option only applies to cleartext HTTP connections + // and to secure HTTP connections for which the ingress controller + // terminates encryption (that is, edge-terminated or reencrypt + // connections). Headers cannot be captured for TLS passthrough + // connections. + // + // +optional + HTTPCaptureHeaders IngressControllerCaptureHTTPHeaders `json:"httpCaptureHeaders,omitempty"` + + // httpCaptureCookies specifies HTTP cookies that should be captured in + // access logs. If this field is empty, no cookies are captured. + // + // +nullable + // +optional + // +kubebuilder:validation:MaxItems=1 + HTTPCaptureCookies []IngressControllerCaptureHTTPCookie `json:"httpCaptureCookies,omitempty"` +} + +// IngressControllerLogging describes what should be logged where. +type IngressControllerLogging struct { + // access describes how the client requests should be logged. + // + // If this field is empty, access logging is disabled. + // + // +optional + Access *AccessLogging `json:"access,omitempty"` +} + +// IngressControllerHTTPHeaderPolicy is a policy for setting HTTP headers. +// +// +kubebuilder:validation:Enum=Append;Replace;IfNone;Never +type IngressControllerHTTPHeaderPolicy string + +const ( + // AppendHTTPHeaderPolicy appends the header, preserving any existing header. + AppendHTTPHeaderPolicy IngressControllerHTTPHeaderPolicy = "Append" + // ReplaceHTTPHeaderPolicy sets the header, removing any existing header. + ReplaceHTTPHeaderPolicy IngressControllerHTTPHeaderPolicy = "Replace" + // IfNoneHTTPHeaderPolicy sets the header if it is not already set. + IfNoneHTTPHeaderPolicy IngressControllerHTTPHeaderPolicy = "IfNone" + // NeverHTTPHeaderPolicy never sets the header, preserving any existing + // header. 
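
Putting the logging pieces together, a sketch of an IngressControllerLogging value that logs to the sidecar container and captures one request header and one cookie prefix; the header and cookie names are illustrative, not defaults:

    package sketch

    import operatorv1 "github.com/openshift/api/operator/v1"

    // containerAccessLogs sketches access logging to the "logs" sidecar with
    // header and cookie capture as described above.
    func containerAccessLogs() *operatorv1.IngressControllerLogging {
    	return &operatorv1.IngressControllerLogging{
    		Access: &operatorv1.AccessLogging{
    			Destination: operatorv1.LoggingDestination{
    				Type:      operatorv1.ContainerLoggingDestinationType,
    				Container: &operatorv1.ContainerLoggingDestinationParameters{},
    			},
    			HTTPCaptureHeaders: operatorv1.IngressControllerCaptureHTTPHeaders{
    				Request: []operatorv1.IngressControllerCaptureHTTPHeader{
    					{Name: "X-Request-Id", MaxLength: 64},
    				},
    			},
    			HTTPCaptureCookies: []operatorv1.IngressControllerCaptureHTTPCookie{
    				{
    					// Captures "session", "session-id", etc., per the
    					// Prefix match semantics documented above.
    					IngressControllerCaptureHTTPCookieUnion: operatorv1.IngressControllerCaptureHTTPCookieUnion{
    						MatchType:  operatorv1.CookieMatchTypePrefix,
    						NamePrefix: "session",
    					},
    					MaxLength: 128,
    				},
    			},
    		},
    	}
    }
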
+ NeverHTTPHeaderPolicy IngressControllerHTTPHeaderPolicy = "Never" +) + +// IngressControllerHTTPUniqueIdHeaderPolicy describes configuration for a +// unique id header. +type IngressControllerHTTPUniqueIdHeaderPolicy struct { + // name specifies the name of the HTTP header (for example, "unique-id") + // that the ingress controller should inject into HTTP requests. The + // field's value must be a valid HTTP header name as defined in RFC 2616 + // section 4.2. If the field is empty, no header is injected. + // + // +optional + // +kubebuilder:validation:Pattern="^$|^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$" + // +kubebuilder:validation:MinLength=0 + // +kubebuilder:validation:MaxLength=1024 + Name string `json:"name,omitempty"` + + // format specifies the format for the injected HTTP header's value. + // This field has no effect unless name is specified. For the + // HAProxy-based ingress controller implementation, this format uses the + // same syntax as the HTTP log format. If the field is empty, the + // default value is "%{+X}o\\ %ci:%cp_%fi:%fp_%Ts_%rt:%pid"; see the + // corresponding HAProxy documentation: + // http://cbonte.github.io/haproxy-dconv/2.0/configuration.html#8.2.3 + // + // +optional + // +kubebuilder:validation:Pattern="^(%(%|(\\{[-+]?[QXE](,[-+]?[QXE])*\\})?([A-Za-z]+|\\[[.0-9A-Z_a-z]+(\\([^)]+\\))?(,[.0-9A-Z_a-z]+(\\([^)]+\\))?)*\\]))|[^%[:cntrl:]])*$" + // +kubebuilder:validation:MinLength=0 + // +kubebuilder:validation:MaxLength=1024 + Format string `json:"format,omitempty"` +} + +// IngressControllerHTTPHeaderNameCaseAdjustment is the name of an HTTP header +// (for example, "X-Forwarded-For") in the desired capitalization. The value +// must be a valid HTTP header name as defined in RFC 2616 section 4.2. +// +// +optional +// +kubebuilder:validation:Pattern="^$|^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$" +// +kubebuilder:validation:MinLength=0 +// +kubebuilder:validation:MaxLength=1024 +type IngressControllerHTTPHeaderNameCaseAdjustment string + +// IngressControllerHTTPHeaders specifies how the IngressController handles +// certain HTTP headers. +type IngressControllerHTTPHeaders struct { + // forwardedHeaderPolicy specifies when and how the IngressController + // sets the Forwarded, X-Forwarded-For, X-Forwarded-Host, + // X-Forwarded-Port, X-Forwarded-Proto, and X-Forwarded-Proto-Version + // HTTP headers. The value may be one of the following: + // + // * "Append", which specifies that the IngressController appends the + // headers, preserving existing headers. + // + // * "Replace", which specifies that the IngressController sets the + // headers, replacing any existing Forwarded or X-Forwarded-* headers. + // + // * "IfNone", which specifies that the IngressController sets the + // headers if they are not already set. + // + // * "Never", which specifies that the IngressController never sets the + // headers, preserving any existing headers. + // + // By default, the policy is "Append". + // + // +optional + ForwardedHeaderPolicy IngressControllerHTTPHeaderPolicy `json:"forwardedHeaderPolicy,omitempty"` + + // uniqueId describes configuration for a custom HTTP header that the + // ingress controller should inject into incoming HTTP requests. + // Typically, this header is configured to have a value that is unique + // to the HTTP request. The header can be used by applications or + // included in access logs to facilitate tracing individual HTTP + // requests. + // + // If this field is empty, no such header is injected into requests. 
+ // + // +optional + UniqueId IngressControllerHTTPUniqueIdHeaderPolicy `json:"uniqueId,omitempty"` + + // headerNameCaseAdjustments specifies case adjustments that can be + // applied to HTTP header names. Each adjustment is specified as an + // HTTP header name with the desired capitalization. For example, + // specifying "X-Forwarded-For" indicates that the "x-forwarded-for" + // HTTP header should be adjusted to have the specified capitalization. + // + // These adjustments are only applied to cleartext, edge-terminated, and + // re-encrypt routes, and only when using HTTP/1. + // + // For request headers, these adjustments are applied only for routes + // that have the haproxy.router.openshift.io/h1-adjust-case=true + // annotation. For response headers, these adjustments are applied to + // all HTTP responses. + // + // If this field is empty, no request headers are adjusted. + // + // +nullable + // +optional + HeaderNameCaseAdjustments []IngressControllerHTTPHeaderNameCaseAdjustment `json:"headerNameCaseAdjustments,omitempty"` +} + +// IngressControllerTuningOptions specifies options for tuning the performance +// of ingress controller pods +type IngressControllerTuningOptions struct { + // headerBufferBytes describes how much memory should be reserved + // (in bytes) for IngressController connection sessions. + // Note that this value must be at least 16384 if HTTP/2 is + // enabled for the IngressController (https://tools.ietf.org/html/rfc7540). + // If this field is empty, the IngressController will use a default value + // of 32768 bytes. + // + // Setting this field is generally not recommended as headerBufferBytes + // values that are too small may break the IngressController and + // headerBufferBytes values that are too large could cause the + // IngressController to use significantly more memory than necessary. + // + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Minimum=16384 + // +optional + HeaderBufferBytes int32 `json:"headerBufferBytes,omitempty"` + + // headerBufferMaxRewriteBytes describes how much memory should be reserved + // (in bytes) from headerBufferBytes for HTTP header rewriting + // and appending for IngressController connection sessions. + // Note that incoming HTTP requests will be limited to + // (headerBufferBytes - headerBufferMaxRewriteBytes) bytes, meaning + // headerBufferBytes must be greater than headerBufferMaxRewriteBytes. + // If this field is empty, the IngressController will use a default value + // of 8192 bytes. + // + // Setting this field is generally not recommended as + // headerBufferMaxRewriteBytes values that are too small may break the + // IngressController and headerBufferMaxRewriteBytes values that are too + // large could cause the IngressController to use significantly more memory + // than necessary. + // + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Minimum=4096 + // +optional + HeaderBufferMaxRewriteBytes int32 `json:"headerBufferMaxRewriteBytes,omitempty"` + + // threadCount defines the number of threads created per HAProxy process. + // Creating more threads allows each ingress controller pod to handle more + // connections, at the cost of more system resources being used. HAProxy + // currently supports up to 64 threads. If this field is empty, the + // IngressController will use the default value. The current default is 4 + // threads, but this may change in future releases. + // + // Setting this field is generally not recommended. 
Increasing the number
+	// of HAProxy threads allows ingress controller pods to utilize more CPU
+	// time under load, potentially starving other pods if set too high.
+	// Reducing the number of threads may cause the ingress controller to
+	// perform poorly.
+	//
+	// +kubebuilder:validation:Optional
+	// +kubebuilder:validation:Minimum=1
+	// +kubebuilder:validation:Maximum=64
+	// +optional
+	ThreadCount int32 `json:"threadCount,omitempty"`
+}
+
+var (
+	// Available indicates the ingress controller deployment is available.
+	IngressControllerAvailableConditionType = "Available"
+	// LoadBalancerManaged indicates the management status of any load balancer
+	// service associated with an ingress controller.
+	LoadBalancerManagedIngressConditionType = "LoadBalancerManaged"
+	// LoadBalancerReady indicates the ready state of any load balancer service
+	// associated with an ingress controller.
+	LoadBalancerReadyIngressConditionType = "LoadBalancerReady"
+	// DNSManaged indicates the management status of any DNS records for the
+	// ingress controller.
+	DNSManagedIngressConditionType = "DNSManaged"
+	// DNSReady indicates the ready state of any DNS records for the ingress
+	// controller.
+	DNSReadyIngressConditionType = "DNSReady"
+)
+
+// IngressControllerStatus defines the observed status of the IngressController.
+type IngressControllerStatus struct {
+	// availableReplicas is the number of observed available replicas according
+	// to the ingress controller deployment.
+	AvailableReplicas int32 `json:"availableReplicas"`
+
+	// selector is a label selector, in string format, for ingress controller pods
+	// corresponding to the IngressController. The number of matching pods should
+	// equal the value of availableReplicas.
+	Selector string `json:"selector"`
+
+	// domain is the actual domain in use.
+	Domain string `json:"domain"`
+
+	// endpointPublishingStrategy is the actual strategy in use.
+	EndpointPublishingStrategy *EndpointPublishingStrategy `json:"endpointPublishingStrategy,omitempty"`
+
+	// conditions is a list of conditions and their status.
+	//
+	// Available means the ingress controller deployment is available and
+	// servicing route and ingress resources (i.e., .status.availableReplicas
+	// equals .spec.replicas).
+	//
+	// There are additional conditions which indicate the status of other
+	// ingress controller features and capabilities.
+	//
+	// * LoadBalancerManaged
+	// - True if the following conditions are met:
+	// * The endpoint publishing strategy requires a service load balancer.
+	// - False if any of those conditions are unsatisfied.
+	//
+	// * LoadBalancerReady
+	// - True if the following conditions are met:
+	// * A load balancer is managed.
+	// * The load balancer is ready.
+	// - False if any of those conditions are unsatisfied.
+	//
+	// * DNSManaged
+	// - True if the following conditions are met:
+	// * The endpoint publishing strategy and platform support DNS.
+	// * The ingress controller domain is set.
+	// * dns.config.openshift.io/cluster configures DNS zones.
+	// - False if any of those conditions are unsatisfied.
+	//
+	// * DNSReady
+	// - True if the following conditions are met:
+	// * DNS is managed.
+	// * DNS records have been successfully created.
+	// - False if any of those conditions are unsatisfied.
+	Conditions []OperatorCondition `json:"conditions,omitempty"`
+
+	// tlsProfile is the TLS connection configuration that is in effect.
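
The two tuningOptions buffer fields above interact as documented: incoming requests are limited to headerBufferBytes minus headerBufferMaxRewriteBytes, so the defaults (32768 and 8192) leave 24576 bytes for incoming headers. A small helper makes the arithmetic explicit (an editorial sketch, not an API in this package):

    package sketch

    import "fmt"

    // effectiveHeaderLimit returns how many bytes remain for incoming request
    // headers once the rewrite reservation is subtracted, enforcing the
    // documented constraint that headerBufferBytes must be the larger value.
    func effectiveHeaderLimit(headerBufferBytes, headerBufferMaxRewriteBytes int32) (int32, error) {
    	if headerBufferMaxRewriteBytes >= headerBufferBytes {
    		return 0, fmt.Errorf("headerBufferBytes (%d) must exceed headerBufferMaxRewriteBytes (%d)",
    			headerBufferBytes, headerBufferMaxRewriteBytes)
    	}
    	return headerBufferBytes - headerBufferMaxRewriteBytes, nil
    }
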
+ // +optional + TLSProfile *configv1.TLSProfileSpec `json:"tlsProfile,omitempty"` + + // observedGeneration is the most recent generation observed. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true + +// IngressControllerList contains a list of IngressControllers. +type IngressControllerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []IngressController `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_kubeapiserver.go b/vendor/github.com/openshift/api/operator/v1/types_kubeapiserver.go new file mode 100644 index 0000000000..cd657c5542 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_kubeapiserver.go @@ -0,0 +1,43 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KubeAPIServer provides information to configure an operator to manage kube-apiserver. +type KubeAPIServer struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + // spec is the specification of the desired behavior of the Kubernetes API Server + // +kubebuilder:validation:Required + // +required + Spec KubeAPIServerSpec `json:"spec"` + + // status is the most recently observed status of the Kubernetes API Server + // +optional + Status KubeAPIServerStatus `json:"status"` +} + +type KubeAPIServerSpec struct { + StaticPodOperatorSpec `json:",inline"` +} + +type KubeAPIServerStatus struct { + StaticPodOperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KubeAPIServerList is a collection of items +type KubeAPIServerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + // Items contains the items + Items []KubeAPIServer `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_kubecontrollermanager.go b/vendor/github.com/openshift/api/operator/v1/types_kubecontrollermanager.go new file mode 100644 index 0000000000..c20ae30ccd --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_kubecontrollermanager.go @@ -0,0 +1,43 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KubeControllerManager provides information to configure an operator to manage kube-controller-manager. 
+type KubeControllerManager struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + // spec is the specification of the desired behavior of the Kubernetes Controller Manager + // +kubebuilder:validation:Required + // +required + Spec KubeControllerManagerSpec `json:"spec"` + + // status is the most recently observed status of the Kubernetes Controller Manager + // +optional + Status KubeControllerManagerStatus `json:"status"` +} + +type KubeControllerManagerSpec struct { + StaticPodOperatorSpec `json:",inline"` +} + +type KubeControllerManagerStatus struct { + StaticPodOperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KubeControllerManagerList is a collection of items +type KubeControllerManagerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + // Items contains the items + Items []KubeControllerManager `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_kubestorageversionmigrator.go b/vendor/github.com/openshift/api/operator/v1/types_kubestorageversionmigrator.go new file mode 100644 index 0000000000..5949ac021a --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_kubestorageversionmigrator.go @@ -0,0 +1,40 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KubeStorageVersionMigrator provides information to configure an operator to manage kube-storage-version-migrator. +type KubeStorageVersionMigrator struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + // +kubebuilder:validation:Required + // +required + Spec KubeStorageVersionMigratorSpec `json:"spec"` + // +optional + Status KubeStorageVersionMigratorStatus `json:"status"` +} + +type KubeStorageVersionMigratorSpec struct { + OperatorSpec `json:",inline"` +} + +type KubeStorageVersionMigratorStatus struct { + OperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KubeStorageVersionMigratorList is a collection of items +type KubeStorageVersionMigratorList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + // Items contains the items + Items []KubeStorageVersionMigrator `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_network.go b/vendor/github.com/openshift/api/operator/v1/types_network.go new file mode 100644 index 0000000000..33b23bc8a7 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_network.go @@ -0,0 +1,539 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Network describes the cluster's desired network configuration. It is +// consumed by the cluster-network-operator. +// +k8s:openapi-gen=true +type Network struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec NetworkSpec `json:"spec,omitempty"` + Status NetworkStatus `json:"status,omitempty"` +} + +// NetworkStatus is detailed operator status, which is distilled +// up to the Network clusteroperator object. 
+type NetworkStatus struct {
+	OperatorStatus `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// NetworkList contains a list of Network configurations
+type NetworkList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []Network `json:"items"`
+}
+
+// NetworkSpec is the top-level network configuration object.
+type NetworkSpec struct {
+	OperatorSpec `json:",inline"`
+
+	// clusterNetwork is the IP address pool to use for pod IPs.
+	// Some network providers, e.g. OpenShift SDN, support multiple ClusterNetworks.
+	// Others only support one. This is equivalent to the cluster-cidr.
+	ClusterNetwork []ClusterNetworkEntry `json:"clusterNetwork"`
+
+	// serviceNetwork is the IP address pool to use for Service IPs.
+	// Currently, all existing network providers only support a single value
+	// here, but this is an array to allow for growth.
+	ServiceNetwork []string `json:"serviceNetwork"`
+
+	// defaultNetwork is the "default" network that all pods will receive
+	DefaultNetwork DefaultNetworkDefinition `json:"defaultNetwork"`
+
+	// additionalNetworks is a list of extra networks to make available to pods
+	// when multiple networks are enabled.
+	AdditionalNetworks []AdditionalNetworkDefinition `json:"additionalNetworks,omitempty"`
+
+	// disableMultiNetwork specifies whether or not multiple pod network
+	// support should be disabled. If unset, this property defaults to
+	// 'false' and multiple network support is enabled.
+	DisableMultiNetwork *bool `json:"disableMultiNetwork,omitempty"`
+
+	// useMultiNetworkPolicy enables a controller which allows for
+	// MultiNetworkPolicy objects to be used on additional networks as
+	// created by Multus CNI. MultiNetworkPolicy objects are similar to
+	// NetworkPolicy objects, but NetworkPolicy objects only apply to the
+	// primary interface. With MultiNetworkPolicy, you can control the traffic
+	// that a pod can receive over the secondary interfaces. If unset, this
+	// property defaults to 'false' and MultiNetworkPolicy objects are
+	// ignored. If 'disableMultiNetwork' is 'true' then the value of this
+	// field is ignored.
+	UseMultiNetworkPolicy *bool `json:"useMultiNetworkPolicy,omitempty"`
+
+	// deployKubeProxy specifies whether or not a standalone kube-proxy should
+	// be deployed by the operator. Some network providers include kube-proxy
+	// or similar functionality. If unset, the plugin will attempt to select
+	// the correct value, which is false when OpenShift SDN and ovn-kubernetes are
+	// used and true otherwise.
+	// +optional
+	DeployKubeProxy *bool `json:"deployKubeProxy,omitempty"`
+
+	// disableNetworkDiagnostics specifies whether or not PodNetworkConnectivityCheck
+	// CRs from a test pod to every node, apiserver and LB should be disabled or not.
+	// If unset, this property defaults to 'false' and network diagnostics is enabled.
+	// Setting this to 'true' would reduce the additional load of the pods performing the checks.
+	// +optional
+	// +kubebuilder:default:=false
+	DisableNetworkDiagnostics bool `json:"disableNetworkDiagnostics"`
+
+	// kubeProxyConfig lets us configure desired proxy configuration.
+	// If not specified, sensible defaults will be chosen by OpenShift directly.
+	// Not consumed by all network providers - currently only openshift-sdn.
+	KubeProxyConfig *ProxyConfig `json:"kubeProxyConfig,omitempty"`
+
+	// exportNetworkFlows enables and configures the export of network flow metadata from the pod network
+	// by using protocols NetFlow, SFlow or IPFIX. Currently only supported on OVN-Kubernetes plugin.
+	// If unset, flows will not be exported to any collector.
+	// +optional
+	// +kubebuilder:validation:MinProperties=1
+	ExportNetworkFlows *ExportNetworkFlows `json:"exportNetworkFlows,omitempty"`
+
+	// migration enables and configures the cluster network migration. Set
+	// this to the target network type to allow changing the default network.
+	// If unset, the operation of changing the cluster default network plugin
+	// will be rejected.
+	// +optional
+	Migration *NetworkMigration `json:"migration,omitempty"`
+}
+
+// NetworkMigration represents the cluster network migration configuration.
+type NetworkMigration struct {
+	// networkType is the target type of the network migration.
+	// The supported values are OpenShiftSDN, OVNKubernetes
+	NetworkType NetworkType `json:"networkType"`
+}
+
+// ClusterNetworkEntry is a subnet from which to allocate PodIPs. A network of size
+// HostPrefix (in CIDR notation) will be allocated when nodes join the cluster. If
+// the HostPrefix field is not used by the plugin, it can be left unset.
+// Not all network providers support multiple ClusterNetworks
+type ClusterNetworkEntry struct {
+	CIDR string `json:"cidr"`
+	// +kubebuilder:validation:Minimum=0
+	// +optional
+	HostPrefix uint32 `json:"hostPrefix,omitempty"`
+}
+
+// DefaultNetworkDefinition represents a single network plugin's configuration.
+// type must be specified, along with exactly one "Config" that matches the type.
+type DefaultNetworkDefinition struct {
+	// type is the type of network
+	// All NetworkTypes are supported except for NetworkTypeRaw
+	Type NetworkType `json:"type"`
+
+	// openShiftSDNConfig configures the openshift-sdn plugin
+	// +optional
+	OpenShiftSDNConfig *OpenShiftSDNConfig `json:"openshiftSDNConfig,omitempty"`
+
+	// oVNKubernetesConfig configures the ovn-kubernetes plugin. This is currently
+	// not implemented.
+	// +optional
+	OVNKubernetesConfig *OVNKubernetesConfig `json:"ovnKubernetesConfig,omitempty"`
+
+	// KuryrConfig configures the kuryr plugin
+	// +optional
+	KuryrConfig *KuryrConfig `json:"kuryrConfig,omitempty"`
+}
+
+// SimpleMacvlanConfig contains configurations for macvlan interface.
+type SimpleMacvlanConfig struct {
+	// master is the host interface to create the macvlan interface from.
+	// If not specified, the default route interface will be used.
+	// +optional
+	Master string `json:"master,omitempty"`
+
+	// ipamConfig configures the IPAM module to be used for IP Address
+	// Management (IPAM).
+	// +optional
+	IPAMConfig *IPAMConfig `json:"ipamConfig,omitempty"`
+
+	// mode is the macvlan mode: bridge, private, vepa, passthru. The default is bridge
+	// +optional
+	Mode MacvlanMode `json:"mode,omitempty"`
+
+	// mtu is the MTU to use for the macvlan interface. If unset, the host's
+	// kernel will select the value.
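
A minimal sketch of a NetworkSpec assembled from these types; the CIDRs are illustrative, and the NetworkTypeOVNKubernetes constant is assumed to be defined elsewhere in this package:

    package sketch

    import operatorv1 "github.com/openshift/api/operator/v1"

    // clusterNetworkSpec sketches a minimal NetworkSpec: one pod CIDR carved
    // into /23 host subnets, a single service CIDR, and OVN-Kubernetes as the
    // default network.
    func clusterNetworkSpec() operatorv1.NetworkSpec {
    	return operatorv1.NetworkSpec{
    		ClusterNetwork: []operatorv1.ClusterNetworkEntry{
    			{CIDR: "10.128.0.0/14", HostPrefix: 23},
    		},
    		ServiceNetwork: []string{"172.30.0.0/16"},
    		DefaultNetwork: operatorv1.DefaultNetworkDefinition{
    			Type: operatorv1.NetworkTypeOVNKubernetes,
    		},
    	}
    }
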
+	// +kubebuilder:validation:Minimum=0
+	// +optional
+	MTU uint32 `json:"mtu,omitempty"`
+}
+
+// StaticIPAMAddresses provides IP address and Gateway for static IPAM addresses
+type StaticIPAMAddresses struct {
+	// Address is the IP address in CIDR format
+	// +optional
+	Address string `json:"address"`
+	// Gateway is IP inside of subnet to designate as the gateway
+	// +optional
+	Gateway string `json:"gateway,omitempty"`
+}
+
+// StaticIPAMRoutes provides Destination/Gateway pairs for static IPAM routes
+type StaticIPAMRoutes struct {
+	// Destination is the IP route destination
+	Destination string `json:"destination"`
+	// Gateway is the route's next-hop IP address.
+	// If unset, a default gateway is assumed (as determined by the CNI plugin).
+	// +optional
+	Gateway string `json:"gateway,omitempty"`
+}
+
+// StaticIPAMDNS provides DNS related information for static IPAM
+type StaticIPAMDNS struct {
+	// Nameservers is a list of DNS servers to use for IP lookups
+	// +optional
+	Nameservers []string `json:"nameservers,omitempty"`
+	// Domain configures the local domain used for short hostname lookups
+	// +optional
+	Domain string `json:"domain,omitempty"`
+	// Search configures priority-ordered search domains for short hostname lookups
+	// +optional
+	Search []string `json:"search,omitempty"`
+}
+
+// StaticIPAMConfig contains configurations for static IPAM (IP Address Management)
+type StaticIPAMConfig struct {
+	// Addresses configures the IP addresses for the interface
+	// +optional
+	Addresses []StaticIPAMAddresses `json:"addresses,omitempty"`
+	// Routes configures the IP routes for the interface
+	// +optional
+	Routes []StaticIPAMRoutes `json:"routes,omitempty"`
+	// DNS configures DNS for the interface
+	// +optional
+	DNS *StaticIPAMDNS `json:"dns,omitempty"`
+}
+
+// IPAMConfig contains configurations for IPAM (IP Address Management)
+type IPAMConfig struct {
+	// Type is the type of IPAM module to be used for IP Address Management (IPAM).
+	// The supported values are IPAMTypeDHCP, IPAMTypeStatic
+	Type IPAMType `json:"type"`
+
+	// StaticIPAMConfig configures the static IP addresses in case of type:IPAMTypeStatic
+	// +optional
+	StaticIPAMConfig *StaticIPAMConfig `json:"staticIPAMConfig,omitempty"`
+}
+
+// AdditionalNetworkDefinition configures an extra network that is available but not
+// created by default. Instead, pods must request them by name.
+// type must be specified, along with exactly one "Config" that matches the type.
+type AdditionalNetworkDefinition struct {
+	// type is the type of network
+	// The supported values are NetworkTypeRaw, NetworkTypeSimpleMacvlan
+	Type NetworkType `json:"type"`
+
+	// name is the name of the network. This will be populated in the resulting CRD
+	// This must be unique.
+	Name string `json:"name"`
+
+	// namespace is the namespace of the network. This will be populated in the resulting CRD
+	// If not given the network will be created in the default namespace.
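
A sketch of a static IPAM configuration assembled from these types; all addresses are illustrative, and IPAMTypeStatic is assumed to be the constant the comment above refers to:

    package sketch

    import operatorv1 "github.com/openshift/api/operator/v1"

    // staticIPAM sketches static IP address management for an attachment:
    // one address, a default route, and explicit DNS.
    func staticIPAM() *operatorv1.IPAMConfig {
    	return &operatorv1.IPAMConfig{
    		Type: operatorv1.IPAMTypeStatic,
    		StaticIPAMConfig: &operatorv1.StaticIPAMConfig{
    			Addresses: []operatorv1.StaticIPAMAddresses{
    				{Address: "192.168.10.5/24", Gateway: "192.168.10.1"},
    			},
    			Routes: []operatorv1.StaticIPAMRoutes{
    				{Destination: "0.0.0.0/0", Gateway: "192.168.10.1"},
    			},
    			DNS: &operatorv1.StaticIPAMDNS{
    				Nameservers: []string{"192.168.10.2"},
    				Domain:      "example.internal",
    			},
    		},
    	}
    }
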
+	Namespace string `json:"namespace,omitempty"`
+
+	// rawCNIConfig is the raw CNI configuration JSON to create in the
+	// NetworkAttachmentDefinition CRD
+	RawCNIConfig string `json:"rawCNIConfig,omitempty"`
+
+	// SimpleMacvlanConfig configures the macvlan interface when type is NetworkTypeSimpleMacvlan
+	// +optional
+	SimpleMacvlanConfig *SimpleMacvlanConfig `json:"simpleMacvlanConfig,omitempty"`
+}
+
+// OpenShiftSDNConfig configures the three openshift-sdn plugins
+type OpenShiftSDNConfig struct {
+	// mode is one of "Multitenant", "Subnet", or "NetworkPolicy"
+	Mode SDNMode `json:"mode"`
+
+	// vxlanPort is the port to use for all vxlan packets. The default is 4789.
+	// +kubebuilder:validation:Minimum=0
+	// +optional
+	VXLANPort *uint32 `json:"vxlanPort,omitempty"`
+
+	// mtu is the MTU to use for the tunnel interface. Defaults to 1450 if unset.
+	// This must be 50 bytes smaller than the machine's uplink.
+	// +kubebuilder:validation:Minimum=0
+	// +optional
+	MTU *uint32 `json:"mtu,omitempty"`
+
+	// useExternalOpenvswitch tells the operator not to install openvswitch, because
+	// it will be provided separately. If set, you must provide it yourself.
+	// +optional
+	UseExternalOpenvswitch *bool `json:"useExternalOpenvswitch,omitempty"`
+
+	// enableUnidling controls whether or not the service proxy will support idling
+	// and unidling of services. By default, unidling is enabled.
+	EnableUnidling *bool `json:"enableUnidling,omitempty"`
+}
+
+// KuryrConfig configures the Kuryr-Kubernetes SDN
+type KuryrConfig struct {
+	// The port on which kuryr-daemon will listen for readiness and liveness requests.
+	// +kubebuilder:validation:Minimum=0
+	// +optional
+	DaemonProbesPort *uint32 `json:"daemonProbesPort,omitempty"`
+
+	// The port on which kuryr-controller will listen for readiness and liveness requests.
+	// +kubebuilder:validation:Minimum=0
+	// +optional
+	ControllerProbesPort *uint32 `json:"controllerProbesPort,omitempty"`
+
+	// openStackServiceNetwork contains the CIDR of the network from which to allocate IPs for
+	// OpenStack Octavia's Amphora VMs. Please note that with the Amphora driver Octavia uses
+	// two IPs from that network for each loadbalancer - one given by OpenShift and a second
+	// for VRRP connections. As the first one is managed by OpenShift's IPAM and the second by
+	// Neutron's, those need to come from different pools. Therefore `openStackServiceNetwork`
+	// needs to be at least twice the size of `serviceNetwork`, and the whole `serviceNetwork`
+	// must overlap with `openStackServiceNetwork`. cluster-network-operator will then
+	// make sure VRRP IPs are taken from the ranges inside `openStackServiceNetwork` that
+	// do not overlap with `serviceNetwork`, effectively preventing conflicts. If not set,
+	// cluster-network-operator will use `serviceNetwork` expanded by decrementing the prefix
+	// size by 1; for example, a `serviceNetwork` of 172.30.0.0/16 yields a default
+	// `openStackServiceNetwork` of 172.30.0.0/15.
+	// +optional
+	OpenStackServiceNetwork string `json:"openStackServiceNetwork,omitempty"`
+
+	// enablePortPoolsPrepopulation when true will make Kuryr prepopulate each newly created port
+	// pool with a minimum number of ports. Kuryr uses Neutron port pooling to offset the
+	// significant time it takes Neutron to create a single port. Instead of creating ports when
+	// a pod is being deployed, Kuryr keeps a number of ports ready to be attached to pods. By
+	// default, port prepopulation is disabled.
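+	//
+	// For illustration only, a pool setup that prepopulates pools and keeps between
+	// 5 and 10 free ports (hypothetical values, not defaults):
+	//
+	//	kuryr := KuryrConfig{
+	//		EnablePortPoolsPrepopulation: true,
+	//		PoolMinPorts:                 5,
+	//		PoolMaxPorts:                 10,
+	//	}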
+	// +optional
+	EnablePortPoolsPrepopulation bool `json:"enablePortPoolsPrepopulation,omitempty"`
+
+	// poolMaxPorts sets the maximum number of free ports that are kept in a port pool.
+	// If the number of ports exceeds this setting, free ports will get deleted. Setting
+	// it to 0 disables this upper bound, effectively preventing pools from shrinking;
+	// this is the default value. For more information about port pools see the
+	// enablePortPoolsPrepopulation setting.
+	// +kubebuilder:validation:Minimum=0
+	// +optional
+	PoolMaxPorts uint `json:"poolMaxPorts,omitempty"`
+
+	// poolMinPorts sets the minimum number of free ports that should be kept in a port pool.
+	// If the number of ports is lower than this setting, new ports will get created and
+	// added to the pool. The default is 1. For more information about port pools see the
+	// enablePortPoolsPrepopulation setting.
+	// +kubebuilder:validation:Minimum=1
+	// +optional
+	PoolMinPorts uint `json:"poolMinPorts,omitempty"`
+
+	// poolBatchPorts sets the number of ports that should be created in a single batch request
+	// to extend the port pool. The default is 3. For more information about port pools see the
+	// enablePortPoolsPrepopulation setting.
+	// +kubebuilder:validation:Minimum=0
+	// +optional
+	PoolBatchPorts *uint `json:"poolBatchPorts,omitempty"`
+
+	// mtu is the MTU that Kuryr should use when creating pod networks in Neutron.
+	// The value has to be lower than or equal to the MTU of the nodes' network, and Neutron
+	// has to allow creation of tenant networks with such an MTU. If unset, pod networks will
+	// be created with the same MTU as the nodes' network.
+	// +kubebuilder:validation:Minimum=0
+	// +optional
+	MTU *uint32 `json:"mtu,omitempty"`
+}
+
+// OVNKubernetesConfig contains the configuration parameters for networks
+// using the ovn-kubernetes network project
+type OVNKubernetesConfig struct {
+	// mtu is the MTU to use for the tunnel interface. This must be 100
+	// bytes smaller than the uplink MTU.
+	// The default is 1400.
+	// +kubebuilder:validation:Minimum=0
+	// +optional
+	MTU *uint32 `json:"mtu,omitempty"`
+	// genevePort is the UDP port to be used by Geneve encapsulation.
+	// The default is 6081.
+	// +kubebuilder:validation:Minimum=1
+	// +optional
+	GenevePort *uint32 `json:"genevePort,omitempty"`
+	// HybridOverlayConfig configures an additional overlay network for peers that are
+	// not using OVN.
+	// +optional
+	HybridOverlayConfig *HybridOverlayConfig `json:"hybridOverlayConfig,omitempty"`
+	// ipsecConfig enables and configures IPsec for pods on the pod network within the
+	// cluster.
+	// +optional
+	IPsecConfig *IPsecConfig `json:"ipsecConfig,omitempty"`
+	// policyAuditConfig is the configuration for network policy audit events. If unset,
+	// reported defaults are used.
+	// +optional
+	PolicyAuditConfig *PolicyAuditConfig `json:"policyAuditConfig,omitempty"`
+}
+
+type HybridOverlayConfig struct {
+	// HybridClusterNetwork defines a network space given to nodes on an additional overlay network.
+	HybridClusterNetwork []ClusterNetworkEntry `json:"hybridClusterNetwork"`
+	// HybridOverlayVXLANPort defines the VXLAN port number to be used by the additional overlay network.
+	// The default is 4789.
+	// +optional
+	HybridOverlayVXLANPort *uint32 `json:"hybridOverlayVXLANPort,omitempty"`
+}
+
+type IPsecConfig struct {
+}
+
+type ExportNetworkFlows struct {
+	// netFlow defines the NetFlow configuration.
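+	//
+	// For illustration only, exporting flows to a single NetFlow collector at a
+	// hypothetical address:
+	//
+	//	flows := ExportNetworkFlows{
+	//		NetFlow: &NetFlowConfig{
+	//			Collectors: []IPPort{"192.0.2.10:2055"},
+	//		},
+	//	}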
+	// +optional
+	NetFlow *NetFlowConfig `json:"netFlow,omitempty"`
+	// sFlow defines the SFlow configuration.
+	// +optional
+	SFlow *SFlowConfig `json:"sFlow,omitempty"`
+	// ipfix defines the IPFIX configuration.
+	// +optional
+	IPFIX *IPFIXConfig `json:"ipfix,omitempty"`
+}
+
+type NetFlowConfig struct {
+	// collectors defines the NetFlow collectors that will consume the flow data exported from OVS.
+	// It is a list of strings formatted as ip:port with a maximum of ten items.
+	// +kubebuilder:validation:MinItems=1
+	// +kubebuilder:validation:MaxItems=10
+	Collectors []IPPort `json:"collectors,omitempty"`
+}
+
+type SFlowConfig struct {
+	// collectors is a list of strings formatted as ip:port with a maximum of ten items.
+	// +kubebuilder:validation:MinItems=1
+	// +kubebuilder:validation:MaxItems=10
+	Collectors []IPPort `json:"collectors,omitempty"`
+}
+
+type IPFIXConfig struct {
+	// collectors is a list of strings formatted as ip:port with a maximum of ten items.
+	// +kubebuilder:validation:MinItems=1
+	// +kubebuilder:validation:MaxItems=10
+	Collectors []IPPort `json:"collectors,omitempty"`
+}
+
+// +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]):[0-9]+$`
+type IPPort string
+
+type PolicyAuditConfig struct {
+	// rateLimit is the approximate maximum number of messages to generate per second per node.
+	// If unset, the default of 20 msg/sec is used.
+	// +kubebuilder:default=20
+	// +kubebuilder:validation:Minimum=1
+	// +optional
+	RateLimit *uint32 `json:"rateLimit,omitempty"`
+
+	// maxFileSize is the maximum size an ACL audit log file is allowed to reach before rotation occurs.
+	// Units are in MB and the default is 50MB.
+	// +kubebuilder:default=50
+	// +kubebuilder:validation:Minimum=1
+	// +optional
+	MaxFileSize *uint32 `json:"maxFileSize,omitempty"`
+
+	// destination is the location for policy log messages.
+	// Regardless of this config, persistent logs will always be dumped to the host
+	// at /var/log/ovn/; however, syslog output may additionally be configured as follows.
+	// Valid values are:
+	// - "libc" -> to use the libc syslog() function of the host node's journald process
+	// - "udp:host:port" -> for sending syslog over UDP
+	// - "unix:file" -> for using the UNIX domain socket directly
+	// - "null" -> to discard all messages logged to syslog
+	// The default is "null".
+	// +kubebuilder:default=null
+	// +kubebuilder:pattern='^libc$|^null$|^udp:(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]):([0-9]){0,5}$|^unix:(\/[^\/ ]*)+([^\/\s])$'
+	// +optional
+	Destination string `json:"destination,omitempty"`
+
+	// syslogFacility is the RFC5424 facility for generated messages, e.g. "kern". The default is "local0".
+	// +kubebuilder:default=local0
+	// +kubebuilder:Enum=kern;user;mail;daemon;auth;syslog;lpr;news;uucp;clock;ftp;ntp;audit;alert;clock2;local0;local1;local2;local3;local4;local5;local6;local7
+	// +optional
+	SyslogFacility string `json:"syslogFacility,omitempty"`
+}
+
+// NetworkType describes the network plugin type to configure
+type NetworkType string
+
+// ProxyArgumentList is a list of arguments to pass to the kubeproxy process
+type ProxyArgumentList []string
+
+// ProxyConfig defines the configuration knobs for kubeproxy.
+// All of these are optional and have sensible defaults.
+type ProxyConfig struct {
+	// An internal kube-proxy parameter. In older releases of OCP, this sometimes needed
+	// to be adjusted in large clusters for performance reasons, but this is no longer
+	// necessary, and there is no reason to change this from the default value.
+	// Default: 30s
+	IptablesSyncPeriod string `json:"iptablesSyncPeriod,omitempty"`
+
+	// The address to "bind" on.
+	// Defaults to 0.0.0.0
+	BindAddress string `json:"bindAddress,omitempty"`
+
+	// Any additional arguments to pass to the kubeproxy process
+	ProxyArguments map[string]ProxyArgumentList `json:"proxyArguments,omitempty"`
+}
+
+const (
+	// NetworkTypeOpenShiftSDN means the openshift-sdn plugin will be configured
+	NetworkTypeOpenShiftSDN NetworkType = "OpenShiftSDN"
+
+	// NetworkTypeOVNKubernetes means the ovn-kubernetes project will be configured.
+	NetworkTypeOVNKubernetes NetworkType = "OVNKubernetes"
+
+	// NetworkTypeKuryr means the kuryr-kubernetes project will be configured.
+	NetworkTypeKuryr NetworkType = "Kuryr"
+
+	// NetworkTypeRaw
+	NetworkTypeRaw NetworkType = "Raw"
+
+	// NetworkTypeSimpleMacvlan
+	NetworkTypeSimpleMacvlan NetworkType = "SimpleMacvlan"
+)
+
+// SDNMode is the Mode the openshift-sdn plugin is in
+type SDNMode string
+
+const (
+	// SDNModeSubnet is a simple mode that offers no isolation between pods
+	SDNModeSubnet SDNMode = "Subnet"
+
+	// SDNModeMultitenant is a special "multitenant" mode that offers limited
+	// isolation configuration between namespaces
+	SDNModeMultitenant SDNMode = "Multitenant"
+
+	// SDNModeNetworkPolicy is a full NetworkPolicy implementation that allows
+	// for sophisticated network isolation and segmenting. This is the default.
+	SDNModeNetworkPolicy SDNMode = "NetworkPolicy"
+)
+
+// MacvlanMode is the mode of macvlan. The values are lowercase to match the CNI plugin
+// config values. See "man ip-link" for details.
+type MacvlanMode string
+
+const (
+	// MacvlanModeBridge is the macvlan with thin bridge function.
+	MacvlanModeBridge MacvlanMode = "Bridge"
+	// MacvlanModePrivate
+	MacvlanModePrivate MacvlanMode = "Private"
+	// MacvlanModeVEPA is used with a Virtual Ethernet Port Aggregator
+	// (802.1qbg) switch
+	MacvlanModeVEPA MacvlanMode = "VEPA"
+	// MacvlanModePassthru
+	MacvlanModePassthru MacvlanMode = "Passthru"
+)
+
+// IPAMType describes the IP address management type to configure
+type IPAMType string
+
+const (
+	// IPAMTypeDHCP uses DHCP for IP management
+	IPAMTypeDHCP IPAMType = "DHCP"
+	// IPAMTypeStatic uses static IP
+	IPAMTypeStatic IPAMType = "Static"
+)
diff --git a/vendor/github.com/openshift/api/operator/v1/types_openshiftapiserver.go b/vendor/github.com/openshift/api/operator/v1/types_openshiftapiserver.go
new file mode 100644
index 0000000000..8ab50ed321
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/types_openshiftapiserver.go
@@ -0,0 +1,50 @@
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OpenShiftAPIServer provides information to configure an operator to manage openshift-apiserver.
+type OpenShiftAPIServer struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata"`
+
+	// spec is the specification of the desired behavior of the OpenShift API Server.
+	// +kubebuilder:validation:Required
+	// +required
+	Spec OpenShiftAPIServerSpec `json:"spec"`
+
+	// status defines the observed status of the OpenShift API Server.
+	// +optional
+	Status OpenShiftAPIServerStatus `json:"status"`
+}
+
+type OpenShiftAPIServerSpec struct {
+	OperatorSpec `json:",inline"`
+}
+
+type OpenShiftAPIServerStatus struct {
+	OperatorStatus `json:",inline"`
+
+	// latestAvailableRevision is the latest revision used as a suffix of revisioned
+	// secrets like encryption-config. A new revision causes a new deployment of
+	// pods.
+	// +optional
+	// +kubebuilder:validation:Minimum=0
+	LatestAvailableRevision int32 `json:"latestAvailableRevision,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OpenShiftAPIServerList is a collection of items
+type OpenShiftAPIServerList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata"`
+
+	// Items contains the items
+	Items []OpenShiftAPIServer `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/operator/v1/types_openshiftcontrollermanager.go b/vendor/github.com/openshift/api/operator/v1/types_openshiftcontrollermanager.go
new file mode 100644
index 0000000000..0f23b01be2
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/types_openshiftcontrollermanager.go
@@ -0,0 +1,40 @@
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OpenShiftControllerManager provides information to configure an operator to manage openshift-controller-manager.
+type OpenShiftControllerManager struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata"`
+
+	// +kubebuilder:validation:Required
+	// +required
+	Spec OpenShiftControllerManagerSpec `json:"spec"`
+	// +optional
+	Status OpenShiftControllerManagerStatus `json:"status"`
+}
+
+type OpenShiftControllerManagerSpec struct {
+	OperatorSpec `json:",inline"`
+}
+
+type OpenShiftControllerManagerStatus struct {
+	OperatorStatus `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// OpenShiftControllerManagerList is a collection of items
+type OpenShiftControllerManagerList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata"`
+
+	// Items contains the items
+	Items []OpenShiftControllerManager `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/operator/v1/types_scheduler.go b/vendor/github.com/openshift/api/operator/v1/types_scheduler.go
new file mode 100644
index 0000000000..f8a542082c
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/types_scheduler.go
@@ -0,0 +1,43 @@
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// KubeScheduler provides information to configure an operator to manage the scheduler.
+type KubeScheduler struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata"`
+
+	// spec is the specification of the desired behavior of the Kubernetes Scheduler
+	// +kubebuilder:validation:Required
+	// +required
+	Spec KubeSchedulerSpec `json:"spec"`
+
+	// status is the most recently observed status of the Kubernetes Scheduler
+	// +optional
+	Status KubeSchedulerStatus `json:"status"`
+}
+
+type KubeSchedulerSpec struct {
+	StaticPodOperatorSpec `json:",inline"`
+}
+
+type KubeSchedulerStatus struct {
+	StaticPodOperatorStatus `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// KubeSchedulerList is a collection of items
+type KubeSchedulerList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata"`
+
+	// Items contains the items
+	Items []KubeScheduler `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/operator/v1/types_serviceca.go b/vendor/github.com/openshift/api/operator/v1/types_serviceca.go
new file mode 100644
index 0000000000..b8d5e2646a
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1/types_serviceca.go
@@ -0,0 +1,42 @@
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ServiceCA provides information to configure an operator to manage the service cert controllers
+type ServiceCA struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata"`
+
+	// spec holds user settable values for configuration
+	// +kubebuilder:validation:Required
+	// +required
+	Spec ServiceCASpec `json:"spec"`
+	// status holds observed values from the cluster. They may not be overridden.
+ // +optional + Status ServiceCAStatus `json:"status"` +} + +type ServiceCASpec struct { + OperatorSpec `json:",inline"` +} + +type ServiceCAStatus struct { + OperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceCAList is a collection of items +type ServiceCAList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + // Items contains the items + Items []ServiceCA `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_servicecatalogapiserver.go b/vendor/github.com/openshift/api/operator/v1/types_servicecatalogapiserver.go new file mode 100644 index 0000000000..4dc98f4a4d --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_servicecatalogapiserver.go @@ -0,0 +1,42 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceCatalogAPIServer provides information to configure an operator to manage Service Catalog API Server +// DEPRECATED: will be removed in 4.6 +type ServiceCatalogAPIServer struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +kubebuilder:validation:Required + // +required + Spec ServiceCatalogAPIServerSpec `json:"spec"` + // +optional + Status ServiceCatalogAPIServerStatus `json:"status"` +} + +type ServiceCatalogAPIServerSpec struct { + OperatorSpec `json:",inline"` +} + +type ServiceCatalogAPIServerStatus struct { + OperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceCatalogAPIServerList is a collection of items +// DEPRECATED: will be removed in 4.6 +type ServiceCatalogAPIServerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + // Items contains the items + Items []ServiceCatalogAPIServer `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_servicecatalogcontrollermanager.go b/vendor/github.com/openshift/api/operator/v1/types_servicecatalogcontrollermanager.go new file mode 100644 index 0000000000..f4cc3f6957 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_servicecatalogcontrollermanager.go @@ -0,0 +1,42 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceCatalogControllerManager provides information to configure an operator to manage Service Catalog Controller Manager +// DEPRECATED: will be removed in 4.6 +type ServiceCatalogControllerManager struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + // +kubebuilder:validation:Required + // +required + Spec ServiceCatalogControllerManagerSpec `json:"spec"` + // +optional + Status ServiceCatalogControllerManagerStatus `json:"status"` +} + +type ServiceCatalogControllerManagerSpec struct { + OperatorSpec `json:",inline"` +} + +type ServiceCatalogControllerManagerStatus struct { + OperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceCatalogControllerManagerList is a collection of items +// DEPRECATED: will be removed in 4.6 +type ServiceCatalogControllerManagerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + // Items contains the items + Items 
[]ServiceCatalogControllerManager `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/types_storage.go b/vendor/github.com/openshift/api/operator/v1/types_storage.go new file mode 100644 index 0000000000..d5d3bd407e --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_storage.go @@ -0,0 +1,44 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Storage provides a means to configure an operator to manage the cluster storage operator. `cluster` is the canonical name. +type Storage struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +kubebuilder:validation:Required + // +required + Spec StorageSpec `json:"spec"` + + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status StorageStatus `json:"status"` +} + +// StorageSpec is the specification of the desired behavior of the cluster storage operator. +type StorageSpec struct { + OperatorSpec `json:",inline"` +} + +// StorageStatus defines the observed status of the cluster storage operator. +type StorageStatus struct { + OperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true + +// StorageList contains a list of Storages. +type StorageList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Storage `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..befbfb16c3 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go @@ -0,0 +1,3565 @@ +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSClassicLoadBalancerParameters) DeepCopyInto(out *AWSClassicLoadBalancerParameters) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSClassicLoadBalancerParameters. +func (in *AWSClassicLoadBalancerParameters) DeepCopy() *AWSClassicLoadBalancerParameters { + if in == nil { + return nil + } + out := new(AWSClassicLoadBalancerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AWSLoadBalancerParameters) DeepCopyInto(out *AWSLoadBalancerParameters) { + *out = *in + if in.ClassicLoadBalancerParameters != nil { + in, out := &in.ClassicLoadBalancerParameters, &out.ClassicLoadBalancerParameters + *out = new(AWSClassicLoadBalancerParameters) + **out = **in + } + if in.NetworkLoadBalancerParameters != nil { + in, out := &in.NetworkLoadBalancerParameters, &out.NetworkLoadBalancerParameters + *out = new(AWSNetworkLoadBalancerParameters) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSLoadBalancerParameters. +func (in *AWSLoadBalancerParameters) DeepCopy() *AWSLoadBalancerParameters { + if in == nil { + return nil + } + out := new(AWSLoadBalancerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSNetworkLoadBalancerParameters) DeepCopyInto(out *AWSNetworkLoadBalancerParameters) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSNetworkLoadBalancerParameters. +func (in *AWSNetworkLoadBalancerParameters) DeepCopy() *AWSNetworkLoadBalancerParameters { + if in == nil { + return nil + } + out := new(AWSNetworkLoadBalancerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessLogging) DeepCopyInto(out *AccessLogging) { + *out = *in + in.Destination.DeepCopyInto(&out.Destination) + in.HTTPCaptureHeaders.DeepCopyInto(&out.HTTPCaptureHeaders) + if in.HTTPCaptureCookies != nil { + in, out := &in.HTTPCaptureCookies, &out.HTTPCaptureCookies + *out = make([]IngressControllerCaptureHTTPCookie, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessLogging. +func (in *AccessLogging) DeepCopy() *AccessLogging { + if in == nil { + return nil + } + out := new(AccessLogging) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AddPage) DeepCopyInto(out *AddPage) { + *out = *in + if in.DisabledActions != nil { + in, out := &in.DisabledActions, &out.DisabledActions + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddPage. +func (in *AddPage) DeepCopy() *AddPage { + if in == nil { + return nil + } + out := new(AddPage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdditionalNetworkDefinition) DeepCopyInto(out *AdditionalNetworkDefinition) { + *out = *in + if in.SimpleMacvlanConfig != nil { + in, out := &in.SimpleMacvlanConfig, &out.SimpleMacvlanConfig + *out = new(SimpleMacvlanConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalNetworkDefinition. +func (in *AdditionalNetworkDefinition) DeepCopy() *AdditionalNetworkDefinition { + if in == nil { + return nil + } + out := new(AdditionalNetworkDefinition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Authentication) DeepCopyInto(out *Authentication) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Authentication. +func (in *Authentication) DeepCopy() *Authentication { + if in == nil { + return nil + } + out := new(Authentication) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Authentication) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationList) DeepCopyInto(out *AuthenticationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Authentication, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationList. +func (in *AuthenticationList) DeepCopy() *AuthenticationList { + if in == nil { + return nil + } + out := new(AuthenticationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AuthenticationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationSpec) DeepCopyInto(out *AuthenticationSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationSpec. +func (in *AuthenticationSpec) DeepCopy() *AuthenticationSpec { + if in == nil { + return nil + } + out := new(AuthenticationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationStatus) DeepCopyInto(out *AuthenticationStatus) { + *out = *in + out.OAuthAPIServer = in.OAuthAPIServer + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationStatus. +func (in *AuthenticationStatus) DeepCopy() *AuthenticationStatus { + if in == nil { + return nil + } + out := new(AuthenticationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSISnapshotController) DeepCopyInto(out *CSISnapshotController) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSISnapshotController. 
+func (in *CSISnapshotController) DeepCopy() *CSISnapshotController { + if in == nil { + return nil + } + out := new(CSISnapshotController) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CSISnapshotController) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSISnapshotControllerList) DeepCopyInto(out *CSISnapshotControllerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CSISnapshotController, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSISnapshotControllerList. +func (in *CSISnapshotControllerList) DeepCopy() *CSISnapshotControllerList { + if in == nil { + return nil + } + out := new(CSISnapshotControllerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CSISnapshotControllerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSISnapshotControllerSpec) DeepCopyInto(out *CSISnapshotControllerSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSISnapshotControllerSpec. +func (in *CSISnapshotControllerSpec) DeepCopy() *CSISnapshotControllerSpec { + if in == nil { + return nil + } + out := new(CSISnapshotControllerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSISnapshotControllerStatus) DeepCopyInto(out *CSISnapshotControllerStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSISnapshotControllerStatus. +func (in *CSISnapshotControllerStatus) DeepCopy() *CSISnapshotControllerStatus { + if in == nil { + return nil + } + out := new(CSISnapshotControllerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudCredential) DeepCopyInto(out *CloudCredential) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudCredential. +func (in *CloudCredential) DeepCopy() *CloudCredential { + if in == nil { + return nil + } + out := new(CloudCredential) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *CloudCredential) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudCredentialList) DeepCopyInto(out *CloudCredentialList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CloudCredential, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudCredentialList. +func (in *CloudCredentialList) DeepCopy() *CloudCredentialList { + if in == nil { + return nil + } + out := new(CloudCredentialList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CloudCredentialList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudCredentialSpec) DeepCopyInto(out *CloudCredentialSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudCredentialSpec. +func (in *CloudCredentialSpec) DeepCopy() *CloudCredentialSpec { + if in == nil { + return nil + } + out := new(CloudCredentialSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudCredentialStatus) DeepCopyInto(out *CloudCredentialStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudCredentialStatus. +func (in *CloudCredentialStatus) DeepCopy() *CloudCredentialStatus { + if in == nil { + return nil + } + out := new(CloudCredentialStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterCSIDriver) DeepCopyInto(out *ClusterCSIDriver) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCSIDriver. +func (in *ClusterCSIDriver) DeepCopy() *ClusterCSIDriver { + if in == nil { + return nil + } + out := new(ClusterCSIDriver) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterCSIDriver) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterCSIDriverList) DeepCopyInto(out *ClusterCSIDriverList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterCSIDriver, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCSIDriverList. +func (in *ClusterCSIDriverList) DeepCopy() *ClusterCSIDriverList { + if in == nil { + return nil + } + out := new(ClusterCSIDriverList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterCSIDriverList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterCSIDriverSpec) DeepCopyInto(out *ClusterCSIDriverSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCSIDriverSpec. +func (in *ClusterCSIDriverSpec) DeepCopy() *ClusterCSIDriverSpec { + if in == nil { + return nil + } + out := new(ClusterCSIDriverSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterCSIDriverStatus) DeepCopyInto(out *ClusterCSIDriverStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCSIDriverStatus. +func (in *ClusterCSIDriverStatus) DeepCopy() *ClusterCSIDriverStatus { + if in == nil { + return nil + } + out := new(ClusterCSIDriverStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterNetworkEntry) DeepCopyInto(out *ClusterNetworkEntry) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworkEntry. +func (in *ClusterNetworkEntry) DeepCopy() *ClusterNetworkEntry { + if in == nil { + return nil + } + out := new(ClusterNetworkEntry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Config) DeepCopyInto(out *Config) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config. +func (in *Config) DeepCopy() *Config { + if in == nil { + return nil + } + out := new(Config) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Config) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConfigList) DeepCopyInto(out *ConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Config, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigList. +func (in *ConfigList) DeepCopy() *ConfigList { + if in == nil { + return nil + } + out := new(ConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigSpec) DeepCopyInto(out *ConfigSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigSpec. +func (in *ConfigSpec) DeepCopy() *ConfigSpec { + if in == nil { + return nil + } + out := new(ConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigStatus) DeepCopyInto(out *ConfigStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigStatus. +func (in *ConfigStatus) DeepCopy() *ConfigStatus { + if in == nil { + return nil + } + out := new(ConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Console) DeepCopyInto(out *Console) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Console. +func (in *Console) DeepCopy() *Console { + if in == nil { + return nil + } + out := new(Console) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Console) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleConfigRoute) DeepCopyInto(out *ConsoleConfigRoute) { + *out = *in + out.Secret = in.Secret + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleConfigRoute. +func (in *ConsoleConfigRoute) DeepCopy() *ConsoleConfigRoute { + if in == nil { + return nil + } + out := new(ConsoleConfigRoute) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConsoleCustomization) DeepCopyInto(out *ConsoleCustomization) { + *out = *in + out.CustomLogoFile = in.CustomLogoFile + in.DeveloperCatalog.DeepCopyInto(&out.DeveloperCatalog) + in.ProjectAccess.DeepCopyInto(&out.ProjectAccess) + in.QuickStarts.DeepCopyInto(&out.QuickStarts) + in.AddPage.DeepCopyInto(&out.AddPage) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleCustomization. +func (in *ConsoleCustomization) DeepCopy() *ConsoleCustomization { + if in == nil { + return nil + } + out := new(ConsoleCustomization) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleList) DeepCopyInto(out *ConsoleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Console, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleList. +func (in *ConsoleList) DeepCopy() *ConsoleList { + if in == nil { + return nil + } + out := new(ConsoleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConsoleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleProviders) DeepCopyInto(out *ConsoleProviders) { + *out = *in + if in.Statuspage != nil { + in, out := &in.Statuspage, &out.Statuspage + *out = new(StatuspageProvider) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleProviders. +func (in *ConsoleProviders) DeepCopy() *ConsoleProviders { + if in == nil { + return nil + } + out := new(ConsoleProviders) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleSpec) DeepCopyInto(out *ConsoleSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + in.Customization.DeepCopyInto(&out.Customization) + in.Providers.DeepCopyInto(&out.Providers) + out.Route = in.Route + if in.Plugins != nil { + in, out := &in.Plugins, &out.Plugins + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleSpec. +func (in *ConsoleSpec) DeepCopy() *ConsoleSpec { + if in == nil { + return nil + } + out := new(ConsoleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleStatus) DeepCopyInto(out *ConsoleStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleStatus. +func (in *ConsoleStatus) DeepCopy() *ConsoleStatus { + if in == nil { + return nil + } + out := new(ConsoleStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContainerLoggingDestinationParameters) DeepCopyInto(out *ContainerLoggingDestinationParameters) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerLoggingDestinationParameters. +func (in *ContainerLoggingDestinationParameters) DeepCopy() *ContainerLoggingDestinationParameters { + if in == nil { + return nil + } + out := new(ContainerLoggingDestinationParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNS) DeepCopyInto(out *DNS) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNS. +func (in *DNS) DeepCopy() *DNS { + if in == nil { + return nil + } + out := new(DNS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DNS) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSList) DeepCopyInto(out *DNSList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DNS, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSList. +func (in *DNSList) DeepCopy() *DNSList { + if in == nil { + return nil + } + out := new(DNSList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DNSList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSNodePlacement) DeepCopyInto(out *DNSNodePlacement) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSNodePlacement. +func (in *DNSNodePlacement) DeepCopy() *DNSNodePlacement { + if in == nil { + return nil + } + out := new(DNSNodePlacement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSSpec) DeepCopyInto(out *DNSSpec) { + *out = *in + if in.Servers != nil { + in, out := &in.Servers, &out.Servers + *out = make([]Server, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.NodePlacement.DeepCopyInto(&out.NodePlacement) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSSpec. 
+func (in *DNSSpec) DeepCopy() *DNSSpec { + if in == nil { + return nil + } + out := new(DNSSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSStatus) DeepCopyInto(out *DNSStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]OperatorCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSStatus. +func (in *DNSStatus) DeepCopy() *DNSStatus { + if in == nil { + return nil + } + out := new(DNSStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultNetworkDefinition) DeepCopyInto(out *DefaultNetworkDefinition) { + *out = *in + if in.OpenShiftSDNConfig != nil { + in, out := &in.OpenShiftSDNConfig, &out.OpenShiftSDNConfig + *out = new(OpenShiftSDNConfig) + (*in).DeepCopyInto(*out) + } + if in.OVNKubernetesConfig != nil { + in, out := &in.OVNKubernetesConfig, &out.OVNKubernetesConfig + *out = new(OVNKubernetesConfig) + (*in).DeepCopyInto(*out) + } + if in.KuryrConfig != nil { + in, out := &in.KuryrConfig, &out.KuryrConfig + *out = new(KuryrConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultNetworkDefinition. +func (in *DefaultNetworkDefinition) DeepCopy() *DefaultNetworkDefinition { + if in == nil { + return nil + } + out := new(DefaultNetworkDefinition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeveloperConsoleCatalogCategory) DeepCopyInto(out *DeveloperConsoleCatalogCategory) { + *out = *in + in.DeveloperConsoleCatalogCategoryMeta.DeepCopyInto(&out.DeveloperConsoleCatalogCategoryMeta) + if in.Subcategories != nil { + in, out := &in.Subcategories, &out.Subcategories + *out = make([]DeveloperConsoleCatalogCategoryMeta, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeveloperConsoleCatalogCategory. +func (in *DeveloperConsoleCatalogCategory) DeepCopy() *DeveloperConsoleCatalogCategory { + if in == nil { + return nil + } + out := new(DeveloperConsoleCatalogCategory) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeveloperConsoleCatalogCategoryMeta) DeepCopyInto(out *DeveloperConsoleCatalogCategoryMeta) { + *out = *in + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeveloperConsoleCatalogCategoryMeta. +func (in *DeveloperConsoleCatalogCategoryMeta) DeepCopy() *DeveloperConsoleCatalogCategoryMeta { + if in == nil { + return nil + } + out := new(DeveloperConsoleCatalogCategoryMeta) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeveloperConsoleCatalogCustomization) DeepCopyInto(out *DeveloperConsoleCatalogCustomization) { + *out = *in + if in.Categories != nil { + in, out := &in.Categories, &out.Categories + *out = make([]DeveloperConsoleCatalogCategory, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeveloperConsoleCatalogCustomization. +func (in *DeveloperConsoleCatalogCustomization) DeepCopy() *DeveloperConsoleCatalogCustomization { + if in == nil { + return nil + } + out := new(DeveloperConsoleCatalogCustomization) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointPublishingStrategy) DeepCopyInto(out *EndpointPublishingStrategy) { + *out = *in + if in.LoadBalancer != nil { + in, out := &in.LoadBalancer, &out.LoadBalancer + *out = new(LoadBalancerStrategy) + (*in).DeepCopyInto(*out) + } + if in.HostNetwork != nil { + in, out := &in.HostNetwork, &out.HostNetwork + *out = new(HostNetworkStrategy) + **out = **in + } + if in.Private != nil { + in, out := &in.Private, &out.Private + *out = new(PrivateStrategy) + **out = **in + } + if in.NodePort != nil { + in, out := &in.NodePort, &out.NodePort + *out = new(NodePortStrategy) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPublishingStrategy. +func (in *EndpointPublishingStrategy) DeepCopy() *EndpointPublishingStrategy { + if in == nil { + return nil + } + out := new(EndpointPublishingStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Etcd) DeepCopyInto(out *Etcd) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Etcd. +func (in *Etcd) DeepCopy() *Etcd { + if in == nil { + return nil + } + out := new(Etcd) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Etcd) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EtcdList) DeepCopyInto(out *EtcdList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Etcd, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdList. +func (in *EtcdList) DeepCopy() *EtcdList { + if in == nil { + return nil + } + out := new(EtcdList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EtcdList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EtcdSpec) DeepCopyInto(out *EtcdSpec) { + *out = *in + in.StaticPodOperatorSpec.DeepCopyInto(&out.StaticPodOperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdSpec. +func (in *EtcdSpec) DeepCopy() *EtcdSpec { + if in == nil { + return nil + } + out := new(EtcdSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EtcdStatus) DeepCopyInto(out *EtcdStatus) { + *out = *in + in.StaticPodOperatorStatus.DeepCopyInto(&out.StaticPodOperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdStatus. +func (in *EtcdStatus) DeepCopy() *EtcdStatus { + if in == nil { + return nil + } + out := new(EtcdStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExportNetworkFlows) DeepCopyInto(out *ExportNetworkFlows) { + *out = *in + if in.NetFlow != nil { + in, out := &in.NetFlow, &out.NetFlow + *out = new(NetFlowConfig) + (*in).DeepCopyInto(*out) + } + if in.SFlow != nil { + in, out := &in.SFlow, &out.SFlow + *out = new(SFlowConfig) + (*in).DeepCopyInto(*out) + } + if in.IPFIX != nil { + in, out := &in.IPFIX, &out.IPFIX + *out = new(IPFIXConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExportNetworkFlows. +func (in *ExportNetworkFlows) DeepCopy() *ExportNetworkFlows { + if in == nil { + return nil + } + out := new(ExportNetworkFlows) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ForwardPlugin) DeepCopyInto(out *ForwardPlugin) { + *out = *in + if in.Upstreams != nil { + in, out := &in.Upstreams, &out.Upstreams + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForwardPlugin. +func (in *ForwardPlugin) DeepCopy() *ForwardPlugin { + if in == nil { + return nil + } + out := new(ForwardPlugin) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPLoadBalancerParameters) DeepCopyInto(out *GCPLoadBalancerParameters) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPLoadBalancerParameters. +func (in *GCPLoadBalancerParameters) DeepCopy() *GCPLoadBalancerParameters { + if in == nil { + return nil + } + out := new(GCPLoadBalancerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GenerationStatus) DeepCopyInto(out *GenerationStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenerationStatus. +func (in *GenerationStatus) DeepCopy() *GenerationStatus { + if in == nil { + return nil + } + out := new(GenerationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HostNetworkStrategy) DeepCopyInto(out *HostNetworkStrategy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostNetworkStrategy. +func (in *HostNetworkStrategy) DeepCopy() *HostNetworkStrategy { + if in == nil { + return nil + } + out := new(HostNetworkStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HybridOverlayConfig) DeepCopyInto(out *HybridOverlayConfig) { + *out = *in + if in.HybridClusterNetwork != nil { + in, out := &in.HybridClusterNetwork, &out.HybridClusterNetwork + *out = make([]ClusterNetworkEntry, len(*in)) + copy(*out, *in) + } + if in.HybridOverlayVXLANPort != nil { + in, out := &in.HybridOverlayVXLANPort, &out.HybridOverlayVXLANPort + *out = new(uint32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HybridOverlayConfig. +func (in *HybridOverlayConfig) DeepCopy() *HybridOverlayConfig { + if in == nil { + return nil + } + out := new(HybridOverlayConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPAMConfig) DeepCopyInto(out *IPAMConfig) { + *out = *in + if in.StaticIPAMConfig != nil { + in, out := &in.StaticIPAMConfig, &out.StaticIPAMConfig + *out = new(StaticIPAMConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMConfig. +func (in *IPAMConfig) DeepCopy() *IPAMConfig { + if in == nil { + return nil + } + out := new(IPAMConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPFIXConfig) DeepCopyInto(out *IPFIXConfig) { + *out = *in + if in.Collectors != nil { + in, out := &in.Collectors, &out.Collectors + *out = make([]IPPort, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPFIXConfig. +func (in *IPFIXConfig) DeepCopy() *IPFIXConfig { + if in == nil { + return nil + } + out := new(IPFIXConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPsecConfig) DeepCopyInto(out *IPsecConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPsecConfig. +func (in *IPsecConfig) DeepCopy() *IPsecConfig { + if in == nil { + return nil + } + out := new(IPsecConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressController) DeepCopyInto(out *IngressController) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressController. 
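+//
+// Sketch of the reconcile-loop idiom DeepCopy supports (illustrative; the
+// client variable, namespace and name are assumptions):
+//
+//	ic, err := client.IngressControllers("openshift-ingress-operator").Get(ctx, "default", metav1.GetOptions{})
+//	if err != nil {
+//		return err
+//	}
+//	ic = ic.DeepCopy()           // never mutate the object owned by the client/cache
+//	replicas := int32(3)
+//	ic.Spec.Replicas = &replicas // Replicas is *int32, hence the temporary
+//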
+func (in *IngressController) DeepCopy() *IngressController { + if in == nil { + return nil + } + out := new(IngressController) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IngressController) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressControllerCaptureHTTPCookie) DeepCopyInto(out *IngressControllerCaptureHTTPCookie) { + *out = *in + out.IngressControllerCaptureHTTPCookieUnion = in.IngressControllerCaptureHTTPCookieUnion + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerCaptureHTTPCookie. +func (in *IngressControllerCaptureHTTPCookie) DeepCopy() *IngressControllerCaptureHTTPCookie { + if in == nil { + return nil + } + out := new(IngressControllerCaptureHTTPCookie) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressControllerCaptureHTTPCookieUnion) DeepCopyInto(out *IngressControllerCaptureHTTPCookieUnion) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerCaptureHTTPCookieUnion. +func (in *IngressControllerCaptureHTTPCookieUnion) DeepCopy() *IngressControllerCaptureHTTPCookieUnion { + if in == nil { + return nil + } + out := new(IngressControllerCaptureHTTPCookieUnion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressControllerCaptureHTTPHeader) DeepCopyInto(out *IngressControllerCaptureHTTPHeader) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerCaptureHTTPHeader. +func (in *IngressControllerCaptureHTTPHeader) DeepCopy() *IngressControllerCaptureHTTPHeader { + if in == nil { + return nil + } + out := new(IngressControllerCaptureHTTPHeader) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressControllerCaptureHTTPHeaders) DeepCopyInto(out *IngressControllerCaptureHTTPHeaders) { + *out = *in + if in.Request != nil { + in, out := &in.Request, &out.Request + *out = make([]IngressControllerCaptureHTTPHeader, len(*in)) + copy(*out, *in) + } + if in.Response != nil { + in, out := &in.Response, &out.Response + *out = make([]IngressControllerCaptureHTTPHeader, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerCaptureHTTPHeaders. +func (in *IngressControllerCaptureHTTPHeaders) DeepCopy() *IngressControllerCaptureHTTPHeaders { + if in == nil { + return nil + } + out := new(IngressControllerCaptureHTTPHeaders) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IngressControllerHTTPHeaders) DeepCopyInto(out *IngressControllerHTTPHeaders) { + *out = *in + out.UniqueId = in.UniqueId + if in.HeaderNameCaseAdjustments != nil { + in, out := &in.HeaderNameCaseAdjustments, &out.HeaderNameCaseAdjustments + *out = make([]IngressControllerHTTPHeaderNameCaseAdjustment, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerHTTPHeaders. +func (in *IngressControllerHTTPHeaders) DeepCopy() *IngressControllerHTTPHeaders { + if in == nil { + return nil + } + out := new(IngressControllerHTTPHeaders) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressControllerHTTPUniqueIdHeaderPolicy) DeepCopyInto(out *IngressControllerHTTPUniqueIdHeaderPolicy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerHTTPUniqueIdHeaderPolicy. +func (in *IngressControllerHTTPUniqueIdHeaderPolicy) DeepCopy() *IngressControllerHTTPUniqueIdHeaderPolicy { + if in == nil { + return nil + } + out := new(IngressControllerHTTPUniqueIdHeaderPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressControllerList) DeepCopyInto(out *IngressControllerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]IngressController, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerList. +func (in *IngressControllerList) DeepCopy() *IngressControllerList { + if in == nil { + return nil + } + out := new(IngressControllerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IngressControllerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressControllerLogging) DeepCopyInto(out *IngressControllerLogging) { + *out = *in + if in.Access != nil { + in, out := &in.Access, &out.Access + *out = new(AccessLogging) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerLogging. +func (in *IngressControllerLogging) DeepCopy() *IngressControllerLogging { + if in == nil { + return nil + } + out := new(IngressControllerLogging) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IngressControllerSpec) DeepCopyInto(out *IngressControllerSpec) { + *out = *in + out.HttpErrorCodePages = in.HttpErrorCodePages + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.EndpointPublishingStrategy != nil { + in, out := &in.EndpointPublishingStrategy, &out.EndpointPublishingStrategy + *out = new(EndpointPublishingStrategy) + (*in).DeepCopyInto(*out) + } + if in.DefaultCertificate != nil { + in, out := &in.DefaultCertificate, &out.DefaultCertificate + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.RouteSelector != nil { + in, out := &in.RouteSelector, &out.RouteSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.NodePlacement != nil { + in, out := &in.NodePlacement, &out.NodePlacement + *out = new(NodePlacement) + (*in).DeepCopyInto(*out) + } + if in.TLSSecurityProfile != nil { + in, out := &in.TLSSecurityProfile, &out.TLSSecurityProfile + *out = new(configv1.TLSSecurityProfile) + (*in).DeepCopyInto(*out) + } + if in.RouteAdmission != nil { + in, out := &in.RouteAdmission, &out.RouteAdmission + *out = new(RouteAdmissionPolicy) + **out = **in + } + if in.Logging != nil { + in, out := &in.Logging, &out.Logging + *out = new(IngressControllerLogging) + (*in).DeepCopyInto(*out) + } + if in.HTTPHeaders != nil { + in, out := &in.HTTPHeaders, &out.HTTPHeaders + *out = new(IngressControllerHTTPHeaders) + (*in).DeepCopyInto(*out) + } + out.TuningOptions = in.TuningOptions + in.UnsupportedConfigOverrides.DeepCopyInto(&out.UnsupportedConfigOverrides) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerSpec. +func (in *IngressControllerSpec) DeepCopy() *IngressControllerSpec { + if in == nil { + return nil + } + out := new(IngressControllerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressControllerStatus) DeepCopyInto(out *IngressControllerStatus) { + *out = *in + if in.EndpointPublishingStrategy != nil { + in, out := &in.EndpointPublishingStrategy, &out.EndpointPublishingStrategy + *out = new(EndpointPublishingStrategy) + (*in).DeepCopyInto(*out) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]OperatorCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TLSProfile != nil { + in, out := &in.TLSProfile, &out.TLSProfile + *out = new(configv1.TLSProfileSpec) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerStatus. +func (in *IngressControllerStatus) DeepCopy() *IngressControllerStatus { + if in == nil { + return nil + } + out := new(IngressControllerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressControllerTuningOptions) DeepCopyInto(out *IngressControllerTuningOptions) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerTuningOptions. 
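+//
+// IngressControllerSpec above mixes the two copy strategies used throughout
+// this file: plain pointer-dereference assignment (**out = **in) for leaf
+// structs such as corev1.LocalObjectReference that contain no references,
+// and recursive DeepCopyInto for types such as metav1.LabelSelector, whose
+// MatchLabels map and MatchExpressions slice would otherwise be aliased.
+// Illustrative:
+//
+//	sel := &metav1.LabelSelector{MatchLabels: map[string]string{"app": "x"}}
+//	shallow := *sel        // shares the MatchLabels map with sel
+//	deep := sel.DeepCopy() // re-allocates the map
+//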
+func (in *IngressControllerTuningOptions) DeepCopy() *IngressControllerTuningOptions { + if in == nil { + return nil + } + out := new(IngressControllerTuningOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeAPIServer) DeepCopyInto(out *KubeAPIServer) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServer. +func (in *KubeAPIServer) DeepCopy() *KubeAPIServer { + if in == nil { + return nil + } + out := new(KubeAPIServer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeAPIServer) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeAPIServerList) DeepCopyInto(out *KubeAPIServerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KubeAPIServer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServerList. +func (in *KubeAPIServerList) DeepCopy() *KubeAPIServerList { + if in == nil { + return nil + } + out := new(KubeAPIServerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeAPIServerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeAPIServerSpec) DeepCopyInto(out *KubeAPIServerSpec) { + *out = *in + in.StaticPodOperatorSpec.DeepCopyInto(&out.StaticPodOperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServerSpec. +func (in *KubeAPIServerSpec) DeepCopy() *KubeAPIServerSpec { + if in == nil { + return nil + } + out := new(KubeAPIServerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeAPIServerStatus) DeepCopyInto(out *KubeAPIServerStatus) { + *out = *in + in.StaticPodOperatorStatus.DeepCopyInto(&out.StaticPodOperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServerStatus. +func (in *KubeAPIServerStatus) DeepCopy() *KubeAPIServerStatus { + if in == nil { + return nil + } + out := new(KubeAPIServerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubeControllerManager) DeepCopyInto(out *KubeControllerManager) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeControllerManager. +func (in *KubeControllerManager) DeepCopy() *KubeControllerManager { + if in == nil { + return nil + } + out := new(KubeControllerManager) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeControllerManager) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeControllerManagerList) DeepCopyInto(out *KubeControllerManagerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KubeControllerManager, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeControllerManagerList. +func (in *KubeControllerManagerList) DeepCopy() *KubeControllerManagerList { + if in == nil { + return nil + } + out := new(KubeControllerManagerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeControllerManagerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeControllerManagerSpec) DeepCopyInto(out *KubeControllerManagerSpec) { + *out = *in + in.StaticPodOperatorSpec.DeepCopyInto(&out.StaticPodOperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeControllerManagerSpec. +func (in *KubeControllerManagerSpec) DeepCopy() *KubeControllerManagerSpec { + if in == nil { + return nil + } + out := new(KubeControllerManagerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeControllerManagerStatus) DeepCopyInto(out *KubeControllerManagerStatus) { + *out = *in + in.StaticPodOperatorStatus.DeepCopyInto(&out.StaticPodOperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeControllerManagerStatus. +func (in *KubeControllerManagerStatus) DeepCopy() *KubeControllerManagerStatus { + if in == nil { + return nil + } + out := new(KubeControllerManagerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeScheduler) DeepCopyInto(out *KubeScheduler) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeScheduler. 
+func (in *KubeScheduler) DeepCopy() *KubeScheduler { + if in == nil { + return nil + } + out := new(KubeScheduler) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeScheduler) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeSchedulerList) DeepCopyInto(out *KubeSchedulerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KubeScheduler, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerList. +func (in *KubeSchedulerList) DeepCopy() *KubeSchedulerList { + if in == nil { + return nil + } + out := new(KubeSchedulerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeSchedulerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeSchedulerSpec) DeepCopyInto(out *KubeSchedulerSpec) { + *out = *in + in.StaticPodOperatorSpec.DeepCopyInto(&out.StaticPodOperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerSpec. +func (in *KubeSchedulerSpec) DeepCopy() *KubeSchedulerSpec { + if in == nil { + return nil + } + out := new(KubeSchedulerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeSchedulerStatus) DeepCopyInto(out *KubeSchedulerStatus) { + *out = *in + in.StaticPodOperatorStatus.DeepCopyInto(&out.StaticPodOperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerStatus. +func (in *KubeSchedulerStatus) DeepCopy() *KubeSchedulerStatus { + if in == nil { + return nil + } + out := new(KubeSchedulerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeStorageVersionMigrator) DeepCopyInto(out *KubeStorageVersionMigrator) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeStorageVersionMigrator. +func (in *KubeStorageVersionMigrator) DeepCopy() *KubeStorageVersionMigrator { + if in == nil { + return nil + } + out := new(KubeStorageVersionMigrator) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeStorageVersionMigrator) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
+// in must be non-nil.
+func (in *KubeStorageVersionMigratorList) DeepCopyInto(out *KubeStorageVersionMigratorList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KubeStorageVersionMigrator, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeStorageVersionMigratorList. +func (in *KubeStorageVersionMigratorList) DeepCopy() *KubeStorageVersionMigratorList { + if in == nil { + return nil + } + out := new(KubeStorageVersionMigratorList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeStorageVersionMigratorList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeStorageVersionMigratorSpec) DeepCopyInto(out *KubeStorageVersionMigratorSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeStorageVersionMigratorSpec. +func (in *KubeStorageVersionMigratorSpec) DeepCopy() *KubeStorageVersionMigratorSpec { + if in == nil { + return nil + } + out := new(KubeStorageVersionMigratorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeStorageVersionMigratorStatus) DeepCopyInto(out *KubeStorageVersionMigratorStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeStorageVersionMigratorStatus. +func (in *KubeStorageVersionMigratorStatus) DeepCopy() *KubeStorageVersionMigratorStatus { + if in == nil { + return nil + } + out := new(KubeStorageVersionMigratorStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KuryrConfig) DeepCopyInto(out *KuryrConfig) { + *out = *in + if in.DaemonProbesPort != nil { + in, out := &in.DaemonProbesPort, &out.DaemonProbesPort + *out = new(uint32) + **out = **in + } + if in.ControllerProbesPort != nil { + in, out := &in.ControllerProbesPort, &out.ControllerProbesPort + *out = new(uint32) + **out = **in + } + if in.PoolBatchPorts != nil { + in, out := &in.PoolBatchPorts, &out.PoolBatchPorts + *out = new(uint) + **out = **in + } + if in.MTU != nil { + in, out := &in.MTU, &out.MTU + *out = new(uint32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KuryrConfig. +func (in *KuryrConfig) DeepCopy() *KuryrConfig { + if in == nil { + return nil + } + out := new(KuryrConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
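+//
+// Optional scalar fields follow the pattern visible in KuryrConfig above:
+// allocate a fresh pointer, then copy the pointee, so copy and original
+// never alias. Illustrative:
+//
+//	cp := cfg.DeepCopy() // cfg is a *KuryrConfig with MTU set
+//	*cfg.MTU = 9000      // leaves *cp.MTU unchanged
+//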
+func (in *LoadBalancerStrategy) DeepCopyInto(out *LoadBalancerStrategy) { + *out = *in + if in.ProviderParameters != nil { + in, out := &in.ProviderParameters, &out.ProviderParameters + *out = new(ProviderLoadBalancerParameters) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerStrategy. +func (in *LoadBalancerStrategy) DeepCopy() *LoadBalancerStrategy { + if in == nil { + return nil + } + out := new(LoadBalancerStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingDestination) DeepCopyInto(out *LoggingDestination) { + *out = *in + if in.Syslog != nil { + in, out := &in.Syslog, &out.Syslog + *out = new(SyslogLoggingDestinationParameters) + **out = **in + } + if in.Container != nil { + in, out := &in.Container, &out.Container + *out = new(ContainerLoggingDestinationParameters) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingDestination. +func (in *LoggingDestination) DeepCopy() *LoggingDestination { + if in == nil { + return nil + } + out := new(LoggingDestination) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MyOperatorResource) DeepCopyInto(out *MyOperatorResource) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MyOperatorResource. +func (in *MyOperatorResource) DeepCopy() *MyOperatorResource { + if in == nil { + return nil + } + out := new(MyOperatorResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MyOperatorResourceSpec) DeepCopyInto(out *MyOperatorResourceSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MyOperatorResourceSpec. +func (in *MyOperatorResourceSpec) DeepCopy() *MyOperatorResourceSpec { + if in == nil { + return nil + } + out := new(MyOperatorResourceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MyOperatorResourceStatus) DeepCopyInto(out *MyOperatorResourceStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MyOperatorResourceStatus. +func (in *MyOperatorResourceStatus) DeepCopy() *MyOperatorResourceStatus { + if in == nil { + return nil + } + out := new(MyOperatorResourceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetFlowConfig) DeepCopyInto(out *NetFlowConfig) { + *out = *in + if in.Collectors != nil { + in, out := &in.Collectors, &out.Collectors + *out = make([]IPPort, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetFlowConfig. +func (in *NetFlowConfig) DeepCopy() *NetFlowConfig { + if in == nil { + return nil + } + out := new(NetFlowConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Network) DeepCopyInto(out *Network) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Network. +func (in *Network) DeepCopy() *Network { + if in == nil { + return nil + } + out := new(Network) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Network) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkList) DeepCopyInto(out *NetworkList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Network, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkList. +func (in *NetworkList) DeepCopy() *NetworkList { + if in == nil { + return nil + } + out := new(NetworkList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NetworkList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkMigration) DeepCopyInto(out *NetworkMigration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkMigration. +func (in *NetworkMigration) DeepCopy() *NetworkMigration { + if in == nil { + return nil + } + out := new(NetworkMigration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + if in.ClusterNetwork != nil { + in, out := &in.ClusterNetwork, &out.ClusterNetwork + *out = make([]ClusterNetworkEntry, len(*in)) + copy(*out, *in) + } + if in.ServiceNetwork != nil { + in, out := &in.ServiceNetwork, &out.ServiceNetwork + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.DefaultNetwork.DeepCopyInto(&out.DefaultNetwork) + if in.AdditionalNetworks != nil { + in, out := &in.AdditionalNetworks, &out.AdditionalNetworks + *out = make([]AdditionalNetworkDefinition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DisableMultiNetwork != nil { + in, out := &in.DisableMultiNetwork, &out.DisableMultiNetwork + *out = new(bool) + **out = **in + } + if in.UseMultiNetworkPolicy != nil { + in, out := &in.UseMultiNetworkPolicy, &out.UseMultiNetworkPolicy + *out = new(bool) + **out = **in + } + if in.DeployKubeProxy != nil { + in, out := &in.DeployKubeProxy, &out.DeployKubeProxy + *out = new(bool) + **out = **in + } + if in.KubeProxyConfig != nil { + in, out := &in.KubeProxyConfig, &out.KubeProxyConfig + *out = new(ProxyConfig) + (*in).DeepCopyInto(*out) + } + if in.ExportNetworkFlows != nil { + in, out := &in.ExportNetworkFlows, &out.ExportNetworkFlows + *out = new(ExportNetworkFlows) + (*in).DeepCopyInto(*out) + } + if in.Migration != nil { + in, out := &in.Migration, &out.Migration + *out = new(NetworkMigration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSpec. +func (in *NetworkSpec) DeepCopy() *NetworkSpec { + if in == nil { + return nil + } + out := new(NetworkSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkStatus) DeepCopyInto(out *NetworkStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkStatus. +func (in *NetworkStatus) DeepCopy() *NetworkStatus { + if in == nil { + return nil + } + out := new(NetworkStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodePlacement) DeepCopyInto(out *NodePlacement) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePlacement. +func (in *NodePlacement) DeepCopy() *NodePlacement { + if in == nil { + return nil + } + out := new(NodePlacement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodePortStrategy) DeepCopyInto(out *NodePortStrategy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePortStrategy. 
+func (in *NodePortStrategy) DeepCopy() *NodePortStrategy { + if in == nil { + return nil + } + out := new(NodePortStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeStatus) DeepCopyInto(out *NodeStatus) { + *out = *in + if in.LastFailedTime != nil { + in, out := &in.LastFailedTime, &out.LastFailedTime + *out = (*in).DeepCopy() + } + if in.LastFailedRevisionErrors != nil { + in, out := &in.LastFailedRevisionErrors, &out.LastFailedRevisionErrors + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeStatus. +func (in *NodeStatus) DeepCopy() *NodeStatus { + if in == nil { + return nil + } + out := new(NodeStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthAPIServerStatus) DeepCopyInto(out *OAuthAPIServerStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthAPIServerStatus. +func (in *OAuthAPIServerStatus) DeepCopy() *OAuthAPIServerStatus { + if in == nil { + return nil + } + out := new(OAuthAPIServerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OVNKubernetesConfig) DeepCopyInto(out *OVNKubernetesConfig) { + *out = *in + if in.MTU != nil { + in, out := &in.MTU, &out.MTU + *out = new(uint32) + **out = **in + } + if in.GenevePort != nil { + in, out := &in.GenevePort, &out.GenevePort + *out = new(uint32) + **out = **in + } + if in.HybridOverlayConfig != nil { + in, out := &in.HybridOverlayConfig, &out.HybridOverlayConfig + *out = new(HybridOverlayConfig) + (*in).DeepCopyInto(*out) + } + if in.IPsecConfig != nil { + in, out := &in.IPsecConfig, &out.IPsecConfig + *out = new(IPsecConfig) + **out = **in + } + if in.PolicyAuditConfig != nil { + in, out := &in.PolicyAuditConfig, &out.PolicyAuditConfig + *out = new(PolicyAuditConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OVNKubernetesConfig. +func (in *OVNKubernetesConfig) DeepCopy() *OVNKubernetesConfig { + if in == nil { + return nil + } + out := new(OVNKubernetesConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenShiftAPIServer) DeepCopyInto(out *OpenShiftAPIServer) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftAPIServer. +func (in *OpenShiftAPIServer) DeepCopy() *OpenShiftAPIServer { + if in == nil { + return nil + } + out := new(OpenShiftAPIServer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OpenShiftAPIServer) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
+// in must be non-nil.
+func (in *OpenShiftAPIServerList) DeepCopyInto(out *OpenShiftAPIServerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OpenShiftAPIServer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftAPIServerList. +func (in *OpenShiftAPIServerList) DeepCopy() *OpenShiftAPIServerList { + if in == nil { + return nil + } + out := new(OpenShiftAPIServerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OpenShiftAPIServerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenShiftAPIServerSpec) DeepCopyInto(out *OpenShiftAPIServerSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftAPIServerSpec. +func (in *OpenShiftAPIServerSpec) DeepCopy() *OpenShiftAPIServerSpec { + if in == nil { + return nil + } + out := new(OpenShiftAPIServerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenShiftAPIServerStatus) DeepCopyInto(out *OpenShiftAPIServerStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftAPIServerStatus. +func (in *OpenShiftAPIServerStatus) DeepCopy() *OpenShiftAPIServerStatus { + if in == nil { + return nil + } + out := new(OpenShiftAPIServerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenShiftControllerManager) DeepCopyInto(out *OpenShiftControllerManager) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftControllerManager. +func (in *OpenShiftControllerManager) DeepCopy() *OpenShiftControllerManager { + if in == nil { + return nil + } + out := new(OpenShiftControllerManager) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OpenShiftControllerManager) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OpenShiftControllerManagerList) DeepCopyInto(out *OpenShiftControllerManagerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OpenShiftControllerManager, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftControllerManagerList. +func (in *OpenShiftControllerManagerList) DeepCopy() *OpenShiftControllerManagerList { + if in == nil { + return nil + } + out := new(OpenShiftControllerManagerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OpenShiftControllerManagerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenShiftControllerManagerSpec) DeepCopyInto(out *OpenShiftControllerManagerSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftControllerManagerSpec. +func (in *OpenShiftControllerManagerSpec) DeepCopy() *OpenShiftControllerManagerSpec { + if in == nil { + return nil + } + out := new(OpenShiftControllerManagerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenShiftControllerManagerStatus) DeepCopyInto(out *OpenShiftControllerManagerStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftControllerManagerStatus. +func (in *OpenShiftControllerManagerStatus) DeepCopy() *OpenShiftControllerManagerStatus { + if in == nil { + return nil + } + out := new(OpenShiftControllerManagerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenShiftSDNConfig) DeepCopyInto(out *OpenShiftSDNConfig) { + *out = *in + if in.VXLANPort != nil { + in, out := &in.VXLANPort, &out.VXLANPort + *out = new(uint32) + **out = **in + } + if in.MTU != nil { + in, out := &in.MTU, &out.MTU + *out = new(uint32) + **out = **in + } + if in.UseExternalOpenvswitch != nil { + in, out := &in.UseExternalOpenvswitch, &out.UseExternalOpenvswitch + *out = new(bool) + **out = **in + } + if in.EnableUnidling != nil { + in, out := &in.EnableUnidling, &out.EnableUnidling + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftSDNConfig. +func (in *OpenShiftSDNConfig) DeepCopy() *OpenShiftSDNConfig { + if in == nil { + return nil + } + out := new(OpenShiftSDNConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OperatorCondition) DeepCopyInto(out *OperatorCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorCondition. +func (in *OperatorCondition) DeepCopy() *OperatorCondition { + if in == nil { + return nil + } + out := new(OperatorCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatorSpec) DeepCopyInto(out *OperatorSpec) { + *out = *in + in.UnsupportedConfigOverrides.DeepCopyInto(&out.UnsupportedConfigOverrides) + in.ObservedConfig.DeepCopyInto(&out.ObservedConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorSpec. +func (in *OperatorSpec) DeepCopy() *OperatorSpec { + if in == nil { + return nil + } + out := new(OperatorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatorStatus) DeepCopyInto(out *OperatorStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]OperatorCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Generations != nil { + in, out := &in.Generations, &out.Generations + *out = make([]GenerationStatus, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorStatus. +func (in *OperatorStatus) DeepCopy() *OperatorStatus { + if in == nil { + return nil + } + out := new(OperatorStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyAuditConfig) DeepCopyInto(out *PolicyAuditConfig) { + *out = *in + if in.RateLimit != nil { + in, out := &in.RateLimit, &out.RateLimit + *out = new(uint32) + **out = **in + } + if in.MaxFileSize != nil { + in, out := &in.MaxFileSize, &out.MaxFileSize + *out = new(uint32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyAuditConfig. +func (in *PolicyAuditConfig) DeepCopy() *PolicyAuditConfig { + if in == nil { + return nil + } + out := new(PolicyAuditConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateStrategy) DeepCopyInto(out *PrivateStrategy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateStrategy. +func (in *PrivateStrategy) DeepCopy() *PrivateStrategy { + if in == nil { + return nil + } + out := new(PrivateStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectAccess) DeepCopyInto(out *ProjectAccess) { + *out = *in + if in.AvailableClusterRoles != nil { + in, out := &in.AvailableClusterRoles, &out.AvailableClusterRoles + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectAccess. 
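+//
+// Because Conditions is a slice of structs, OperatorStatus above is copied
+// element by element; a status update is therefore prepared on a copy
+// (illustrative; the condition values are assumptions):
+//
+//	st := status.DeepCopy() // status is a *OperatorStatus
+//	st.Conditions = append(st.Conditions, OperatorCondition{Type: "Available", Status: ConditionTrue})
+//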
+func (in *ProjectAccess) DeepCopy() *ProjectAccess { + if in == nil { + return nil + } + out := new(ProjectAccess) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProviderLoadBalancerParameters) DeepCopyInto(out *ProviderLoadBalancerParameters) { + *out = *in + if in.AWS != nil { + in, out := &in.AWS, &out.AWS + *out = new(AWSLoadBalancerParameters) + (*in).DeepCopyInto(*out) + } + if in.GCP != nil { + in, out := &in.GCP, &out.GCP + *out = new(GCPLoadBalancerParameters) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderLoadBalancerParameters. +func (in *ProviderLoadBalancerParameters) DeepCopy() *ProviderLoadBalancerParameters { + if in == nil { + return nil + } + out := new(ProviderLoadBalancerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in ProxyArgumentList) DeepCopyInto(out *ProxyArgumentList) { + { + in := &in + *out = make(ProxyArgumentList, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyArgumentList. +func (in ProxyArgumentList) DeepCopy() ProxyArgumentList { + if in == nil { + return nil + } + out := new(ProxyArgumentList) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxyConfig) DeepCopyInto(out *ProxyConfig) { + *out = *in + if in.ProxyArguments != nil { + in, out := &in.ProxyArguments, &out.ProxyArguments + *out = make(map[string]ProxyArgumentList, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(ProxyArgumentList, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyConfig. +func (in *ProxyConfig) DeepCopy() *ProxyConfig { + if in == nil { + return nil + } + out := new(ProxyConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QuickStarts) DeepCopyInto(out *QuickStarts) { + *out = *in + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuickStarts. +func (in *QuickStarts) DeepCopy() *QuickStarts { + if in == nil { + return nil + } + out := new(QuickStarts) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteAdmissionPolicy) DeepCopyInto(out *RouteAdmissionPolicy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteAdmissionPolicy. +func (in *RouteAdmissionPolicy) DeepCopy() *RouteAdmissionPolicy { + if in == nil { + return nil + } + out := new(RouteAdmissionPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
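+//
+// ProxyConfig above is the deepest shape in this file: a map whose values
+// are slices. The generated code re-allocates the map and every value slice,
+// so mutating a copy cannot leak into the original (illustrative; the flag
+// name is an assumption):
+//
+//	cp := pc.DeepCopy() // pc is a *ProxyConfig
+//	cp.ProxyArguments["proxy-mode"] = append(cp.ProxyArguments["proxy-mode"], "iptables")
+//	// pc.ProxyArguments still holds only the original arguments
+//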
+func (in *SFlowConfig) DeepCopyInto(out *SFlowConfig) { + *out = *in + if in.Collectors != nil { + in, out := &in.Collectors, &out.Collectors + *out = make([]IPPort, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SFlowConfig. +func (in *SFlowConfig) DeepCopy() *SFlowConfig { + if in == nil { + return nil + } + out := new(SFlowConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Server) DeepCopyInto(out *Server) { + *out = *in + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.ForwardPlugin.DeepCopyInto(&out.ForwardPlugin) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Server. +func (in *Server) DeepCopy() *Server { + if in == nil { + return nil + } + out := new(Server) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCA) DeepCopyInto(out *ServiceCA) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCA. +func (in *ServiceCA) DeepCopy() *ServiceCA { + if in == nil { + return nil + } + out := new(ServiceCA) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceCA) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCAList) DeepCopyInto(out *ServiceCAList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServiceCA, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCAList. +func (in *ServiceCAList) DeepCopy() *ServiceCAList { + if in == nil { + return nil + } + out := new(ServiceCAList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceCAList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCASpec) DeepCopyInto(out *ServiceCASpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCASpec. +func (in *ServiceCASpec) DeepCopy() *ServiceCASpec { + if in == nil { + return nil + } + out := new(ServiceCASpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceCAStatus) DeepCopyInto(out *ServiceCAStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCAStatus. +func (in *ServiceCAStatus) DeepCopy() *ServiceCAStatus { + if in == nil { + return nil + } + out := new(ServiceCAStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCatalogAPIServer) DeepCopyInto(out *ServiceCatalogAPIServer) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogAPIServer. +func (in *ServiceCatalogAPIServer) DeepCopy() *ServiceCatalogAPIServer { + if in == nil { + return nil + } + out := new(ServiceCatalogAPIServer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceCatalogAPIServer) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCatalogAPIServerList) DeepCopyInto(out *ServiceCatalogAPIServerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServiceCatalogAPIServer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogAPIServerList. +func (in *ServiceCatalogAPIServerList) DeepCopy() *ServiceCatalogAPIServerList { + if in == nil { + return nil + } + out := new(ServiceCatalogAPIServerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceCatalogAPIServerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCatalogAPIServerSpec) DeepCopyInto(out *ServiceCatalogAPIServerSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogAPIServerSpec. +func (in *ServiceCatalogAPIServerSpec) DeepCopy() *ServiceCatalogAPIServerSpec { + if in == nil { + return nil + } + out := new(ServiceCatalogAPIServerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCatalogAPIServerStatus) DeepCopyInto(out *ServiceCatalogAPIServerStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogAPIServerStatus. 
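// ---- Editor's note: illustrative sketch, not part of the vendored generated
// file. List types are copied element-wise (the for-range over Items above),
// so a copied list can be relabeled or filtered without mutating the source
// slice. Assumes the usual embedded metav1.ObjectMeta on the item type; the
// function name is hypothetical.
func exampleListCopyIsIndependent() {
	list := &ServiceCatalogAPIServerList{
		Items: []ServiceCatalogAPIServer{{}},
	}
	listCopy := list.DeepCopy()
	listCopy.Items[0].Name = "renamed" // promoted from metav1.ObjectMeta
	// list.Items[0].Name is still "" here: Items got a new backing array.
}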
+func (in *ServiceCatalogAPIServerStatus) DeepCopy() *ServiceCatalogAPIServerStatus { + if in == nil { + return nil + } + out := new(ServiceCatalogAPIServerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCatalogControllerManager) DeepCopyInto(out *ServiceCatalogControllerManager) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogControllerManager. +func (in *ServiceCatalogControllerManager) DeepCopy() *ServiceCatalogControllerManager { + if in == nil { + return nil + } + out := new(ServiceCatalogControllerManager) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceCatalogControllerManager) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCatalogControllerManagerList) DeepCopyInto(out *ServiceCatalogControllerManagerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServiceCatalogControllerManager, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogControllerManagerList. +func (in *ServiceCatalogControllerManagerList) DeepCopy() *ServiceCatalogControllerManagerList { + if in == nil { + return nil + } + out := new(ServiceCatalogControllerManagerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceCatalogControllerManagerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCatalogControllerManagerSpec) DeepCopyInto(out *ServiceCatalogControllerManagerSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogControllerManagerSpec. +func (in *ServiceCatalogControllerManagerSpec) DeepCopy() *ServiceCatalogControllerManagerSpec { + if in == nil { + return nil + } + out := new(ServiceCatalogControllerManagerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCatalogControllerManagerStatus) DeepCopyInto(out *ServiceCatalogControllerManagerStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCatalogControllerManagerStatus. 
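// ---- Editor's note: illustrative sketch, not part of the vendored generated
// file. Every generated DeepCopy opens with an `if in == nil` guard (visible
// above), so callers can copy without a nil check of their own. The function
// name is hypothetical.
func exampleNilReceiverIsSafe() {
	var spec *ServiceCatalogControllerManagerSpec // deliberately nil
	if spec.DeepCopy() == nil {
		// Reached: the method returns nil for a nil receiver instead of
		// panicking, because it checks the receiver before dereferencing it.
	}
}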
+func (in *ServiceCatalogControllerManagerStatus) DeepCopy() *ServiceCatalogControllerManagerStatus { + if in == nil { + return nil + } + out := new(ServiceCatalogControllerManagerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SimpleMacvlanConfig) DeepCopyInto(out *SimpleMacvlanConfig) { + *out = *in + if in.IPAMConfig != nil { + in, out := &in.IPAMConfig, &out.IPAMConfig + *out = new(IPAMConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SimpleMacvlanConfig. +func (in *SimpleMacvlanConfig) DeepCopy() *SimpleMacvlanConfig { + if in == nil { + return nil + } + out := new(SimpleMacvlanConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticIPAMAddresses) DeepCopyInto(out *StaticIPAMAddresses) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticIPAMAddresses. +func (in *StaticIPAMAddresses) DeepCopy() *StaticIPAMAddresses { + if in == nil { + return nil + } + out := new(StaticIPAMAddresses) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticIPAMConfig) DeepCopyInto(out *StaticIPAMConfig) { + *out = *in + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]StaticIPAMAddresses, len(*in)) + copy(*out, *in) + } + if in.Routes != nil { + in, out := &in.Routes, &out.Routes + *out = make([]StaticIPAMRoutes, len(*in)) + copy(*out, *in) + } + if in.DNS != nil { + in, out := &in.DNS, &out.DNS + *out = new(StaticIPAMDNS) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticIPAMConfig. +func (in *StaticIPAMConfig) DeepCopy() *StaticIPAMConfig { + if in == nil { + return nil + } + out := new(StaticIPAMConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticIPAMDNS) DeepCopyInto(out *StaticIPAMDNS) { + *out = *in + if in.Nameservers != nil { + in, out := &in.Nameservers, &out.Nameservers + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Search != nil { + in, out := &in.Search, &out.Search + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticIPAMDNS. +func (in *StaticIPAMDNS) DeepCopy() *StaticIPAMDNS { + if in == nil { + return nil + } + out := new(StaticIPAMDNS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticIPAMRoutes) DeepCopyInto(out *StaticIPAMRoutes) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticIPAMRoutes. +func (in *StaticIPAMRoutes) DeepCopy() *StaticIPAMRoutes { + if in == nil { + return nil + } + out := new(StaticIPAMRoutes) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
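// ---- Editor's note: illustrative sketch, not part of the vendored generated
// file. It shows why the generator alternates between the built-in copy and
// recursion: StaticIPAMAddresses holds only value fields, so copy() above is
// already a deep copy, while StaticIPAMConfig.DNS is a pointer and gets a
// fresh allocation via DeepCopyInto. The function name is hypothetical.
func exampleStaticIPAMConfigCopy() {
	cfg := &StaticIPAMConfig{
		Addresses: make([]StaticIPAMAddresses, 1),
		DNS:       new(StaticIPAMDNS),
	}
	cfgCopy := cfg.DeepCopy()
	_ = cfgCopy.DNS != cfg.DNS // true: the copy owns a distinct *StaticIPAMDNS
}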
+func (in *StaticPodOperatorSpec) DeepCopyInto(out *StaticPodOperatorSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticPodOperatorSpec. +func (in *StaticPodOperatorSpec) DeepCopy() *StaticPodOperatorSpec { + if in == nil { + return nil + } + out := new(StaticPodOperatorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticPodOperatorStatus) DeepCopyInto(out *StaticPodOperatorStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + if in.NodeStatuses != nil { + in, out := &in.NodeStatuses, &out.NodeStatuses + *out = make([]NodeStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticPodOperatorStatus. +func (in *StaticPodOperatorStatus) DeepCopy() *StaticPodOperatorStatus { + if in == nil { + return nil + } + out := new(StaticPodOperatorStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatuspageProvider) DeepCopyInto(out *StatuspageProvider) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatuspageProvider. +func (in *StatuspageProvider) DeepCopy() *StatuspageProvider { + if in == nil { + return nil + } + out := new(StatuspageProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Storage) DeepCopyInto(out *Storage) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Storage. +func (in *Storage) DeepCopy() *Storage { + if in == nil { + return nil + } + out := new(Storage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Storage) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageList) DeepCopyInto(out *StorageList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Storage, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageList. +func (in *StorageList) DeepCopy() *StorageList { + if in == nil { + return nil + } + out := new(StorageList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StorageList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
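// ---- Editor's note: illustrative sketch, not part of the vendored generated
// file. NodeStatuses is cloned element by element above, so a controller can
// stage a per-node revision change on the copy before writing it back. Field
// names follow the swagger docs later in this patch (nodeName,
// currentRevision, targetRevision); the function name is hypothetical.
func exampleStageNodeStatusUpdate() {
	status := &StaticPodOperatorStatus{
		NodeStatuses: []NodeStatus{{NodeName: "master-0", CurrentRevision: 3}},
	}
	staged := status.DeepCopy()
	staged.NodeStatuses[0].TargetRevision = 4
	// status.NodeStatuses[0].TargetRevision is still 0: the slice was cloned.
}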
+func (in *StorageSpec) DeepCopyInto(out *StorageSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageSpec. +func (in *StorageSpec) DeepCopy() *StorageSpec { + if in == nil { + return nil + } + out := new(StorageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageStatus) DeepCopyInto(out *StorageStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageStatus. +func (in *StorageStatus) DeepCopy() *StorageStatus { + if in == nil { + return nil + } + out := new(StorageStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SyslogLoggingDestinationParameters) DeepCopyInto(out *SyslogLoggingDestinationParameters) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyslogLoggingDestinationParameters. +func (in *SyslogLoggingDestinationParameters) DeepCopy() *SyslogLoggingDestinationParameters { + if in == nil { + return nil + } + out := new(SyslogLoggingDestinationParameters) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 0000000000..0324f679c5 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,1219 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. 
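// ---- Editor's note: illustrative aside, not part of the vendored file. Each
// generated SwaggerDoc method below simply returns its package-level map,
// keyed by JSON field name (the "" key documents the type itself), so a docs
// generator can look up a field description directly, e.g.:
//
//	fieldDoc := (GenerationStatus{}).SwaggerDoc()["lastGeneration"]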
+// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_GenerationStatus = map[string]string{ + "": "GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made.", + "group": "group is the group of the thing you're tracking", + "resource": "resource is the resource type of the thing you're tracking", + "namespace": "namespace is where the thing you're tracking is", + "name": "name is the name of the thing you're tracking", + "lastGeneration": "lastGeneration is the last generation of the workload controller involved", + "hash": "hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps", +} + +func (GenerationStatus) SwaggerDoc() map[string]string { + return map_GenerationStatus +} + +var map_MyOperatorResource = map[string]string{ + "": "MyOperatorResource is an example operator configuration type", +} + +func (MyOperatorResource) SwaggerDoc() map[string]string { + return map_MyOperatorResource +} + +var map_NodeStatus = map[string]string{ + "": "NodeStatus provides information about the current state of a particular node managed by this operator.", + "nodeName": "nodeName is the name of the node", + "currentRevision": "currentRevision is the generation of the most recently successful deployment", + "targetRevision": "targetRevision is the generation of the deployment we're trying to apply", + "lastFailedRevision": "lastFailedRevision is the generation of the deployment we tried and failed to deploy.", + "lastFailedTime": "lastFailedTime is the time the last failed revision failed the last time.", + "lastFailedCount": "lastFailedCount is how often the last failed revision failed.", + "lastFailedRevisionErrors": "lastFailedRevisionErrors is a list of the errors during the failed deployment referenced in lastFailedRevision", +} + +func (NodeStatus) SwaggerDoc() map[string]string { + return map_NodeStatus +} + +var map_OperatorCondition = map[string]string{ + "": "OperatorCondition is just the standard condition fields.", +} + +func (OperatorCondition) SwaggerDoc() map[string]string { + return map_OperatorCondition +} + +var map_OperatorSpec = map[string]string{ + "": "OperatorSpec contains common fields operators need. It is intended to be anonymous included inside of the Spec struct for your particular operator.", + "managementState": "managementState indicates whether and how the operator should manage the component", + "logLevel": "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands.\n\nValid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\".", + "operatorLogLevel": "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves.\n\nValid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\".", + "unsupportedConfigOverrides": "unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to be the fields to override it will end up overlaying in the following order: 1. hardcoded defaults 2. observedConfig 3. 
unsupportedConfigOverrides", + "observedConfig": "observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator", +} + +func (OperatorSpec) SwaggerDoc() map[string]string { + return map_OperatorSpec +} + +var map_OperatorStatus = map[string]string{ + "observedGeneration": "observedGeneration is the last generation change you've dealt with", + "conditions": "conditions is a list of conditions and their status", + "version": "version is the level this availability applies to", + "readyReplicas": "readyReplicas indicates how many replicas are ready and at the desired state", + "generations": "generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction.", +} + +func (OperatorStatus) SwaggerDoc() map[string]string { + return map_OperatorStatus +} + +var map_StaticPodOperatorSpec = map[string]string{ + "": "StaticPodOperatorSpec is spec for controllers that manage static pods.", + "forceRedeploymentReason": "forceRedeploymentReason can be used to force the redeployment of the operand by providing a unique string. This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work this time instead of failing again on the same config.", + "failedRevisionLimit": "failedRevisionLimit is the number of failed static pod installer revisions to keep on disk and in the api -1 = unlimited, 0 or unset = 5 (default)", + "succeededRevisionLimit": "succeededRevisionLimit is the number of successful static pod installer revisions to keep on disk and in the api -1 = unlimited, 0 or unset = 5 (default)", +} + +func (StaticPodOperatorSpec) SwaggerDoc() map[string]string { + return map_StaticPodOperatorSpec +} + +var map_StaticPodOperatorStatus = map[string]string{ + "": "StaticPodOperatorStatus is status for controllers that manage static pods. There are different needs because individual node status must be tracked.", + "latestAvailableRevision": "latestAvailableRevision is the deploymentID of the most recent deployment", + "latestAvailableRevisionReason": "latestAvailableRevisionReason describe the detailed reason for the most recent deployment", + "nodeStatuses": "nodeStatuses track the deployment values and errors across individual nodes", +} + +func (StaticPodOperatorStatus) SwaggerDoc() map[string]string { + return map_StaticPodOperatorStatus +} + +var map_Authentication = map[string]string{ + "": "Authentication provides information to configure an operator to manage authentication.", +} + +func (Authentication) SwaggerDoc() map[string]string { + return map_Authentication +} + +var map_AuthenticationList = map[string]string{ + "": "AuthenticationList is a collection of items", +} + +func (AuthenticationList) SwaggerDoc() map[string]string { + return map_AuthenticationList +} + +var map_AuthenticationStatus = map[string]string{ + "oauthAPIServer": "OAuthAPIServer holds status specific only to oauth-apiserver", +} + +func (AuthenticationStatus) SwaggerDoc() map[string]string { + return map_AuthenticationStatus +} + +var map_OAuthAPIServerStatus = map[string]string{ + "latestAvailableRevision": "LatestAvailableRevision is the latest revision used as suffix of revisioned secrets like encryption-config. 
A new revision causes a new deployment of pods.", +} + +func (OAuthAPIServerStatus) SwaggerDoc() map[string]string { + return map_OAuthAPIServerStatus +} + +var map_CloudCredential = map[string]string{ + "": "CloudCredential provides a means to configure an operator to manage CredentialsRequests.", +} + +func (CloudCredential) SwaggerDoc() map[string]string { + return map_CloudCredential +} + +var map_CloudCredentialSpec = map[string]string{ + "": "CloudCredentialSpec is the specification of the desired behavior of the cloud-credential-operator.", + "credentialsMode": "CredentialsMode allows informing CCO that it should not attempt to dynamically determine the root cloud credentials capabilities, and it should just run in the specified mode. It also allows putting the operator into \"manual\" mode if desired. Leaving the field in default mode runs CCO so that the cluster's cloud credentials will be dynamically probed for capabilities (on supported clouds/platforms). Supported modes:\n AWS/Azure/GCP: \"\" (Default), \"Mint\", \"Passthrough\", \"Manual\"\n Others: Do not set value as other platforms only support running in \"Passthrough\"", +} + +func (CloudCredentialSpec) SwaggerDoc() map[string]string { + return map_CloudCredentialSpec +} + +var map_CloudCredentialStatus = map[string]string{ + "": "CloudCredentialStatus defines the observed status of the cloud-credential-operator.", +} + +func (CloudCredentialStatus) SwaggerDoc() map[string]string { + return map_CloudCredentialStatus +} + +var map_Config = map[string]string{ + "": "Config provides information to configure the config operator. It handles installation, migration or synchronization of cloud based cluster configurations like AWS or Azure.", + "spec": "spec is the specification of the desired behavior of the Config Operator.", + "status": "status defines the observed status of the Config Operator.", +} + +func (Config) SwaggerDoc() map[string]string { + return map_Config +} + +var map_ConfigList = map[string]string{ + "": "ConfigList is a collection of items", + "items": "Items contains the items", +} + +func (ConfigList) SwaggerDoc() map[string]string { + return map_ConfigList +} + +var map_AddPage = map[string]string{ + "": "AddPage allows customizing actions on the Add page in developer perspective.", + "disabledActions": "disabledActions is a list of actions that are not shown to users. Each action in the list is represented by its ID.", +} + +func (AddPage) SwaggerDoc() map[string]string { + return map_AddPage +} + +var map_Console = map[string]string{ + "": "Console provides a means to configure an operator to manage the console.", +} + +func (Console) SwaggerDoc() map[string]string { + return map_Console +} + +var map_ConsoleConfigRoute = map[string]string{ + "": "ConsoleConfigRoute holds information on external route access to console. DEPRECATED", + "hostname": "hostname is the desired custom domain under which console will be available.", + "secret": "secret points to secret in the openshift-config namespace that contains custom certificate and key and needs to be created manually by the cluster admin. 
Referenced Secret is required to contain following key value pairs: - \"tls.crt\" - to specifies custom certificate - \"tls.key\" - to specifies private key of the custom certificate If the custom hostname uses the default routing suffix of the cluster, the Secret specification for a serving certificate will not be needed.", +} + +func (ConsoleConfigRoute) SwaggerDoc() map[string]string { + return map_ConsoleConfigRoute +} + +var map_ConsoleCustomization = map[string]string{ + "": "ConsoleCustomization defines a list of optional configuration for the console UI.", + "brand": "brand is the default branding of the web console which can be overridden by providing the brand field. There is a limited set of specific brand options. This field controls elements of the console such as the logo. Invalid value will prevent a console rollout.", + "documentationBaseURL": "documentationBaseURL links to external documentation are shown in various sections of the web console. Providing documentationBaseURL will override the default documentation URL. Invalid value will prevent a console rollout.", + "customProductName": "customProductName is the name that will be displayed in page titles, logo alt text, and the about dialog instead of the normal OpenShift product name.", + "customLogoFile": "customLogoFile replaces the default OpenShift logo in the masthead and about dialog. It is a reference to a ConfigMap in the openshift-config namespace. This can be created with a command like 'oc create configmap custom-logo --from-file=/path/to/file -n openshift-config'. Image size must be less than 1 MB due to constraints on the ConfigMap size. The ConfigMap key should include a file extension so that the console serves the file with the correct MIME type. Recommended logo specifications: Dimensions: Max height of 68px and max width of 200px SVG format preferred", + "developerCatalog": "developerCatalog allows to configure the shown developer catalog categories.", + "projectAccess": "projectAccess allows customizing the available list of ClusterRoles in the Developer perspective Project access page which can be used by a project admin to specify roles to other users and restrict access within the project. If set, the list will replace the default ClusterRole options.", + "quickStarts": "quickStarts allows customization of available ConsoleQuickStart resources in console.", + "addPage": "addPage allows customizing actions on the Add page in developer perspective.", +} + +func (ConsoleCustomization) SwaggerDoc() map[string]string { + return map_ConsoleCustomization +} + +var map_ConsoleProviders = map[string]string{ + "": "ConsoleProviders defines a list of optional additional providers of functionality to the console.", + "statuspage": "statuspage contains ID for statuspage.io page that provides status info about.", +} + +func (ConsoleProviders) SwaggerDoc() map[string]string { + return map_ConsoleProviders +} + +var map_ConsoleSpec = map[string]string{ + "": "ConsoleSpec is the specification of the desired behavior of the Console.", + "customization": "customization is used to optionally provide a small set of customization options to the web console.", + "providers": "providers contains configuration for using specific service providers.", + "route": "route contains hostname and secret reference that contains the serving certificate. If a custom route is specified, a new route will be created with the provided hostname, under which console will be available. 
In case of custom hostname uses the default routing suffix of the cluster, the Secret specification for a serving certificate will not be needed. In case of custom hostname points to an arbitrary domain, manual DNS configurations steps are necessary. The default console route will be maintained to reserve the default hostname for console if the custom route is removed. If not specified, default route will be used. DEPRECATED", + "plugins": "plugins defines a list of enabled console plugin names.", +} + +func (ConsoleSpec) SwaggerDoc() map[string]string { + return map_ConsoleSpec +} + +var map_ConsoleStatus = map[string]string{ + "": "ConsoleStatus defines the observed status of the Console.", +} + +func (ConsoleStatus) SwaggerDoc() map[string]string { + return map_ConsoleStatus +} + +var map_DeveloperConsoleCatalogCategory = map[string]string{ + "": "DeveloperConsoleCatalogCategory for the developer console catalog.", + "subcategories": "subcategories defines a list of child categories.", +} + +func (DeveloperConsoleCatalogCategory) SwaggerDoc() map[string]string { + return map_DeveloperConsoleCatalogCategory +} + +var map_DeveloperConsoleCatalogCategoryMeta = map[string]string{ + "": "DeveloperConsoleCatalogCategoryMeta are the key identifiers of a developer catalog category.", + "id": "ID is an identifier used in the URL to enable deep linking in console. ID is required and must have 1-32 URL safe (A-Z, a-z, 0-9, - and _) characters.", + "label": "label defines a category display label. It is required and must have 1-64 characters.", + "tags": "tags is a list of strings that will match the category. A selected category show all items which has at least one overlapping tag between category and item.", +} + +func (DeveloperConsoleCatalogCategoryMeta) SwaggerDoc() map[string]string { + return map_DeveloperConsoleCatalogCategoryMeta +} + +var map_DeveloperConsoleCatalogCustomization = map[string]string{ + "": "DeveloperConsoleCatalogCustomization allow cluster admin to configure developer catalog.", + "categories": "categories which are shown in the developer catalog.", +} + +func (DeveloperConsoleCatalogCustomization) SwaggerDoc() map[string]string { + return map_DeveloperConsoleCatalogCustomization +} + +var map_ProjectAccess = map[string]string{ + "": "ProjectAccess contains options for project access roles", + "availableClusterRoles": "availableClusterRoles is the list of ClusterRole names that are assignable to users through the project access tab.", +} + +func (ProjectAccess) SwaggerDoc() map[string]string { + return map_ProjectAccess +} + +var map_QuickStarts = map[string]string{ + "": "QuickStarts allow cluster admins to customize available ConsoleQuickStart resources.", + "disabled": "disabled is a list of ConsoleQuickStart resource names that are not shown to users.", +} + +func (QuickStarts) SwaggerDoc() map[string]string { + return map_QuickStarts +} + +var map_StatuspageProvider = map[string]string{ + "": "StatuspageProvider provides identity for statuspage account.", + "pageID": "pageID is the unique ID assigned by Statuspage for your page. This must be a public page.", +} + +func (StatuspageProvider) SwaggerDoc() map[string]string { + return map_StatuspageProvider +} + +var map_ClusterCSIDriver = map[string]string{ + "": "ClusterCSIDriver object allows management and configuration of a CSI driver operator installed by default in OpenShift. Name of the object must be name of the CSI driver it operates. 
See CSIDriverName type for list of allowed values.", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (ClusterCSIDriver) SwaggerDoc() map[string]string { + return map_ClusterCSIDriver +} + +var map_ClusterCSIDriverList = map[string]string{ + "": "ClusterCSIDriverList contains a list of ClusterCSIDriver", +} + +func (ClusterCSIDriverList) SwaggerDoc() map[string]string { + return map_ClusterCSIDriverList +} + +var map_ClusterCSIDriverSpec = map[string]string{ + "": "ClusterCSIDriverSpec is the desired behavior of CSI driver operator", +} + +func (ClusterCSIDriverSpec) SwaggerDoc() map[string]string { + return map_ClusterCSIDriverSpec +} + +var map_ClusterCSIDriverStatus = map[string]string{ + "": "ClusterCSIDriverStatus is the observed status of CSI driver operator", +} + +func (ClusterCSIDriverStatus) SwaggerDoc() map[string]string { + return map_ClusterCSIDriverStatus +} + +var map_CSISnapshotController = map[string]string{ + "": "CSISnapshotController provides a means to configure an operator to manage the CSI snapshots. `cluster` is the canonical name.", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (CSISnapshotController) SwaggerDoc() map[string]string { + return map_CSISnapshotController +} + +var map_CSISnapshotControllerList = map[string]string{ + "": "CSISnapshotControllerList contains a list of CSISnapshotControllers.", +} + +func (CSISnapshotControllerList) SwaggerDoc() map[string]string { + return map_CSISnapshotControllerList +} + +var map_CSISnapshotControllerSpec = map[string]string{ + "": "CSISnapshotControllerSpec is the specification of the desired behavior of the CSISnapshotController operator.", +} + +func (CSISnapshotControllerSpec) SwaggerDoc() map[string]string { + return map_CSISnapshotControllerSpec +} + +var map_CSISnapshotControllerStatus = map[string]string{ + "": "CSISnapshotControllerStatus defines the observed status of the CSISnapshotController operator.", +} + +func (CSISnapshotControllerStatus) SwaggerDoc() map[string]string { + return map_CSISnapshotControllerStatus +} + +var map_DNS = map[string]string{ + "": "DNS manages the CoreDNS component to provide a name resolution service for pods and services in the cluster.\n\nThis supports the DNS-based service discovery specification: https://github.com/kubernetes/dns/blob/master/docs/specification.md\n\nMore details: https://kubernetes.io/docs/tasks/administer-cluster/coredns", + "spec": "spec is the specification of the desired behavior of the DNS.", + "status": "status is the most recently observed status of the DNS.", +} + +func (DNS) SwaggerDoc() map[string]string { + return map_DNS +} + +var map_DNSList = map[string]string{ + "": "DNSList contains a list of DNS", +} + +func (DNSList) SwaggerDoc() map[string]string { + return map_DNSList +} + +var map_DNSNodePlacement = map[string]string{ + "": "DNSNodePlacement describes the node scheduling configuration for DNS pods.", + "nodeSelector": "nodeSelector is the node selector applied to DNS pods.\n\nIf empty, the default is used, which is currently the following:\n\n kubernetes.io/os: linux\n\nThis default is subject to change.\n\nIf set, the specified selector is used and replaces the default.", + "tolerations": "tolerations is a list of tolerations applied to DNS pods.\n\nThe default is an empty list. 
This default is subject to change.\n\nSee https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/", +} + +func (DNSNodePlacement) SwaggerDoc() map[string]string { + return map_DNSNodePlacement +} + +var map_DNSSpec = map[string]string{ + "": "DNSSpec is the specification of the desired behavior of the DNS.", + "servers": "servers is a list of DNS resolvers that provide name query delegation for one or more subdomains outside the scope of the cluster domain. If servers consists of more than one Server, longest suffix match will be used to determine the Server.\n\nFor example, if there are two Servers, one for \"foo.com\" and another for \"a.foo.com\", and the name query is for \"www.a.foo.com\", it will be routed to the Server with Zone \"a.foo.com\".\n\nIf this field is nil, no servers are created.", + "nodePlacement": "nodePlacement provides explicit control over the scheduling of DNS pods.\n\nGenerally, it is useful to run a DNS pod on every node so that DNS queries are always handled by a local DNS pod instead of going over the network to a DNS pod on another node. However, security policies may require restricting the placement of DNS pods to specific nodes. For example, if a security policy prohibits pods on arbitrary nodes from communicating with the API, a node selector can be specified to restrict DNS pods to nodes that are permitted to communicate with the API. Conversely, if running DNS pods on nodes with a particular taint is desired, a toleration can be specified for that taint.\n\nIf unset, defaults are used. See nodePlacement for more details.", +} + +func (DNSSpec) SwaggerDoc() map[string]string { + return map_DNSSpec +} + +var map_DNSStatus = map[string]string{ + "": "DNSStatus defines the observed status of the DNS.", + "clusterIP": "clusterIP is the service IP through which this DNS is made available.\n\nIn the case of the default DNS, this will be a well known IP that is used as the default nameserver for pods that are using the default ClusterFirst DNS policy.\n\nIn general, this IP can be specified in a pod's spec.dnsConfig.nameservers list or used explicitly when performing name resolution from within the cluster. Example: dig foo.com @\n\nMore info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", + "clusterDomain": "clusterDomain is the local cluster DNS domain suffix for DNS services. This will be a subdomain as defined in RFC 1034, section 3.5: https://tools.ietf.org/html/rfc1034#section-3.5 Example: \"cluster.local\"\n\nMore info: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service", + "conditions": "conditions provide information about the state of the DNS on the cluster.\n\nThese are the supported DNS conditions:\n\n * Available\n - True if the following conditions are met:\n * DNS controller daemonset is available.\n - False if any of those conditions are unsatisfied.", +} + +func (DNSStatus) SwaggerDoc() map[string]string { + return map_DNSStatus +} + +var map_ForwardPlugin = map[string]string{ + "": "ForwardPlugin defines a schema for configuring the CoreDNS forward plugin.", + "upstreams": "upstreams is a list of resolvers to forward name queries for subdomains of Zones. Upstreams are randomized when more than 1 upstream is specified. Each instance of CoreDNS performs health checking of Upstreams. When a healthy upstream returns an error during the exchange, another resolver is tried from Upstreams. 
Each upstream is represented by an IP address or IP:port if the upstream listens on a port other than 53.\n\nA maximum of 15 upstreams is allowed per ForwardPlugin.", +} + +func (ForwardPlugin) SwaggerDoc() map[string]string { + return map_ForwardPlugin +} + +var map_Server = map[string]string{ + "": "Server defines the schema for a server that runs per instance of CoreDNS.", + "name": "name is required and specifies a unique name for the server. Name must comply with the Service Name Syntax of rfc6335.", + "zones": "zones is required and specifies the subdomains that Server is authoritative for. Zones must conform to the rfc1123 definition of a subdomain. Specifying the cluster domain (i.e., \"cluster.local\") is invalid.", + "forwardPlugin": "forwardPlugin defines a schema for configuring CoreDNS to proxy DNS messages to upstream resolvers.", +} + +func (Server) SwaggerDoc() map[string]string { + return map_Server +} + +var map_Etcd = map[string]string{ + "": "Etcd provides information to configure an operator to manage etcd.", +} + +func (Etcd) SwaggerDoc() map[string]string { + return map_Etcd +} + +var map_EtcdList = map[string]string{ + "": "EtcdList is a collection of items", + "items": "Items contains the items", +} + +func (EtcdList) SwaggerDoc() map[string]string { + return map_EtcdList +} + +var map_AWSClassicLoadBalancerParameters = map[string]string{ + "": "AWSClassicLoadBalancerParameters holds configuration parameters for an AWS Classic load balancer.", +} + +func (AWSClassicLoadBalancerParameters) SwaggerDoc() map[string]string { + return map_AWSClassicLoadBalancerParameters +} + +var map_AWSLoadBalancerParameters = map[string]string{ + "": "AWSLoadBalancerParameters provides configuration settings that are specific to AWS load balancers.", + "type": "type is the type of AWS load balancer to instantiate for an ingresscontroller.\n\nValid values are:\n\n* \"Classic\": A Classic Load Balancer that makes routing decisions at either\n the transport layer (TCP/SSL) or the application layer (HTTP/HTTPS). See\n the following for additional details:\n\n https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#clb\n\n* \"NLB\": A Network Load Balancer that makes routing decisions at the\n transport layer (TCP/SSL). See the following for additional details:\n\n https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#nlb", + "classicLoadBalancer": "classicLoadBalancerParameters holds configuration parameters for an AWS classic load balancer. Present only if type is Classic.", + "networkLoadBalancer": "networkLoadBalancerParameters holds configuration parameters for an AWS network load balancer. Present only if type is NLB.", +} + +func (AWSLoadBalancerParameters) SwaggerDoc() map[string]string { + return map_AWSLoadBalancerParameters +} + +var map_AWSNetworkLoadBalancerParameters = map[string]string{ + "": "AWSNetworkLoadBalancerParameters holds configuration parameters for an AWS Network load balancer.", +} + +func (AWSNetworkLoadBalancerParameters) SwaggerDoc() map[string]string { + return map_AWSNetworkLoadBalancerParameters +} + +var map_AccessLogging = map[string]string{ + "": "AccessLogging describes how client requests should be logged.", + "destination": "destination is where access logs go.", + "httpLogFormat": "httpLogFormat specifies the format of the log message for an HTTP request.\n\nIf this field is empty, log messages use the implementation's default HTTP log format. 
For HAProxy's default HTTP log format, see the HAProxy documentation: http://cbonte.github.io/haproxy-dconv/2.0/configuration.html#8.2.3\n\nNote that this format only applies to cleartext HTTP connections and to secure HTTP connections for which the ingress controller terminates encryption (that is, edge-terminated or reencrypt connections). It does not affect the log format for TLS passthrough connections.", + "httpCaptureHeaders": "httpCaptureHeaders defines HTTP headers that should be captured in access logs. If this field is empty, no headers are captured.\n\nNote that this option only applies to cleartext HTTP connections and to secure HTTP connections for which the ingress controller terminates encryption (that is, edge-terminated or reencrypt connections). Headers cannot be captured for TLS passthrough connections.", + "httpCaptureCookies": "httpCaptureCookies specifies HTTP cookies that should be captured in access logs. If this field is empty, no cookies are captured.", +} + +func (AccessLogging) SwaggerDoc() map[string]string { + return map_AccessLogging +} + +var map_ContainerLoggingDestinationParameters = map[string]string{ + "": "ContainerLoggingDestinationParameters describes parameters for the Container logging destination type.", +} + +func (ContainerLoggingDestinationParameters) SwaggerDoc() map[string]string { + return map_ContainerLoggingDestinationParameters +} + +var map_EndpointPublishingStrategy = map[string]string{ + "": "EndpointPublishingStrategy is a way to publish the endpoints of an IngressController, and represents the type and any additional configuration for a specific type.", + "type": "type is the publishing strategy to use. Valid values are:\n\n* LoadBalancerService\n\nPublishes the ingress controller using a Kubernetes LoadBalancer Service.\n\nIn this configuration, the ingress controller deployment uses container networking. A LoadBalancer Service is created to publish the deployment.\n\nSee: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer\n\nIf domain is set, a wildcard DNS record will be managed to point at the LoadBalancer Service's external name. DNS records are managed only in DNS zones defined by dns.config.openshift.io/cluster .spec.publicZone and .spec.privateZone.\n\nWildcard DNS management is currently supported only on the AWS, Azure, and GCP platforms.\n\n* HostNetwork\n\nPublishes the ingress controller on node ports where the ingress controller is deployed.\n\nIn this configuration, the ingress controller deployment uses host networking, bound to node ports 80 and 443. The user is responsible for configuring an external load balancer to publish the ingress controller via the node ports.\n\n* Private\n\nDoes not publish the ingress controller.\n\nIn this configuration, the ingress controller deployment uses container networking, and is not explicitly published. The user must manually publish the ingress controller.\n\n* NodePortService\n\nPublishes the ingress controller using a Kubernetes NodePort Service.\n\nIn this configuration, the ingress controller deployment uses container networking. A NodePort Service is created to publish the deployment. The specific node ports are dynamically allocated by OpenShift; however, to support static port allocations, user changes to the node port field of the managed NodePort Service will preserved.", + "loadBalancer": "loadBalancer holds parameters for the load balancer. 
Present only if type is LoadBalancerService.", + "hostNetwork": "hostNetwork holds parameters for the HostNetwork endpoint publishing strategy. Present only if type is HostNetwork.", + "private": "private holds parameters for the Private endpoint publishing strategy. Present only if type is Private.", + "nodePort": "nodePort holds parameters for the NodePortService endpoint publishing strategy. Present only if type is NodePortService.", +} + +func (EndpointPublishingStrategy) SwaggerDoc() map[string]string { + return map_EndpointPublishingStrategy +} + +var map_GCPLoadBalancerParameters = map[string]string{ + "": "GCPLoadBalancerParameters provides configuration settings that are specific to GCP load balancers.", + "clientAccess": "clientAccess describes how client access is restricted for internal load balancers.\n\nValid values are: * \"Global\": Specifying an internal load balancer with Global client access\n allows clients from any region within the VPC to communicate with the load\n balancer.\n\n https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing#global_access\n\n* \"Local\": Specifying an internal load balancer with Local client access\n means only clients within the same region (and VPC) as the GCP load balancer\n can communicate with the load balancer. Note that this is the default behavior.\n\n https://cloud.google.com/load-balancing/docs/internal#client_access", +} + +func (GCPLoadBalancerParameters) SwaggerDoc() map[string]string { + return map_GCPLoadBalancerParameters +} + +var map_HostNetworkStrategy = map[string]string{ + "": "HostNetworkStrategy holds parameters for the HostNetwork endpoint publishing strategy.", + "protocol": "protocol specifies whether the IngressController expects incoming connections to use plain TCP or whether the IngressController expects PROXY protocol.\n\nPROXY protocol can be used with load balancers that support it to communicate the source addresses of client connections when forwarding those connections to the IngressController. Using PROXY protocol enables the IngressController to report those source addresses instead of reporting the load balancer's address in HTTP headers and logs. Note that enabling PROXY protocol on the IngressController will cause connections to fail if you are not using a load balancer that uses PROXY protocol to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for information about PROXY protocol.\n\nThe following values are valid for this field:\n\n* The empty string. * \"TCP\". * \"PROXY\".\n\nThe empty string specifies the default, which is TCP without PROXY protocol. Note that the default is subject to change.", +} + +func (HostNetworkStrategy) SwaggerDoc() map[string]string { + return map_HostNetworkStrategy +} + +var map_IngressController = map[string]string{ + "": "IngressController describes a managed ingress controller for the cluster. The controller can service OpenShift Route and Kubernetes Ingress resources.\n\nWhen an IngressController is created, a new ingress controller deployment is created to allow external traffic to reach the services that expose Ingress or Route resources. Updating this resource may lead to disruption for public facing network connections as a new ingress controller revision may be rolled out.\n\nhttps://kubernetes.io/docs/concepts/services-networking/ingress-controllers\n\nWhenever possible, sensible defaults for the platform are used. 
See each field for more details.", + "spec": "spec is the specification of the desired behavior of the IngressController.", + "status": "status is the most recently observed status of the IngressController.", +} + +func (IngressController) SwaggerDoc() map[string]string { + return map_IngressController +} + +var map_IngressControllerCaptureHTTPCookie = map[string]string{ + "": "IngressControllerCaptureHTTPCookie describes an HTTP cookie that should be captured.", + "maxLength": "maxLength specifies a maximum length of the string that will be logged, which includes the cookie name, cookie value, and one-character delimiter. If the log entry exceeds this length, the value will be truncated in the log message. Note that the ingress controller may impose a separate bound on the total length of HTTP headers in a request.", +} + +func (IngressControllerCaptureHTTPCookie) SwaggerDoc() map[string]string { + return map_IngressControllerCaptureHTTPCookie +} + +var map_IngressControllerCaptureHTTPCookieUnion = map[string]string{ + "": "IngressControllerCaptureHTTPCookieUnion describes optional fields of an HTTP cookie that should be captured.", + "matchType": "matchType specifies the type of match to be performed on the cookie name. Allowed values are \"Exact\" for an exact string match and \"Prefix\" for a string prefix match. If \"Exact\" is specified, a name must be specified in the name field. If \"Prefix\" is provided, a prefix must be specified in the namePrefix field. For example, specifying matchType \"Prefix\" and namePrefix \"foo\" will capture a cookie named \"foo\" or \"foobar\" but not one named \"bar\". The first matching cookie is captured.", + "name": "name specifies a cookie name. Its value must be a valid HTTP cookie name as defined in RFC 6265 section 4.1.", + "namePrefix": "namePrefix specifies a cookie name prefix. Its value must be a valid HTTP cookie name as defined in RFC 6265 section 4.1.", +} + +func (IngressControllerCaptureHTTPCookieUnion) SwaggerDoc() map[string]string { + return map_IngressControllerCaptureHTTPCookieUnion +} + +var map_IngressControllerCaptureHTTPHeader = map[string]string{ + "": "IngressControllerCaptureHTTPHeader describes an HTTP header that should be captured.", + "name": "name specifies a header name. Its value must be a valid HTTP header name as defined in RFC 2616 section 4.2.", + "maxLength": "maxLength specifies a maximum length for the header value. If a header value exceeds this length, the value will be truncated in the log message. 
Note that the ingress controller may impose a separate bound on the total length of HTTP headers in a request.", +} + +func (IngressControllerCaptureHTTPHeader) SwaggerDoc() map[string]string { + return map_IngressControllerCaptureHTTPHeader +} + +var map_IngressControllerCaptureHTTPHeaders = map[string]string{ + "": "IngressControllerCaptureHTTPHeaders specifies which HTTP headers the IngressController captures.", + "request": "request specifies which HTTP request headers to capture.\n\nIf this field is empty, no request headers are captured.", + "response": "response specifies which HTTP response headers to capture.\n\nIf this field is empty, no response headers are captured.", +} + +func (IngressControllerCaptureHTTPHeaders) SwaggerDoc() map[string]string { + return map_IngressControllerCaptureHTTPHeaders +} + +var map_IngressControllerHTTPHeaders = map[string]string{ + "": "IngressControllerHTTPHeaders specifies how the IngressController handles certain HTTP headers.", + "forwardedHeaderPolicy": "forwardedHeaderPolicy specifies when and how the IngressController sets the Forwarded, X-Forwarded-For, X-Forwarded-Host, X-Forwarded-Port, X-Forwarded-Proto, and X-Forwarded-Proto-Version HTTP headers. The value may be one of the following:\n\n* \"Append\", which specifies that the IngressController appends the\n headers, preserving existing headers.\n\n* \"Replace\", which specifies that the IngressController sets the\n headers, replacing any existing Forwarded or X-Forwarded-* headers.\n\n* \"IfNone\", which specifies that the IngressController sets the\n headers if they are not already set.\n\n* \"Never\", which specifies that the IngressController never sets the\n headers, preserving any existing headers.\n\nBy default, the policy is \"Append\".", + "uniqueId": "uniqueId describes configuration for a custom HTTP header that the ingress controller should inject into incoming HTTP requests. Typically, this header is configured to have a value that is unique to the HTTP request. The header can be used by applications or included in access logs to facilitate tracing individual HTTP requests.\n\nIf this field is empty, no such header is injected into requests.", + "headerNameCaseAdjustments": "headerNameCaseAdjustments specifies case adjustments that can be applied to HTTP header names. Each adjustment is specified as an HTTP header name with the desired capitalization. For example, specifying \"X-Forwarded-For\" indicates that the \"x-forwarded-for\" HTTP header should be adjusted to have the specified capitalization.\n\nThese adjustments are only applied to cleartext, edge-terminated, and re-encrypt routes, and only when using HTTP/1.\n\nFor request headers, these adjustments are applied only for routes that have the haproxy.router.openshift.io/h1-adjust-case=true annotation. For response headers, these adjustments are applied to all HTTP responses.\n\nIf this field is empty, no request headers are adjusted.", +} + +func (IngressControllerHTTPHeaders) SwaggerDoc() map[string]string { + return map_IngressControllerHTTPHeaders +} + +var map_IngressControllerHTTPUniqueIdHeaderPolicy = map[string]string{ + "": "IngressControllerHTTPUniqueIdHeaderPolicy describes configuration for a unique id header.", + "name": "name specifies the name of the HTTP header (for example, \"unique-id\") that the ingress controller should inject into HTTP requests. The field's value must be a valid HTTP header name as defined in RFC 2616 section 4.2. 
If the field is empty, no header is injected.", + "format": "format specifies the format for the injected HTTP header's value. This field has no effect unless name is specified. For the HAProxy-based ingress controller implementation, this format uses the same syntax as the HTTP log format. If the field is empty, the default value is \"%{+X}o\\ %ci:%cp_%fi:%fp_%Ts_%rt:%pid\"; see the corresponding HAProxy documentation: http://cbonte.github.io/haproxy-dconv/2.0/configuration.html#8.2.3", +} + +func (IngressControllerHTTPUniqueIdHeaderPolicy) SwaggerDoc() map[string]string { + return map_IngressControllerHTTPUniqueIdHeaderPolicy +} + +var map_IngressControllerList = map[string]string{ + "": "IngressControllerList contains a list of IngressControllers.", +} + +func (IngressControllerList) SwaggerDoc() map[string]string { + return map_IngressControllerList +} + +var map_IngressControllerLogging = map[string]string{ + "": "IngressControllerLogging describes what should be logged where.", + "access": "access describes how the client requests should be logged.\n\nIf this field is empty, access logging is disabled.", +} + +func (IngressControllerLogging) SwaggerDoc() map[string]string { + return map_IngressControllerLogging +} + +var map_IngressControllerSpec = map[string]string{ + "": "IngressControllerSpec is the specification of the desired behavior of the IngressController.", + "domain": "domain is a DNS name serviced by the ingress controller and is used to configure multiple features:\n\n* For the LoadBalancerService endpoint publishing strategy, domain is\n used to configure DNS records. See endpointPublishingStrategy.\n\n* When using a generated default certificate, the certificate will be valid\n for domain and its subdomains. See defaultCertificate.\n\n* The value is published to individual Route statuses so that end-users\n know where to target external DNS records.\n\ndomain must be unique among all IngressControllers, and cannot be updated.\n\nIf empty, defaults to ingress.config.openshift.io/cluster .spec.domain.", + "httpErrorCodePages": "httpErrorCodePages specifies a configmap with custom error pages. The administrator must create this configmap in the openshift-config namespace. This configmap should have keys in the format \"error-page-.http\", where is an HTTP error code. For example, \"error-page-503.http\" defines an error page for HTTP 503 responses. Currently only error pages for 503 and 404 responses can be customized. Each value in the configmap should be the full response, including HTTP headers. Eg- https://raw.githubusercontent.com/openshift/router/fadab45747a9b30cc3f0a4b41ad2871f95827a93/images/router/haproxy/conf/error-page-503.http If this field is empty, the ingress controller uses the default error pages.", + "replicas": "replicas is the desired number of ingress controller replicas. 
If unset, defaults to 2.", + "endpointPublishingStrategy": "endpointPublishingStrategy is used to publish the ingress controller endpoints to other networks, enable load balancer integrations, etc.\n\nIf unset, the default is based on infrastructure.config.openshift.io/cluster .status.platform:\n\n AWS: LoadBalancerService (with External scope)\n Azure: LoadBalancerService (with External scope)\n GCP: LoadBalancerService (with External scope)\n IBMCloud: LoadBalancerService (with External scope)\n Libvirt: HostNetwork\n\nAny other platform types (including None) default to HostNetwork.\n\nendpointPublishingStrategy cannot be updated.", + "defaultCertificate": "defaultCertificate is a reference to a secret containing the default certificate served by the ingress controller. When Routes don't specify their own certificate, defaultCertificate is used.\n\nThe secret must contain the following keys and data:\n\n tls.crt: certificate file contents\n tls.key: key file contents\n\nIf unset, a wildcard certificate is automatically generated and used. The certificate is valid for the ingress controller domain (and subdomains) and the generated certificate's CA will be automatically integrated with the cluster's trust store.\n\nIf a wildcard certificate is used and shared by multiple HTTP/2 enabled routes (which implies ALPN) then clients (i.e., notably browsers) are at liberty to reuse open connections. This means a client can reuse a connection to another route and that is likely to fail. This behaviour is generally known as connection coalescing.\n\nThe in-use certificate (whether generated or user-specified) will be automatically integrated with OpenShift's built-in OAuth server.", + "namespaceSelector": "namespaceSelector is used to filter the set of namespaces serviced by the ingress controller. This is useful for implementing shards.\n\nIf unset, the default is no filtering.", + "routeSelector": "routeSelector is used to filter the set of Routes serviced by the ingress controller. This is useful for implementing shards.\n\nIf unset, the default is no filtering.", + "nodePlacement": "nodePlacement enables explicit control over the scheduling of the ingress controller.\n\nIf unset, defaults are used. See NodePlacement for more details.", + "tlsSecurityProfile": "tlsSecurityProfile specifies settings for TLS connections for ingresscontrollers.\n\nIf unset, the default is based on the apiservers.config.openshift.io/cluster resource.\n\nNote that when using the Old, Intermediate, and Modern profile types, the effective profile configuration is subject to change between releases. For example, given a specification to use the Intermediate profile deployed on release X.Y.Z, an upgrade to release X.Y.Z+1 may cause a new profile configuration to be applied to the ingress controller, resulting in a rollout.\n\nNote that the minimum TLS version for ingress controllers is 1.1, and the maximum TLS version is 1.2. An implication of this restriction is that the Modern TLS profile type cannot be used because it requires TLS 1.3.", + "routeAdmission": "routeAdmission defines a policy for handling new route claims (for example, to allow or deny claims across namespaces).\n\nIf empty, defaults will be applied. See specific routeAdmission fields for details about their defaults.", + "logging": "logging defines parameters for what should be logged where. 
If this field is empty, operational logs are enabled but access logs are disabled.", + "httpHeaders": "httpHeaders defines policy for HTTP headers.\n\nIf this field is empty, the default values are used.", + "tuningOptions": "tuningOptions defines parameters for adjusting the performance of ingress controller pods. All fields are optional and will use their respective defaults if not set. See specific tuningOptions fields for more details.\n\nSetting fields within tuningOptions is generally not recommended. The default values are suitable for most configurations.", + "unsupportedConfigOverrides": "unsupportedConfigOverrides allows specifying unsupported configuration options. Its use is unsupported.", +} + +func (IngressControllerSpec) SwaggerDoc() map[string]string { + return map_IngressControllerSpec +} + +var map_IngressControllerStatus = map[string]string{ + "": "IngressControllerStatus defines the observed status of the IngressController.", + "availableReplicas": "availableReplicas is number of observed available replicas according to the ingress controller deployment.", + "selector": "selector is a label selector, in string format, for ingress controller pods corresponding to the IngressController. The number of matching pods should equal the value of availableReplicas.", + "domain": "domain is the actual domain in use.", + "endpointPublishingStrategy": "endpointPublishingStrategy is the actual strategy in use.", + "conditions": "conditions is a list of conditions and their status.\n\nAvailable means the ingress controller deployment is available and servicing route and ingress resources (i.e, .status.availableReplicas equals .spec.replicas)\n\nThere are additional conditions which indicate the status of other ingress controller features and capabilities.\n\n * LoadBalancerManaged\n - True if the following conditions are met:\n * The endpoint publishing strategy requires a service load balancer.\n - False if any of those conditions are unsatisfied.\n\n * LoadBalancerReady\n - True if the following conditions are met:\n * A load balancer is managed.\n * The load balancer is ready.\n - False if any of those conditions are unsatisfied.\n\n * DNSManaged\n - True if the following conditions are met:\n * The endpoint publishing strategy and platform support DNS.\n * The ingress controller domain is set.\n * dns.config.openshift.io/cluster configures DNS zones.\n - False if any of those conditions are unsatisfied.\n\n * DNSReady\n - True if the following conditions are met:\n * DNS is managed.\n * DNS records have been successfully created.\n - False if any of those conditions are unsatisfied.", + "tlsProfile": "tlsProfile is the TLS connection configuration that is in effect.", + "observedGeneration": "observedGeneration is the most recent generation observed.", +} + +func (IngressControllerStatus) SwaggerDoc() map[string]string { + return map_IngressControllerStatus +} + +var map_IngressControllerTuningOptions = map[string]string{ + "": "IngressControllerTuningOptions specifies options for tuning the performance of ingress controller pods", + "headerBufferBytes": "headerBufferBytes describes how much memory should be reserved (in bytes) for IngressController connection sessions. Note that this value must be at least 16384 if HTTP/2 is enabled for the IngressController (https://tools.ietf.org/html/rfc7540). 
If this field is empty, the IngressController will use a default value of 32768 bytes.\n\nSetting this field is generally not recommended as headerBufferBytes values that are too small may break the IngressController and headerBufferBytes values that are too large could cause the IngressController to use significantly more memory than necessary.", + "headerBufferMaxRewriteBytes": "headerBufferMaxRewriteBytes describes how much memory should be reserved (in bytes) from headerBufferBytes for HTTP header rewriting and appending for IngressController connection sessions. Note that incoming HTTP requests will be limited to (headerBufferBytes - headerBufferMaxRewriteBytes) bytes, meaning headerBufferBytes must be greater than headerBufferMaxRewriteBytes. If this field is empty, the IngressController will use a default value of 8192 bytes.\n\nSetting this field is generally not recommended as headerBufferMaxRewriteBytes values that are too small may break the IngressController and headerBufferMaxRewriteBytes values that are too large could cause the IngressController to use significantly more memory than necessary.", + "threadCount": "threadCount defines the number of threads created per HAProxy process. Creating more threads allows each ingress controller pod to handle more connections, at the cost of more system resources being used. HAProxy currently supports up to 64 threads. If this field is empty, the IngressController will use the default value. The current default is 4 threads, but this may change in future releases.\n\nSetting this field is generally not recommended. Increasing the number of HAProxy threads allows ingress controller pods to utilize more CPU time under load, potentially starving other pods if set too high. Reducing the number of threads may cause the ingress controller to perform poorly.", +} + +func (IngressControllerTuningOptions) SwaggerDoc() map[string]string { + return map_IngressControllerTuningOptions +} + +var map_LoadBalancerStrategy = map[string]string{ + "": "LoadBalancerStrategy holds parameters for a load balancer.", + "scope": "scope indicates the scope at which the load balancer is exposed. Possible values are \"External\" and \"Internal\".", + "providerParameters": "providerParameters holds desired load balancer information specific to the underlying infrastructure provider.\n\nIf empty, defaults will be applied. See specific providerParameters fields for details about their defaults.", +} + +func (LoadBalancerStrategy) SwaggerDoc() map[string]string { + return map_LoadBalancerStrategy +} + +var map_LoggingDestination = map[string]string{ + "": "LoggingDestination describes a destination for log messages.", + "type": "type is the type of destination for logs. It must be one of the following:\n\n* Container\n\nThe ingress operator configures the sidecar container named \"logs\" on the ingress controller pod and configures the ingress controller to write logs to the sidecar. The logs are then available as container logs. The expectation is that the administrator configures a custom logging solution that reads logs from this sidecar. Note that using container logs means that logs may be dropped if the rate of logs exceeds the container runtime's or the custom logging solution's capacity.\n\n* Syslog\n\nLogs are sent to a syslog endpoint. The administrator must specify an endpoint that can receive syslog messages. The expectation is that the administrator has configured a custom syslog instance.", + "syslog": "syslog holds parameters for a syslog endpoint. 
Present only if type is Syslog.", + "container": "container holds parameters for the Container logging destination. Present only if type is Container.", +} + +func (LoggingDestination) SwaggerDoc() map[string]string { + return map_LoggingDestination +} + +var map_NodePlacement = map[string]string{ + "": "NodePlacement describes node scheduling configuration for an ingress controller.", + "nodeSelector": "nodeSelector is the node selector applied to ingress controller deployments.\n\nIf unset, the default is:\n\n kubernetes.io/os: linux\n node-role.kubernetes.io/worker: ''\n\nIf set, the specified selector is used and replaces the default.", + "tolerations": "tolerations is a list of tolerations applied to ingress controller deployments.\n\nThe default is an empty list.\n\nSee https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/", +} + +func (NodePlacement) SwaggerDoc() map[string]string { + return map_NodePlacement +} + +var map_NodePortStrategy = map[string]string{ + "": "NodePortStrategy holds parameters for the NodePortService endpoint publishing strategy.", + "protocol": "protocol specifies whether the IngressController expects incoming connections to use plain TCP or whether the IngressController expects PROXY protocol.\n\nPROXY protocol can be used with load balancers that support it to communicate the source addresses of client connections when forwarding those connections to the IngressController. Using PROXY protocol enables the IngressController to report those source addresses instead of reporting the load balancer's address in HTTP headers and logs. Note that enabling PROXY protocol on the IngressController will cause connections to fail if you are not using a load balancer that uses PROXY protocol to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for information about PROXY protocol.\n\nThe following values are valid for this field:\n\n* The empty string. * \"TCP\". * \"PROXY\".\n\nThe empty string specifies the default, which is TCP without PROXY protocol. Note that the default is subject to change.", +} + +func (NodePortStrategy) SwaggerDoc() map[string]string { + return map_NodePortStrategy +} + +var map_PrivateStrategy = map[string]string{ + "": "PrivateStrategy holds parameters for the Private endpoint publishing strategy.", +} + +func (PrivateStrategy) SwaggerDoc() map[string]string { + return map_PrivateStrategy +} + +var map_ProviderLoadBalancerParameters = map[string]string{ + "": "ProviderLoadBalancerParameters holds desired load balancer information specific to the underlying infrastructure provider.", + "type": "type is the underlying infrastructure provider for the load balancer. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"OpenStack\", and \"VSphere\".", + "aws": "aws provides configuration settings that are specific to AWS load balancers.\n\nIf empty, defaults will be applied. See specific aws fields for details about their defaults.", + "gcp": "gcp provides configuration settings that are specific to GCP load balancers.\n\nIf empty, defaults will be applied. 
See specific gcp fields for details about their defaults.", +} + +func (ProviderLoadBalancerParameters) SwaggerDoc() map[string]string { + return map_ProviderLoadBalancerParameters +} + +var map_RouteAdmissionPolicy = map[string]string{ + "": "RouteAdmissionPolicy is an admission policy for allowing new route claims.", + "namespaceOwnership": "namespaceOwnership describes how host name claims across namespaces should be handled.\n\nValue must be one of:\n\n- Strict: Do not allow routes in different namespaces to claim the same host.\n\n- InterNamespaceAllowed: Allow routes to claim different paths of the same\n host name across namespaces.\n\nIf empty, the default is Strict.", + "wildcardPolicy": "wildcardPolicy describes how routes with wildcard policies should be handled for the ingress controller. WildcardPolicy controls use of routes [1] exposed by the ingress controller based on the route's wildcard policy.\n\n[1] https://github.com/openshift/api/blob/master/route/v1/types.go\n\nNote: Updating WildcardPolicy from WildcardsAllowed to WildcardsDisallowed will cause admitted routes with a wildcard policy of Subdomain to stop working. These routes must be updated to a wildcard policy of None to be readmitted by the ingress controller.\n\nWildcardPolicy supports WildcardsAllowed and WildcardsDisallowed values.\n\nIf empty, defaults to \"WildcardsDisallowed\".", +} + +func (RouteAdmissionPolicy) SwaggerDoc() map[string]string { + return map_RouteAdmissionPolicy +} + +var map_SyslogLoggingDestinationParameters = map[string]string{ + "": "SyslogLoggingDestinationParameters describes parameters for the Syslog logging destination type.", + "address": "address is the IP address of the syslog endpoint that receives log messages.", + "port": "port is the UDP port number of the syslog endpoint that receives log messages.", + "facility": "facility specifies the syslog facility of log messages.\n\nIf this field is empty, the facility is \"local1\".", +} + +func (SyslogLoggingDestinationParameters) SwaggerDoc() map[string]string { + return map_SyslogLoggingDestinationParameters +} + +var map_KubeAPIServer = map[string]string{ + "": "KubeAPIServer provides information to configure an operator to manage kube-apiserver.", + "spec": "spec is the specification of the desired behavior of the Kubernetes API Server", + "status": "status is the most recently observed status of the Kubernetes API Server", +} + +func (KubeAPIServer) SwaggerDoc() map[string]string { + return map_KubeAPIServer +} + +var map_KubeAPIServerList = map[string]string{ + "": "KubeAPIServerList is a collection of items", + "items": "Items contains the items", +} + +func (KubeAPIServerList) SwaggerDoc() map[string]string { + return map_KubeAPIServerList +} + +var map_KubeControllerManager = map[string]string{ + "": "KubeControllerManager provides information to configure an operator to manage kube-controller-manager.", + "spec": "spec is the specification of the desired behavior of the Kubernetes Controller Manager", + "status": "status is the most recently observed status of the Kubernetes Controller Manager", +} + +func (KubeControllerManager) SwaggerDoc() map[string]string { + return map_KubeControllerManager +} + +var map_KubeControllerManagerList = map[string]string{ + "": "KubeControllerManagerList is a collection of items", + "items": "Items contains the items", +} + +func (KubeControllerManagerList) SwaggerDoc() map[string]string { + return map_KubeControllerManagerList +} + +var map_KubeStorageVersionMigrator = map[string]string{ 
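+ // These generated maps back the SwaggerDoc() methods defined alongside
+ // them; the empty-string key carries the type-level description. A
+ // minimal lookup sketch, assuming only the types in this file:
+ //
+ //     docs := (KubeStorageVersionMigrator{}).SwaggerDoc()
+ //     typeDoc := docs[""] // description of the type itself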
+ "": "KubeStorageVersionMigrator provides information to configure an operator to manage kube-storage-version-migrator.", +} + +func (KubeStorageVersionMigrator) SwaggerDoc() map[string]string { + return map_KubeStorageVersionMigrator +} + +var map_KubeStorageVersionMigratorList = map[string]string{ + "": "KubeStorageVersionMigratorList is a collection of items", + "items": "Items contains the items", +} + +func (KubeStorageVersionMigratorList) SwaggerDoc() map[string]string { + return map_KubeStorageVersionMigratorList +} + +var map_AdditionalNetworkDefinition = map[string]string{ + "": "AdditionalNetworkDefinition configures an extra network that is available but not created by default. Instead, pods must request them by name. type must be specified, along with exactly one \"Config\" that matches the type.", + "type": "type is the type of network The supported values are NetworkTypeRaw, NetworkTypeSimpleMacvlan", + "name": "name is the name of the network. This will be populated in the resulting CRD This must be unique.", + "namespace": "namespace is the namespace of the network. This will be populated in the resulting CRD If not given the network will be created in the default namespace.", + "rawCNIConfig": "rawCNIConfig is the raw CNI configuration json to create in the NetworkAttachmentDefinition CRD", + "simpleMacvlanConfig": "SimpleMacvlanConfig configures the macvlan interface in case of type:NetworkTypeSimpleMacvlan", +} + +func (AdditionalNetworkDefinition) SwaggerDoc() map[string]string { + return map_AdditionalNetworkDefinition +} + +var map_ClusterNetworkEntry = map[string]string{ + "": "ClusterNetworkEntry is a subnet from which to allocate PodIPs. A network of size HostPrefix (in CIDR notation) will be allocated when nodes join the cluster. If the HostPrefix field is not used by the plugin, it can be left unset. Not all network providers support multiple ClusterNetworks", +} + +func (ClusterNetworkEntry) SwaggerDoc() map[string]string { + return map_ClusterNetworkEntry +} + +var map_DefaultNetworkDefinition = map[string]string{ + "": "DefaultNetworkDefinition represents a single network plugin's configuration. type must be specified, along with exactly one \"Config\" that matches the type.", + "type": "type is the type of network All NetworkTypes are supported except for NetworkTypeRaw", + "openshiftSDNConfig": "openShiftSDNConfig configures the openshift-sdn plugin", + "ovnKubernetesConfig": "oVNKubernetesConfig configures the ovn-kubernetes plugin. This is currently not implemented.", + "kuryrConfig": "KuryrConfig configures the kuryr plugin", +} + +func (DefaultNetworkDefinition) SwaggerDoc() map[string]string { + return map_DefaultNetworkDefinition +} + +var map_ExportNetworkFlows = map[string]string{ + "netFlow": "netFlow defines the NetFlow configuration.", + "sFlow": "sFlow defines the SFlow configuration.", + "ipfix": "ipfix defines IPFIX configuration.", +} + +func (ExportNetworkFlows) SwaggerDoc() map[string]string { + return map_ExportNetworkFlows +} + +var map_HybridOverlayConfig = map[string]string{ + "hybridClusterNetwork": "HybridClusterNetwork defines a network space given to nodes on an additional overlay network.", + "hybridOverlayVXLANPort": "HybridOverlayVXLANPort defines the VXLAN port number to be used by the additional overlay network. 
Default is 4789",
+}
+
+func (HybridOverlayConfig) SwaggerDoc() map[string]string {
+ return map_HybridOverlayConfig
+}
+
+var map_IPAMConfig = map[string]string{
+ "": "IPAMConfig contains configurations for IPAM (IP Address Management)",
+ "type": "Type is the type of IPAM module that will be used for IP Address Management (IPAM). The supported values are IPAMTypeDHCP, IPAMTypeStatic",
+ "staticIPAMConfig": "StaticIPAMConfig configures the static IP address in case of type:IPAMTypeStatic",
+}
+
+func (IPAMConfig) SwaggerDoc() map[string]string {
+ return map_IPAMConfig
+}
+
+var map_IPFIXConfig = map[string]string{
+ "collectors": "ipfixCollectors is a list of strings formatted as ip:port with a maximum of ten items",
+}
+
+func (IPFIXConfig) SwaggerDoc() map[string]string {
+ return map_IPFIXConfig
+}
+
+var map_KuryrConfig = map[string]string{
+ "": "KuryrConfig configures the Kuryr-Kubernetes SDN",
+ "daemonProbesPort": "The port on which kuryr-daemon will listen for readiness and liveness requests.",
+ "controllerProbesPort": "The port on which kuryr-controller will listen for readiness and liveness requests.",
+ "openStackServiceNetwork": "openStackServiceNetwork contains the CIDR of the network from which to allocate IPs for OpenStack Octavia's Amphora VMs. Please note that with the Amphora driver Octavia uses two IPs from that network for each loadbalancer - one given by OpenShift and a second for VRRP connections. As the first one is managed by OpenShift's IPAM and the second by Neutron's, those need to come from different pools. Therefore `openStackServiceNetwork` needs to be at least twice the size of `serviceNetwork`, and the whole `serviceNetwork` must overlap with `openStackServiceNetwork`. cluster-network-operator will then make sure VRRP IPs are taken from the ranges inside `openStackServiceNetwork` that are not overlapping with `serviceNetwork`, effectively preventing conflicts. If not set, cluster-network-operator will use `serviceNetwork` expanded by decrementing the prefix size by 1.",
+ "enablePortPoolsPrepopulation": "enablePortPoolsPrepopulation when true will make Kuryr prepopulate each newly created port pool with a minimum number of ports. Kuryr uses Neutron port pooling to offset the significant amount of time it takes to create a single port. Instead of creating ports when a pod is being deployed, Kuryr keeps a number of ports ready to be attached to pods. By default port prepopulation is disabled.",
+ "poolMaxPorts": "poolMaxPorts sets a maximum number of free ports that are being kept in a port pool. If the number of ports exceeds this setting, free ports will get deleted. Setting 0 will disable this upper bound, effectively preventing pools from shrinking; this is the default value. For more information about port pools see the enablePortPoolsPrepopulation setting.",
+ "poolMinPorts": "poolMinPorts sets a minimum number of free ports that should be kept in a port pool. If the number of ports is lower than this setting, new ports will get created and added to the pool. The default is 1. For more information about port pools see the enablePortPoolsPrepopulation setting.",
+ "poolBatchPorts": "poolBatchPorts sets a number of ports that should be created in a single batch request to extend the port pool. The default is 3. For more information about port pools see the enablePortPoolsPrepopulation setting.",
+ "mtu": "mtu is the MTU that Kuryr should use when creating pod networks in Neutron.
The value has to be lower than or equal to the MTU of the nodes' network, and Neutron has to allow creation of tenant networks with such an MTU. If unset, Pod networks will be created with the same MTU as the nodes' network.",
+}
+
+func (KuryrConfig) SwaggerDoc() map[string]string {
+ return map_KuryrConfig
+}
+
+var map_NetFlowConfig = map[string]string{
+ "collectors": "netFlow defines the NetFlow collectors that will consume the flow data exported from OVS. It is a list of strings formatted as ip:port with a maximum of ten items",
+}
+
+func (NetFlowConfig) SwaggerDoc() map[string]string {
+ return map_NetFlowConfig
+}
+
+var map_Network = map[string]string{
+ "": "Network describes the cluster's desired network configuration. It is consumed by the cluster-network-operator.",
+}
+
+func (Network) SwaggerDoc() map[string]string {
+ return map_Network
+}
+
+var map_NetworkList = map[string]string{
+ "": "NetworkList contains a list of Network configurations",
+}
+
+func (NetworkList) SwaggerDoc() map[string]string {
+ return map_NetworkList
+}
+
+var map_NetworkMigration = map[string]string{
+ "": "NetworkMigration represents the cluster network configuration.",
+ "networkType": "networkType is the target type of the network migration. The supported values are OpenShiftSDN, OVNKubernetes",
+}
+
+func (NetworkMigration) SwaggerDoc() map[string]string {
+ return map_NetworkMigration
+}
+
+var map_NetworkSpec = map[string]string{
+ "": "NetworkSpec is the top-level network configuration object.",
+ "clusterNetwork": "clusterNetwork is the IP address pool to use for pod IPs. Some network providers, e.g. OpenShift SDN, support multiple ClusterNetworks. Others only support one. This is equivalent to the cluster-cidr.",
+ "serviceNetwork": "serviceNetwork is the IP address pool to use for Service IPs. Currently, all existing network providers only support a single value here, but this is an array to allow for growth.",
+ "defaultNetwork": "defaultNetwork is the \"default\" network that all pods will receive",
+ "additionalNetworks": "additionalNetworks is a list of extra networks to make available to pods when multiple networks are enabled.",
+ "disableMultiNetwork": "disableMultiNetwork specifies whether or not multiple pod network support should be disabled. If unset, this property defaults to 'false' and multiple network support is enabled.",
+ "useMultiNetworkPolicy": "useMultiNetworkPolicy enables a controller which allows for MultiNetworkPolicy objects to be used on additional networks as created by Multus CNI. MultiNetworkPolicy objects are similar to NetworkPolicy objects, but NetworkPolicy objects only apply to the primary interface. With MultiNetworkPolicy, you can control the traffic that a pod can receive over the secondary interfaces. If unset, this property defaults to 'false' and MultiNetworkPolicy objects are ignored. If 'disableMultiNetwork' is 'true' then the value of this field is ignored.",
+ "deployKubeProxy": "deployKubeProxy specifies whether or not a standalone kube-proxy should be deployed by the operator. Some network providers include kube-proxy or similar functionality. If unset, the plugin will attempt to select the correct value, which is false when OpenShift SDN and ovn-kubernetes are used and true otherwise.",
+ "disableNetworkDiagnostics": "disableNetworkDiagnostics specifies whether PodNetworkConnectivityCheck CRs from a test pod to every node, apiserver and LB should be disabled. If unset, this property defaults to 'false' and network diagnostics is enabled.
Setting this to 'true' would reduce the additional load of the pods performing the checks.",
+ "kubeProxyConfig": "kubeProxyConfig lets us configure the desired proxy configuration. If not specified, sensible defaults will be chosen by OpenShift directly. Not consumed by all network providers - currently only openshift-sdn.",
+ "exportNetworkFlows": "exportNetworkFlows enables and configures the export of network flow metadata from the pod network by using protocols NetFlow, SFlow or IPFIX. Currently this is only supported on the OVN-Kubernetes plugin. If unset, flows will not be exported to any collector.",
+ "migration": "migration enables and configures the cluster network migration. Set this to the target network type to allow changing the default network. If unset, the operation of changing the cluster default network plugin will be rejected.",
+}
+
+func (NetworkSpec) SwaggerDoc() map[string]string {
+ return map_NetworkSpec
+}
+
+var map_NetworkStatus = map[string]string{
+ "": "NetworkStatus is detailed operator status, which is distilled up to the Network clusteroperator object.",
+}
+
+func (NetworkStatus) SwaggerDoc() map[string]string {
+ return map_NetworkStatus
+}
+
+var map_OVNKubernetesConfig = map[string]string{
+ "": "ovnKubernetesConfig contains the configuration parameters for networks using the ovn-kubernetes network project",
+ "mtu": "mtu is the MTU to use for the tunnel interface. This must be 100 bytes smaller than the uplink mtu. Default is 1400",
+ "genevePort": "genevePort is the UDP port to be used by Geneve encapsulation. Default is 6081",
+ "hybridOverlayConfig": "HybridOverlayConfig configures an additional overlay network for peers that are not using OVN.",
+ "ipsecConfig": "ipsecConfig enables and configures IPsec for pods on the pod network within the cluster.",
+ "policyAuditConfig": "policyAuditConfig is the configuration for network policy audit events. If unset, reported defaults are used.",
+}
+
+func (OVNKubernetesConfig) SwaggerDoc() map[string]string {
+ return map_OVNKubernetesConfig
+}
+
+var map_OpenShiftSDNConfig = map[string]string{
+ "": "OpenShiftSDNConfig configures the three openshift-sdn plugins",
+ "mode": "mode is one of \"Multitenant\", \"Subnet\", or \"NetworkPolicy\"",
+ "vxlanPort": "vxlanPort is the port to use for all vxlan packets. The default is 4789.",
+ "mtu": "mtu is the mtu to use for the tunnel interface. Defaults to 1450 if unset. This must be 50 bytes smaller than the machine's uplink.",
+ "useExternalOpenvswitch": "useExternalOpenvswitch tells the operator not to install openvswitch, because it will be provided separately. If set, you must provide it yourself.",
+ "enableUnidling": "enableUnidling controls whether or not the service proxy will support idling and unidling of services. By default, unidling is enabled.",
+}
+
+func (OpenShiftSDNConfig) SwaggerDoc() map[string]string {
+ return map_OpenShiftSDNConfig
+}
+
+var map_PolicyAuditConfig = map[string]string{
+ "rateLimit": "rateLimit is the approximate maximum number of messages to generate per-second per-node. If unset, the default of 20 msg/sec is used.",
+ "maxFileSize": "maxFileSize is the max size an ACL audit log file is allowed to reach before rotation occurs. Units are in MB and the default is 50MB",
+ "destination": "destination is the location for policy log messages. Regardless of this config, persistent logs will always be dumped to the host at /var/log/ovn/. Additionally, syslog output may be configured as follows.
Valid values are: - \"libc\" -> to use the libc syslog() function of the host node's journald process - \"udp:host:port\" -> for sending syslog over UDP - \"unix:file\" -> for using the UNIX domain socket directly - \"null\" -> to discard all messages logged to syslog. The default is \"null\"",
+ "syslogFacility": "syslogFacility specifies the RFC5424 facility for generated messages, e.g. \"kern\". Default is \"local0\"",
+}
+
+func (PolicyAuditConfig) SwaggerDoc() map[string]string {
+ return map_PolicyAuditConfig
+}
+
+var map_ProxyConfig = map[string]string{
+ "": "ProxyConfig defines the configuration knobs for kubeproxy. All of these are optional and have sensible defaults",
+ "iptablesSyncPeriod": "An internal kube-proxy parameter. In older releases of OCP, this sometimes needed to be adjusted in large clusters for performance reasons, but this is no longer necessary, and there is no reason to change this from the default value. Default: 30s",
+ "bindAddress": "The address to \"bind\" on. Defaults to 0.0.0.0",
+ "proxyArguments": "Any additional arguments to pass to the kubeproxy process",
+}
+
+func (ProxyConfig) SwaggerDoc() map[string]string {
+ return map_ProxyConfig
+}
+
+var map_SFlowConfig = map[string]string{
+ "collectors": "sFlowCollectors is a list of strings formatted as ip:port with a maximum of ten items",
+}
+
+func (SFlowConfig) SwaggerDoc() map[string]string {
+ return map_SFlowConfig
+}
+
+var map_SimpleMacvlanConfig = map[string]string{
+ "": "SimpleMacvlanConfig contains configurations for the macvlan interface.",
+ "master": "master is the host interface to create the macvlan interface from. If not specified, the default route interface will be used",
+ "ipamConfig": "IPAMConfig configures the IPAM module that will be used for IP Address Management (IPAM).",
+ "mode": "mode is the macvlan mode: bridge, private, vepa, passthru. The default is bridge",
+ "mtu": "mtu is the mtu to use for the macvlan interface.
If unset, the host's kernel will select the value.",
+}
+
+func (SimpleMacvlanConfig) SwaggerDoc() map[string]string {
+ return map_SimpleMacvlanConfig
+}
+
+var map_StaticIPAMAddresses = map[string]string{
+ "": "StaticIPAMAddresses provides IP address and Gateway for static IPAM addresses",
+ "address": "Address is the IP address in CIDR format",
+ "gateway": "Gateway is an IP inside the subnet to designate as the gateway",
+}
+
+func (StaticIPAMAddresses) SwaggerDoc() map[string]string {
+ return map_StaticIPAMAddresses
+}
+
+var map_StaticIPAMConfig = map[string]string{
+ "": "StaticIPAMConfig contains configurations for static IPAM (IP Address Management)",
+ "addresses": "Addresses configures IP addresses for the interface",
+ "routes": "Routes configures IP routes for the interface",
+ "dns": "DNS configures DNS for the interface",
+}
+
+func (StaticIPAMConfig) SwaggerDoc() map[string]string {
+ return map_StaticIPAMConfig
+}
+
+var map_StaticIPAMDNS = map[string]string{
+ "": "StaticIPAMDNS provides DNS related information for static IPAM",
+ "nameservers": "Nameservers points to the DNS servers to use for IP lookup",
+ "domain": "Domain configures the local domain used for short hostname lookups",
+ "search": "Search configures priority ordered search domains for short hostname lookups",
+}
+
+func (StaticIPAMDNS) SwaggerDoc() map[string]string {
+ return map_StaticIPAMDNS
+}
+
+var map_StaticIPAMRoutes = map[string]string{
+ "": "StaticIPAMRoutes provides Destination/Gateway pairs for static IPAM routes",
+ "destination": "Destination is the IP route destination",
+ "gateway": "Gateway is the route's next-hop IP address. If unset, a default gateway is assumed (as determined by the CNI plugin).",
+}
+
+func (StaticIPAMRoutes) SwaggerDoc() map[string]string {
+ return map_StaticIPAMRoutes
+}
+
+var map_OpenShiftAPIServer = map[string]string{
+ "": "OpenShiftAPIServer provides information to configure an operator to manage openshift-apiserver.",
+ "spec": "spec is the specification of the desired behavior of the OpenShift API Server.",
+ "status": "status defines the observed status of the OpenShift API Server.",
+}
+
+func (OpenShiftAPIServer) SwaggerDoc() map[string]string {
+ return map_OpenShiftAPIServer
+}
+
+var map_OpenShiftAPIServerList = map[string]string{
+ "": "OpenShiftAPIServerList is a collection of items",
+ "items": "Items contains the items",
+}
+
+func (OpenShiftAPIServerList) SwaggerDoc() map[string]string {
+ return map_OpenShiftAPIServerList
+}
+
+var map_OpenShiftAPIServerStatus = map[string]string{
+ "latestAvailableRevision": "latestAvailableRevision is the latest revision used as suffix of revisioned secrets like encryption-config.
A new revision causes a new deployment of pods.", +} + +func (OpenShiftAPIServerStatus) SwaggerDoc() map[string]string { + return map_OpenShiftAPIServerStatus +} + +var map_OpenShiftControllerManager = map[string]string{ + "": "OpenShiftControllerManager provides information to configure an operator to manage openshift-controller-manager.", +} + +func (OpenShiftControllerManager) SwaggerDoc() map[string]string { + return map_OpenShiftControllerManager +} + +var map_OpenShiftControllerManagerList = map[string]string{ + "": "OpenShiftControllerManagerList is a collection of items", + "items": "Items contains the items", +} + +func (OpenShiftControllerManagerList) SwaggerDoc() map[string]string { + return map_OpenShiftControllerManagerList +} + +var map_KubeScheduler = map[string]string{ + "": "KubeScheduler provides information to configure an operator to manage scheduler.", + "spec": "spec is the specification of the desired behavior of the Kubernetes Scheduler", + "status": "status is the most recently observed status of the Kubernetes Scheduler", +} + +func (KubeScheduler) SwaggerDoc() map[string]string { + return map_KubeScheduler +} + +var map_KubeSchedulerList = map[string]string{ + "": "KubeSchedulerList is a collection of items", + "items": "Items contains the items", +} + +func (KubeSchedulerList) SwaggerDoc() map[string]string { + return map_KubeSchedulerList +} + +var map_ServiceCA = map[string]string{ + "": "ServiceCA provides information to configure an operator to manage the service cert controllers", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (ServiceCA) SwaggerDoc() map[string]string { + return map_ServiceCA +} + +var map_ServiceCAList = map[string]string{ + "": "ServiceCAList is a collection of items", + "items": "Items contains the items", +} + +func (ServiceCAList) SwaggerDoc() map[string]string { + return map_ServiceCAList +} + +var map_ServiceCatalogAPIServer = map[string]string{ + "": "ServiceCatalogAPIServer provides information to configure an operator to manage Service Catalog API Server DEPRECATED: will be removed in 4.6", +} + +func (ServiceCatalogAPIServer) SwaggerDoc() map[string]string { + return map_ServiceCatalogAPIServer +} + +var map_ServiceCatalogAPIServerList = map[string]string{ + "": "ServiceCatalogAPIServerList is a collection of items DEPRECATED: will be removed in 4.6", + "items": "Items contains the items", +} + +func (ServiceCatalogAPIServerList) SwaggerDoc() map[string]string { + return map_ServiceCatalogAPIServerList +} + +var map_ServiceCatalogControllerManager = map[string]string{ + "": "ServiceCatalogControllerManager provides information to configure an operator to manage Service Catalog Controller Manager DEPRECATED: will be removed in 4.6", +} + +func (ServiceCatalogControllerManager) SwaggerDoc() map[string]string { + return map_ServiceCatalogControllerManager +} + +var map_ServiceCatalogControllerManagerList = map[string]string{ + "": "ServiceCatalogControllerManagerList is a collection of items DEPRECATED: will be removed in 4.6", + "items": "Items contains the items", +} + +func (ServiceCatalogControllerManagerList) SwaggerDoc() map[string]string { + return map_ServiceCatalogControllerManagerList +} + +var map_Storage = map[string]string{ + "": "Storage provides a means to configure an operator to manage the cluster storage operator. 
`cluster` is the canonical name.", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (Storage) SwaggerDoc() map[string]string { + return map_Storage +} + +var map_StorageList = map[string]string{ + "": "StorageList contains a list of Storages.", +} + +func (StorageList) SwaggerDoc() map[string]string { + return map_StorageList +} + +var map_StorageSpec = map[string]string{ + "": "StorageSpec is the specification of the desired behavior of the cluster storage operator.", +} + +func (StorageSpec) SwaggerDoc() map[string]string { + return map_StorageSpec +} + +var map_StorageStatus = map[string]string{ + "": "StorageStatus defines the observed status of the cluster storage operator.", +} + +func (StorageStatus) SwaggerDoc() map[string]string { + return map_StorageStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/0000_10_config-operator_01_imagecontentsourcepolicy.crd.yaml b/vendor/github.com/openshift/api/operator/v1alpha1/0000_10_config-operator_01_imagecontentsourcepolicy.crd.yaml new file mode 100644 index 0000000000..b160c5bace --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1alpha1/0000_10_config-operator_01_imagecontentsourcepolicy.crd.yaml @@ -0,0 +1,94 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: imagecontentsourcepolicies.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: ImageContentSourcePolicy + listKind: ImageContentSourcePolicyList + plural: imagecontentsourcepolicies + singular: imagecontentsourcepolicy + scope: Cluster + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: ImageContentSourcePolicy holds cluster-wide information about + how to handle registry mirror rules. When multiple policies are defined, + the outcome of the behavior is defined on each field. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + repositoryDigestMirrors: + description: "repositoryDigestMirrors allows images referenced by + image digests in pods to be pulled from alternative mirrored repository + locations. 
The image pull specification provided to the pod will
+ be compared to the source locations described in RepositoryDigestMirrors
+ and the image may be pulled down from any of the mirrors in the
+ list instead of the specified repository allowing administrators
+ to choose a potentially faster mirror. Only image pull specifications
+ that have an image digest will have this behavior applied to them
+ - tags will continue to be pulled from the specified repository
+ in the pull spec. \n Each “source” repository is treated independently;
+ configurations for different “source” repositories don’t interact.
+ \n When multiple policies are defined for the same “source” repository,
+ the sets of defined mirrors will be merged together, preserving
+ the relative order of the mirrors, if possible. For example, if
+ policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`,
+ the mirrors will be used in the order `a, b, c, d, e`. If the orders
+ of mirror entries conflict (e.g. `a, b` vs. `b, a`) the configuration
+ is not rejected but the resulting order is unspecified."
+ type: array
+ items:
+ description: 'RepositoryDigestMirrors holds cluster-wide information
+ about how to handle mirrors in the registries config. Note: the
+ mirrors only work when pulling the images that are referenced
+ by their digests.'
+ type: object
+ required:
+ - source
+ properties:
+ mirrors:
+ description: mirrors is one or more repositories that may also
+ contain the same images. The order of mirrors in this list
+ is treated as the user's desired priority, while source is
+ by default considered lower priority than all mirrors. Other
+ cluster configuration, including (but not limited to) other
+ repositoryDigestMirrors objects, may impact the exact order
+ mirrors are contacted in, or some mirrors may be contacted
+ in parallel, so this should be considered a preference rather
+ than a guarantee of ordering.
+ type: array
+ items:
+ type: string
+ source:
+ description: source is the repository that users refer to, e.g.
+ in image pull specifications.
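+ # A hypothetical resource wiring source to a mirror (registry
+ # host names below are placeholders, not defaults):
+ #
+ #   apiVersion: operator.openshift.io/v1alpha1
+ #   kind: ImageContentSourcePolicy
+ #   metadata:
+ #     name: example
+ #   spec:
+ #     repositoryDigestMirrors:
+ #     - source: registry.example.com/team/app
+ #       mirrors:
+ #       - mirror.internal.example.com/team/app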
+ type: string
+ served: true
+ storage: true
+ subresources:
+ status: {}
diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/doc.go b/vendor/github.com/openshift/api/operator/v1alpha1/doc.go
new file mode 100644
index 0000000000..9d18719532
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1alpha1/doc.go
@@ -0,0 +1,6 @@
+// +k8s:deepcopy-gen=package,register
+// +k8s:defaulter-gen=TypeMeta
+// +k8s:openapi-gen=true
+
+// +groupName=operator.openshift.io
+package v1alpha1
diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/register.go b/vendor/github.com/openshift/api/operator/v1alpha1/register.go
new file mode 100644
index 0000000000..3c731f6187
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1alpha1/register.go
@@ -0,0 +1,41 @@
+package v1alpha1
+
+import (
+ configv1 "github.com/openshift/api/config/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+ GroupName = "operator.openshift.io"
+ GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
+ schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, configv1.Install)
+ // Install is a function which adds this version to a scheme
+ Install = schemeBuilder.AddToScheme
+
+ // SchemeGroupVersion generated code relies on this name
+ // Deprecated
+ SchemeGroupVersion = GroupVersion
+ // AddToScheme exists solely to keep the old generators creating valid code
+ // DEPRECATED
+ AddToScheme = schemeBuilder.AddToScheme
+)
+
+// Resource generated code relies on this being here, but it logically belongs to the group
+// DEPRECATED
+func Resource(resource string) schema.GroupResource {
+ return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+func addKnownTypes(scheme *runtime.Scheme) error {
+ metav1.AddToGroupVersion(scheme, GroupVersion)
+
+ scheme.AddKnownTypes(GroupVersion,
+ &GenericOperatorConfig{},
+ &ImageContentSourcePolicy{},
+ &ImageContentSourcePolicyList{},
+ )
+
+ return nil
+}
diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/types.go b/vendor/github.com/openshift/api/operator/v1alpha1/types.go
new file mode 100644
index 0000000000..8f2e5be243
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1alpha1/types.go
@@ -0,0 +1,180 @@
+package v1alpha1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ configv1 "github.com/openshift/api/config/v1"
+)
+
+type ManagementState string
+
+const (
+ // Managed means that the operator is actively managing its resources and trying to keep the component active
+ Managed ManagementState = "Managed"
+ // Unmanaged means that the operator is not taking any action related to the component
+ Unmanaged ManagementState = "Unmanaged"
+ // Removed means that the operator is actively managing its resources and trying to remove all traces of the component
+ Removed ManagementState = "Removed"
+)
+
+// OperatorSpec contains common fields that an operator needs. It is intended to be anonymously included
+// inside of the Spec struct for your particular operator.
+type OperatorSpec struct {
+ // managementState indicates whether and how the operator should manage the component
+ ManagementState ManagementState `json:"managementState"`
+
+ // imagePullSpec is the image to use for the component.
+ ImagePullSpec string `json:"imagePullSpec"`
+
+ // imagePullPolicy specifies the image pull policy. One of Always, Never, IfNotPresent.
Defaults to Always if :latest tag is specified,
+ // or IfNotPresent otherwise.
+ ImagePullPolicy string `json:"imagePullPolicy"`
+
+ // version is the desired state in major.minor.micro-patch. Usually patch is ignored.
+ Version string `json:"version"`
+
+ // logging contains glog parameters for the component pods. It's always a command line arg for the moment
+ Logging LoggingConfig `json:"logging,omitempty"`
+}
+
+// LoggingConfig holds information about configuring logging
+type LoggingConfig struct {
+ // level is passed to glog.
+ Level int64 `json:"level"`
+
+ // vmodule is passed to glog.
+ Vmodule string `json:"vmodule"`
+}
+
+type ConditionStatus string
+
+const (
+ ConditionTrue ConditionStatus = "True"
+ ConditionFalse ConditionStatus = "False"
+ ConditionUnknown ConditionStatus = "Unknown"
+
+ // these conditions match the conditions for the ClusterOperator type.
+ OperatorStatusTypeAvailable = "Available"
+ OperatorStatusTypeProgressing = "Progressing"
+ OperatorStatusTypeFailing = "Failing"
+
+ OperatorStatusTypeMigrating = "Migrating"
+ // TODO this is going to be removed
+ OperatorStatusTypeSyncSuccessful = "SyncSuccessful"
+)
+
+// OperatorCondition is just the standard condition fields.
+type OperatorCondition struct {
+ Type string `json:"type"`
+ Status ConditionStatus `json:"status"`
+ LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
+ Reason string `json:"reason,omitempty"`
+ Message string `json:"message,omitempty"`
+}
+
+// VersionAvailability gives information about the synchronization and operational status of a particular version of the component
+type VersionAvailability struct {
+ // version is the level this availability applies to
+ Version string `json:"version"`
+ // updatedReplicas indicates how many replicas are at the desired state
+ UpdatedReplicas int32 `json:"updatedReplicas"`
+ // readyReplicas indicates how many replicas are ready and at the desired state
+ ReadyReplicas int32 `json:"readyReplicas"`
+ // errors indicates what failures are associated with the operator trying to manage this version
+ Errors []string `json:"errors"`
+ // generations allows an operator to track what the generation of "important" resources was the last time we updated them
+ Generations []GenerationHistory `json:"generations"`
+}
+
+// GenerationHistory keeps track of the generation for a given resource so that decisions about forced updates can be made.
+type GenerationHistory struct {
+ // group is the group of the thing you're tracking
+ Group string `json:"group"`
+ // resource is the resource type of the thing you're tracking
+ Resource string `json:"resource"`
+ // namespace is where the thing you're tracking is
+ Namespace string `json:"namespace"`
+ // name is the name of the thing you're tracking
+ Name string `json:"name"`
+ // lastGeneration is the last generation of the workload controller involved
+ LastGeneration int64 `json:"lastGeneration"`
+}
+
+// OperatorStatus contains common fields that an operator needs. It is intended to be anonymously included
+// inside of the Status struct for your particular operator.
+type OperatorStatus struct {
+ // observedGeneration is the last generation change you've dealt with
+ ObservedGeneration int64 `json:"observedGeneration,omitempty"`
+
+ // conditions is a list of conditions and their status
+ Conditions []OperatorCondition `json:"conditions,omitempty"`
+
+ // state indicates what the operator has observed to be its current operational status.
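+ // As a sketch (status being a hypothetical OperatorStatus value), a
+ // component that is being actively reconciled would report:
+ //
+ //     status.State = Managed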
State ManagementState `json:"state,omitempty"`
+ // taskSummary is a high level summary of what the controller is currently attempting to do. It is high-level, human-readable
+ // and not guaranteed in any way. (I needed this for debugging and realized it made a great summary).
+ TaskSummary string `json:"taskSummary,omitempty"`
+
+ // currentVersionAvailability is availability information for the current version. If it is unmanaged or removed, this doesn't exist.
+ CurrentAvailability *VersionAvailability `json:"currentVersionAvailability,omitempty"`
+ // targetVersionAvailability is availability information for the target version if we are migrating
+ TargetAvailability *VersionAvailability `json:"targetVersionAvailability,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// GenericOperatorConfig provides information to configure an operator
+type GenericOperatorConfig struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // ServingInfo is the HTTP serving information for the controller's endpoints
+ ServingInfo configv1.HTTPServingInfo `json:"servingInfo,omitempty"`
+
+ // leaderElection provides information to elect a leader. Only override this if you have a specific need
+ LeaderElection configv1.LeaderElection `json:"leaderElection,omitempty"`
+
+ // authentication allows configuration of authentication for the endpoints
+ Authentication DelegatedAuthentication `json:"authentication,omitempty"`
+ // authorization allows configuration of authorization for the endpoints
+ Authorization DelegatedAuthorization `json:"authorization,omitempty"`
+}
+
+// DelegatedAuthentication allows authentication to be disabled.
+type DelegatedAuthentication struct {
+ // disabled indicates that authentication should be disabled. By default it will use delegated authentication.
+ Disabled bool `json:"disabled,omitempty"`
+}
+
+// DelegatedAuthorization allows authorization to be disabled.
+type DelegatedAuthorization struct {
+ // disabled indicates that authorization should be disabled. By default it will use delegated authorization.
+ Disabled bool `json:"disabled,omitempty"`
+}
+
+// StaticPodOperatorStatus is status for controllers that manage static pods. There are different needs because individual
+// node status must be tracked.
+type StaticPodOperatorStatus struct {
+ OperatorStatus `json:",inline"`
+
+ // latestAvailableDeploymentGeneration is the deploymentID of the most recent deployment
+ LatestAvailableDeploymentGeneration int32 `json:"latestAvailableDeploymentGeneration"`
+
+ // nodeStatuses track the deployment values and errors across individual nodes
+ NodeStatuses []NodeStatus `json:"nodeStatuses"`
+}
+
+// NodeStatus provides information about the current state of a particular node managed by this operator.
+type NodeStatus struct {
+ // nodeName is the name of the node
+ NodeName string `json:"nodeName"`
+
+ // currentDeploymentGeneration is the generation of the most recently successful deployment
+ CurrentDeploymentGeneration int32 `json:"currentDeploymentGeneration"`
+ // targetDeploymentGeneration is the generation of the deployment we're trying to apply
+ TargetDeploymentGeneration int32 `json:"targetDeploymentGeneration"`
+ // lastFailedDeploymentGeneration is the generation of the deployment we tried and failed to deploy.
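+ // (Illustrative values: a node moving from revision 3 to revision 4
+ // reports currentDeploymentGeneration 3 and targetDeploymentGeneration 4;
+ // if revision 4 fails to start, lastFailedDeploymentGeneration becomes 4.)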
+ LastFailedDeploymentGeneration int32 `json:"lastFailedDeploymentGeneration"`
+
+ // lastFailedDeploymentGenerationErrors is a list of the errors during the failed deployment referenced in lastFailedDeploymentGeneration
+ LastFailedDeploymentErrors []string `json:"lastFailedDeploymentErrors"`
+}
diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/types_image_content_source_policy.go b/vendor/github.com/openshift/api/operator/v1alpha1/types_image_content_source_policy.go
new file mode 100644
index 0000000000..29345e7d90
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1alpha1/types_image_content_source_policy.go
@@ -0,0 +1,67 @@
+package v1alpha1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImageContentSourcePolicy holds cluster-wide information about how to handle registry mirror rules.
+// When multiple policies are defined, the outcome of the behavior is defined on each field.
+type ImageContentSourcePolicy struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec holds user settable values for configuration
+ // +kubebuilder:validation:Required
+ // +required
+ Spec ImageContentSourcePolicySpec `json:"spec"`
+}
+
+// ImageContentSourcePolicySpec is the specification of the ImageContentSourcePolicy CRD.
+type ImageContentSourcePolicySpec struct {
+ // repositoryDigestMirrors allows images referenced by image digests in pods to be
+ // pulled from alternative mirrored repository locations. The image pull specification
+ // provided to the pod will be compared to the source locations described in RepositoryDigestMirrors
+ // and the image may be pulled down from any of the mirrors in the list instead of the
+ // specified repository allowing administrators to choose a potentially faster mirror.
+ // Only image pull specifications that have an image digest will have this behavior applied
+ // to them - tags will continue to be pulled from the specified repository in the pull spec.
+ //
+ // Each “source” repository is treated independently; configurations for different “source”
+ // repositories don’t interact.
+ //
+ // When multiple policies are defined for the same “source” repository, the sets of defined
+ // mirrors will be merged together, preserving the relative order of the mirrors, if possible.
+ // For example, if policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, the
+ // mirrors will be used in the order `a, b, c, d, e`. If the orders of mirror entries conflict
+ // (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the resulting order is unspecified.
+ // +optional
+ RepositoryDigestMirrors []RepositoryDigestMirrors `json:"repositoryDigestMirrors"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImageContentSourcePolicyList lists the items in the ImageContentSourcePolicy CRD.
+type ImageContentSourcePolicyList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ Items []ImageContentSourcePolicy `json:"items"`
+}
+
+// RepositoryDigestMirrors holds cluster-wide information about how to handle mirrors in the registries config.
+// Note: the mirrors only work when pulling the images that are referenced by their digests.
+type RepositoryDigestMirrors struct {
+ // source is the repository that users refer to, e.g. in image pull specifications.
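+ // For illustration, with a placeholder source "registry.example.com/app":
+ // a digest reference such as "registry.example.com/app@sha256:<digest>"
+ // may be served by a configured mirror, while the tagged form
+ // "registry.example.com/app:v1" is always pulled from the source itself.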
+ // +required + Source string `json:"source"` + // mirrors is one or more repositories that may also contain the same images. + // The order of mirrors in this list is treated as the user's desired priority, while source + // is by default considered lower priority than all mirrors. Other cluster configuration, + // including (but not limited to) other repositoryDigestMirrors objects, + // may impact the exact order mirrors are contacted in, or some mirrors may be contacted + // in parallel, so this should be considered a preference rather than a guarantee of ordering. + // +optional + Mirrors []string `json:"mirrors"` +} diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..79f75bd0be --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,344 @@ +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DelegatedAuthentication) DeepCopyInto(out *DelegatedAuthentication) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DelegatedAuthentication. +func (in *DelegatedAuthentication) DeepCopy() *DelegatedAuthentication { + if in == nil { + return nil + } + out := new(DelegatedAuthentication) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DelegatedAuthorization) DeepCopyInto(out *DelegatedAuthorization) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DelegatedAuthorization. +func (in *DelegatedAuthorization) DeepCopy() *DelegatedAuthorization { + if in == nil { + return nil + } + out := new(DelegatedAuthorization) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GenerationHistory) DeepCopyInto(out *GenerationHistory) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenerationHistory. +func (in *GenerationHistory) DeepCopy() *GenerationHistory { + if in == nil { + return nil + } + out := new(GenerationHistory) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GenericOperatorConfig) DeepCopyInto(out *GenericOperatorConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ServingInfo.DeepCopyInto(&out.ServingInfo) + out.LeaderElection = in.LeaderElection + out.Authentication = in.Authentication + out.Authorization = in.Authorization + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericOperatorConfig. +func (in *GenericOperatorConfig) DeepCopy() *GenericOperatorConfig { + if in == nil { + return nil + } + out := new(GenericOperatorConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *GenericOperatorConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageContentSourcePolicy) DeepCopyInto(out *ImageContentSourcePolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageContentSourcePolicy. +func (in *ImageContentSourcePolicy) DeepCopy() *ImageContentSourcePolicy { + if in == nil { + return nil + } + out := new(ImageContentSourcePolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageContentSourcePolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageContentSourcePolicyList) DeepCopyInto(out *ImageContentSourcePolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ImageContentSourcePolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageContentSourcePolicyList. +func (in *ImageContentSourcePolicyList) DeepCopy() *ImageContentSourcePolicyList { + if in == nil { + return nil + } + out := new(ImageContentSourcePolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageContentSourcePolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageContentSourcePolicySpec) DeepCopyInto(out *ImageContentSourcePolicySpec) { + *out = *in + if in.RepositoryDigestMirrors != nil { + in, out := &in.RepositoryDigestMirrors, &out.RepositoryDigestMirrors + *out = make([]RepositoryDigestMirrors, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageContentSourcePolicySpec. +func (in *ImageContentSourcePolicySpec) DeepCopy() *ImageContentSourcePolicySpec { + if in == nil { + return nil + } + out := new(ImageContentSourcePolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingConfig) DeepCopyInto(out *LoggingConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingConfig. +func (in *LoggingConfig) DeepCopy() *LoggingConfig { + if in == nil { + return nil + } + out := new(LoggingConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodeStatus) DeepCopyInto(out *NodeStatus) { + *out = *in + if in.LastFailedDeploymentErrors != nil { + in, out := &in.LastFailedDeploymentErrors, &out.LastFailedDeploymentErrors + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeStatus. +func (in *NodeStatus) DeepCopy() *NodeStatus { + if in == nil { + return nil + } + out := new(NodeStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatorCondition) DeepCopyInto(out *OperatorCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorCondition. +func (in *OperatorCondition) DeepCopy() *OperatorCondition { + if in == nil { + return nil + } + out := new(OperatorCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatorSpec) DeepCopyInto(out *OperatorSpec) { + *out = *in + out.Logging = in.Logging + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorSpec. +func (in *OperatorSpec) DeepCopy() *OperatorSpec { + if in == nil { + return nil + } + out := new(OperatorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatorStatus) DeepCopyInto(out *OperatorStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]OperatorCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CurrentAvailability != nil { + in, out := &in.CurrentAvailability, &out.CurrentAvailability + *out = new(VersionAvailability) + (*in).DeepCopyInto(*out) + } + if in.TargetAvailability != nil { + in, out := &in.TargetAvailability, &out.TargetAvailability + *out = new(VersionAvailability) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorStatus. +func (in *OperatorStatus) DeepCopy() *OperatorStatus { + if in == nil { + return nil + } + out := new(OperatorStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepositoryDigestMirrors) DeepCopyInto(out *RepositoryDigestMirrors) { + *out = *in + if in.Mirrors != nil { + in, out := &in.Mirrors, &out.Mirrors + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryDigestMirrors. +func (in *RepositoryDigestMirrors) DeepCopy() *RepositoryDigestMirrors { + if in == nil { + return nil + } + out := new(RepositoryDigestMirrors) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
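+// A minimal usage sketch (variable names are illustrative, not part of the generated API):
+//
+//	var dst StaticPodOperatorStatus
+//	src.DeepCopyInto(&dst) // dst.NodeStatuses becomes a fresh slice, not aliased to src's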
+func (in *StaticPodOperatorStatus) DeepCopyInto(out *StaticPodOperatorStatus) {
+ *out = *in
+ in.OperatorStatus.DeepCopyInto(&out.OperatorStatus)
+ if in.NodeStatuses != nil {
+ in, out := &in.NodeStatuses, &out.NodeStatuses
+ *out = make([]NodeStatus, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticPodOperatorStatus.
+func (in *StaticPodOperatorStatus) DeepCopy() *StaticPodOperatorStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(StaticPodOperatorStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VersionAvailability) DeepCopyInto(out *VersionAvailability) {
+ *out = *in
+ if in.Errors != nil {
+ in, out := &in.Errors, &out.Errors
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Generations != nil {
+ in, out := &in.Generations, &out.Generations
+ *out = make([]GenerationHistory, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VersionAvailability.
+func (in *VersionAvailability) DeepCopy() *VersionAvailability {
+ if in == nil {
+ return nil
+ }
+ out := new(VersionAvailability)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.swagger_doc_generated.go
new file mode 100644
index 0000000000..7af7159552
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.swagger_doc_generated.go
@@ -0,0 +1,174 @@
+package v1alpha1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple lines or blocks that you want to ignore, use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_DelegatedAuthentication = map[string]string{
+ "": "DelegatedAuthentication allows authentication to be disabled.",
+ "disabled": "disabled indicates that authentication should be disabled. By default it will use delegated authentication.",
+}
+
+func (DelegatedAuthentication) SwaggerDoc() map[string]string {
+ return map_DelegatedAuthentication
+}
+
+var map_DelegatedAuthorization = map[string]string{
+ "": "DelegatedAuthorization allows authorization to be disabled.",
+ "disabled": "disabled indicates that authorization should be disabled. By default it will use delegated authorization.",
+}
+
+func (DelegatedAuthorization) SwaggerDoc() map[string]string {
+ return map_DelegatedAuthorization
+}
+
+var map_GenerationHistory = map[string]string{
+ "": "GenerationHistory keeps track of the generation for a given resource so that decisions about forced updates can be made.",
+ "group": "group is the group of the thing you're tracking",
+ "resource": "resource is the resource type of the thing you're tracking",
+ "namespace": "namespace is where the thing you're tracking is",
+ "name": "name is the name of the thing you're tracking",
+ "lastGeneration": "lastGeneration is the last generation of the workload controller involved",
+}
+
+func (GenerationHistory) SwaggerDoc() map[string]string {
+ return map_GenerationHistory
+}
+
+var map_GenericOperatorConfig = map[string]string{
+ "": "GenericOperatorConfig provides information to configure an operator",
+ "servingInfo": "ServingInfo is the HTTP serving information for the controller's endpoints",
+ "leaderElection": "leaderElection provides information to elect a leader. Only override this if you have a specific need",
+ "authentication": "authentication allows configuration of authentication for the endpoints",
+ "authorization": "authorization allows configuration of authorization for the endpoints",
+}
+
+func (GenericOperatorConfig) SwaggerDoc() map[string]string {
+ return map_GenericOperatorConfig
+}
+
+var map_LoggingConfig = map[string]string{
+ "": "LoggingConfig holds information about configuring logging",
+ "level": "level is passed to glog.",
+ "vmodule": "vmodule is passed to glog.",
+}
+
+func (LoggingConfig) SwaggerDoc() map[string]string {
+ return map_LoggingConfig
+}
+
+var map_NodeStatus = map[string]string{
+ "": "NodeStatus provides information about the current state of a particular node managed by this operator.",
+ "nodeName": "nodeName is the name of the node",
+ "currentDeploymentGeneration": "currentDeploymentGeneration is the generation of the most recently successful deployment",
+ "targetDeploymentGeneration": "targetDeploymentGeneration is the generation of the deployment we're trying to apply",
+ "lastFailedDeploymentGeneration": "lastFailedDeploymentGeneration is the generation of the deployment we tried and failed to deploy.",
+ "lastFailedDeploymentErrors": "lastFailedDeploymentErrors is a list of the errors during the failed deployment referenced in lastFailedDeploymentGeneration",
+}
+
+func (NodeStatus) SwaggerDoc() map[string]string {
+ return map_NodeStatus
+}
+
+var map_OperatorCondition = map[string]string{
+ "": "OperatorCondition is just the standard condition fields.",
+}
+
+func (OperatorCondition) SwaggerDoc() map[string]string {
+ return map_OperatorCondition
+}
+
+var map_OperatorSpec = map[string]string{
+ "": "OperatorSpec contains common fields that an operator needs. It is intended to be anonymously included inside of the Spec struct for your particular operator.",
+ "managementState": "managementState indicates whether and how the operator should manage the component",
+ "imagePullSpec": "imagePullSpec is the image to use for the component.",
+ "imagePullPolicy": "imagePullPolicy specifies the image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.",
+ "version": "version is the desired state in major.minor.micro-patch. Usually patch is ignored.",
+ "logging": "logging contains glog parameters for the component pods. It's always a command line arg for the moment",
+}
+
+func (OperatorSpec) SwaggerDoc() map[string]string {
+ return map_OperatorSpec
+}
+
+var map_OperatorStatus = map[string]string{
+ "": "OperatorStatus contains common fields that an operator needs. It is intended to be anonymously included inside of the Status struct for your particular operator.",
+ "observedGeneration": "observedGeneration is the last generation change you've dealt with",
+ "conditions": "conditions is a list of conditions and their status",
+ "state": "state indicates what the operator has observed to be its current operational status.",
+ "taskSummary": "taskSummary is a high-level, human-readable summary of what the controller is currently attempting to do. It is not guaranteed in any way. (I needed this for debugging and realized it made a great summary).",
+ "currentVersionAvailability": "currentVersionAvailability is availability information for the current version. If it is unmanaged or removed, this doesn't exist.",
+ "targetVersionAvailability": "targetVersionAvailability is availability information for the target version if we are migrating",
+}
+
+func (OperatorStatus) SwaggerDoc() map[string]string {
+ return map_OperatorStatus
+}
+
+var map_StaticPodOperatorStatus = map[string]string{
+ "": "StaticPodOperatorStatus is status for controllers that manage static pods. There are different needs because individual node status must be tracked.",
+ "latestAvailableDeploymentGeneration": "latestAvailableDeploymentGeneration is the deploymentID of the most recent deployment",
+ "nodeStatuses": "nodeStatuses track the deployment values and errors across individual nodes",
+}
+
+func (StaticPodOperatorStatus) SwaggerDoc() map[string]string {
+ return map_StaticPodOperatorStatus
+}
+
+var map_VersionAvailability = map[string]string{
+ "": "VersionAvailability gives information about the synchronization and operational status of a particular version of the component",
+ "version": "version is the level this availability applies to",
+ "updatedReplicas": "updatedReplicas indicates how many replicas are at the desired state",
+ "readyReplicas": "readyReplicas indicates how many replicas are ready and at the desired state",
+ "errors": "errors indicates what failures are associated with the operator trying to manage this version",
+ "generations": "generations allows an operator to track what the generation of \"important\" resources was the last time we updated them",
+}
+
+func (VersionAvailability) SwaggerDoc() map[string]string {
+ return map_VersionAvailability
+}
+
+var map_ImageContentSourcePolicy = map[string]string{
+ "": "ImageContentSourcePolicy holds cluster-wide information about how to handle registry mirror rules. When multiple policies are defined, the outcome of the behavior is defined on each field.",
+ "spec": "spec holds user settable values for configuration",
+}
+
+func (ImageContentSourcePolicy) SwaggerDoc() map[string]string {
+ return map_ImageContentSourcePolicy
+}
+
+var map_ImageContentSourcePolicyList = map[string]string{
+ "": "ImageContentSourcePolicyList lists the items in the ImageContentSourcePolicy CRD.",
+}
+
+func (ImageContentSourcePolicyList) SwaggerDoc() map[string]string {
+ return map_ImageContentSourcePolicyList
+}
+
+var map_ImageContentSourcePolicySpec = map[string]string{
+ "": "ImageContentSourcePolicySpec is the specification of the ImageContentSourcePolicy CRD.",
+ "repositoryDigestMirrors": "repositoryDigestMirrors allows images referenced by image digests in pods to be pulled from alternative mirrored repository locations. The image pull specification provided to the pod will be compared to the source locations described in RepositoryDigestMirrors and the image may be pulled down from any of the mirrors in the list instead of the specified repository allowing administrators to choose a potentially faster mirror. Only image pull specifications that have an image digest will have this behavior applied to them - tags will continue to be pulled from the specified repository in the pull spec.\n\nEach “source” repository is treated independently; configurations for different “source” repositories don’t interact.\n\nWhen multiple policies are defined for the same “source” repository, the sets of defined mirrors will be merged together, preserving the relative order of the mirrors, if possible. For example, if policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, the mirrors will be used in the order `a, b, c, d, e`. If the orders of mirror entries conflict (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the resulting order is unspecified.",
+}
+
+func (ImageContentSourcePolicySpec) SwaggerDoc() map[string]string {
+ return map_ImageContentSourcePolicySpec
+}
+
+var map_RepositoryDigestMirrors = map[string]string{
+ "": "RepositoryDigestMirrors holds cluster-wide information about how to handle mirrors in the registries config. Note: the mirrors only work when pulling the images that are referenced by their digests.",
+ "source": "source is the repository that users refer to, e.g. in image pull specifications.",
+ "mirrors": "mirrors is one or more repositories that may also contain the same images. The order of mirrors in this list is treated as the user's desired priority, while source is by default considered lower priority than all mirrors. Other cluster configuration, including (but not limited to) other repositoryDigestMirrors objects, may impact the exact order mirrors are contacted in, or some mirrors may be contacted in parallel, so this should be considered a preference rather than a guarantee of ordering.",
+}
+
+func (RepositoryDigestMirrors) SwaggerDoc() map[string]string {
+ return map_RepositoryDigestMirrors
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/clientset.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/clientset.go
new file mode 100644
index 0000000000..f9c0656e0a
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/clientset.go
@@ -0,0 +1,95 @@
+// Code generated by client-gen. DO NOT EDIT.
+ +package versioned + +import ( + "fmt" + + operatorv1 "github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1" + operatorv1alpha1 "github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1" + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + OperatorV1() operatorv1.OperatorV1Interface + OperatorV1alpha1() operatorv1alpha1.OperatorV1alpha1Interface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + operatorV1 *operatorv1.OperatorV1Client + operatorV1alpha1 *operatorv1alpha1.OperatorV1alpha1Client +} + +// OperatorV1 retrieves the OperatorV1Client +func (c *Clientset) OperatorV1() operatorv1.OperatorV1Interface { + return c.operatorV1 +} + +// OperatorV1alpha1 retrieves the OperatorV1alpha1Client +func (c *Clientset) OperatorV1alpha1() operatorv1alpha1.OperatorV1alpha1Interface { + return c.operatorV1alpha1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. +func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + var cs Clientset + var err error + cs.operatorV1, err = operatorv1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.operatorV1alpha1, err = operatorv1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + var cs Clientset + cs.operatorV1 = operatorv1.NewForConfigOrDie(c) + cs.operatorV1alpha1 = operatorv1alpha1.NewForConfigOrDie(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) + return &cs +} + +// New creates a new Clientset for the given RESTClient. +func New(c rest.Interface) *Clientset { + var cs Clientset + cs.operatorV1 = operatorv1.New(c) + cs.operatorV1alpha1 = operatorv1alpha1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/doc.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/doc.go new file mode 100644 index 0000000000..0e0c2a8900 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated clientset. 
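+//
+// A minimal construction sketch (the kubeconfig path and the clientcmd import are
+// assumptions for illustration; error handling elided):
+//
+//	cfg, _ := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
+//	client, _ := versioned.NewForConfig(cfg)
+//	operators := client.OperatorV1() // typed access to operator.openshift.io resources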
+package versioned diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/fake/clientset_generated.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/fake/clientset_generated.go new file mode 100644 index 0000000000..d2fc3d301a --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/fake/clientset_generated.go @@ -0,0 +1,73 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + clientset "github.com/openshift/client-go/operator/clientset/versioned" + operatorv1 "github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1" + fakeoperatorv1 "github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake" + operatorv1alpha1 "github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1" + fakeoperatorv1alpha1 "github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1/fake" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/discovery" + fakediscovery "k8s.io/client-go/discovery/fake" + "k8s.io/client-go/testing" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. +func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{tracker: o} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. +type Clientset struct { + testing.Fake + discovery *fakediscovery.FakeDiscovery + tracker testing.ObjectTracker +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.discovery +} + +func (c *Clientset) Tracker() testing.ObjectTracker { + return c.tracker +} + +var _ clientset.Interface = &Clientset{} + +// OperatorV1 retrieves the OperatorV1Client +func (c *Clientset) OperatorV1() operatorv1.OperatorV1Interface { + return &fakeoperatorv1.FakeOperatorV1{Fake: &c.Fake} +} + +// OperatorV1alpha1 retrieves the OperatorV1alpha1Client +func (c *Clientset) OperatorV1alpha1() operatorv1alpha1.OperatorV1alpha1Interface { + return &fakeoperatorv1alpha1.FakeOperatorV1alpha1{Fake: &c.Fake} +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/fake/doc.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/fake/doc.go new file mode 100644 index 0000000000..3630ed1cd1 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/fake/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated fake clientset. 
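+//
+// A minimal test sketch (the object name and the context/metav1/operatorv1 imports
+// are illustrative assumptions):
+//
+//	client := fake.NewSimpleClientset(&operatorv1.Console{ObjectMeta: metav1.ObjectMeta{Name: "cluster"}})
+//	console, err := client.OperatorV1().Consoles().Get(context.TODO(), "cluster", metav1.GetOptions{})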
+package fake diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/fake/register.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/fake/register.go new file mode 100644 index 0000000000..ede91af38e --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/fake/register.go @@ -0,0 +1,42 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + operatorv1alpha1 "github.com/openshift/api/operator/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var scheme = runtime.NewScheme() +var codecs = serializer.NewCodecFactory(scheme) + +var localSchemeBuilder = runtime.SchemeBuilder{ + operatorv1.AddToScheme, + operatorv1alpha1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(scheme)) +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/scheme/doc.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/scheme/doc.go new file mode 100644 index 0000000000..14db57a58f --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/scheme/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/scheme/register.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/scheme/register.go new file mode 100644 index 0000000000..0a99d662e9 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/scheme/register.go @@ -0,0 +1,42 @@ +// Code generated by client-gen. DO NOT EDIT. + +package scheme + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + operatorv1alpha1 "github.com/openshift/api/operator/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + operatorv1.AddToScheme, + operatorv1alpha1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. 
This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/authentication.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/authentication.go new file mode 100644 index 0000000000..a7523869d4 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/authentication.go @@ -0,0 +1,168 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/openshift/api/operator/v1" + scheme "github.com/openshift/client-go/operator/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// AuthenticationsGetter has a method to return a AuthenticationInterface. +// A group's client should implement this interface. +type AuthenticationsGetter interface { + Authentications() AuthenticationInterface +} + +// AuthenticationInterface has methods to work with Authentication resources. +type AuthenticationInterface interface { + Create(ctx context.Context, authentication *v1.Authentication, opts metav1.CreateOptions) (*v1.Authentication, error) + Update(ctx context.Context, authentication *v1.Authentication, opts metav1.UpdateOptions) (*v1.Authentication, error) + UpdateStatus(ctx context.Context, authentication *v1.Authentication, opts metav1.UpdateOptions) (*v1.Authentication, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Authentication, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.AuthenticationList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Authentication, err error) + AuthenticationExpansion +} + +// authentications implements AuthenticationInterface +type authentications struct { + client rest.Interface +} + +// newAuthentications returns a Authentications +func newAuthentications(c *OperatorV1Client) *authentications { + return &authentications{ + client: c.RESTClient(), + } +} + +// Get takes name of the authentication, and returns the corresponding authentication object, and an error if there is any. +func (c *authentications) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Authentication, err error) { + result = &v1.Authentication{} + err = c.client.Get(). + Resource("authentications"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). 
+ Into(result) + return +} + +// List takes label and field selectors, and returns the list of Authentications that match those selectors. +func (c *authentications) List(ctx context.Context, opts metav1.ListOptions) (result *v1.AuthenticationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.AuthenticationList{} + err = c.client.Get(). + Resource("authentications"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested authentications. +func (c *authentications) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("authentications"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a authentication and creates it. Returns the server's representation of the authentication, and an error, if there is any. +func (c *authentications) Create(ctx context.Context, authentication *v1.Authentication, opts metav1.CreateOptions) (result *v1.Authentication, err error) { + result = &v1.Authentication{} + err = c.client.Post(). + Resource("authentications"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(authentication). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a authentication and updates it. Returns the server's representation of the authentication, and an error, if there is any. +func (c *authentications) Update(ctx context.Context, authentication *v1.Authentication, opts metav1.UpdateOptions) (result *v1.Authentication, err error) { + result = &v1.Authentication{} + err = c.client.Put(). + Resource("authentications"). + Name(authentication.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(authentication). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *authentications) UpdateStatus(ctx context.Context, authentication *v1.Authentication, opts metav1.UpdateOptions) (result *v1.Authentication, err error) { + result = &v1.Authentication{} + err = c.client.Put(). + Resource("authentications"). + Name(authentication.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(authentication). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the authentication and deletes it. Returns an error if one occurs. +func (c *authentications) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("authentications"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *authentications) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("authentications"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). 
+ Error() +} + +// Patch applies the patch and returns the patched authentication. +func (c *authentications) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Authentication, err error) { + result = &v1.Authentication{} + err = c.client.Patch(pt). + Resource("authentications"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/cloudcredential.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/cloudcredential.go new file mode 100644 index 0000000000..f501305ced --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/cloudcredential.go @@ -0,0 +1,168 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/openshift/api/operator/v1" + scheme "github.com/openshift/client-go/operator/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// CloudCredentialsGetter has a method to return a CloudCredentialInterface. +// A group's client should implement this interface. +type CloudCredentialsGetter interface { + CloudCredentials() CloudCredentialInterface +} + +// CloudCredentialInterface has methods to work with CloudCredential resources. +type CloudCredentialInterface interface { + Create(ctx context.Context, cloudCredential *v1.CloudCredential, opts metav1.CreateOptions) (*v1.CloudCredential, error) + Update(ctx context.Context, cloudCredential *v1.CloudCredential, opts metav1.UpdateOptions) (*v1.CloudCredential, error) + UpdateStatus(ctx context.Context, cloudCredential *v1.CloudCredential, opts metav1.UpdateOptions) (*v1.CloudCredential, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CloudCredential, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.CloudCredentialList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CloudCredential, err error) + CloudCredentialExpansion +} + +// cloudCredentials implements CloudCredentialInterface +type cloudCredentials struct { + client rest.Interface +} + +// newCloudCredentials returns a CloudCredentials +func newCloudCredentials(c *OperatorV1Client) *cloudCredentials { + return &cloudCredentials{ + client: c.RESTClient(), + } +} + +// Get takes name of the cloudCredential, and returns the corresponding cloudCredential object, and an error if there is any. +func (c *cloudCredentials) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CloudCredential, err error) { + result = &v1.CloudCredential{} + err = c.client.Get(). + Resource("cloudcredentials"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CloudCredentials that match those selectors. 
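+// A hedged usage sketch (the label selector value is illustrative only):
+//
+//	list, err := client.OperatorV1().CloudCredentials().List(ctx,
+//		metav1.ListOptions{LabelSelector: "example.com/owner=team-a"})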
+func (c *cloudCredentials) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CloudCredentialList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.CloudCredentialList{} + err = c.client.Get(). + Resource("cloudcredentials"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested cloudCredentials. +func (c *cloudCredentials) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("cloudcredentials"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a cloudCredential and creates it. Returns the server's representation of the cloudCredential, and an error, if there is any. +func (c *cloudCredentials) Create(ctx context.Context, cloudCredential *v1.CloudCredential, opts metav1.CreateOptions) (result *v1.CloudCredential, err error) { + result = &v1.CloudCredential{} + err = c.client.Post(). + Resource("cloudcredentials"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(cloudCredential). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a cloudCredential and updates it. Returns the server's representation of the cloudCredential, and an error, if there is any. +func (c *cloudCredentials) Update(ctx context.Context, cloudCredential *v1.CloudCredential, opts metav1.UpdateOptions) (result *v1.CloudCredential, err error) { + result = &v1.CloudCredential{} + err = c.client.Put(). + Resource("cloudcredentials"). + Name(cloudCredential.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(cloudCredential). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *cloudCredentials) UpdateStatus(ctx context.Context, cloudCredential *v1.CloudCredential, opts metav1.UpdateOptions) (result *v1.CloudCredential, err error) { + result = &v1.CloudCredential{} + err = c.client.Put(). + Resource("cloudcredentials"). + Name(cloudCredential.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(cloudCredential). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the cloudCredential and deletes it. Returns an error if one occurs. +func (c *cloudCredentials) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("cloudcredentials"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *cloudCredentials) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("cloudcredentials"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched cloudCredential. 
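+// A merge-patch sketch (the resource name "cluster" and the annotation are assumptions
+// for illustration; types is k8s.io/apimachinery/pkg/types):
+//
+//	patch := []byte(`{"metadata":{"annotations":{"example.com/note":"demo"}}}`)
+//	obj, err := client.OperatorV1().CloudCredentials().Patch(ctx, "cluster",
+//		types.MergePatchType, patch, metav1.PatchOptions{})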
+func (c *cloudCredentials) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CloudCredential, err error) { + result = &v1.CloudCredential{} + err = c.client.Patch(pt). + Resource("cloudcredentials"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/clustercsidriver.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/clustercsidriver.go new file mode 100644 index 0000000000..26827b9551 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/clustercsidriver.go @@ -0,0 +1,168 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/openshift/api/operator/v1" + scheme "github.com/openshift/client-go/operator/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ClusterCSIDriversGetter has a method to return a ClusterCSIDriverInterface. +// A group's client should implement this interface. +type ClusterCSIDriversGetter interface { + ClusterCSIDrivers() ClusterCSIDriverInterface +} + +// ClusterCSIDriverInterface has methods to work with ClusterCSIDriver resources. +type ClusterCSIDriverInterface interface { + Create(ctx context.Context, clusterCSIDriver *v1.ClusterCSIDriver, opts metav1.CreateOptions) (*v1.ClusterCSIDriver, error) + Update(ctx context.Context, clusterCSIDriver *v1.ClusterCSIDriver, opts metav1.UpdateOptions) (*v1.ClusterCSIDriver, error) + UpdateStatus(ctx context.Context, clusterCSIDriver *v1.ClusterCSIDriver, opts metav1.UpdateOptions) (*v1.ClusterCSIDriver, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ClusterCSIDriver, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.ClusterCSIDriverList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterCSIDriver, err error) + ClusterCSIDriverExpansion +} + +// clusterCSIDrivers implements ClusterCSIDriverInterface +type clusterCSIDrivers struct { + client rest.Interface +} + +// newClusterCSIDrivers returns a ClusterCSIDrivers +func newClusterCSIDrivers(c *OperatorV1Client) *clusterCSIDrivers { + return &clusterCSIDrivers{ + client: c.RESTClient(), + } +} + +// Get takes name of the clusterCSIDriver, and returns the corresponding clusterCSIDriver object, and an error if there is any. +func (c *clusterCSIDrivers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ClusterCSIDriver, err error) { + result = &v1.ClusterCSIDriver{} + err = c.client.Get(). + Resource("clustercsidrivers"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterCSIDrivers that match those selectors. 
+func (c *clusterCSIDrivers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ClusterCSIDriverList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.ClusterCSIDriverList{} + err = c.client.Get(). + Resource("clustercsidrivers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterCSIDrivers. +func (c *clusterCSIDrivers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("clustercsidrivers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a clusterCSIDriver and creates it. Returns the server's representation of the clusterCSIDriver, and an error, if there is any. +func (c *clusterCSIDrivers) Create(ctx context.Context, clusterCSIDriver *v1.ClusterCSIDriver, opts metav1.CreateOptions) (result *v1.ClusterCSIDriver, err error) { + result = &v1.ClusterCSIDriver{} + err = c.client.Post(). + Resource("clustercsidrivers"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(clusterCSIDriver). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a clusterCSIDriver and updates it. Returns the server's representation of the clusterCSIDriver, and an error, if there is any. +func (c *clusterCSIDrivers) Update(ctx context.Context, clusterCSIDriver *v1.ClusterCSIDriver, opts metav1.UpdateOptions) (result *v1.ClusterCSIDriver, err error) { + result = &v1.ClusterCSIDriver{} + err = c.client.Put(). + Resource("clustercsidrivers"). + Name(clusterCSIDriver.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(clusterCSIDriver). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *clusterCSIDrivers) UpdateStatus(ctx context.Context, clusterCSIDriver *v1.ClusterCSIDriver, opts metav1.UpdateOptions) (result *v1.ClusterCSIDriver, err error) { + result = &v1.ClusterCSIDriver{} + err = c.client.Put(). + Resource("clustercsidrivers"). + Name(clusterCSIDriver.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(clusterCSIDriver). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the clusterCSIDriver and deletes it. Returns an error if one occurs. +func (c *clusterCSIDrivers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("clustercsidrivers"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusterCSIDrivers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("clustercsidrivers"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched clusterCSIDriver. 
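+// The trailing variadic argument selects subresources; a hedged sketch targeting the
+// status subresource (name and patch contents are illustrative only):
+//
+//	obj, err := client.OperatorV1().ClusterCSIDrivers().Patch(ctx, name,
+//		types.MergePatchType, statusPatch, metav1.PatchOptions{}, "status")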
+func (c *clusterCSIDrivers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterCSIDriver, err error) { + result = &v1.ClusterCSIDriver{} + err = c.client.Patch(pt). + Resource("clustercsidrivers"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/config.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/config.go new file mode 100644 index 0000000000..5704816014 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/config.go @@ -0,0 +1,168 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/openshift/api/operator/v1" + scheme "github.com/openshift/client-go/operator/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ConfigsGetter has a method to return a ConfigInterface. +// A group's client should implement this interface. +type ConfigsGetter interface { + Configs() ConfigInterface +} + +// ConfigInterface has methods to work with Config resources. +type ConfigInterface interface { + Create(ctx context.Context, config *v1.Config, opts metav1.CreateOptions) (*v1.Config, error) + Update(ctx context.Context, config *v1.Config, opts metav1.UpdateOptions) (*v1.Config, error) + UpdateStatus(ctx context.Context, config *v1.Config, opts metav1.UpdateOptions) (*v1.Config, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Config, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.ConfigList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Config, err error) + ConfigExpansion +} + +// configs implements ConfigInterface +type configs struct { + client rest.Interface +} + +// newConfigs returns a Configs +func newConfigs(c *OperatorV1Client) *configs { + return &configs{ + client: c.RESTClient(), + } +} + +// Get takes name of the config, and returns the corresponding config object, and an error if there is any. +func (c *configs) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Config, err error) { + result = &v1.Config{} + err = c.client.Get(). + Resource("configs"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Configs that match those selectors. +func (c *configs) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ConfigList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.ConfigList{} + err = c.client.Get(). + Resource("configs"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). 
+ Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested configs. +func (c *configs) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("configs"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a config and creates it. Returns the server's representation of the config, and an error, if there is any. +func (c *configs) Create(ctx context.Context, config *v1.Config, opts metav1.CreateOptions) (result *v1.Config, err error) { + result = &v1.Config{} + err = c.client.Post(). + Resource("configs"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(config). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a config and updates it. Returns the server's representation of the config, and an error, if there is any. +func (c *configs) Update(ctx context.Context, config *v1.Config, opts metav1.UpdateOptions) (result *v1.Config, err error) { + result = &v1.Config{} + err = c.client.Put(). + Resource("configs"). + Name(config.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(config). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *configs) UpdateStatus(ctx context.Context, config *v1.Config, opts metav1.UpdateOptions) (result *v1.Config, err error) { + result = &v1.Config{} + err = c.client.Put(). + Resource("configs"). + Name(config.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(config). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the config and deletes it. Returns an error if one occurs. +func (c *configs) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("configs"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *configs) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("configs"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched config. +func (c *configs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Config, err error) { + result = &v1.Config{} + err = c.client.Patch(pt). + Resource("configs"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/console.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/console.go new file mode 100644 index 0000000000..cc81186be2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/console.go @@ -0,0 +1,168 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/openshift/api/operator/v1" + scheme "github.com/openshift/client-go/operator/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ConsolesGetter has a method to return a ConsoleInterface. +// A group's client should implement this interface. +type ConsolesGetter interface { + Consoles() ConsoleInterface +} + +// ConsoleInterface has methods to work with Console resources. +type ConsoleInterface interface { + Create(ctx context.Context, console *v1.Console, opts metav1.CreateOptions) (*v1.Console, error) + Update(ctx context.Context, console *v1.Console, opts metav1.UpdateOptions) (*v1.Console, error) + UpdateStatus(ctx context.Context, console *v1.Console, opts metav1.UpdateOptions) (*v1.Console, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Console, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.ConsoleList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Console, err error) + ConsoleExpansion +} + +// consoles implements ConsoleInterface +type consoles struct { + client rest.Interface +} + +// newConsoles returns a Consoles +func newConsoles(c *OperatorV1Client) *consoles { + return &consoles{ + client: c.RESTClient(), + } +} + +// Get takes name of the console, and returns the corresponding console object, and an error if there is any. +func (c *consoles) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Console, err error) { + result = &v1.Console{} + err = c.client.Get(). + Resource("consoles"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Consoles that match those selectors. +func (c *consoles) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ConsoleList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.ConsoleList{} + err = c.client.Get(). + Resource("consoles"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested consoles. +func (c *consoles) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("consoles"). 
+ VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a console and creates it. Returns the server's representation of the console, and an error, if there is any. +func (c *consoles) Create(ctx context.Context, console *v1.Console, opts metav1.CreateOptions) (result *v1.Console, err error) { + result = &v1.Console{} + err = c.client.Post(). + Resource("consoles"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(console). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a console and updates it. Returns the server's representation of the console, and an error, if there is any. +func (c *consoles) Update(ctx context.Context, console *v1.Console, opts metav1.UpdateOptions) (result *v1.Console, err error) { + result = &v1.Console{} + err = c.client.Put(). + Resource("consoles"). + Name(console.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(console). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *consoles) UpdateStatus(ctx context.Context, console *v1.Console, opts metav1.UpdateOptions) (result *v1.Console, err error) { + result = &v1.Console{} + err = c.client.Put(). + Resource("consoles"). + Name(console.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(console). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the console and deletes it. Returns an error if one occurs. +func (c *consoles) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("consoles"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *consoles) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("consoles"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched console. +func (c *consoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Console, err error) { + result = &v1.Console{} + err = c.client.Patch(pt). + Resource("consoles"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/csisnapshotcontroller.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/csisnapshotcontroller.go new file mode 100644 index 0000000000..0cbf7ea5aa --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/csisnapshotcontroller.go @@ -0,0 +1,168 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +import ( + "context" + "time" + + v1 "github.com/openshift/api/operator/v1" + scheme "github.com/openshift/client-go/operator/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// CSISnapshotControllersGetter has a method to return a CSISnapshotControllerInterface. +// A group's client should implement this interface. +type CSISnapshotControllersGetter interface { + CSISnapshotControllers() CSISnapshotControllerInterface +} + +// CSISnapshotControllerInterface has methods to work with CSISnapshotController resources. +type CSISnapshotControllerInterface interface { + Create(ctx context.Context, cSISnapshotController *v1.CSISnapshotController, opts metav1.CreateOptions) (*v1.CSISnapshotController, error) + Update(ctx context.Context, cSISnapshotController *v1.CSISnapshotController, opts metav1.UpdateOptions) (*v1.CSISnapshotController, error) + UpdateStatus(ctx context.Context, cSISnapshotController *v1.CSISnapshotController, opts metav1.UpdateOptions) (*v1.CSISnapshotController, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CSISnapshotController, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.CSISnapshotControllerList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSISnapshotController, err error) + CSISnapshotControllerExpansion +} + +// cSISnapshotControllers implements CSISnapshotControllerInterface +type cSISnapshotControllers struct { + client rest.Interface +} + +// newCSISnapshotControllers returns a CSISnapshotControllers +func newCSISnapshotControllers(c *OperatorV1Client) *cSISnapshotControllers { + return &cSISnapshotControllers{ + client: c.RESTClient(), + } +} + +// Get takes name of the cSISnapshotController, and returns the corresponding cSISnapshotController object, and an error if there is any. +func (c *cSISnapshotControllers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CSISnapshotController, err error) { + result = &v1.CSISnapshotController{} + err = c.client.Get(). + Resource("csisnapshotcontrollers"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CSISnapshotControllers that match those selectors. +func (c *cSISnapshotControllers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CSISnapshotControllerList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.CSISnapshotControllerList{} + err = c.client.Get(). + Resource("csisnapshotcontrollers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested cSISnapshotControllers. 
+func (c *cSISnapshotControllers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("csisnapshotcontrollers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a cSISnapshotController and creates it. Returns the server's representation of the cSISnapshotController, and an error, if there is any. +func (c *cSISnapshotControllers) Create(ctx context.Context, cSISnapshotController *v1.CSISnapshotController, opts metav1.CreateOptions) (result *v1.CSISnapshotController, err error) { + result = &v1.CSISnapshotController{} + err = c.client.Post(). + Resource("csisnapshotcontrollers"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(cSISnapshotController). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a cSISnapshotController and updates it. Returns the server's representation of the cSISnapshotController, and an error, if there is any. +func (c *cSISnapshotControllers) Update(ctx context.Context, cSISnapshotController *v1.CSISnapshotController, opts metav1.UpdateOptions) (result *v1.CSISnapshotController, err error) { + result = &v1.CSISnapshotController{} + err = c.client.Put(). + Resource("csisnapshotcontrollers"). + Name(cSISnapshotController.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(cSISnapshotController). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *cSISnapshotControllers) UpdateStatus(ctx context.Context, cSISnapshotController *v1.CSISnapshotController, opts metav1.UpdateOptions) (result *v1.CSISnapshotController, err error) { + result = &v1.CSISnapshotController{} + err = c.client.Put(). + Resource("csisnapshotcontrollers"). + Name(cSISnapshotController.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(cSISnapshotController). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the cSISnapshotController and deletes it. Returns an error if one occurs. +func (c *cSISnapshotControllers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("csisnapshotcontrollers"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *cSISnapshotControllers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("csisnapshotcontrollers"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched cSISnapshotController. +func (c *cSISnapshotControllers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSISnapshotController, err error) { + result = &v1.CSISnapshotController{} + err = c.client.Patch(pt). + Resource("csisnapshotcontrollers"). + Name(name). + SubResource(subresources...). 
+ VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/dns.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/dns.go new file mode 100644 index 0000000000..6e18968d15 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/dns.go @@ -0,0 +1,168 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/openshift/api/operator/v1" + scheme "github.com/openshift/client-go/operator/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// DNSesGetter has a method to return a DNSInterface. +// A group's client should implement this interface. +type DNSesGetter interface { + DNSes() DNSInterface +} + +// DNSInterface has methods to work with DNS resources. +type DNSInterface interface { + Create(ctx context.Context, dNS *v1.DNS, opts metav1.CreateOptions) (*v1.DNS, error) + Update(ctx context.Context, dNS *v1.DNS, opts metav1.UpdateOptions) (*v1.DNS, error) + UpdateStatus(ctx context.Context, dNS *v1.DNS, opts metav1.UpdateOptions) (*v1.DNS, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.DNS, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.DNSList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.DNS, err error) + DNSExpansion +} + +// dNSes implements DNSInterface +type dNSes struct { + client rest.Interface +} + +// newDNSes returns a DNSes +func newDNSes(c *OperatorV1Client) *dNSes { + return &dNSes{ + client: c.RESTClient(), + } +} + +// Get takes name of the dNS, and returns the corresponding dNS object, and an error if there is any. +func (c *dNSes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.DNS, err error) { + result = &v1.DNS{} + err = c.client.Get(). + Resource("dnses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of DNSes that match those selectors. +func (c *dNSes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.DNSList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.DNSList{} + err = c.client.Get(). + Resource("dnses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested dNSes. +func (c *dNSes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("dnses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). 
+ Watch(ctx) +} + +// Create takes the representation of a dNS and creates it. Returns the server's representation of the dNS, and an error, if there is any. +func (c *dNSes) Create(ctx context.Context, dNS *v1.DNS, opts metav1.CreateOptions) (result *v1.DNS, err error) { + result = &v1.DNS{} + err = c.client.Post(). + Resource("dnses"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(dNS). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a dNS and updates it. Returns the server's representation of the dNS, and an error, if there is any. +func (c *dNSes) Update(ctx context.Context, dNS *v1.DNS, opts metav1.UpdateOptions) (result *v1.DNS, err error) { + result = &v1.DNS{} + err = c.client.Put(). + Resource("dnses"). + Name(dNS.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(dNS). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *dNSes) UpdateStatus(ctx context.Context, dNS *v1.DNS, opts metav1.UpdateOptions) (result *v1.DNS, err error) { + result = &v1.DNS{} + err = c.client.Put(). + Resource("dnses"). + Name(dNS.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(dNS). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the dNS and deletes it. Returns an error if one occurs. +func (c *dNSes) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("dnses"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *dNSes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("dnses"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched dNS. +func (c *dNSes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.DNS, err error) { + result = &v1.DNS{} + err = c.client.Patch(pt). + Resource("dnses"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/doc.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/doc.go new file mode 100644 index 0000000000..225e6b2be3 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/etcd.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/etcd.go new file mode 100644 index 0000000000..b013700893 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/etcd.go @@ -0,0 +1,168 @@ +// Code generated by client-gen. DO NOT EDIT. 
+
+package v1
+
+import (
+ "context"
+ "time"
+
+ v1 "github.com/openshift/api/operator/v1"
+ scheme "github.com/openshift/client-go/operator/clientset/versioned/scheme"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// EtcdsGetter has a method to return a EtcdInterface.
+// A group's client should implement this interface.
+type EtcdsGetter interface {
+ Etcds() EtcdInterface
+}
+
+// EtcdInterface has methods to work with Etcd resources.
+type EtcdInterface interface {
+ Create(ctx context.Context, etcd *v1.Etcd, opts metav1.CreateOptions) (*v1.Etcd, error)
+ Update(ctx context.Context, etcd *v1.Etcd, opts metav1.UpdateOptions) (*v1.Etcd, error)
+ UpdateStatus(ctx context.Context, etcd *v1.Etcd, opts metav1.UpdateOptions) (*v1.Etcd, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Etcd, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*v1.EtcdList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Etcd, err error)
+ EtcdExpansion
+}
+
+// etcds implements EtcdInterface
+type etcds struct {
+ client rest.Interface
+}
+
+// newEtcds returns a Etcds
+func newEtcds(c *OperatorV1Client) *etcds {
+ return &etcds{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the etcd, and returns the corresponding etcd object, and an error if there is any.
+func (c *etcds) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Etcd, err error) {
+ result = &v1.Etcd{}
+ err = c.client.Get().
+ Resource("etcds").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of Etcds that match those selectors.
+func (c *etcds) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EtcdList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1.EtcdList{}
+ err = c.client.Get().
+ Resource("etcds").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested etcds.
+func (c *etcds) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("etcds").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a etcd and creates it. Returns the server's representation of the etcd, and an error, if there is any.
+func (c *etcds) Create(ctx context.Context, etcd *v1.Etcd, opts metav1.CreateOptions) (result *v1.Etcd, err error) {
+ result = &v1.Etcd{}
+ err = c.client.Post().
+ Resource("etcds").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(etcd).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a etcd and updates it. Returns the server's representation of the etcd, and an error, if there is any.
+func (c *etcds) Update(ctx context.Context, etcd *v1.Etcd, opts metav1.UpdateOptions) (result *v1.Etcd, err error) {
+ result = &v1.Etcd{}
+ err = c.client.Put().
+ Resource("etcds").
+ Name(etcd.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(etcd).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *etcds) UpdateStatus(ctx context.Context, etcd *v1.Etcd, opts metav1.UpdateOptions) (result *v1.Etcd, err error) {
+ result = &v1.Etcd{}
+ err = c.client.Put().
+ Resource("etcds").
+ Name(etcd.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(etcd).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the etcd and deletes it. Returns an error if one occurs.
+func (c *etcds) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("etcds").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *etcds) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("etcds").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched etcd.
+func (c *etcds) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Etcd, err error) {
+ result = &v1.Etcd{}
+ err = c.client.Patch(pt).
+ Resource("etcds").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/doc.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/doc.go
new file mode 100644
index 0000000000..2b5ba4c8e4
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/doc.go
@@ -0,0 +1,4 @@
+// Code generated by client-gen. DO NOT EDIT.
+
+// Package fake has the automatically generated clients.
+package fake
diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_authentication.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_authentication.go
new file mode 100644
index 0000000000..e2f0a6c310
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_authentication.go
@@ -0,0 +1,117 @@
+// Code generated by client-gen. DO NOT EDIT.
+ +package fake + +import ( + "context" + + operatorv1 "github.com/openshift/api/operator/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeAuthentications implements AuthenticationInterface +type FakeAuthentications struct { + Fake *FakeOperatorV1 +} + +var authenticationsResource = schema.GroupVersionResource{Group: "operator.openshift.io", Version: "v1", Resource: "authentications"} + +var authenticationsKind = schema.GroupVersionKind{Group: "operator.openshift.io", Version: "v1", Kind: "Authentication"} + +// Get takes name of the authentication, and returns the corresponding authentication object, and an error if there is any. +func (c *FakeAuthentications) Get(ctx context.Context, name string, options v1.GetOptions) (result *operatorv1.Authentication, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(authenticationsResource, name), &operatorv1.Authentication{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.Authentication), err +} + +// List takes label and field selectors, and returns the list of Authentications that match those selectors. +func (c *FakeAuthentications) List(ctx context.Context, opts v1.ListOptions) (result *operatorv1.AuthenticationList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(authenticationsResource, authenticationsKind, opts), &operatorv1.AuthenticationList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &operatorv1.AuthenticationList{ListMeta: obj.(*operatorv1.AuthenticationList).ListMeta} + for _, item := range obj.(*operatorv1.AuthenticationList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested authentications. +func (c *FakeAuthentications) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(authenticationsResource, opts)) +} + +// Create takes the representation of a authentication and creates it. Returns the server's representation of the authentication, and an error, if there is any. +func (c *FakeAuthentications) Create(ctx context.Context, authentication *operatorv1.Authentication, opts v1.CreateOptions) (result *operatorv1.Authentication, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(authenticationsResource, authentication), &operatorv1.Authentication{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.Authentication), err +} + +// Update takes the representation of a authentication and updates it. Returns the server's representation of the authentication, and an error, if there is any. +func (c *FakeAuthentications) Update(ctx context.Context, authentication *operatorv1.Authentication, opts v1.UpdateOptions) (result *operatorv1.Authentication, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(authenticationsResource, authentication), &operatorv1.Authentication{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.Authentication), err +} + +// UpdateStatus was generated because the type contains a Status member. 
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeAuthentications) UpdateStatus(ctx context.Context, authentication *operatorv1.Authentication, opts v1.UpdateOptions) (*operatorv1.Authentication, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(authenticationsResource, "status", authentication), &operatorv1.Authentication{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.Authentication), err +} + +// Delete takes name of the authentication and deletes it. Returns an error if one occurs. +func (c *FakeAuthentications) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(authenticationsResource, name), &operatorv1.Authentication{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeAuthentications) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(authenticationsResource, listOpts) + + _, err := c.Fake.Invokes(action, &operatorv1.AuthenticationList{}) + return err +} + +// Patch applies the patch and returns the patched authentication. +func (c *FakeAuthentications) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *operatorv1.Authentication, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(authenticationsResource, name, pt, data, subresources...), &operatorv1.Authentication{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.Authentication), err +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_cloudcredential.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_cloudcredential.go new file mode 100644 index 0000000000..9d80df1506 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_cloudcredential.go @@ -0,0 +1,117 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + operatorv1 "github.com/openshift/api/operator/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeCloudCredentials implements CloudCredentialInterface +type FakeCloudCredentials struct { + Fake *FakeOperatorV1 +} + +var cloudcredentialsResource = schema.GroupVersionResource{Group: "operator.openshift.io", Version: "v1", Resource: "cloudcredentials"} + +var cloudcredentialsKind = schema.GroupVersionKind{Group: "operator.openshift.io", Version: "v1", Kind: "CloudCredential"} + +// Get takes name of the cloudCredential, and returns the corresponding cloudCredential object, and an error if there is any. +func (c *FakeCloudCredentials) Get(ctx context.Context, name string, options v1.GetOptions) (result *operatorv1.CloudCredential, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(cloudcredentialsResource, name), &operatorv1.CloudCredential{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.CloudCredential), err +} + +// List takes label and field selectors, and returns the list of CloudCredentials that match those selectors. 
+func (c *FakeCloudCredentials) List(ctx context.Context, opts v1.ListOptions) (result *operatorv1.CloudCredentialList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(cloudcredentialsResource, cloudcredentialsKind, opts), &operatorv1.CloudCredentialList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &operatorv1.CloudCredentialList{ListMeta: obj.(*operatorv1.CloudCredentialList).ListMeta} + for _, item := range obj.(*operatorv1.CloudCredentialList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested cloudCredentials. +func (c *FakeCloudCredentials) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(cloudcredentialsResource, opts)) +} + +// Create takes the representation of a cloudCredential and creates it. Returns the server's representation of the cloudCredential, and an error, if there is any. +func (c *FakeCloudCredentials) Create(ctx context.Context, cloudCredential *operatorv1.CloudCredential, opts v1.CreateOptions) (result *operatorv1.CloudCredential, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(cloudcredentialsResource, cloudCredential), &operatorv1.CloudCredential{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.CloudCredential), err +} + +// Update takes the representation of a cloudCredential and updates it. Returns the server's representation of the cloudCredential, and an error, if there is any. +func (c *FakeCloudCredentials) Update(ctx context.Context, cloudCredential *operatorv1.CloudCredential, opts v1.UpdateOptions) (result *operatorv1.CloudCredential, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(cloudcredentialsResource, cloudCredential), &operatorv1.CloudCredential{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.CloudCredential), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeCloudCredentials) UpdateStatus(ctx context.Context, cloudCredential *operatorv1.CloudCredential, opts v1.UpdateOptions) (*operatorv1.CloudCredential, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(cloudcredentialsResource, "status", cloudCredential), &operatorv1.CloudCredential{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.CloudCredential), err +} + +// Delete takes name of the cloudCredential and deletes it. Returns an error if one occurs. +func (c *FakeCloudCredentials) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(cloudcredentialsResource, name), &operatorv1.CloudCredential{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeCloudCredentials) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(cloudcredentialsResource, listOpts) + + _, err := c.Fake.Invokes(action, &operatorv1.CloudCredentialList{}) + return err +} + +// Patch applies the patch and returns the patched cloudCredential. 
+func (c *FakeCloudCredentials) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *operatorv1.CloudCredential, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(cloudcredentialsResource, name, pt, data, subresources...), &operatorv1.CloudCredential{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.CloudCredential), err +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_clustercsidriver.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_clustercsidriver.go new file mode 100644 index 0000000000..5f3bfba6f5 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_clustercsidriver.go @@ -0,0 +1,117 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + operatorv1 "github.com/openshift/api/operator/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeClusterCSIDrivers implements ClusterCSIDriverInterface +type FakeClusterCSIDrivers struct { + Fake *FakeOperatorV1 +} + +var clustercsidriversResource = schema.GroupVersionResource{Group: "operator.openshift.io", Version: "v1", Resource: "clustercsidrivers"} + +var clustercsidriversKind = schema.GroupVersionKind{Group: "operator.openshift.io", Version: "v1", Kind: "ClusterCSIDriver"} + +// Get takes name of the clusterCSIDriver, and returns the corresponding clusterCSIDriver object, and an error if there is any. +func (c *FakeClusterCSIDrivers) Get(ctx context.Context, name string, options v1.GetOptions) (result *operatorv1.ClusterCSIDriver, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(clustercsidriversResource, name), &operatorv1.ClusterCSIDriver{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.ClusterCSIDriver), err +} + +// List takes label and field selectors, and returns the list of ClusterCSIDrivers that match those selectors. +func (c *FakeClusterCSIDrivers) List(ctx context.Context, opts v1.ListOptions) (result *operatorv1.ClusterCSIDriverList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(clustercsidriversResource, clustercsidriversKind, opts), &operatorv1.ClusterCSIDriverList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &operatorv1.ClusterCSIDriverList{ListMeta: obj.(*operatorv1.ClusterCSIDriverList).ListMeta} + for _, item := range obj.(*operatorv1.ClusterCSIDriverList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested clusterCSIDrivers. +func (c *FakeClusterCSIDrivers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(clustercsidriversResource, opts)) +} + +// Create takes the representation of a clusterCSIDriver and creates it. Returns the server's representation of the clusterCSIDriver, and an error, if there is any. 
+func (c *FakeClusterCSIDrivers) Create(ctx context.Context, clusterCSIDriver *operatorv1.ClusterCSIDriver, opts v1.CreateOptions) (result *operatorv1.ClusterCSIDriver, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(clustercsidriversResource, clusterCSIDriver), &operatorv1.ClusterCSIDriver{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.ClusterCSIDriver), err +} + +// Update takes the representation of a clusterCSIDriver and updates it. Returns the server's representation of the clusterCSIDriver, and an error, if there is any. +func (c *FakeClusterCSIDrivers) Update(ctx context.Context, clusterCSIDriver *operatorv1.ClusterCSIDriver, opts v1.UpdateOptions) (result *operatorv1.ClusterCSIDriver, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(clustercsidriversResource, clusterCSIDriver), &operatorv1.ClusterCSIDriver{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.ClusterCSIDriver), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeClusterCSIDrivers) UpdateStatus(ctx context.Context, clusterCSIDriver *operatorv1.ClusterCSIDriver, opts v1.UpdateOptions) (*operatorv1.ClusterCSIDriver, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(clustercsidriversResource, "status", clusterCSIDriver), &operatorv1.ClusterCSIDriver{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.ClusterCSIDriver), err +} + +// Delete takes name of the clusterCSIDriver and deletes it. Returns an error if one occurs. +func (c *FakeClusterCSIDrivers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(clustercsidriversResource, name), &operatorv1.ClusterCSIDriver{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeClusterCSIDrivers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(clustercsidriversResource, listOpts) + + _, err := c.Fake.Invokes(action, &operatorv1.ClusterCSIDriverList{}) + return err +} + +// Patch applies the patch and returns the patched clusterCSIDriver. +func (c *FakeClusterCSIDrivers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *operatorv1.ClusterCSIDriver, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(clustercsidriversResource, name, pt, data, subresources...), &operatorv1.ClusterCSIDriver{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.ClusterCSIDriver), err +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_config.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_config.go new file mode 100644 index 0000000000..a3d2248fba --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_config.go @@ -0,0 +1,117 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + operatorv1 "github.com/openshift/api/operator/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeConfigs implements ConfigInterface +type FakeConfigs struct { + Fake *FakeOperatorV1 +} + +var configsResource = schema.GroupVersionResource{Group: "operator.openshift.io", Version: "v1", Resource: "configs"} + +var configsKind = schema.GroupVersionKind{Group: "operator.openshift.io", Version: "v1", Kind: "Config"} + +// Get takes name of the config, and returns the corresponding config object, and an error if there is any. +func (c *FakeConfigs) Get(ctx context.Context, name string, options v1.GetOptions) (result *operatorv1.Config, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(configsResource, name), &operatorv1.Config{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.Config), err +} + +// List takes label and field selectors, and returns the list of Configs that match those selectors. +func (c *FakeConfigs) List(ctx context.Context, opts v1.ListOptions) (result *operatorv1.ConfigList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(configsResource, configsKind, opts), &operatorv1.ConfigList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &operatorv1.ConfigList{ListMeta: obj.(*operatorv1.ConfigList).ListMeta} + for _, item := range obj.(*operatorv1.ConfigList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested configs. +func (c *FakeConfigs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(configsResource, opts)) +} + +// Create takes the representation of a config and creates it. Returns the server's representation of the config, and an error, if there is any. +func (c *FakeConfigs) Create(ctx context.Context, config *operatorv1.Config, opts v1.CreateOptions) (result *operatorv1.Config, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(configsResource, config), &operatorv1.Config{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.Config), err +} + +// Update takes the representation of a config and updates it. Returns the server's representation of the config, and an error, if there is any. +func (c *FakeConfigs) Update(ctx context.Context, config *operatorv1.Config, opts v1.UpdateOptions) (result *operatorv1.Config, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(configsResource, config), &operatorv1.Config{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.Config), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeConfigs) UpdateStatus(ctx context.Context, config *operatorv1.Config, opts v1.UpdateOptions) (*operatorv1.Config, error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootUpdateSubresourceAction(configsResource, "status", config), &operatorv1.Config{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.Config), err +} + +// Delete takes name of the config and deletes it. Returns an error if one occurs. +func (c *FakeConfigs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(configsResource, name), &operatorv1.Config{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeConfigs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(configsResource, listOpts) + + _, err := c.Fake.Invokes(action, &operatorv1.ConfigList{}) + return err +} + +// Patch applies the patch and returns the patched config. +func (c *FakeConfigs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *operatorv1.Config, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(configsResource, name, pt, data, subresources...), &operatorv1.Config{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.Config), err +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_console.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_console.go new file mode 100644 index 0000000000..26461daeed --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_console.go @@ -0,0 +1,117 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + operatorv1 "github.com/openshift/api/operator/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeConsoles implements ConsoleInterface +type FakeConsoles struct { + Fake *FakeOperatorV1 +} + +var consolesResource = schema.GroupVersionResource{Group: "operator.openshift.io", Version: "v1", Resource: "consoles"} + +var consolesKind = schema.GroupVersionKind{Group: "operator.openshift.io", Version: "v1", Kind: "Console"} + +// Get takes name of the console, and returns the corresponding console object, and an error if there is any. +func (c *FakeConsoles) Get(ctx context.Context, name string, options v1.GetOptions) (result *operatorv1.Console, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(consolesResource, name), &operatorv1.Console{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.Console), err +} + +// List takes label and field selectors, and returns the list of Consoles that match those selectors. +func (c *FakeConsoles) List(ctx context.Context, opts v1.ListOptions) (result *operatorv1.ConsoleList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootListAction(consolesResource, consolesKind, opts), &operatorv1.ConsoleList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &operatorv1.ConsoleList{ListMeta: obj.(*operatorv1.ConsoleList).ListMeta} + for _, item := range obj.(*operatorv1.ConsoleList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested consoles. +func (c *FakeConsoles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(consolesResource, opts)) +} + +// Create takes the representation of a console and creates it. Returns the server's representation of the console, and an error, if there is any. +func (c *FakeConsoles) Create(ctx context.Context, console *operatorv1.Console, opts v1.CreateOptions) (result *operatorv1.Console, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(consolesResource, console), &operatorv1.Console{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.Console), err +} + +// Update takes the representation of a console and updates it. Returns the server's representation of the console, and an error, if there is any. +func (c *FakeConsoles) Update(ctx context.Context, console *operatorv1.Console, opts v1.UpdateOptions) (result *operatorv1.Console, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(consolesResource, console), &operatorv1.Console{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.Console), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeConsoles) UpdateStatus(ctx context.Context, console *operatorv1.Console, opts v1.UpdateOptions) (*operatorv1.Console, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(consolesResource, "status", console), &operatorv1.Console{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.Console), err +} + +// Delete takes name of the console and deletes it. Returns an error if one occurs. +func (c *FakeConsoles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(consolesResource, name), &operatorv1.Console{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeConsoles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(consolesResource, listOpts) + + _, err := c.Fake.Invokes(action, &operatorv1.ConsoleList{}) + return err +} + +// Patch applies the patch and returns the patched console. +func (c *FakeConsoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *operatorv1.Console, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceAction(consolesResource, name, pt, data, subresources...), &operatorv1.Console{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.Console), err +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_csisnapshotcontroller.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_csisnapshotcontroller.go new file mode 100644 index 0000000000..21fedf57b8 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_csisnapshotcontroller.go @@ -0,0 +1,117 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + operatorv1 "github.com/openshift/api/operator/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeCSISnapshotControllers implements CSISnapshotControllerInterface +type FakeCSISnapshotControllers struct { + Fake *FakeOperatorV1 +} + +var csisnapshotcontrollersResource = schema.GroupVersionResource{Group: "operator.openshift.io", Version: "v1", Resource: "csisnapshotcontrollers"} + +var csisnapshotcontrollersKind = schema.GroupVersionKind{Group: "operator.openshift.io", Version: "v1", Kind: "CSISnapshotController"} + +// Get takes name of the cSISnapshotController, and returns the corresponding cSISnapshotController object, and an error if there is any. +func (c *FakeCSISnapshotControllers) Get(ctx context.Context, name string, options v1.GetOptions) (result *operatorv1.CSISnapshotController, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(csisnapshotcontrollersResource, name), &operatorv1.CSISnapshotController{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.CSISnapshotController), err +} + +// List takes label and field selectors, and returns the list of CSISnapshotControllers that match those selectors. +func (c *FakeCSISnapshotControllers) List(ctx context.Context, opts v1.ListOptions) (result *operatorv1.CSISnapshotControllerList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(csisnapshotcontrollersResource, csisnapshotcontrollersKind, opts), &operatorv1.CSISnapshotControllerList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &operatorv1.CSISnapshotControllerList{ListMeta: obj.(*operatorv1.CSISnapshotControllerList).ListMeta} + for _, item := range obj.(*operatorv1.CSISnapshotControllerList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested cSISnapshotControllers. +func (c *FakeCSISnapshotControllers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(csisnapshotcontrollersResource, opts)) +} + +// Create takes the representation of a cSISnapshotController and creates it. Returns the server's representation of the cSISnapshotController, and an error, if there is any. 
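[Editor's aside, not part of the vendored patch: every typed method above funnels through Invokes, which records a testing.Action and serves the call from the fake clientset's in-memory object tracker. A minimal sketch of how these fakes are typically exercised from a unit test, assuming the vendored packages resolve at their canonical import paths; the package name, test name, and object name "cluster" are illustrative assumptions.]

package operator_test

import (
	"context"
	"testing"

	operatorv1 "github.com/openshift/api/operator/v1"
	fakeclient "github.com/openshift/client-go/operator/clientset/versioned/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestCSISnapshotControllerRoundTrip(t *testing.T) {
	// NewSimpleClientset seeds an in-memory tracker; Create below is
	// recorded as a RootCreateAction and Get is answered from the tracker.
	client := fakeclient.NewSimpleClientset()

	obj := &operatorv1.CSISnapshotController{ObjectMeta: metav1.ObjectMeta{Name: "cluster"}}
	if _, err := client.OperatorV1().CSISnapshotControllers().Create(context.TODO(), obj, metav1.CreateOptions{}); err != nil {
		t.Fatalf("create: %v", err)
	}
	got, err := client.OperatorV1().CSISnapshotControllers().Get(context.TODO(), "cluster", metav1.GetOptions{})
	if err != nil || got.Name != "cluster" {
		t.Fatalf("get: %+v, %v", got, err)
	}
}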
+func (c *FakeCSISnapshotControllers) Create(ctx context.Context, cSISnapshotController *operatorv1.CSISnapshotController, opts v1.CreateOptions) (result *operatorv1.CSISnapshotController, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(csisnapshotcontrollersResource, cSISnapshotController), &operatorv1.CSISnapshotController{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.CSISnapshotController), err +} + +// Update takes the representation of a cSISnapshotController and updates it. Returns the server's representation of the cSISnapshotController, and an error, if there is any. +func (c *FakeCSISnapshotControllers) Update(ctx context.Context, cSISnapshotController *operatorv1.CSISnapshotController, opts v1.UpdateOptions) (result *operatorv1.CSISnapshotController, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(csisnapshotcontrollersResource, cSISnapshotController), &operatorv1.CSISnapshotController{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.CSISnapshotController), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeCSISnapshotControllers) UpdateStatus(ctx context.Context, cSISnapshotController *operatorv1.CSISnapshotController, opts v1.UpdateOptions) (*operatorv1.CSISnapshotController, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(csisnapshotcontrollersResource, "status", cSISnapshotController), &operatorv1.CSISnapshotController{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.CSISnapshotController), err +} + +// Delete takes name of the cSISnapshotController and deletes it. Returns an error if one occurs. +func (c *FakeCSISnapshotControllers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(csisnapshotcontrollersResource, name), &operatorv1.CSISnapshotController{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeCSISnapshotControllers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(csisnapshotcontrollersResource, listOpts) + + _, err := c.Fake.Invokes(action, &operatorv1.CSISnapshotControllerList{}) + return err +} + +// Patch applies the patch and returns the patched cSISnapshotController. +func (c *FakeCSISnapshotControllers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *operatorv1.CSISnapshotController, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(csisnapshotcontrollersResource, name, pt, data, subresources...), &operatorv1.CSISnapshotController{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.CSISnapshotController), err +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_dns.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_dns.go new file mode 100644 index 0000000000..bacf117cfd --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_dns.go @@ -0,0 +1,117 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + operatorv1 "github.com/openshift/api/operator/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeDNSes implements DNSInterface +type FakeDNSes struct { + Fake *FakeOperatorV1 +} + +var dnsesResource = schema.GroupVersionResource{Group: "operator.openshift.io", Version: "v1", Resource: "dnses"} + +var dnsesKind = schema.GroupVersionKind{Group: "operator.openshift.io", Version: "v1", Kind: "DNS"} + +// Get takes name of the dNS, and returns the corresponding dNS object, and an error if there is any. +func (c *FakeDNSes) Get(ctx context.Context, name string, options v1.GetOptions) (result *operatorv1.DNS, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(dnsesResource, name), &operatorv1.DNS{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.DNS), err +} + +// List takes label and field selectors, and returns the list of DNSes that match those selectors. +func (c *FakeDNSes) List(ctx context.Context, opts v1.ListOptions) (result *operatorv1.DNSList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(dnsesResource, dnsesKind, opts), &operatorv1.DNSList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &operatorv1.DNSList{ListMeta: obj.(*operatorv1.DNSList).ListMeta} + for _, item := range obj.(*operatorv1.DNSList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested dNSes. +func (c *FakeDNSes) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(dnsesResource, opts)) +} + +// Create takes the representation of a dNS and creates it. Returns the server's representation of the dNS, and an error, if there is any. +func (c *FakeDNSes) Create(ctx context.Context, dNS *operatorv1.DNS, opts v1.CreateOptions) (result *operatorv1.DNS, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(dnsesResource, dNS), &operatorv1.DNS{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.DNS), err +} + +// Update takes the representation of a dNS and updates it. Returns the server's representation of the dNS, and an error, if there is any. +func (c *FakeDNSes) Update(ctx context.Context, dNS *operatorv1.DNS, opts v1.UpdateOptions) (result *operatorv1.DNS, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(dnsesResource, dNS), &operatorv1.DNS{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.DNS), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeDNSes) UpdateStatus(ctx context.Context, dNS *operatorv1.DNS, opts v1.UpdateOptions) (*operatorv1.DNS, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(dnsesResource, "status", dNS), &operatorv1.DNS{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.DNS), err +} + +// Delete takes name of the dNS and deletes it. Returns an error if one occurs. 
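[Editor's aside, not part of the vendored patch: because the embedded testing.Fake records every dispatched action, tests can assert on exactly which verbs and resources a code path issued. A minimal sketch under the same import assumptions as the earlier example:]

package operator_test

import (
	"context"
	"testing"

	fakeclient "github.com/openshift/client-go/operator/clientset/versioned/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestDNSDeleteRecordsAction(t *testing.T) {
	client := fakeclient.NewSimpleClientset()
	// Deleting an object that was never seeded returns a NotFound error,
	// but the action is still recorded by the embedded testing.Fake.
	_ = client.OperatorV1().DNSes().Delete(context.TODO(), "default", metav1.DeleteOptions{})

	actions := client.Actions()
	if len(actions) != 1 || actions[0].GetVerb() != "delete" || actions[0].GetResource().Resource != "dnses" {
		t.Fatalf("unexpected recorded actions: %v", actions)
	}
}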
+func (c *FakeDNSes) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(dnsesResource, name), &operatorv1.DNS{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeDNSes) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(dnsesResource, listOpts) + + _, err := c.Fake.Invokes(action, &operatorv1.DNSList{}) + return err +} + +// Patch applies the patch and returns the patched dNS. +func (c *FakeDNSes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *operatorv1.DNS, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(dnsesResource, name, pt, data, subresources...), &operatorv1.DNS{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.DNS), err +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_etcd.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_etcd.go new file mode 100644 index 0000000000..30b446df39 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_etcd.go @@ -0,0 +1,117 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + operatorv1 "github.com/openshift/api/operator/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeEtcds implements EtcdInterface +type FakeEtcds struct { + Fake *FakeOperatorV1 +} + +var etcdsResource = schema.GroupVersionResource{Group: "operator.openshift.io", Version: "v1", Resource: "etcds"} + +var etcdsKind = schema.GroupVersionKind{Group: "operator.openshift.io", Version: "v1", Kind: "Etcd"} + +// Get takes name of the etcd, and returns the corresponding etcd object, and an error if there is any. +func (c *FakeEtcds) Get(ctx context.Context, name string, options v1.GetOptions) (result *operatorv1.Etcd, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(etcdsResource, name), &operatorv1.Etcd{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.Etcd), err +} + +// List takes label and field selectors, and returns the list of Etcds that match those selectors. +func (c *FakeEtcds) List(ctx context.Context, opts v1.ListOptions) (result *operatorv1.EtcdList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(etcdsResource, etcdsKind, opts), &operatorv1.EtcdList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &operatorv1.EtcdList{ListMeta: obj.(*operatorv1.EtcdList).ListMeta} + for _, item := range obj.(*operatorv1.EtcdList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested etcds. +func (c *FakeEtcds) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. 
+ InvokesWatch(testing.NewRootWatchAction(etcdsResource, opts)) +} + +// Create takes the representation of a etcd and creates it. Returns the server's representation of the etcd, and an error, if there is any. +func (c *FakeEtcds) Create(ctx context.Context, etcd *operatorv1.Etcd, opts v1.CreateOptions) (result *operatorv1.Etcd, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(etcdsResource, etcd), &operatorv1.Etcd{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.Etcd), err +} + +// Update takes the representation of a etcd and updates it. Returns the server's representation of the etcd, and an error, if there is any. +func (c *FakeEtcds) Update(ctx context.Context, etcd *operatorv1.Etcd, opts v1.UpdateOptions) (result *operatorv1.Etcd, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(etcdsResource, etcd), &operatorv1.Etcd{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.Etcd), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeEtcds) UpdateStatus(ctx context.Context, etcd *operatorv1.Etcd, opts v1.UpdateOptions) (*operatorv1.Etcd, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(etcdsResource, "status", etcd), &operatorv1.Etcd{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.Etcd), err +} + +// Delete takes name of the etcd and deletes it. Returns an error if one occurs. +func (c *FakeEtcds) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(etcdsResource, name), &operatorv1.Etcd{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeEtcds) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(etcdsResource, listOpts) + + _, err := c.Fake.Invokes(action, &operatorv1.EtcdList{}) + return err +} + +// Patch applies the patch and returns the patched etcd. +func (c *FakeEtcds) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *operatorv1.Etcd, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(etcdsResource, name, pt, data, subresources...), &operatorv1.Etcd{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.Etcd), err +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_ingresscontroller.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_ingresscontroller.go new file mode 100644 index 0000000000..d1ec59b4e3 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_ingresscontroller.go @@ -0,0 +1,126 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + operatorv1 "github.com/openshift/api/operator/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeIngressControllers implements IngressControllerInterface +type FakeIngressControllers struct { + Fake *FakeOperatorV1 + ns string +} + +var ingresscontrollersResource = schema.GroupVersionResource{Group: "operator.openshift.io", Version: "v1", Resource: "ingresscontrollers"} + +var ingresscontrollersKind = schema.GroupVersionKind{Group: "operator.openshift.io", Version: "v1", Kind: "IngressController"} + +// Get takes name of the ingressController, and returns the corresponding ingressController object, and an error if there is any. +func (c *FakeIngressControllers) Get(ctx context.Context, name string, options v1.GetOptions) (result *operatorv1.IngressController, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(ingresscontrollersResource, c.ns, name), &operatorv1.IngressController{}) + + if obj == nil { + return nil, err + } + return obj.(*operatorv1.IngressController), err +} + +// List takes label and field selectors, and returns the list of IngressControllers that match those selectors. +func (c *FakeIngressControllers) List(ctx context.Context, opts v1.ListOptions) (result *operatorv1.IngressControllerList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(ingresscontrollersResource, ingresscontrollersKind, c.ns, opts), &operatorv1.IngressControllerList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &operatorv1.IngressControllerList{ListMeta: obj.(*operatorv1.IngressControllerList).ListMeta} + for _, item := range obj.(*operatorv1.IngressControllerList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested ingressControllers. +func (c *FakeIngressControllers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(ingresscontrollersResource, c.ns, opts)) + +} + +// Create takes the representation of a ingressController and creates it. Returns the server's representation of the ingressController, and an error, if there is any. +func (c *FakeIngressControllers) Create(ctx context.Context, ingressController *operatorv1.IngressController, opts v1.CreateOptions) (result *operatorv1.IngressController, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(ingresscontrollersResource, c.ns, ingressController), &operatorv1.IngressController{}) + + if obj == nil { + return nil, err + } + return obj.(*operatorv1.IngressController), err +} + +// Update takes the representation of a ingressController and updates it. Returns the server's representation of the ingressController, and an error, if there is any. +func (c *FakeIngressControllers) Update(ctx context.Context, ingressController *operatorv1.IngressController, opts v1.UpdateOptions) (result *operatorv1.IngressController, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewUpdateAction(ingresscontrollersResource, c.ns, ingressController), &operatorv1.IngressController{}) + + if obj == nil { + return nil, err + } + return obj.(*operatorv1.IngressController), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeIngressControllers) UpdateStatus(ctx context.Context, ingressController *operatorv1.IngressController, opts v1.UpdateOptions) (*operatorv1.IngressController, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(ingresscontrollersResource, "status", c.ns, ingressController), &operatorv1.IngressController{}) + + if obj == nil { + return nil, err + } + return obj.(*operatorv1.IngressController), err +} + +// Delete takes name of the ingressController and deletes it. Returns an error if one occurs. +func (c *FakeIngressControllers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(ingresscontrollersResource, c.ns, name), &operatorv1.IngressController{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeIngressControllers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(ingresscontrollersResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &operatorv1.IngressControllerList{}) + return err +} + +// Patch applies the patch and returns the patched ingressController. +func (c *FakeIngressControllers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *operatorv1.IngressController, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(ingresscontrollersResource, c.ns, name, pt, data, subresources...), &operatorv1.IngressController{}) + + if obj == nil { + return nil, err + } + return obj.(*operatorv1.IngressController), err +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_kubeapiserver.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_kubeapiserver.go new file mode 100644 index 0000000000..dc7becf1d5 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_kubeapiserver.go @@ -0,0 +1,117 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + operatorv1 "github.com/openshift/api/operator/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeKubeAPIServers implements KubeAPIServerInterface +type FakeKubeAPIServers struct { + Fake *FakeOperatorV1 +} + +var kubeapiserversResource = schema.GroupVersionResource{Group: "operator.openshift.io", Version: "v1", Resource: "kubeapiservers"} + +var kubeapiserversKind = schema.GroupVersionKind{Group: "operator.openshift.io", Version: "v1", Kind: "KubeAPIServer"} + +// Get takes name of the kubeAPIServer, and returns the corresponding kubeAPIServer object, and an error if there is any. 
+func (c *FakeKubeAPIServers) Get(ctx context.Context, name string, options v1.GetOptions) (result *operatorv1.KubeAPIServer, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(kubeapiserversResource, name), &operatorv1.KubeAPIServer{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.KubeAPIServer), err +} + +// List takes label and field selectors, and returns the list of KubeAPIServers that match those selectors. +func (c *FakeKubeAPIServers) List(ctx context.Context, opts v1.ListOptions) (result *operatorv1.KubeAPIServerList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(kubeapiserversResource, kubeapiserversKind, opts), &operatorv1.KubeAPIServerList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &operatorv1.KubeAPIServerList{ListMeta: obj.(*operatorv1.KubeAPIServerList).ListMeta} + for _, item := range obj.(*operatorv1.KubeAPIServerList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested kubeAPIServers. +func (c *FakeKubeAPIServers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(kubeapiserversResource, opts)) +} + +// Create takes the representation of a kubeAPIServer and creates it. Returns the server's representation of the kubeAPIServer, and an error, if there is any. +func (c *FakeKubeAPIServers) Create(ctx context.Context, kubeAPIServer *operatorv1.KubeAPIServer, opts v1.CreateOptions) (result *operatorv1.KubeAPIServer, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(kubeapiserversResource, kubeAPIServer), &operatorv1.KubeAPIServer{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.KubeAPIServer), err +} + +// Update takes the representation of a kubeAPIServer and updates it. Returns the server's representation of the kubeAPIServer, and an error, if there is any. +func (c *FakeKubeAPIServers) Update(ctx context.Context, kubeAPIServer *operatorv1.KubeAPIServer, opts v1.UpdateOptions) (result *operatorv1.KubeAPIServer, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(kubeapiserversResource, kubeAPIServer), &operatorv1.KubeAPIServer{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.KubeAPIServer), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeKubeAPIServers) UpdateStatus(ctx context.Context, kubeAPIServer *operatorv1.KubeAPIServer, opts v1.UpdateOptions) (*operatorv1.KubeAPIServer, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(kubeapiserversResource, "status", kubeAPIServer), &operatorv1.KubeAPIServer{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.KubeAPIServer), err +} + +// Delete takes name of the kubeAPIServer and deletes it. Returns an error if one occurs. +func (c *FakeKubeAPIServers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(kubeapiserversResource, name), &operatorv1.KubeAPIServer{}) + return err +} + +// DeleteCollection deletes a collection of objects. 
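[Editor's aside, not part of the vendored patch: the generated UpdateStatus comment above refers to client-gen's marker system, where tags on the API type drive the shape of the generated client. The type below is hypothetical, shown only to illustrate those markers; the real tags live on the types in openshift/api.]

package example

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

// Hypothetical API type. +genclient:nonNamespaced is what produces the
// Root* actions used throughout this file; the presence of a Status member
// without +genclient:noStatus is why UpdateStatus is generated.
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type Example struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   ExampleSpec   `json:"spec"`
	Status ExampleStatus `json:"status"`
}

type ExampleSpec struct{}
type ExampleStatus struct{}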
+func (c *FakeKubeAPIServers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(kubeapiserversResource, listOpts) + + _, err := c.Fake.Invokes(action, &operatorv1.KubeAPIServerList{}) + return err +} + +// Patch applies the patch and returns the patched kubeAPIServer. +func (c *FakeKubeAPIServers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *operatorv1.KubeAPIServer, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(kubeapiserversResource, name, pt, data, subresources...), &operatorv1.KubeAPIServer{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.KubeAPIServer), err +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_kubecontrollermanager.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_kubecontrollermanager.go new file mode 100644 index 0000000000..e5bc5b0608 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_kubecontrollermanager.go @@ -0,0 +1,117 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + operatorv1 "github.com/openshift/api/operator/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeKubeControllerManagers implements KubeControllerManagerInterface +type FakeKubeControllerManagers struct { + Fake *FakeOperatorV1 +} + +var kubecontrollermanagersResource = schema.GroupVersionResource{Group: "operator.openshift.io", Version: "v1", Resource: "kubecontrollermanagers"} + +var kubecontrollermanagersKind = schema.GroupVersionKind{Group: "operator.openshift.io", Version: "v1", Kind: "KubeControllerManager"} + +// Get takes name of the kubeControllerManager, and returns the corresponding kubeControllerManager object, and an error if there is any. +func (c *FakeKubeControllerManagers) Get(ctx context.Context, name string, options v1.GetOptions) (result *operatorv1.KubeControllerManager, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(kubecontrollermanagersResource, name), &operatorv1.KubeControllerManager{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.KubeControllerManager), err +} + +// List takes label and field selectors, and returns the list of KubeControllerManagers that match those selectors. +func (c *FakeKubeControllerManagers) List(ctx context.Context, opts v1.ListOptions) (result *operatorv1.KubeControllerManagerList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootListAction(kubecontrollermanagersResource, kubecontrollermanagersKind, opts), &operatorv1.KubeControllerManagerList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &operatorv1.KubeControllerManagerList{ListMeta: obj.(*operatorv1.KubeControllerManagerList).ListMeta} + for _, item := range obj.(*operatorv1.KubeControllerManagerList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested kubeControllerManagers. +func (c *FakeKubeControllerManagers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(kubecontrollermanagersResource, opts)) +} + +// Create takes the representation of a kubeControllerManager and creates it. Returns the server's representation of the kubeControllerManager, and an error, if there is any. +func (c *FakeKubeControllerManagers) Create(ctx context.Context, kubeControllerManager *operatorv1.KubeControllerManager, opts v1.CreateOptions) (result *operatorv1.KubeControllerManager, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(kubecontrollermanagersResource, kubeControllerManager), &operatorv1.KubeControllerManager{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.KubeControllerManager), err +} + +// Update takes the representation of a kubeControllerManager and updates it. Returns the server's representation of the kubeControllerManager, and an error, if there is any. +func (c *FakeKubeControllerManagers) Update(ctx context.Context, kubeControllerManager *operatorv1.KubeControllerManager, opts v1.UpdateOptions) (result *operatorv1.KubeControllerManager, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(kubecontrollermanagersResource, kubeControllerManager), &operatorv1.KubeControllerManager{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.KubeControllerManager), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeKubeControllerManagers) UpdateStatus(ctx context.Context, kubeControllerManager *operatorv1.KubeControllerManager, opts v1.UpdateOptions) (*operatorv1.KubeControllerManager, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(kubecontrollermanagersResource, "status", kubeControllerManager), &operatorv1.KubeControllerManager{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.KubeControllerManager), err +} + +// Delete takes name of the kubeControllerManager and deletes it. Returns an error if one occurs. +func (c *FakeKubeControllerManagers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(kubecontrollermanagersResource, name), &operatorv1.KubeControllerManager{}) + return err +} + +// DeleteCollection deletes a collection of objects. 
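[Editor's aside, not part of the vendored patch: the actions that Invokes dispatches can be intercepted with reactors, which is how tests simulate server-side failures against these fakes. A minimal sketch under the same import assumptions as the earlier examples:]

package operator_test

import (
	"context"
	"errors"
	"testing"

	fakeclient "github.com/openshift/client-go/operator/clientset/versioned/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	ktesting "k8s.io/client-go/testing"
)

func TestKubeControllerManagerGetFailure(t *testing.T) {
	client := fakeclient.NewSimpleClientset()
	// Prepending a reactor for ("get", "kubecontrollermanagers") short-circuits
	// the default tracker reaction and returns an injected error instead.
	client.PrependReactor("get", "kubecontrollermanagers", func(action ktesting.Action) (bool, runtime.Object, error) {
		return true, nil, errors.New("simulated outage")
	})

	if _, err := client.OperatorV1().KubeControllerManagers().Get(context.TODO(), "cluster", metav1.GetOptions{}); err == nil {
		t.Fatal("expected the injected error")
	}
}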
+func (c *FakeKubeControllerManagers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(kubecontrollermanagersResource, listOpts) + + _, err := c.Fake.Invokes(action, &operatorv1.KubeControllerManagerList{}) + return err +} + +// Patch applies the patch and returns the patched kubeControllerManager. +func (c *FakeKubeControllerManagers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *operatorv1.KubeControllerManager, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(kubecontrollermanagersResource, name, pt, data, subresources...), &operatorv1.KubeControllerManager{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.KubeControllerManager), err +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_kubescheduler.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_kubescheduler.go new file mode 100644 index 0000000000..d1c2bdd741 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_kubescheduler.go @@ -0,0 +1,117 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + operatorv1 "github.com/openshift/api/operator/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeKubeSchedulers implements KubeSchedulerInterface +type FakeKubeSchedulers struct { + Fake *FakeOperatorV1 +} + +var kubeschedulersResource = schema.GroupVersionResource{Group: "operator.openshift.io", Version: "v1", Resource: "kubeschedulers"} + +var kubeschedulersKind = schema.GroupVersionKind{Group: "operator.openshift.io", Version: "v1", Kind: "KubeScheduler"} + +// Get takes name of the kubeScheduler, and returns the corresponding kubeScheduler object, and an error if there is any. +func (c *FakeKubeSchedulers) Get(ctx context.Context, name string, options v1.GetOptions) (result *operatorv1.KubeScheduler, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(kubeschedulersResource, name), &operatorv1.KubeScheduler{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.KubeScheduler), err +} + +// List takes label and field selectors, and returns the list of KubeSchedulers that match those selectors. +func (c *FakeKubeSchedulers) List(ctx context.Context, opts v1.ListOptions) (result *operatorv1.KubeSchedulerList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(kubeschedulersResource, kubeschedulersKind, opts), &operatorv1.KubeSchedulerList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &operatorv1.KubeSchedulerList{ListMeta: obj.(*operatorv1.KubeSchedulerList).ListMeta} + for _, item := range obj.(*operatorv1.KubeSchedulerList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested kubeSchedulers. 
+func (c *FakeKubeSchedulers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(kubeschedulersResource, opts)) +} + +// Create takes the representation of a kubeScheduler and creates it. Returns the server's representation of the kubeScheduler, and an error, if there is any. +func (c *FakeKubeSchedulers) Create(ctx context.Context, kubeScheduler *operatorv1.KubeScheduler, opts v1.CreateOptions) (result *operatorv1.KubeScheduler, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(kubeschedulersResource, kubeScheduler), &operatorv1.KubeScheduler{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.KubeScheduler), err +} + +// Update takes the representation of a kubeScheduler and updates it. Returns the server's representation of the kubeScheduler, and an error, if there is any. +func (c *FakeKubeSchedulers) Update(ctx context.Context, kubeScheduler *operatorv1.KubeScheduler, opts v1.UpdateOptions) (result *operatorv1.KubeScheduler, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(kubeschedulersResource, kubeScheduler), &operatorv1.KubeScheduler{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.KubeScheduler), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeKubeSchedulers) UpdateStatus(ctx context.Context, kubeScheduler *operatorv1.KubeScheduler, opts v1.UpdateOptions) (*operatorv1.KubeScheduler, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(kubeschedulersResource, "status", kubeScheduler), &operatorv1.KubeScheduler{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.KubeScheduler), err +} + +// Delete takes name of the kubeScheduler and deletes it. Returns an error if one occurs. +func (c *FakeKubeSchedulers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(kubeschedulersResource, name), &operatorv1.KubeScheduler{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeKubeSchedulers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(kubeschedulersResource, listOpts) + + _, err := c.Fake.Invokes(action, &operatorv1.KubeSchedulerList{}) + return err +} + +// Patch applies the patch and returns the patched kubeScheduler. +func (c *FakeKubeSchedulers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *operatorv1.KubeScheduler, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceAction(kubeschedulersResource, name, pt, data, subresources...), &operatorv1.KubeScheduler{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.KubeScheduler), err +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_kubestorageversionmigrator.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_kubestorageversionmigrator.go new file mode 100644 index 0000000000..6fa99604d0 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_kubestorageversionmigrator.go @@ -0,0 +1,117 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + operatorv1 "github.com/openshift/api/operator/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeKubeStorageVersionMigrators implements KubeStorageVersionMigratorInterface +type FakeKubeStorageVersionMigrators struct { + Fake *FakeOperatorV1 +} + +var kubestorageversionmigratorsResource = schema.GroupVersionResource{Group: "operator.openshift.io", Version: "v1", Resource: "kubestorageversionmigrators"} + +var kubestorageversionmigratorsKind = schema.GroupVersionKind{Group: "operator.openshift.io", Version: "v1", Kind: "KubeStorageVersionMigrator"} + +// Get takes name of the kubeStorageVersionMigrator, and returns the corresponding kubeStorageVersionMigrator object, and an error if there is any. +func (c *FakeKubeStorageVersionMigrators) Get(ctx context.Context, name string, options v1.GetOptions) (result *operatorv1.KubeStorageVersionMigrator, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(kubestorageversionmigratorsResource, name), &operatorv1.KubeStorageVersionMigrator{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.KubeStorageVersionMigrator), err +} + +// List takes label and field selectors, and returns the list of KubeStorageVersionMigrators that match those selectors. +func (c *FakeKubeStorageVersionMigrators) List(ctx context.Context, opts v1.ListOptions) (result *operatorv1.KubeStorageVersionMigratorList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(kubestorageversionmigratorsResource, kubestorageversionmigratorsKind, opts), &operatorv1.KubeStorageVersionMigratorList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &operatorv1.KubeStorageVersionMigratorList{ListMeta: obj.(*operatorv1.KubeStorageVersionMigratorList).ListMeta} + for _, item := range obj.(*operatorv1.KubeStorageVersionMigratorList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested kubeStorageVersionMigrators. +func (c *FakeKubeStorageVersionMigrators) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(kubestorageversionmigratorsResource, opts)) +} + +// Create takes the representation of a kubeStorageVersionMigrator and creates it. 
+// Returns the server's representation of the kubeStorageVersionMigrator, and an error, if there is any.
+func (c *FakeKubeStorageVersionMigrators) Create(ctx context.Context, kubeStorageVersionMigrator *operatorv1.KubeStorageVersionMigrator, opts v1.CreateOptions) (result *operatorv1.KubeStorageVersionMigrator, err error) {
+	obj, err := c.Fake.
+		Invokes(testing.NewRootCreateAction(kubestorageversionmigratorsResource, kubeStorageVersionMigrator), &operatorv1.KubeStorageVersionMigrator{})
+	if obj == nil {
+		return nil, err
+	}
+	return obj.(*operatorv1.KubeStorageVersionMigrator), err
+}
+
+// Update takes the representation of a kubeStorageVersionMigrator and updates it. Returns the server's representation of the kubeStorageVersionMigrator, and an error, if there is any.
+func (c *FakeKubeStorageVersionMigrators) Update(ctx context.Context, kubeStorageVersionMigrator *operatorv1.KubeStorageVersionMigrator, opts v1.UpdateOptions) (result *operatorv1.KubeStorageVersionMigrator, err error) {
+	obj, err := c.Fake.
+		Invokes(testing.NewRootUpdateAction(kubestorageversionmigratorsResource, kubeStorageVersionMigrator), &operatorv1.KubeStorageVersionMigrator{})
+	if obj == nil {
+		return nil, err
+	}
+	return obj.(*operatorv1.KubeStorageVersionMigrator), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeKubeStorageVersionMigrators) UpdateStatus(ctx context.Context, kubeStorageVersionMigrator *operatorv1.KubeStorageVersionMigrator, opts v1.UpdateOptions) (*operatorv1.KubeStorageVersionMigrator, error) {
+	obj, err := c.Fake.
+		Invokes(testing.NewRootUpdateSubresourceAction(kubestorageversionmigratorsResource, "status", kubeStorageVersionMigrator), &operatorv1.KubeStorageVersionMigrator{})
+	if obj == nil {
+		return nil, err
+	}
+	return obj.(*operatorv1.KubeStorageVersionMigrator), err
+}
+
+// Delete takes name of the kubeStorageVersionMigrator and deletes it. Returns an error if one occurs.
+func (c *FakeKubeStorageVersionMigrators) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+	_, err := c.Fake.
+		Invokes(testing.NewRootDeleteAction(kubestorageversionmigratorsResource, name), &operatorv1.KubeStorageVersionMigrator{})
+	return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeKubeStorageVersionMigrators) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+	action := testing.NewRootDeleteCollectionAction(kubestorageversionmigratorsResource, listOpts)
+
+	_, err := c.Fake.Invokes(action, &operatorv1.KubeStorageVersionMigratorList{})
+	return err
+}
+
+// Patch applies the patch and returns the patched kubeStorageVersionMigrator.
+func (c *FakeKubeStorageVersionMigrators) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *operatorv1.KubeStorageVersionMigrator, err error) {
+	obj, err := c.Fake.
+ Invokes(testing.NewRootPatchSubresourceAction(kubestorageversionmigratorsResource, name, pt, data, subresources...), &operatorv1.KubeStorageVersionMigrator{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.KubeStorageVersionMigrator), err +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_network.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_network.go new file mode 100644 index 0000000000..6e54f5b8da --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_network.go @@ -0,0 +1,117 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + operatorv1 "github.com/openshift/api/operator/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeNetworks implements NetworkInterface +type FakeNetworks struct { + Fake *FakeOperatorV1 +} + +var networksResource = schema.GroupVersionResource{Group: "operator.openshift.io", Version: "v1", Resource: "networks"} + +var networksKind = schema.GroupVersionKind{Group: "operator.openshift.io", Version: "v1", Kind: "Network"} + +// Get takes name of the network, and returns the corresponding network object, and an error if there is any. +func (c *FakeNetworks) Get(ctx context.Context, name string, options v1.GetOptions) (result *operatorv1.Network, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(networksResource, name), &operatorv1.Network{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.Network), err +} + +// List takes label and field selectors, and returns the list of Networks that match those selectors. +func (c *FakeNetworks) List(ctx context.Context, opts v1.ListOptions) (result *operatorv1.NetworkList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(networksResource, networksKind, opts), &operatorv1.NetworkList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &operatorv1.NetworkList{ListMeta: obj.(*operatorv1.NetworkList).ListMeta} + for _, item := range obj.(*operatorv1.NetworkList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested networks. +func (c *FakeNetworks) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(networksResource, opts)) +} + +// Create takes the representation of a network and creates it. Returns the server's representation of the network, and an error, if there is any. +func (c *FakeNetworks) Create(ctx context.Context, network *operatorv1.Network, opts v1.CreateOptions) (result *operatorv1.Network, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(networksResource, network), &operatorv1.Network{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.Network), err +} + +// Update takes the representation of a network and updates it. Returns the server's representation of the network, and an error, if there is any. 
+func (c *FakeNetworks) Update(ctx context.Context, network *operatorv1.Network, opts v1.UpdateOptions) (result *operatorv1.Network, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(networksResource, network), &operatorv1.Network{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.Network), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeNetworks) UpdateStatus(ctx context.Context, network *operatorv1.Network, opts v1.UpdateOptions) (*operatorv1.Network, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(networksResource, "status", network), &operatorv1.Network{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.Network), err +} + +// Delete takes name of the network and deletes it. Returns an error if one occurs. +func (c *FakeNetworks) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(networksResource, name), &operatorv1.Network{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeNetworks) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(networksResource, listOpts) + + _, err := c.Fake.Invokes(action, &operatorv1.NetworkList{}) + return err +} + +// Patch applies the patch and returns the patched network. +func (c *FakeNetworks) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *operatorv1.Network, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(networksResource, name, pt, data, subresources...), &operatorv1.Network{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.Network), err +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_openshiftapiserver.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_openshiftapiserver.go new file mode 100644 index 0000000000..92a620953f --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_openshiftapiserver.go @@ -0,0 +1,117 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + operatorv1 "github.com/openshift/api/operator/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeOpenShiftAPIServers implements OpenShiftAPIServerInterface +type FakeOpenShiftAPIServers struct { + Fake *FakeOperatorV1 +} + +var openshiftapiserversResource = schema.GroupVersionResource{Group: "operator.openshift.io", Version: "v1", Resource: "openshiftapiservers"} + +var openshiftapiserversKind = schema.GroupVersionKind{Group: "operator.openshift.io", Version: "v1", Kind: "OpenShiftAPIServer"} + +// Get takes name of the openShiftAPIServer, and returns the corresponding openShiftAPIServer object, and an error if there is any. +func (c *FakeOpenShiftAPIServers) Get(ctx context.Context, name string, options v1.GetOptions) (result *operatorv1.OpenShiftAPIServer, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootGetAction(openshiftapiserversResource, name), &operatorv1.OpenShiftAPIServer{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.OpenShiftAPIServer), err +} + +// List takes label and field selectors, and returns the list of OpenShiftAPIServers that match those selectors. +func (c *FakeOpenShiftAPIServers) List(ctx context.Context, opts v1.ListOptions) (result *operatorv1.OpenShiftAPIServerList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(openshiftapiserversResource, openshiftapiserversKind, opts), &operatorv1.OpenShiftAPIServerList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &operatorv1.OpenShiftAPIServerList{ListMeta: obj.(*operatorv1.OpenShiftAPIServerList).ListMeta} + for _, item := range obj.(*operatorv1.OpenShiftAPIServerList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested openShiftAPIServers. +func (c *FakeOpenShiftAPIServers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(openshiftapiserversResource, opts)) +} + +// Create takes the representation of a openShiftAPIServer and creates it. Returns the server's representation of the openShiftAPIServer, and an error, if there is any. +func (c *FakeOpenShiftAPIServers) Create(ctx context.Context, openShiftAPIServer *operatorv1.OpenShiftAPIServer, opts v1.CreateOptions) (result *operatorv1.OpenShiftAPIServer, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(openshiftapiserversResource, openShiftAPIServer), &operatorv1.OpenShiftAPIServer{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.OpenShiftAPIServer), err +} + +// Update takes the representation of a openShiftAPIServer and updates it. Returns the server's representation of the openShiftAPIServer, and an error, if there is any. +func (c *FakeOpenShiftAPIServers) Update(ctx context.Context, openShiftAPIServer *operatorv1.OpenShiftAPIServer, opts v1.UpdateOptions) (result *operatorv1.OpenShiftAPIServer, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(openshiftapiserversResource, openShiftAPIServer), &operatorv1.OpenShiftAPIServer{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.OpenShiftAPIServer), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeOpenShiftAPIServers) UpdateStatus(ctx context.Context, openShiftAPIServer *operatorv1.OpenShiftAPIServer, opts v1.UpdateOptions) (*operatorv1.OpenShiftAPIServer, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(openshiftapiserversResource, "status", openShiftAPIServer), &operatorv1.OpenShiftAPIServer{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.OpenShiftAPIServer), err +} + +// Delete takes name of the openShiftAPIServer and deletes it. Returns an error if one occurs. +func (c *FakeOpenShiftAPIServers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. 
+ Invokes(testing.NewRootDeleteAction(openshiftapiserversResource, name), &operatorv1.OpenShiftAPIServer{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeOpenShiftAPIServers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(openshiftapiserversResource, listOpts) + + _, err := c.Fake.Invokes(action, &operatorv1.OpenShiftAPIServerList{}) + return err +} + +// Patch applies the patch and returns the patched openShiftAPIServer. +func (c *FakeOpenShiftAPIServers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *operatorv1.OpenShiftAPIServer, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(openshiftapiserversResource, name, pt, data, subresources...), &operatorv1.OpenShiftAPIServer{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.OpenShiftAPIServer), err +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_openshiftcontrollermanager.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_openshiftcontrollermanager.go new file mode 100644 index 0000000000..e59c55fe1f --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_openshiftcontrollermanager.go @@ -0,0 +1,117 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + operatorv1 "github.com/openshift/api/operator/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeOpenShiftControllerManagers implements OpenShiftControllerManagerInterface +type FakeOpenShiftControllerManagers struct { + Fake *FakeOperatorV1 +} + +var openshiftcontrollermanagersResource = schema.GroupVersionResource{Group: "operator.openshift.io", Version: "v1", Resource: "openshiftcontrollermanagers"} + +var openshiftcontrollermanagersKind = schema.GroupVersionKind{Group: "operator.openshift.io", Version: "v1", Kind: "OpenShiftControllerManager"} + +// Get takes name of the openShiftControllerManager, and returns the corresponding openShiftControllerManager object, and an error if there is any. +func (c *FakeOpenShiftControllerManagers) Get(ctx context.Context, name string, options v1.GetOptions) (result *operatorv1.OpenShiftControllerManager, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(openshiftcontrollermanagersResource, name), &operatorv1.OpenShiftControllerManager{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.OpenShiftControllerManager), err +} + +// List takes label and field selectors, and returns the list of OpenShiftControllerManagers that match those selectors. +func (c *FakeOpenShiftControllerManagers) List(ctx context.Context, opts v1.ListOptions) (result *operatorv1.OpenShiftControllerManagerList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootListAction(openshiftcontrollermanagersResource, openshiftcontrollermanagersKind, opts), &operatorv1.OpenShiftControllerManagerList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &operatorv1.OpenShiftControllerManagerList{ListMeta: obj.(*operatorv1.OpenShiftControllerManagerList).ListMeta} + for _, item := range obj.(*operatorv1.OpenShiftControllerManagerList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested openShiftControllerManagers. +func (c *FakeOpenShiftControllerManagers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(openshiftcontrollermanagersResource, opts)) +} + +// Create takes the representation of a openShiftControllerManager and creates it. Returns the server's representation of the openShiftControllerManager, and an error, if there is any. +func (c *FakeOpenShiftControllerManagers) Create(ctx context.Context, openShiftControllerManager *operatorv1.OpenShiftControllerManager, opts v1.CreateOptions) (result *operatorv1.OpenShiftControllerManager, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(openshiftcontrollermanagersResource, openShiftControllerManager), &operatorv1.OpenShiftControllerManager{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.OpenShiftControllerManager), err +} + +// Update takes the representation of a openShiftControllerManager and updates it. Returns the server's representation of the openShiftControllerManager, and an error, if there is any. +func (c *FakeOpenShiftControllerManagers) Update(ctx context.Context, openShiftControllerManager *operatorv1.OpenShiftControllerManager, opts v1.UpdateOptions) (result *operatorv1.OpenShiftControllerManager, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(openshiftcontrollermanagersResource, openShiftControllerManager), &operatorv1.OpenShiftControllerManager{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.OpenShiftControllerManager), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeOpenShiftControllerManagers) UpdateStatus(ctx context.Context, openShiftControllerManager *operatorv1.OpenShiftControllerManager, opts v1.UpdateOptions) (*operatorv1.OpenShiftControllerManager, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(openshiftcontrollermanagersResource, "status", openShiftControllerManager), &operatorv1.OpenShiftControllerManager{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.OpenShiftControllerManager), err +} + +// Delete takes name of the openShiftControllerManager and deletes it. Returns an error if one occurs. +func (c *FakeOpenShiftControllerManagers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(openshiftcontrollermanagersResource, name), &operatorv1.OpenShiftControllerManager{}) + return err +} + +// DeleteCollection deletes a collection of objects. 
+func (c *FakeOpenShiftControllerManagers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(openshiftcontrollermanagersResource, listOpts) + + _, err := c.Fake.Invokes(action, &operatorv1.OpenShiftControllerManagerList{}) + return err +} + +// Patch applies the patch and returns the patched openShiftControllerManager. +func (c *FakeOpenShiftControllerManagers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *operatorv1.OpenShiftControllerManager, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(openshiftcontrollermanagersResource, name, pt, data, subresources...), &operatorv1.OpenShiftControllerManager{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.OpenShiftControllerManager), err +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_operator_client.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_operator_client.go new file mode 100644 index 0000000000..ffea2d6b44 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_operator_client.go @@ -0,0 +1,100 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1 "github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeOperatorV1 struct { + *testing.Fake +} + +func (c *FakeOperatorV1) Authentications() v1.AuthenticationInterface { + return &FakeAuthentications{c} +} + +func (c *FakeOperatorV1) CSISnapshotControllers() v1.CSISnapshotControllerInterface { + return &FakeCSISnapshotControllers{c} +} + +func (c *FakeOperatorV1) CloudCredentials() v1.CloudCredentialInterface { + return &FakeCloudCredentials{c} +} + +func (c *FakeOperatorV1) ClusterCSIDrivers() v1.ClusterCSIDriverInterface { + return &FakeClusterCSIDrivers{c} +} + +func (c *FakeOperatorV1) Configs() v1.ConfigInterface { + return &FakeConfigs{c} +} + +func (c *FakeOperatorV1) Consoles() v1.ConsoleInterface { + return &FakeConsoles{c} +} + +func (c *FakeOperatorV1) DNSes() v1.DNSInterface { + return &FakeDNSes{c} +} + +func (c *FakeOperatorV1) Etcds() v1.EtcdInterface { + return &FakeEtcds{c} +} + +func (c *FakeOperatorV1) IngressControllers(namespace string) v1.IngressControllerInterface { + return &FakeIngressControllers{c, namespace} +} + +func (c *FakeOperatorV1) KubeAPIServers() v1.KubeAPIServerInterface { + return &FakeKubeAPIServers{c} +} + +func (c *FakeOperatorV1) KubeControllerManagers() v1.KubeControllerManagerInterface { + return &FakeKubeControllerManagers{c} +} + +func (c *FakeOperatorV1) KubeSchedulers() v1.KubeSchedulerInterface { + return &FakeKubeSchedulers{c} +} + +func (c *FakeOperatorV1) KubeStorageVersionMigrators() v1.KubeStorageVersionMigratorInterface { + return &FakeKubeStorageVersionMigrators{c} +} + +func (c *FakeOperatorV1) Networks() v1.NetworkInterface { + return &FakeNetworks{c} +} + +func (c *FakeOperatorV1) OpenShiftAPIServers() v1.OpenShiftAPIServerInterface { + return &FakeOpenShiftAPIServers{c} +} + +func (c *FakeOperatorV1) OpenShiftControllerManagers() v1.OpenShiftControllerManagerInterface { + return &FakeOpenShiftControllerManagers{c} +} + +func (c *FakeOperatorV1) ServiceCAs() v1.ServiceCAInterface { + return 
&FakeServiceCAs{c} +} + +func (c *FakeOperatorV1) ServiceCatalogAPIServers() v1.ServiceCatalogAPIServerInterface { + return &FakeServiceCatalogAPIServers{c} +} + +func (c *FakeOperatorV1) ServiceCatalogControllerManagers() v1.ServiceCatalogControllerManagerInterface { + return &FakeServiceCatalogControllerManagers{c} +} + +func (c *FakeOperatorV1) Storages() v1.StorageInterface { + return &FakeStorages{c} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeOperatorV1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_serviceca.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_serviceca.go new file mode 100644 index 0000000000..22cefb2e62 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_serviceca.go @@ -0,0 +1,117 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + operatorv1 "github.com/openshift/api/operator/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeServiceCAs implements ServiceCAInterface +type FakeServiceCAs struct { + Fake *FakeOperatorV1 +} + +var servicecasResource = schema.GroupVersionResource{Group: "operator.openshift.io", Version: "v1", Resource: "servicecas"} + +var servicecasKind = schema.GroupVersionKind{Group: "operator.openshift.io", Version: "v1", Kind: "ServiceCA"} + +// Get takes name of the serviceCA, and returns the corresponding serviceCA object, and an error if there is any. +func (c *FakeServiceCAs) Get(ctx context.Context, name string, options v1.GetOptions) (result *operatorv1.ServiceCA, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(servicecasResource, name), &operatorv1.ServiceCA{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.ServiceCA), err +} + +// List takes label and field selectors, and returns the list of ServiceCAs that match those selectors. +func (c *FakeServiceCAs) List(ctx context.Context, opts v1.ListOptions) (result *operatorv1.ServiceCAList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(servicecasResource, servicecasKind, opts), &operatorv1.ServiceCAList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &operatorv1.ServiceCAList{ListMeta: obj.(*operatorv1.ServiceCAList).ListMeta} + for _, item := range obj.(*operatorv1.ServiceCAList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested serviceCAs. +func (c *FakeServiceCAs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(servicecasResource, opts)) +} + +// Create takes the representation of a serviceCA and creates it. Returns the server's representation of the serviceCA, and an error, if there is any. 
+func (c *FakeServiceCAs) Create(ctx context.Context, serviceCA *operatorv1.ServiceCA, opts v1.CreateOptions) (result *operatorv1.ServiceCA, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(servicecasResource, serviceCA), &operatorv1.ServiceCA{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.ServiceCA), err +} + +// Update takes the representation of a serviceCA and updates it. Returns the server's representation of the serviceCA, and an error, if there is any. +func (c *FakeServiceCAs) Update(ctx context.Context, serviceCA *operatorv1.ServiceCA, opts v1.UpdateOptions) (result *operatorv1.ServiceCA, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(servicecasResource, serviceCA), &operatorv1.ServiceCA{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.ServiceCA), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeServiceCAs) UpdateStatus(ctx context.Context, serviceCA *operatorv1.ServiceCA, opts v1.UpdateOptions) (*operatorv1.ServiceCA, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(servicecasResource, "status", serviceCA), &operatorv1.ServiceCA{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.ServiceCA), err +} + +// Delete takes name of the serviceCA and deletes it. Returns an error if one occurs. +func (c *FakeServiceCAs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(servicecasResource, name), &operatorv1.ServiceCA{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeServiceCAs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(servicecasResource, listOpts) + + _, err := c.Fake.Invokes(action, &operatorv1.ServiceCAList{}) + return err +} + +// Patch applies the patch and returns the patched serviceCA. +func (c *FakeServiceCAs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *operatorv1.ServiceCA, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(servicecasResource, name, pt, data, subresources...), &operatorv1.ServiceCA{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.ServiceCA), err +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_servicecatalogapiserver.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_servicecatalogapiserver.go new file mode 100644 index 0000000000..ede8c3d66e --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_servicecatalogapiserver.go @@ -0,0 +1,117 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + operatorv1 "github.com/openshift/api/operator/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeServiceCatalogAPIServers implements ServiceCatalogAPIServerInterface +type FakeServiceCatalogAPIServers struct { + Fake *FakeOperatorV1 +} + +var servicecatalogapiserversResource = schema.GroupVersionResource{Group: "operator.openshift.io", Version: "v1", Resource: "servicecatalogapiservers"} + +var servicecatalogapiserversKind = schema.GroupVersionKind{Group: "operator.openshift.io", Version: "v1", Kind: "ServiceCatalogAPIServer"} + +// Get takes name of the serviceCatalogAPIServer, and returns the corresponding serviceCatalogAPIServer object, and an error if there is any. +func (c *FakeServiceCatalogAPIServers) Get(ctx context.Context, name string, options v1.GetOptions) (result *operatorv1.ServiceCatalogAPIServer, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(servicecatalogapiserversResource, name), &operatorv1.ServiceCatalogAPIServer{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.ServiceCatalogAPIServer), err +} + +// List takes label and field selectors, and returns the list of ServiceCatalogAPIServers that match those selectors. +func (c *FakeServiceCatalogAPIServers) List(ctx context.Context, opts v1.ListOptions) (result *operatorv1.ServiceCatalogAPIServerList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(servicecatalogapiserversResource, servicecatalogapiserversKind, opts), &operatorv1.ServiceCatalogAPIServerList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &operatorv1.ServiceCatalogAPIServerList{ListMeta: obj.(*operatorv1.ServiceCatalogAPIServerList).ListMeta} + for _, item := range obj.(*operatorv1.ServiceCatalogAPIServerList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested serviceCatalogAPIServers. +func (c *FakeServiceCatalogAPIServers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(servicecatalogapiserversResource, opts)) +} + +// Create takes the representation of a serviceCatalogAPIServer and creates it. Returns the server's representation of the serviceCatalogAPIServer, and an error, if there is any. +func (c *FakeServiceCatalogAPIServers) Create(ctx context.Context, serviceCatalogAPIServer *operatorv1.ServiceCatalogAPIServer, opts v1.CreateOptions) (result *operatorv1.ServiceCatalogAPIServer, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(servicecatalogapiserversResource, serviceCatalogAPIServer), &operatorv1.ServiceCatalogAPIServer{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.ServiceCatalogAPIServer), err +} + +// Update takes the representation of a serviceCatalogAPIServer and updates it. Returns the server's representation of the serviceCatalogAPIServer, and an error, if there is any. 
+func (c *FakeServiceCatalogAPIServers) Update(ctx context.Context, serviceCatalogAPIServer *operatorv1.ServiceCatalogAPIServer, opts v1.UpdateOptions) (result *operatorv1.ServiceCatalogAPIServer, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(servicecatalogapiserversResource, serviceCatalogAPIServer), &operatorv1.ServiceCatalogAPIServer{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.ServiceCatalogAPIServer), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeServiceCatalogAPIServers) UpdateStatus(ctx context.Context, serviceCatalogAPIServer *operatorv1.ServiceCatalogAPIServer, opts v1.UpdateOptions) (*operatorv1.ServiceCatalogAPIServer, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(servicecatalogapiserversResource, "status", serviceCatalogAPIServer), &operatorv1.ServiceCatalogAPIServer{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.ServiceCatalogAPIServer), err +} + +// Delete takes name of the serviceCatalogAPIServer and deletes it. Returns an error if one occurs. +func (c *FakeServiceCatalogAPIServers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(servicecatalogapiserversResource, name), &operatorv1.ServiceCatalogAPIServer{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeServiceCatalogAPIServers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(servicecatalogapiserversResource, listOpts) + + _, err := c.Fake.Invokes(action, &operatorv1.ServiceCatalogAPIServerList{}) + return err +} + +// Patch applies the patch and returns the patched serviceCatalogAPIServer. +func (c *FakeServiceCatalogAPIServers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *operatorv1.ServiceCatalogAPIServer, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(servicecatalogapiserversResource, name, pt, data, subresources...), &operatorv1.ServiceCatalogAPIServer{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.ServiceCatalogAPIServer), err +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_servicecatalogcontrollermanager.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_servicecatalogcontrollermanager.go new file mode 100644 index 0000000000..a0afe80b22 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_servicecatalogcontrollermanager.go @@ -0,0 +1,117 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + operatorv1 "github.com/openshift/api/operator/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeServiceCatalogControllerManagers implements ServiceCatalogControllerManagerInterface +type FakeServiceCatalogControllerManagers struct { + Fake *FakeOperatorV1 +} + +var servicecatalogcontrollermanagersResource = schema.GroupVersionResource{Group: "operator.openshift.io", Version: "v1", Resource: "servicecatalogcontrollermanagers"} + +var servicecatalogcontrollermanagersKind = schema.GroupVersionKind{Group: "operator.openshift.io", Version: "v1", Kind: "ServiceCatalogControllerManager"} + +// Get takes name of the serviceCatalogControllerManager, and returns the corresponding serviceCatalogControllerManager object, and an error if there is any. +func (c *FakeServiceCatalogControllerManagers) Get(ctx context.Context, name string, options v1.GetOptions) (result *operatorv1.ServiceCatalogControllerManager, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(servicecatalogcontrollermanagersResource, name), &operatorv1.ServiceCatalogControllerManager{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.ServiceCatalogControllerManager), err +} + +// List takes label and field selectors, and returns the list of ServiceCatalogControllerManagers that match those selectors. +func (c *FakeServiceCatalogControllerManagers) List(ctx context.Context, opts v1.ListOptions) (result *operatorv1.ServiceCatalogControllerManagerList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(servicecatalogcontrollermanagersResource, servicecatalogcontrollermanagersKind, opts), &operatorv1.ServiceCatalogControllerManagerList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &operatorv1.ServiceCatalogControllerManagerList{ListMeta: obj.(*operatorv1.ServiceCatalogControllerManagerList).ListMeta} + for _, item := range obj.(*operatorv1.ServiceCatalogControllerManagerList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested serviceCatalogControllerManagers. +func (c *FakeServiceCatalogControllerManagers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(servicecatalogcontrollermanagersResource, opts)) +} + +// Create takes the representation of a serviceCatalogControllerManager and creates it. Returns the server's representation of the serviceCatalogControllerManager, and an error, if there is any. +func (c *FakeServiceCatalogControllerManagers) Create(ctx context.Context, serviceCatalogControllerManager *operatorv1.ServiceCatalogControllerManager, opts v1.CreateOptions) (result *operatorv1.ServiceCatalogControllerManager, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootCreateAction(servicecatalogcontrollermanagersResource, serviceCatalogControllerManager), &operatorv1.ServiceCatalogControllerManager{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.ServiceCatalogControllerManager), err +} + +// Update takes the representation of a serviceCatalogControllerManager and updates it. Returns the server's representation of the serviceCatalogControllerManager, and an error, if there is any. +func (c *FakeServiceCatalogControllerManagers) Update(ctx context.Context, serviceCatalogControllerManager *operatorv1.ServiceCatalogControllerManager, opts v1.UpdateOptions) (result *operatorv1.ServiceCatalogControllerManager, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(servicecatalogcontrollermanagersResource, serviceCatalogControllerManager), &operatorv1.ServiceCatalogControllerManager{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.ServiceCatalogControllerManager), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeServiceCatalogControllerManagers) UpdateStatus(ctx context.Context, serviceCatalogControllerManager *operatorv1.ServiceCatalogControllerManager, opts v1.UpdateOptions) (*operatorv1.ServiceCatalogControllerManager, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(servicecatalogcontrollermanagersResource, "status", serviceCatalogControllerManager), &operatorv1.ServiceCatalogControllerManager{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.ServiceCatalogControllerManager), err +} + +// Delete takes name of the serviceCatalogControllerManager and deletes it. Returns an error if one occurs. +func (c *FakeServiceCatalogControllerManagers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(servicecatalogcontrollermanagersResource, name), &operatorv1.ServiceCatalogControllerManager{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeServiceCatalogControllerManagers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(servicecatalogcontrollermanagersResource, listOpts) + + _, err := c.Fake.Invokes(action, &operatorv1.ServiceCatalogControllerManagerList{}) + return err +} + +// Patch applies the patch and returns the patched serviceCatalogControllerManager. +func (c *FakeServiceCatalogControllerManagers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *operatorv1.ServiceCatalogControllerManager, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceAction(servicecatalogcontrollermanagersResource, name, pt, data, subresources...), &operatorv1.ServiceCatalogControllerManager{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.ServiceCatalogControllerManager), err +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_storage.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_storage.go new file mode 100644 index 0000000000..f3a23795f5 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_storage.go @@ -0,0 +1,117 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + operatorv1 "github.com/openshift/api/operator/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeStorages implements StorageInterface +type FakeStorages struct { + Fake *FakeOperatorV1 +} + +var storagesResource = schema.GroupVersionResource{Group: "operator.openshift.io", Version: "v1", Resource: "storages"} + +var storagesKind = schema.GroupVersionKind{Group: "operator.openshift.io", Version: "v1", Kind: "Storage"} + +// Get takes name of the storage, and returns the corresponding storage object, and an error if there is any. +func (c *FakeStorages) Get(ctx context.Context, name string, options v1.GetOptions) (result *operatorv1.Storage, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(storagesResource, name), &operatorv1.Storage{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.Storage), err +} + +// List takes label and field selectors, and returns the list of Storages that match those selectors. +func (c *FakeStorages) List(ctx context.Context, opts v1.ListOptions) (result *operatorv1.StorageList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(storagesResource, storagesKind, opts), &operatorv1.StorageList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &operatorv1.StorageList{ListMeta: obj.(*operatorv1.StorageList).ListMeta} + for _, item := range obj.(*operatorv1.StorageList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested storages. +func (c *FakeStorages) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(storagesResource, opts)) +} + +// Create takes the representation of a storage and creates it. Returns the server's representation of the storage, and an error, if there is any. +func (c *FakeStorages) Create(ctx context.Context, storage *operatorv1.Storage, opts v1.CreateOptions) (result *operatorv1.Storage, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(storagesResource, storage), &operatorv1.Storage{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.Storage), err +} + +// Update takes the representation of a storage and updates it. Returns the server's representation of the storage, and an error, if there is any. 
+func (c *FakeStorages) Update(ctx context.Context, storage *operatorv1.Storage, opts v1.UpdateOptions) (result *operatorv1.Storage, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(storagesResource, storage), &operatorv1.Storage{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.Storage), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeStorages) UpdateStatus(ctx context.Context, storage *operatorv1.Storage, opts v1.UpdateOptions) (*operatorv1.Storage, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(storagesResource, "status", storage), &operatorv1.Storage{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.Storage), err +} + +// Delete takes name of the storage and deletes it. Returns an error if one occurs. +func (c *FakeStorages) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(storagesResource, name), &operatorv1.Storage{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeStorages) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(storagesResource, listOpts) + + _, err := c.Fake.Invokes(action, &operatorv1.StorageList{}) + return err +} + +// Patch applies the patch and returns the patched storage. +func (c *FakeStorages) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *operatorv1.Storage, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(storagesResource, name, pt, data, subresources...), &operatorv1.Storage{}) + if obj == nil { + return nil, err + } + return obj.(*operatorv1.Storage), err +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/generated_expansion.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/generated_expansion.go new file mode 100644 index 0000000000..b3894c9555 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/generated_expansion.go @@ -0,0 +1,43 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +type AuthenticationExpansion interface{} + +type CSISnapshotControllerExpansion interface{} + +type CloudCredentialExpansion interface{} + +type ClusterCSIDriverExpansion interface{} + +type ConfigExpansion interface{} + +type ConsoleExpansion interface{} + +type DNSExpansion interface{} + +type EtcdExpansion interface{} + +type IngressControllerExpansion interface{} + +type KubeAPIServerExpansion interface{} + +type KubeControllerManagerExpansion interface{} + +type KubeSchedulerExpansion interface{} + +type KubeStorageVersionMigratorExpansion interface{} + +type NetworkExpansion interface{} + +type OpenShiftAPIServerExpansion interface{} + +type OpenShiftControllerManagerExpansion interface{} + +type ServiceCAExpansion interface{} + +type ServiceCatalogAPIServerExpansion interface{} + +type ServiceCatalogControllerManagerExpansion interface{} + +type StorageExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/ingresscontroller.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/ingresscontroller.go new file mode 100644 index 0000000000..3ec5371eb3 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/ingresscontroller.go @@ -0,0 +1,179 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/openshift/api/operator/v1" + scheme "github.com/openshift/client-go/operator/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// IngressControllersGetter has a method to return a IngressControllerInterface. +// A group's client should implement this interface. +type IngressControllersGetter interface { + IngressControllers(namespace string) IngressControllerInterface +} + +// IngressControllerInterface has methods to work with IngressController resources. 
+type IngressControllerInterface interface { + Create(ctx context.Context, ingressController *v1.IngressController, opts metav1.CreateOptions) (*v1.IngressController, error) + Update(ctx context.Context, ingressController *v1.IngressController, opts metav1.UpdateOptions) (*v1.IngressController, error) + UpdateStatus(ctx context.Context, ingressController *v1.IngressController, opts metav1.UpdateOptions) (*v1.IngressController, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.IngressController, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.IngressControllerList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.IngressController, err error) + IngressControllerExpansion +} + +// ingressControllers implements IngressControllerInterface +type ingressControllers struct { + client rest.Interface + ns string +} + +// newIngressControllers returns a IngressControllers +func newIngressControllers(c *OperatorV1Client, namespace string) *ingressControllers { + return &ingressControllers{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the ingressController, and returns the corresponding ingressController object, and an error if there is any. +func (c *ingressControllers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.IngressController, err error) { + result = &v1.IngressController{} + err = c.client.Get(). + Namespace(c.ns). + Resource("ingresscontrollers"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of IngressControllers that match those selectors. +func (c *ingressControllers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.IngressControllerList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.IngressControllerList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("ingresscontrollers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested ingressControllers. +func (c *ingressControllers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("ingresscontrollers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a ingressController and creates it. Returns the server's representation of the ingressController, and an error, if there is any. +func (c *ingressControllers) Create(ctx context.Context, ingressController *v1.IngressController, opts metav1.CreateOptions) (result *v1.IngressController, err error) { + result = &v1.IngressController{} + err = c.client.Post(). + Namespace(c.ns). + Resource("ingresscontrollers"). + VersionedParams(&opts, scheme.ParameterCodec). 
+ Body(ingressController). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a ingressController and updates it. Returns the server's representation of the ingressController, and an error, if there is any. +func (c *ingressControllers) Update(ctx context.Context, ingressController *v1.IngressController, opts metav1.UpdateOptions) (result *v1.IngressController, err error) { + result = &v1.IngressController{} + err = c.client.Put(). + Namespace(c.ns). + Resource("ingresscontrollers"). + Name(ingressController.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(ingressController). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *ingressControllers) UpdateStatus(ctx context.Context, ingressController *v1.IngressController, opts metav1.UpdateOptions) (result *v1.IngressController, err error) { + result = &v1.IngressController{} + err = c.client.Put(). + Namespace(c.ns). + Resource("ingresscontrollers"). + Name(ingressController.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(ingressController). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the ingressController and deletes it. Returns an error if one occurs. +func (c *ingressControllers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("ingresscontrollers"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *ingressControllers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("ingresscontrollers"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched ingressController. +func (c *ingressControllers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.IngressController, err error) { + result = &v1.IngressController{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("ingresscontrollers"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/kubeapiserver.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/kubeapiserver.go new file mode 100644 index 0000000000..a03e58ea8b --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/kubeapiserver.go @@ -0,0 +1,168 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/openshift/api/operator/v1" + scheme "github.com/openshift/client-go/operator/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// KubeAPIServersGetter has a method to return a KubeAPIServerInterface. 
+// A group's client should implement this interface. +type KubeAPIServersGetter interface { + KubeAPIServers() KubeAPIServerInterface +} + +// KubeAPIServerInterface has methods to work with KubeAPIServer resources. +type KubeAPIServerInterface interface { + Create(ctx context.Context, kubeAPIServer *v1.KubeAPIServer, opts metav1.CreateOptions) (*v1.KubeAPIServer, error) + Update(ctx context.Context, kubeAPIServer *v1.KubeAPIServer, opts metav1.UpdateOptions) (*v1.KubeAPIServer, error) + UpdateStatus(ctx context.Context, kubeAPIServer *v1.KubeAPIServer, opts metav1.UpdateOptions) (*v1.KubeAPIServer, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.KubeAPIServer, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.KubeAPIServerList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.KubeAPIServer, err error) + KubeAPIServerExpansion +} + +// kubeAPIServers implements KubeAPIServerInterface +type kubeAPIServers struct { + client rest.Interface +} + +// newKubeAPIServers returns a KubeAPIServers +func newKubeAPIServers(c *OperatorV1Client) *kubeAPIServers { + return &kubeAPIServers{ + client: c.RESTClient(), + } +} + +// Get takes name of the kubeAPIServer, and returns the corresponding kubeAPIServer object, and an error if there is any. +func (c *kubeAPIServers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.KubeAPIServer, err error) { + result = &v1.KubeAPIServer{} + err = c.client.Get(). + Resource("kubeapiservers"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of KubeAPIServers that match those selectors. +func (c *kubeAPIServers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.KubeAPIServerList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.KubeAPIServerList{} + err = c.client.Get(). + Resource("kubeapiservers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested kubeAPIServers. +func (c *kubeAPIServers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("kubeapiservers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a kubeAPIServer and creates it. Returns the server's representation of the kubeAPIServer, and an error, if there is any. +func (c *kubeAPIServers) Create(ctx context.Context, kubeAPIServer *v1.KubeAPIServer, opts metav1.CreateOptions) (result *v1.KubeAPIServer, err error) { + result = &v1.KubeAPIServer{} + err = c.client.Post(). + Resource("kubeapiservers"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(kubeAPIServer). + Do(ctx). 
+ Into(result) + return +} + +// Update takes the representation of a kubeAPIServer and updates it. Returns the server's representation of the kubeAPIServer, and an error, if there is any. +func (c *kubeAPIServers) Update(ctx context.Context, kubeAPIServer *v1.KubeAPIServer, opts metav1.UpdateOptions) (result *v1.KubeAPIServer, err error) { + result = &v1.KubeAPIServer{} + err = c.client.Put(). + Resource("kubeapiservers"). + Name(kubeAPIServer.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(kubeAPIServer). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *kubeAPIServers) UpdateStatus(ctx context.Context, kubeAPIServer *v1.KubeAPIServer, opts metav1.UpdateOptions) (result *v1.KubeAPIServer, err error) { + result = &v1.KubeAPIServer{} + err = c.client.Put(). + Resource("kubeapiservers"). + Name(kubeAPIServer.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(kubeAPIServer). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the kubeAPIServer and deletes it. Returns an error if one occurs. +func (c *kubeAPIServers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("kubeapiservers"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *kubeAPIServers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("kubeapiservers"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched kubeAPIServer. +func (c *kubeAPIServers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.KubeAPIServer, err error) { + result = &v1.KubeAPIServer{} + err = c.client.Patch(pt). + Resource("kubeapiservers"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/kubecontrollermanager.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/kubecontrollermanager.go new file mode 100644 index 0000000000..6093bf5711 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/kubecontrollermanager.go @@ -0,0 +1,168 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/openshift/api/operator/v1" + scheme "github.com/openshift/client-go/operator/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// KubeControllerManagersGetter has a method to return a KubeControllerManagerInterface. +// A group's client should implement this interface. 
+type KubeControllerManagersGetter interface { + KubeControllerManagers() KubeControllerManagerInterface +} + +// KubeControllerManagerInterface has methods to work with KubeControllerManager resources. +type KubeControllerManagerInterface interface { + Create(ctx context.Context, kubeControllerManager *v1.KubeControllerManager, opts metav1.CreateOptions) (*v1.KubeControllerManager, error) + Update(ctx context.Context, kubeControllerManager *v1.KubeControllerManager, opts metav1.UpdateOptions) (*v1.KubeControllerManager, error) + UpdateStatus(ctx context.Context, kubeControllerManager *v1.KubeControllerManager, opts metav1.UpdateOptions) (*v1.KubeControllerManager, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.KubeControllerManager, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.KubeControllerManagerList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.KubeControllerManager, err error) + KubeControllerManagerExpansion +} + +// kubeControllerManagers implements KubeControllerManagerInterface +type kubeControllerManagers struct { + client rest.Interface +} + +// newKubeControllerManagers returns a KubeControllerManagers +func newKubeControllerManagers(c *OperatorV1Client) *kubeControllerManagers { + return &kubeControllerManagers{ + client: c.RESTClient(), + } +} + +// Get takes name of the kubeControllerManager, and returns the corresponding kubeControllerManager object, and an error if there is any. +func (c *kubeControllerManagers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.KubeControllerManager, err error) { + result = &v1.KubeControllerManager{} + err = c.client.Get(). + Resource("kubecontrollermanagers"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of KubeControllerManagers that match those selectors. +func (c *kubeControllerManagers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.KubeControllerManagerList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.KubeControllerManagerList{} + err = c.client.Get(). + Resource("kubecontrollermanagers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested kubeControllerManagers. +func (c *kubeControllerManagers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("kubecontrollermanagers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a kubeControllerManager and creates it. Returns the server's representation of the kubeControllerManager, and an error, if there is any. 
+func (c *kubeControllerManagers) Create(ctx context.Context, kubeControllerManager *v1.KubeControllerManager, opts metav1.CreateOptions) (result *v1.KubeControllerManager, err error) { + result = &v1.KubeControllerManager{} + err = c.client.Post(). + Resource("kubecontrollermanagers"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(kubeControllerManager). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a kubeControllerManager and updates it. Returns the server's representation of the kubeControllerManager, and an error, if there is any. +func (c *kubeControllerManagers) Update(ctx context.Context, kubeControllerManager *v1.KubeControllerManager, opts metav1.UpdateOptions) (result *v1.KubeControllerManager, err error) { + result = &v1.KubeControllerManager{} + err = c.client.Put(). + Resource("kubecontrollermanagers"). + Name(kubeControllerManager.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(kubeControllerManager). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *kubeControllerManagers) UpdateStatus(ctx context.Context, kubeControllerManager *v1.KubeControllerManager, opts metav1.UpdateOptions) (result *v1.KubeControllerManager, err error) { + result = &v1.KubeControllerManager{} + err = c.client.Put(). + Resource("kubecontrollermanagers"). + Name(kubeControllerManager.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(kubeControllerManager). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the kubeControllerManager and deletes it. Returns an error if one occurs. +func (c *kubeControllerManagers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("kubecontrollermanagers"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *kubeControllerManagers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("kubecontrollermanagers"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched kubeControllerManager. +func (c *kubeControllerManagers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.KubeControllerManager, err error) { + result = &v1.KubeControllerManager{} + err = c.client.Patch(pt). + Resource("kubecontrollermanagers"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/kubescheduler.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/kubescheduler.go new file mode 100644 index 0000000000..2188b87885 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/kubescheduler.go @@ -0,0 +1,168 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +import ( + "context" + "time" + + v1 "github.com/openshift/api/operator/v1" + scheme "github.com/openshift/client-go/operator/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// KubeSchedulersGetter has a method to return a KubeSchedulerInterface. +// A group's client should implement this interface. +type KubeSchedulersGetter interface { + KubeSchedulers() KubeSchedulerInterface +} + +// KubeSchedulerInterface has methods to work with KubeScheduler resources. +type KubeSchedulerInterface interface { + Create(ctx context.Context, kubeScheduler *v1.KubeScheduler, opts metav1.CreateOptions) (*v1.KubeScheduler, error) + Update(ctx context.Context, kubeScheduler *v1.KubeScheduler, opts metav1.UpdateOptions) (*v1.KubeScheduler, error) + UpdateStatus(ctx context.Context, kubeScheduler *v1.KubeScheduler, opts metav1.UpdateOptions) (*v1.KubeScheduler, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.KubeScheduler, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.KubeSchedulerList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.KubeScheduler, err error) + KubeSchedulerExpansion +} + +// kubeSchedulers implements KubeSchedulerInterface +type kubeSchedulers struct { + client rest.Interface +} + +// newKubeSchedulers returns a KubeSchedulers +func newKubeSchedulers(c *OperatorV1Client) *kubeSchedulers { + return &kubeSchedulers{ + client: c.RESTClient(), + } +} + +// Get takes name of the kubeScheduler, and returns the corresponding kubeScheduler object, and an error if there is any. +func (c *kubeSchedulers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.KubeScheduler, err error) { + result = &v1.KubeScheduler{} + err = c.client.Get(). + Resource("kubeschedulers"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of KubeSchedulers that match those selectors. +func (c *kubeSchedulers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.KubeSchedulerList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.KubeSchedulerList{} + err = c.client.Get(). + Resource("kubeschedulers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested kubeSchedulers. +func (c *kubeSchedulers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("kubeschedulers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a kubeScheduler and creates it. 
+// Returns the server's representation of the kubeScheduler, and an error, if there is any.
+func (c *kubeSchedulers) Create(ctx context.Context, kubeScheduler *v1.KubeScheduler, opts metav1.CreateOptions) (result *v1.KubeScheduler, err error) {
+	result = &v1.KubeScheduler{}
+	err = c.client.Post().
+		Resource("kubeschedulers").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Body(kubeScheduler).
+		Do(ctx).
+		Into(result)
+	return
+}
+
+// Update takes the representation of a kubeScheduler and updates it. Returns the server's representation of the kubeScheduler, and an error, if there is any.
+func (c *kubeSchedulers) Update(ctx context.Context, kubeScheduler *v1.KubeScheduler, opts metav1.UpdateOptions) (result *v1.KubeScheduler, err error) {
+	result = &v1.KubeScheduler{}
+	err = c.client.Put().
+		Resource("kubeschedulers").
+		Name(kubeScheduler.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Body(kubeScheduler).
+		Do(ctx).
+		Into(result)
+	return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *kubeSchedulers) UpdateStatus(ctx context.Context, kubeScheduler *v1.KubeScheduler, opts metav1.UpdateOptions) (result *v1.KubeScheduler, err error) {
+	result = &v1.KubeScheduler{}
+	err = c.client.Put().
+		Resource("kubeschedulers").
+		Name(kubeScheduler.Name).
+		SubResource("status").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Body(kubeScheduler).
+		Do(ctx).
+		Into(result)
+	return
+}
+
+// Delete takes name of the kubeScheduler and deletes it. Returns an error if one occurs.
+func (c *kubeSchedulers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+	return c.client.Delete().
+		Resource("kubeschedulers").
+		Name(name).
+		Body(&opts).
+		Do(ctx).
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *kubeSchedulers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+	var timeout time.Duration
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Resource("kubeschedulers").
+		VersionedParams(&listOpts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(&opts).
+		Do(ctx).
+		Error()
+}
+
+// Patch applies the patch and returns the patched kubeScheduler.
+func (c *kubeSchedulers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.KubeScheduler, err error) {
+	result = &v1.KubeScheduler{}
+	err = c.client.Patch(pt).
+		Resource("kubeschedulers").
+		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Body(data).
+		Do(ctx).
+		Into(result)
+	return
+}
diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/kubestorageversionmigrator.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/kubestorageversionmigrator.go
new file mode 100644
index 0000000000..41f44a253f
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/kubestorageversionmigrator.go
@@ -0,0 +1,168 @@
+// Code generated by client-gen. DO NOT EDIT.
+ +package v1 + +import ( + "context" + "time" + + v1 "github.com/openshift/api/operator/v1" + scheme "github.com/openshift/client-go/operator/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// KubeStorageVersionMigratorsGetter has a method to return a KubeStorageVersionMigratorInterface. +// A group's client should implement this interface. +type KubeStorageVersionMigratorsGetter interface { + KubeStorageVersionMigrators() KubeStorageVersionMigratorInterface +} + +// KubeStorageVersionMigratorInterface has methods to work with KubeStorageVersionMigrator resources. +type KubeStorageVersionMigratorInterface interface { + Create(ctx context.Context, kubeStorageVersionMigrator *v1.KubeStorageVersionMigrator, opts metav1.CreateOptions) (*v1.KubeStorageVersionMigrator, error) + Update(ctx context.Context, kubeStorageVersionMigrator *v1.KubeStorageVersionMigrator, opts metav1.UpdateOptions) (*v1.KubeStorageVersionMigrator, error) + UpdateStatus(ctx context.Context, kubeStorageVersionMigrator *v1.KubeStorageVersionMigrator, opts metav1.UpdateOptions) (*v1.KubeStorageVersionMigrator, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.KubeStorageVersionMigrator, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.KubeStorageVersionMigratorList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.KubeStorageVersionMigrator, err error) + KubeStorageVersionMigratorExpansion +} + +// kubeStorageVersionMigrators implements KubeStorageVersionMigratorInterface +type kubeStorageVersionMigrators struct { + client rest.Interface +} + +// newKubeStorageVersionMigrators returns a KubeStorageVersionMigrators +func newKubeStorageVersionMigrators(c *OperatorV1Client) *kubeStorageVersionMigrators { + return &kubeStorageVersionMigrators{ + client: c.RESTClient(), + } +} + +// Get takes name of the kubeStorageVersionMigrator, and returns the corresponding kubeStorageVersionMigrator object, and an error if there is any. +func (c *kubeStorageVersionMigrators) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.KubeStorageVersionMigrator, err error) { + result = &v1.KubeStorageVersionMigrator{} + err = c.client.Get(). + Resource("kubestorageversionmigrators"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of KubeStorageVersionMigrators that match those selectors. +func (c *kubeStorageVersionMigrators) List(ctx context.Context, opts metav1.ListOptions) (result *v1.KubeStorageVersionMigratorList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.KubeStorageVersionMigratorList{} + err = c.client.Get(). + Resource("kubestorageversionmigrators"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested kubeStorageVersionMigrators. 
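+// A consumption sketch (hypothetical; w is the returned interface):
+//
+//	for event := range w.ResultChan() {
+//		if m, ok := event.Object.(*v1.KubeStorageVersionMigrator); ok {
+//			fmt.Println(event.Type, m.Name)
+//		}
+//	}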
+func (c *kubeStorageVersionMigrators) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("kubestorageversionmigrators"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a kubeStorageVersionMigrator and creates it. Returns the server's representation of the kubeStorageVersionMigrator, and an error, if there is any. +func (c *kubeStorageVersionMigrators) Create(ctx context.Context, kubeStorageVersionMigrator *v1.KubeStorageVersionMigrator, opts metav1.CreateOptions) (result *v1.KubeStorageVersionMigrator, err error) { + result = &v1.KubeStorageVersionMigrator{} + err = c.client.Post(). + Resource("kubestorageversionmigrators"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(kubeStorageVersionMigrator). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a kubeStorageVersionMigrator and updates it. Returns the server's representation of the kubeStorageVersionMigrator, and an error, if there is any. +func (c *kubeStorageVersionMigrators) Update(ctx context.Context, kubeStorageVersionMigrator *v1.KubeStorageVersionMigrator, opts metav1.UpdateOptions) (result *v1.KubeStorageVersionMigrator, err error) { + result = &v1.KubeStorageVersionMigrator{} + err = c.client.Put(). + Resource("kubestorageversionmigrators"). + Name(kubeStorageVersionMigrator.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(kubeStorageVersionMigrator). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *kubeStorageVersionMigrators) UpdateStatus(ctx context.Context, kubeStorageVersionMigrator *v1.KubeStorageVersionMigrator, opts metav1.UpdateOptions) (result *v1.KubeStorageVersionMigrator, err error) { + result = &v1.KubeStorageVersionMigrator{} + err = c.client.Put(). + Resource("kubestorageversionmigrators"). + Name(kubeStorageVersionMigrator.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(kubeStorageVersionMigrator). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the kubeStorageVersionMigrator and deletes it. Returns an error if one occurs. +func (c *kubeStorageVersionMigrators) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("kubestorageversionmigrators"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *kubeStorageVersionMigrators) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("kubestorageversionmigrators"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched kubeStorageVersionMigrator. 
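+// For example, with a JSON merge patch (an illustrative payload; it assumes
+// the usual embedded OperatorSpec fields and a client built from this package):
+//
+//	data := []byte(`{"spec":{"logLevel":"Debug"}}`)
+//	m, err := client.KubeStorageVersionMigrators().Patch(ctx, "cluster",
+//		types.MergePatchType, data, metav1.PatchOptions{})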
+func (c *kubeStorageVersionMigrators) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.KubeStorageVersionMigrator, err error) { + result = &v1.KubeStorageVersionMigrator{} + err = c.client.Patch(pt). + Resource("kubestorageversionmigrators"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/network.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/network.go new file mode 100644 index 0000000000..3b624e6ac2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/network.go @@ -0,0 +1,168 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/openshift/api/operator/v1" + scheme "github.com/openshift/client-go/operator/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// NetworksGetter has a method to return a NetworkInterface. +// A group's client should implement this interface. +type NetworksGetter interface { + Networks() NetworkInterface +} + +// NetworkInterface has methods to work with Network resources. +type NetworkInterface interface { + Create(ctx context.Context, network *v1.Network, opts metav1.CreateOptions) (*v1.Network, error) + Update(ctx context.Context, network *v1.Network, opts metav1.UpdateOptions) (*v1.Network, error) + UpdateStatus(ctx context.Context, network *v1.Network, opts metav1.UpdateOptions) (*v1.Network, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Network, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.NetworkList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Network, err error) + NetworkExpansion +} + +// networks implements NetworkInterface +type networks struct { + client rest.Interface +} + +// newNetworks returns a Networks +func newNetworks(c *OperatorV1Client) *networks { + return &networks{ + client: c.RESTClient(), + } +} + +// Get takes name of the network, and returns the corresponding network object, and an error if there is any. +func (c *networks) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Network, err error) { + result = &v1.Network{} + err = c.client.Get(). + Resource("networks"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Networks that match those selectors. +func (c *networks) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NetworkList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.NetworkList{} + err = c.client.Get(). + Resource("networks"). + VersionedParams(&opts, scheme.ParameterCodec). 
+ Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested networks. +func (c *networks) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("networks"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a network and creates it. Returns the server's representation of the network, and an error, if there is any. +func (c *networks) Create(ctx context.Context, network *v1.Network, opts metav1.CreateOptions) (result *v1.Network, err error) { + result = &v1.Network{} + err = c.client.Post(). + Resource("networks"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(network). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a network and updates it. Returns the server's representation of the network, and an error, if there is any. +func (c *networks) Update(ctx context.Context, network *v1.Network, opts metav1.UpdateOptions) (result *v1.Network, err error) { + result = &v1.Network{} + err = c.client.Put(). + Resource("networks"). + Name(network.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(network). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *networks) UpdateStatus(ctx context.Context, network *v1.Network, opts metav1.UpdateOptions) (result *v1.Network, err error) { + result = &v1.Network{} + err = c.client.Put(). + Resource("networks"). + Name(network.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(network). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the network and deletes it. Returns an error if one occurs. +func (c *networks) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("networks"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *networks) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("networks"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched network. +func (c *networks) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Network, err error) { + result = &v1.Network{} + err = c.client.Patch(pt). + Resource("networks"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/openshiftapiserver.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/openshiftapiserver.go new file mode 100644 index 0000000000..dc7f9e6098 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/openshiftapiserver.go @@ -0,0 +1,168 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/openshift/api/operator/v1" + scheme "github.com/openshift/client-go/operator/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// OpenShiftAPIServersGetter has a method to return a OpenShiftAPIServerInterface. +// A group's client should implement this interface. +type OpenShiftAPIServersGetter interface { + OpenShiftAPIServers() OpenShiftAPIServerInterface +} + +// OpenShiftAPIServerInterface has methods to work with OpenShiftAPIServer resources. +type OpenShiftAPIServerInterface interface { + Create(ctx context.Context, openShiftAPIServer *v1.OpenShiftAPIServer, opts metav1.CreateOptions) (*v1.OpenShiftAPIServer, error) + Update(ctx context.Context, openShiftAPIServer *v1.OpenShiftAPIServer, opts metav1.UpdateOptions) (*v1.OpenShiftAPIServer, error) + UpdateStatus(ctx context.Context, openShiftAPIServer *v1.OpenShiftAPIServer, opts metav1.UpdateOptions) (*v1.OpenShiftAPIServer, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.OpenShiftAPIServer, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.OpenShiftAPIServerList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.OpenShiftAPIServer, err error) + OpenShiftAPIServerExpansion +} + +// openShiftAPIServers implements OpenShiftAPIServerInterface +type openShiftAPIServers struct { + client rest.Interface +} + +// newOpenShiftAPIServers returns a OpenShiftAPIServers +func newOpenShiftAPIServers(c *OperatorV1Client) *openShiftAPIServers { + return &openShiftAPIServers{ + client: c.RESTClient(), + } +} + +// Get takes name of the openShiftAPIServer, and returns the corresponding openShiftAPIServer object, and an error if there is any. +func (c *openShiftAPIServers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.OpenShiftAPIServer, err error) { + result = &v1.OpenShiftAPIServer{} + err = c.client.Get(). + Resource("openshiftapiservers"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of OpenShiftAPIServers that match those selectors. +func (c *openShiftAPIServers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.OpenShiftAPIServerList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.OpenShiftAPIServerList{} + err = c.client.Get(). + Resource("openshiftapiservers"). 
+ VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested openShiftAPIServers. +func (c *openShiftAPIServers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("openshiftapiservers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a openShiftAPIServer and creates it. Returns the server's representation of the openShiftAPIServer, and an error, if there is any. +func (c *openShiftAPIServers) Create(ctx context.Context, openShiftAPIServer *v1.OpenShiftAPIServer, opts metav1.CreateOptions) (result *v1.OpenShiftAPIServer, err error) { + result = &v1.OpenShiftAPIServer{} + err = c.client.Post(). + Resource("openshiftapiservers"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(openShiftAPIServer). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a openShiftAPIServer and updates it. Returns the server's representation of the openShiftAPIServer, and an error, if there is any. +func (c *openShiftAPIServers) Update(ctx context.Context, openShiftAPIServer *v1.OpenShiftAPIServer, opts metav1.UpdateOptions) (result *v1.OpenShiftAPIServer, err error) { + result = &v1.OpenShiftAPIServer{} + err = c.client.Put(). + Resource("openshiftapiservers"). + Name(openShiftAPIServer.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(openShiftAPIServer). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *openShiftAPIServers) UpdateStatus(ctx context.Context, openShiftAPIServer *v1.OpenShiftAPIServer, opts metav1.UpdateOptions) (result *v1.OpenShiftAPIServer, err error) { + result = &v1.OpenShiftAPIServer{} + err = c.client.Put(). + Resource("openshiftapiservers"). + Name(openShiftAPIServer.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(openShiftAPIServer). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the openShiftAPIServer and deletes it. Returns an error if one occurs. +func (c *openShiftAPIServers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("openshiftapiservers"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *openShiftAPIServers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("openshiftapiservers"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched openShiftAPIServer. +func (c *openShiftAPIServers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.OpenShiftAPIServer, err error) { + result = &v1.OpenShiftAPIServer{} + err = c.client.Patch(pt). + Resource("openshiftapiservers"). 
+ Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/openshiftcontrollermanager.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/openshiftcontrollermanager.go new file mode 100644 index 0000000000..f68c64309c --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/openshiftcontrollermanager.go @@ -0,0 +1,168 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/openshift/api/operator/v1" + scheme "github.com/openshift/client-go/operator/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// OpenShiftControllerManagersGetter has a method to return a OpenShiftControllerManagerInterface. +// A group's client should implement this interface. +type OpenShiftControllerManagersGetter interface { + OpenShiftControllerManagers() OpenShiftControllerManagerInterface +} + +// OpenShiftControllerManagerInterface has methods to work with OpenShiftControllerManager resources. +type OpenShiftControllerManagerInterface interface { + Create(ctx context.Context, openShiftControllerManager *v1.OpenShiftControllerManager, opts metav1.CreateOptions) (*v1.OpenShiftControllerManager, error) + Update(ctx context.Context, openShiftControllerManager *v1.OpenShiftControllerManager, opts metav1.UpdateOptions) (*v1.OpenShiftControllerManager, error) + UpdateStatus(ctx context.Context, openShiftControllerManager *v1.OpenShiftControllerManager, opts metav1.UpdateOptions) (*v1.OpenShiftControllerManager, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.OpenShiftControllerManager, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.OpenShiftControllerManagerList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.OpenShiftControllerManager, err error) + OpenShiftControllerManagerExpansion +} + +// openShiftControllerManagers implements OpenShiftControllerManagerInterface +type openShiftControllerManagers struct { + client rest.Interface +} + +// newOpenShiftControllerManagers returns a OpenShiftControllerManagers +func newOpenShiftControllerManagers(c *OperatorV1Client) *openShiftControllerManagers { + return &openShiftControllerManagers{ + client: c.RESTClient(), + } +} + +// Get takes name of the openShiftControllerManager, and returns the corresponding openShiftControllerManager object, and an error if there is any. +func (c *openShiftControllerManagers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.OpenShiftControllerManager, err error) { + result = &v1.OpenShiftControllerManager{} + err = c.client.Get(). + Resource("openshiftcontrollermanagers"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). 
+ Into(result) + return +} + +// List takes label and field selectors, and returns the list of OpenShiftControllerManagers that match those selectors. +func (c *openShiftControllerManagers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.OpenShiftControllerManagerList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.OpenShiftControllerManagerList{} + err = c.client.Get(). + Resource("openshiftcontrollermanagers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested openShiftControllerManagers. +func (c *openShiftControllerManagers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("openshiftcontrollermanagers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a openShiftControllerManager and creates it. Returns the server's representation of the openShiftControllerManager, and an error, if there is any. +func (c *openShiftControllerManagers) Create(ctx context.Context, openShiftControllerManager *v1.OpenShiftControllerManager, opts metav1.CreateOptions) (result *v1.OpenShiftControllerManager, err error) { + result = &v1.OpenShiftControllerManager{} + err = c.client.Post(). + Resource("openshiftcontrollermanagers"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(openShiftControllerManager). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a openShiftControllerManager and updates it. Returns the server's representation of the openShiftControllerManager, and an error, if there is any. +func (c *openShiftControllerManagers) Update(ctx context.Context, openShiftControllerManager *v1.OpenShiftControllerManager, opts metav1.UpdateOptions) (result *v1.OpenShiftControllerManager, err error) { + result = &v1.OpenShiftControllerManager{} + err = c.client.Put(). + Resource("openshiftcontrollermanagers"). + Name(openShiftControllerManager.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(openShiftControllerManager). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *openShiftControllerManagers) UpdateStatus(ctx context.Context, openShiftControllerManager *v1.OpenShiftControllerManager, opts metav1.UpdateOptions) (result *v1.OpenShiftControllerManager, err error) { + result = &v1.OpenShiftControllerManager{} + err = c.client.Put(). + Resource("openshiftcontrollermanagers"). + Name(openShiftControllerManager.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(openShiftControllerManager). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the openShiftControllerManager and deletes it. Returns an error if one occurs. +func (c *openShiftControllerManagers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("openshiftcontrollermanagers"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. 
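+// For example (hypothetical label selector; the filtering happens server-side):
+//
+//	err := client.OpenShiftControllerManagers().DeleteCollection(ctx,
+//		metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "purpose=scratch"})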
+func (c *openShiftControllerManagers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("openshiftcontrollermanagers"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched openShiftControllerManager. +func (c *openShiftControllerManagers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.OpenShiftControllerManager, err error) { + result = &v1.OpenShiftControllerManager{} + err = c.client.Patch(pt). + Resource("openshiftcontrollermanagers"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/operator_client.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/operator_client.go new file mode 100644 index 0000000000..b5e4004d67 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/operator_client.go @@ -0,0 +1,168 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/operator/v1" + "github.com/openshift/client-go/operator/clientset/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type OperatorV1Interface interface { + RESTClient() rest.Interface + AuthenticationsGetter + CSISnapshotControllersGetter + CloudCredentialsGetter + ClusterCSIDriversGetter + ConfigsGetter + ConsolesGetter + DNSesGetter + EtcdsGetter + IngressControllersGetter + KubeAPIServersGetter + KubeControllerManagersGetter + KubeSchedulersGetter + KubeStorageVersionMigratorsGetter + NetworksGetter + OpenShiftAPIServersGetter + OpenShiftControllerManagersGetter + ServiceCAsGetter + ServiceCatalogAPIServersGetter + ServiceCatalogControllerManagersGetter + StoragesGetter +} + +// OperatorV1Client is used to interact with features provided by the operator.openshift.io group. 
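+// A construction and usage sketch (hypothetical; error handling elided, and
+// the operator resources are cluster-scoped singletons conventionally named
+// "cluster"):
+//
+//	cfg, _ := clientcmd.BuildConfigFromFlags("", kubeconfigPath)
+//	client, _ := NewForConfig(cfg)
+//	ks, _ := client.KubeSchedulers().Get(ctx, "cluster", metav1.GetOptions{})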
+type OperatorV1Client struct { + restClient rest.Interface +} + +func (c *OperatorV1Client) Authentications() AuthenticationInterface { + return newAuthentications(c) +} + +func (c *OperatorV1Client) CSISnapshotControllers() CSISnapshotControllerInterface { + return newCSISnapshotControllers(c) +} + +func (c *OperatorV1Client) CloudCredentials() CloudCredentialInterface { + return newCloudCredentials(c) +} + +func (c *OperatorV1Client) ClusterCSIDrivers() ClusterCSIDriverInterface { + return newClusterCSIDrivers(c) +} + +func (c *OperatorV1Client) Configs() ConfigInterface { + return newConfigs(c) +} + +func (c *OperatorV1Client) Consoles() ConsoleInterface { + return newConsoles(c) +} + +func (c *OperatorV1Client) DNSes() DNSInterface { + return newDNSes(c) +} + +func (c *OperatorV1Client) Etcds() EtcdInterface { + return newEtcds(c) +} + +func (c *OperatorV1Client) IngressControllers(namespace string) IngressControllerInterface { + return newIngressControllers(c, namespace) +} + +func (c *OperatorV1Client) KubeAPIServers() KubeAPIServerInterface { + return newKubeAPIServers(c) +} + +func (c *OperatorV1Client) KubeControllerManagers() KubeControllerManagerInterface { + return newKubeControllerManagers(c) +} + +func (c *OperatorV1Client) KubeSchedulers() KubeSchedulerInterface { + return newKubeSchedulers(c) +} + +func (c *OperatorV1Client) KubeStorageVersionMigrators() KubeStorageVersionMigratorInterface { + return newKubeStorageVersionMigrators(c) +} + +func (c *OperatorV1Client) Networks() NetworkInterface { + return newNetworks(c) +} + +func (c *OperatorV1Client) OpenShiftAPIServers() OpenShiftAPIServerInterface { + return newOpenShiftAPIServers(c) +} + +func (c *OperatorV1Client) OpenShiftControllerManagers() OpenShiftControllerManagerInterface { + return newOpenShiftControllerManagers(c) +} + +func (c *OperatorV1Client) ServiceCAs() ServiceCAInterface { + return newServiceCAs(c) +} + +func (c *OperatorV1Client) ServiceCatalogAPIServers() ServiceCatalogAPIServerInterface { + return newServiceCatalogAPIServers(c) +} + +func (c *OperatorV1Client) ServiceCatalogControllerManagers() ServiceCatalogControllerManagerInterface { + return newServiceCatalogControllerManagers(c) +} + +func (c *OperatorV1Client) Storages() StorageInterface { + return newStorages(c) +} + +// NewForConfig creates a new OperatorV1Client for the given config. +func NewForConfig(c *rest.Config) (*OperatorV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &OperatorV1Client{client}, nil +} + +// NewForConfigOrDie creates a new OperatorV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *OperatorV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new OperatorV1Client for the given RESTClient. +func New(c rest.Interface) *OperatorV1Client { + return &OperatorV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
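+// It can also be used for raw requests against the group, for example
+// (a hypothetical sketch):
+//
+//	raw, err := c.RESTClient().Get().
+//		AbsPath("/apis/operator.openshift.io/v1").
+//		DoRaw(ctx)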
+func (c *OperatorV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/serviceca.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/serviceca.go new file mode 100644 index 0000000000..e15879e23b --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/serviceca.go @@ -0,0 +1,168 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/openshift/api/operator/v1" + scheme "github.com/openshift/client-go/operator/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ServiceCAsGetter has a method to return a ServiceCAInterface. +// A group's client should implement this interface. +type ServiceCAsGetter interface { + ServiceCAs() ServiceCAInterface +} + +// ServiceCAInterface has methods to work with ServiceCA resources. +type ServiceCAInterface interface { + Create(ctx context.Context, serviceCA *v1.ServiceCA, opts metav1.CreateOptions) (*v1.ServiceCA, error) + Update(ctx context.Context, serviceCA *v1.ServiceCA, opts metav1.UpdateOptions) (*v1.ServiceCA, error) + UpdateStatus(ctx context.Context, serviceCA *v1.ServiceCA, opts metav1.UpdateOptions) (*v1.ServiceCA, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ServiceCA, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.ServiceCAList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ServiceCA, err error) + ServiceCAExpansion +} + +// serviceCAs implements ServiceCAInterface +type serviceCAs struct { + client rest.Interface +} + +// newServiceCAs returns a ServiceCAs +func newServiceCAs(c *OperatorV1Client) *serviceCAs { + return &serviceCAs{ + client: c.RESTClient(), + } +} + +// Get takes name of the serviceCA, and returns the corresponding serviceCA object, and an error if there is any. +func (c *serviceCAs) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ServiceCA, err error) { + result = &v1.ServiceCA{} + err = c.client.Get(). + Resource("servicecas"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ServiceCAs that match those selectors. +func (c *serviceCAs) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ServiceCAList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.ServiceCAList{} + err = c.client.Get(). + Resource("servicecas"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested serviceCAs. 
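+// For example, a bounded watch (the TimeoutSeconds field is honored by the
+// method below):
+//
+//	timeout := int64(120)
+//	w, err := client.ServiceCAs().Watch(ctx, metav1.ListOptions{TimeoutSeconds: &timeout})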
+func (c *serviceCAs) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("servicecas"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a serviceCA and creates it. Returns the server's representation of the serviceCA, and an error, if there is any. +func (c *serviceCAs) Create(ctx context.Context, serviceCA *v1.ServiceCA, opts metav1.CreateOptions) (result *v1.ServiceCA, err error) { + result = &v1.ServiceCA{} + err = c.client.Post(). + Resource("servicecas"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(serviceCA). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a serviceCA and updates it. Returns the server's representation of the serviceCA, and an error, if there is any. +func (c *serviceCAs) Update(ctx context.Context, serviceCA *v1.ServiceCA, opts metav1.UpdateOptions) (result *v1.ServiceCA, err error) { + result = &v1.ServiceCA{} + err = c.client.Put(). + Resource("servicecas"). + Name(serviceCA.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(serviceCA). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *serviceCAs) UpdateStatus(ctx context.Context, serviceCA *v1.ServiceCA, opts metav1.UpdateOptions) (result *v1.ServiceCA, err error) { + result = &v1.ServiceCA{} + err = c.client.Put(). + Resource("servicecas"). + Name(serviceCA.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(serviceCA). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the serviceCA and deletes it. Returns an error if one occurs. +func (c *serviceCAs) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("servicecas"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *serviceCAs) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("servicecas"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched serviceCA. +func (c *serviceCAs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ServiceCA, err error) { + result = &v1.ServiceCA{} + err = c.client.Patch(pt). + Resource("servicecas"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/servicecatalogapiserver.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/servicecatalogapiserver.go new file mode 100644 index 0000000000..da2dfc5115 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/servicecatalogapiserver.go @@ -0,0 +1,168 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/openshift/api/operator/v1" + scheme "github.com/openshift/client-go/operator/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ServiceCatalogAPIServersGetter has a method to return a ServiceCatalogAPIServerInterface. +// A group's client should implement this interface. +type ServiceCatalogAPIServersGetter interface { + ServiceCatalogAPIServers() ServiceCatalogAPIServerInterface +} + +// ServiceCatalogAPIServerInterface has methods to work with ServiceCatalogAPIServer resources. +type ServiceCatalogAPIServerInterface interface { + Create(ctx context.Context, serviceCatalogAPIServer *v1.ServiceCatalogAPIServer, opts metav1.CreateOptions) (*v1.ServiceCatalogAPIServer, error) + Update(ctx context.Context, serviceCatalogAPIServer *v1.ServiceCatalogAPIServer, opts metav1.UpdateOptions) (*v1.ServiceCatalogAPIServer, error) + UpdateStatus(ctx context.Context, serviceCatalogAPIServer *v1.ServiceCatalogAPIServer, opts metav1.UpdateOptions) (*v1.ServiceCatalogAPIServer, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ServiceCatalogAPIServer, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.ServiceCatalogAPIServerList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ServiceCatalogAPIServer, err error) + ServiceCatalogAPIServerExpansion +} + +// serviceCatalogAPIServers implements ServiceCatalogAPIServerInterface +type serviceCatalogAPIServers struct { + client rest.Interface +} + +// newServiceCatalogAPIServers returns a ServiceCatalogAPIServers +func newServiceCatalogAPIServers(c *OperatorV1Client) *serviceCatalogAPIServers { + return &serviceCatalogAPIServers{ + client: c.RESTClient(), + } +} + +// Get takes name of the serviceCatalogAPIServer, and returns the corresponding serviceCatalogAPIServer object, and an error if there is any. +func (c *serviceCatalogAPIServers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ServiceCatalogAPIServer, err error) { + result = &v1.ServiceCatalogAPIServer{} + err = c.client.Get(). + Resource("servicecatalogapiservers"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ServiceCatalogAPIServers that match those selectors. 
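+// For example (hypothetical selector):
+//
+//	list, err := client.ServiceCatalogAPIServers().List(ctx,
+//		metav1.ListOptions{LabelSelector: "managed=true"})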
+func (c *serviceCatalogAPIServers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ServiceCatalogAPIServerList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.ServiceCatalogAPIServerList{} + err = c.client.Get(). + Resource("servicecatalogapiservers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested serviceCatalogAPIServers. +func (c *serviceCatalogAPIServers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("servicecatalogapiservers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a serviceCatalogAPIServer and creates it. Returns the server's representation of the serviceCatalogAPIServer, and an error, if there is any. +func (c *serviceCatalogAPIServers) Create(ctx context.Context, serviceCatalogAPIServer *v1.ServiceCatalogAPIServer, opts metav1.CreateOptions) (result *v1.ServiceCatalogAPIServer, err error) { + result = &v1.ServiceCatalogAPIServer{} + err = c.client.Post(). + Resource("servicecatalogapiservers"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(serviceCatalogAPIServer). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a serviceCatalogAPIServer and updates it. Returns the server's representation of the serviceCatalogAPIServer, and an error, if there is any. +func (c *serviceCatalogAPIServers) Update(ctx context.Context, serviceCatalogAPIServer *v1.ServiceCatalogAPIServer, opts metav1.UpdateOptions) (result *v1.ServiceCatalogAPIServer, err error) { + result = &v1.ServiceCatalogAPIServer{} + err = c.client.Put(). + Resource("servicecatalogapiservers"). + Name(serviceCatalogAPIServer.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(serviceCatalogAPIServer). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *serviceCatalogAPIServers) UpdateStatus(ctx context.Context, serviceCatalogAPIServer *v1.ServiceCatalogAPIServer, opts metav1.UpdateOptions) (result *v1.ServiceCatalogAPIServer, err error) { + result = &v1.ServiceCatalogAPIServer{} + err = c.client.Put(). + Resource("servicecatalogapiservers"). + Name(serviceCatalogAPIServer.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(serviceCatalogAPIServer). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the serviceCatalogAPIServer and deletes it. Returns an error if one occurs. +func (c *serviceCatalogAPIServers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("servicecatalogapiservers"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. 
+func (c *serviceCatalogAPIServers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("servicecatalogapiservers"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched serviceCatalogAPIServer. +func (c *serviceCatalogAPIServers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ServiceCatalogAPIServer, err error) { + result = &v1.ServiceCatalogAPIServer{} + err = c.client.Patch(pt). + Resource("servicecatalogapiservers"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/servicecatalogcontrollermanager.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/servicecatalogcontrollermanager.go new file mode 100644 index 0000000000..222308bfff --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/servicecatalogcontrollermanager.go @@ -0,0 +1,168 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/openshift/api/operator/v1" + scheme "github.com/openshift/client-go/operator/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ServiceCatalogControllerManagersGetter has a method to return a ServiceCatalogControllerManagerInterface. +// A group's client should implement this interface. +type ServiceCatalogControllerManagersGetter interface { + ServiceCatalogControllerManagers() ServiceCatalogControllerManagerInterface +} + +// ServiceCatalogControllerManagerInterface has methods to work with ServiceCatalogControllerManager resources. 
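+// Callers normally obtain an implementation from
+// OperatorV1Client.ServiceCatalogControllerManagers() rather than constructing
+// one directly, for example (hypothetical):
+//
+//	scm, err := client.ServiceCatalogControllerManagers().Get(ctx, "cluster", metav1.GetOptions{})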
+type ServiceCatalogControllerManagerInterface interface { + Create(ctx context.Context, serviceCatalogControllerManager *v1.ServiceCatalogControllerManager, opts metav1.CreateOptions) (*v1.ServiceCatalogControllerManager, error) + Update(ctx context.Context, serviceCatalogControllerManager *v1.ServiceCatalogControllerManager, opts metav1.UpdateOptions) (*v1.ServiceCatalogControllerManager, error) + UpdateStatus(ctx context.Context, serviceCatalogControllerManager *v1.ServiceCatalogControllerManager, opts metav1.UpdateOptions) (*v1.ServiceCatalogControllerManager, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ServiceCatalogControllerManager, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.ServiceCatalogControllerManagerList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ServiceCatalogControllerManager, err error) + ServiceCatalogControllerManagerExpansion +} + +// serviceCatalogControllerManagers implements ServiceCatalogControllerManagerInterface +type serviceCatalogControllerManagers struct { + client rest.Interface +} + +// newServiceCatalogControllerManagers returns a ServiceCatalogControllerManagers +func newServiceCatalogControllerManagers(c *OperatorV1Client) *serviceCatalogControllerManagers { + return &serviceCatalogControllerManagers{ + client: c.RESTClient(), + } +} + +// Get takes name of the serviceCatalogControllerManager, and returns the corresponding serviceCatalogControllerManager object, and an error if there is any. +func (c *serviceCatalogControllerManagers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ServiceCatalogControllerManager, err error) { + result = &v1.ServiceCatalogControllerManager{} + err = c.client.Get(). + Resource("servicecatalogcontrollermanagers"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ServiceCatalogControllerManagers that match those selectors. +func (c *serviceCatalogControllerManagers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ServiceCatalogControllerManagerList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.ServiceCatalogControllerManagerList{} + err = c.client.Get(). + Resource("servicecatalogcontrollermanagers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested serviceCatalogControllerManagers. +func (c *serviceCatalogControllerManagers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("servicecatalogcontrollermanagers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a serviceCatalogControllerManager and creates it. 
+// Returns the server's representation of the serviceCatalogControllerManager, and an error, if there is any.
+func (c *serviceCatalogControllerManagers) Create(ctx context.Context, serviceCatalogControllerManager *v1.ServiceCatalogControllerManager, opts metav1.CreateOptions) (result *v1.ServiceCatalogControllerManager, err error) {
+	result = &v1.ServiceCatalogControllerManager{}
+	err = c.client.Post().
+		Resource("servicecatalogcontrollermanagers").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Body(serviceCatalogControllerManager).
+		Do(ctx).
+		Into(result)
+	return
+}
+
+// Update takes the representation of a serviceCatalogControllerManager and updates it. Returns the server's representation of the serviceCatalogControllerManager, and an error, if there is any.
+func (c *serviceCatalogControllerManagers) Update(ctx context.Context, serviceCatalogControllerManager *v1.ServiceCatalogControllerManager, opts metav1.UpdateOptions) (result *v1.ServiceCatalogControllerManager, err error) {
+	result = &v1.ServiceCatalogControllerManager{}
+	err = c.client.Put().
+		Resource("servicecatalogcontrollermanagers").
+		Name(serviceCatalogControllerManager.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Body(serviceCatalogControllerManager).
+		Do(ctx).
+		Into(result)
+	return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *serviceCatalogControllerManagers) UpdateStatus(ctx context.Context, serviceCatalogControllerManager *v1.ServiceCatalogControllerManager, opts metav1.UpdateOptions) (result *v1.ServiceCatalogControllerManager, err error) {
+	result = &v1.ServiceCatalogControllerManager{}
+	err = c.client.Put().
+		Resource("servicecatalogcontrollermanagers").
+		Name(serviceCatalogControllerManager.Name).
+		SubResource("status").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Body(serviceCatalogControllerManager).
+		Do(ctx).
+		Into(result)
+	return
+}
+
+// Delete takes name of the serviceCatalogControllerManager and deletes it. Returns an error if one occurs.
+func (c *serviceCatalogControllerManagers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+	return c.client.Delete().
+		Resource("servicecatalogcontrollermanagers").
+		Name(name).
+		Body(&opts).
+		Do(ctx).
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *serviceCatalogControllerManagers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+	var timeout time.Duration
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Resource("servicecatalogcontrollermanagers").
+		VersionedParams(&listOpts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(&opts).
+		Do(ctx).
+		Error()
+}
+
+// Patch applies the patch and returns the patched serviceCatalogControllerManager.
+func (c *serviceCatalogControllerManagers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ServiceCatalogControllerManager, err error) {
+	result = &v1.ServiceCatalogControllerManager{}
+	err = c.client.Patch(pt).
+		Resource("servicecatalogcontrollermanagers").
+		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Body(data).
+		Do(ctx).
+ Into(result) + return +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/storage.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/storage.go new file mode 100644 index 0000000000..ae4ca8e0dc --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/storage.go @@ -0,0 +1,168 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/openshift/api/operator/v1" + scheme "github.com/openshift/client-go/operator/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// StoragesGetter has a method to return a StorageInterface. +// A group's client should implement this interface. +type StoragesGetter interface { + Storages() StorageInterface +} + +// StorageInterface has methods to work with Storage resources. +type StorageInterface interface { + Create(ctx context.Context, storage *v1.Storage, opts metav1.CreateOptions) (*v1.Storage, error) + Update(ctx context.Context, storage *v1.Storage, opts metav1.UpdateOptions) (*v1.Storage, error) + UpdateStatus(ctx context.Context, storage *v1.Storage, opts metav1.UpdateOptions) (*v1.Storage, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Storage, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.StorageList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Storage, err error) + StorageExpansion +} + +// storages implements StorageInterface +type storages struct { + client rest.Interface +} + +// newStorages returns a Storages +func newStorages(c *OperatorV1Client) *storages { + return &storages{ + client: c.RESTClient(), + } +} + +// Get takes name of the storage, and returns the corresponding storage object, and an error if there is any. +func (c *storages) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Storage, err error) { + result = &v1.Storage{} + err = c.client.Get(). + Resource("storages"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Storages that match those selectors. +func (c *storages) List(ctx context.Context, opts metav1.ListOptions) (result *v1.StorageList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.StorageList{} + err = c.client.Get(). + Resource("storages"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested storages. +func (c *storages) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("storages"). 
+ VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a storage and creates it. Returns the server's representation of the storage, and an error, if there is any. +func (c *storages) Create(ctx context.Context, storage *v1.Storage, opts metav1.CreateOptions) (result *v1.Storage, err error) { + result = &v1.Storage{} + err = c.client.Post(). + Resource("storages"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(storage). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a storage and updates it. Returns the server's representation of the storage, and an error, if there is any. +func (c *storages) Update(ctx context.Context, storage *v1.Storage, opts metav1.UpdateOptions) (result *v1.Storage, err error) { + result = &v1.Storage{} + err = c.client.Put(). + Resource("storages"). + Name(storage.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(storage). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *storages) UpdateStatus(ctx context.Context, storage *v1.Storage, opts metav1.UpdateOptions) (result *v1.Storage, err error) { + result = &v1.Storage{} + err = c.client.Put(). + Resource("storages"). + Name(storage.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(storage). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the storage and deletes it. Returns an error if one occurs. +func (c *storages) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("storages"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *storages) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("storages"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched storage. +func (c *storages) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Storage, err error) { + result = &v1.Storage{} + err = c.client.Patch(pt). + Resource("storages"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1/doc.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1/doc.go new file mode 100644 index 0000000000..93a7ca4e0e --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. 
+package v1alpha1 diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1/fake/doc.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1/fake/doc.go new file mode 100644 index 0000000000..2b5ba4c8e4 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1/fake/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1/fake/fake_imagecontentsourcepolicy.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1/fake/fake_imagecontentsourcepolicy.go new file mode 100644 index 0000000000..e2e0cd14f8 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1/fake/fake_imagecontentsourcepolicy.go @@ -0,0 +1,106 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1alpha1 "github.com/openshift/api/operator/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeImageContentSourcePolicies implements ImageContentSourcePolicyInterface +type FakeImageContentSourcePolicies struct { + Fake *FakeOperatorV1alpha1 +} + +var imagecontentsourcepoliciesResource = schema.GroupVersionResource{Group: "operator.openshift.io", Version: "v1alpha1", Resource: "imagecontentsourcepolicies"} + +var imagecontentsourcepoliciesKind = schema.GroupVersionKind{Group: "operator.openshift.io", Version: "v1alpha1", Kind: "ImageContentSourcePolicy"} + +// Get takes name of the imageContentSourcePolicy, and returns the corresponding imageContentSourcePolicy object, and an error if there is any. +func (c *FakeImageContentSourcePolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ImageContentSourcePolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(imagecontentsourcepoliciesResource, name), &v1alpha1.ImageContentSourcePolicy{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ImageContentSourcePolicy), err +} + +// List takes label and field selectors, and returns the list of ImageContentSourcePolicies that match those selectors. +func (c *FakeImageContentSourcePolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ImageContentSourcePolicyList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(imagecontentsourcepoliciesResource, imagecontentsourcepoliciesKind, opts), &v1alpha1.ImageContentSourcePolicyList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.ImageContentSourcePolicyList{ListMeta: obj.(*v1alpha1.ImageContentSourcePolicyList).ListMeta} + for _, item := range obj.(*v1alpha1.ImageContentSourcePolicyList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested imageContentSourcePolicies. 
+func (c *FakeImageContentSourcePolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(imagecontentsourcepoliciesResource, opts)) +} + +// Create takes the representation of a imageContentSourcePolicy and creates it. Returns the server's representation of the imageContentSourcePolicy, and an error, if there is any. +func (c *FakeImageContentSourcePolicies) Create(ctx context.Context, imageContentSourcePolicy *v1alpha1.ImageContentSourcePolicy, opts v1.CreateOptions) (result *v1alpha1.ImageContentSourcePolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(imagecontentsourcepoliciesResource, imageContentSourcePolicy), &v1alpha1.ImageContentSourcePolicy{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ImageContentSourcePolicy), err +} + +// Update takes the representation of a imageContentSourcePolicy and updates it. Returns the server's representation of the imageContentSourcePolicy, and an error, if there is any. +func (c *FakeImageContentSourcePolicies) Update(ctx context.Context, imageContentSourcePolicy *v1alpha1.ImageContentSourcePolicy, opts v1.UpdateOptions) (result *v1alpha1.ImageContentSourcePolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(imagecontentsourcepoliciesResource, imageContentSourcePolicy), &v1alpha1.ImageContentSourcePolicy{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ImageContentSourcePolicy), err +} + +// Delete takes name of the imageContentSourcePolicy and deletes it. Returns an error if one occurs. +func (c *FakeImageContentSourcePolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(imagecontentsourcepoliciesResource, name), &v1alpha1.ImageContentSourcePolicy{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeImageContentSourcePolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(imagecontentsourcepoliciesResource, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.ImageContentSourcePolicyList{}) + return err +} + +// Patch applies the patch and returns the patched imageContentSourcePolicy. +func (c *FakeImageContentSourcePolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ImageContentSourcePolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(imagecontentsourcepoliciesResource, name, pt, data, subresources...), &v1alpha1.ImageContentSourcePolicy{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ImageContentSourcePolicy), err +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1/fake/fake_operator_client.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1/fake/fake_operator_client.go new file mode 100644 index 0000000000..4759c12832 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1/fake/fake_operator_client.go @@ -0,0 +1,24 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1alpha1 "github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeOperatorV1alpha1 struct { + *testing.Fake +} + +func (c *FakeOperatorV1alpha1) ImageContentSourcePolicies() v1alpha1.ImageContentSourcePolicyInterface { + return &FakeImageContentSourcePolicies{c} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeOperatorV1alpha1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1/generated_expansion.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1/generated_expansion.go new file mode 100644 index 0000000000..42ec352cf6 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1/generated_expansion.go @@ -0,0 +1,5 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +type ImageContentSourcePolicyExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1/imagecontentsourcepolicy.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1/imagecontentsourcepolicy.go new file mode 100644 index 0000000000..e2f70b6fd5 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1/imagecontentsourcepolicy.go @@ -0,0 +1,152 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/openshift/api/operator/v1alpha1" + scheme "github.com/openshift/client-go/operator/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ImageContentSourcePoliciesGetter has a method to return a ImageContentSourcePolicyInterface. +// A group's client should implement this interface. +type ImageContentSourcePoliciesGetter interface { + ImageContentSourcePolicies() ImageContentSourcePolicyInterface +} + +// ImageContentSourcePolicyInterface has methods to work with ImageContentSourcePolicy resources. 
+type ImageContentSourcePolicyInterface interface { + Create(ctx context.Context, imageContentSourcePolicy *v1alpha1.ImageContentSourcePolicy, opts v1.CreateOptions) (*v1alpha1.ImageContentSourcePolicy, error) + Update(ctx context.Context, imageContentSourcePolicy *v1alpha1.ImageContentSourcePolicy, opts v1.UpdateOptions) (*v1alpha1.ImageContentSourcePolicy, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ImageContentSourcePolicy, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ImageContentSourcePolicyList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ImageContentSourcePolicy, err error) + ImageContentSourcePolicyExpansion +} + +// imageContentSourcePolicies implements ImageContentSourcePolicyInterface +type imageContentSourcePolicies struct { + client rest.Interface +} + +// newImageContentSourcePolicies returns a ImageContentSourcePolicies +func newImageContentSourcePolicies(c *OperatorV1alpha1Client) *imageContentSourcePolicies { + return &imageContentSourcePolicies{ + client: c.RESTClient(), + } +} + +// Get takes name of the imageContentSourcePolicy, and returns the corresponding imageContentSourcePolicy object, and an error if there is any. +func (c *imageContentSourcePolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ImageContentSourcePolicy, err error) { + result = &v1alpha1.ImageContentSourcePolicy{} + err = c.client.Get(). + Resource("imagecontentsourcepolicies"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ImageContentSourcePolicies that match those selectors. +func (c *imageContentSourcePolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ImageContentSourcePolicyList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.ImageContentSourcePolicyList{} + err = c.client.Get(). + Resource("imagecontentsourcepolicies"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested imageContentSourcePolicies. +func (c *imageContentSourcePolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("imagecontentsourcepolicies"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a imageContentSourcePolicy and creates it. Returns the server's representation of the imageContentSourcePolicy, and an error, if there is any. +func (c *imageContentSourcePolicies) Create(ctx context.Context, imageContentSourcePolicy *v1alpha1.ImageContentSourcePolicy, opts v1.CreateOptions) (result *v1alpha1.ImageContentSourcePolicy, err error) { + result = &v1alpha1.ImageContentSourcePolicy{} + err = c.client.Post(). 
+ Resource("imagecontentsourcepolicies"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(imageContentSourcePolicy). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a imageContentSourcePolicy and updates it. Returns the server's representation of the imageContentSourcePolicy, and an error, if there is any. +func (c *imageContentSourcePolicies) Update(ctx context.Context, imageContentSourcePolicy *v1alpha1.ImageContentSourcePolicy, opts v1.UpdateOptions) (result *v1alpha1.ImageContentSourcePolicy, err error) { + result = &v1alpha1.ImageContentSourcePolicy{} + err = c.client.Put(). + Resource("imagecontentsourcepolicies"). + Name(imageContentSourcePolicy.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(imageContentSourcePolicy). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the imageContentSourcePolicy and deletes it. Returns an error if one occurs. +func (c *imageContentSourcePolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("imagecontentsourcepolicies"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *imageContentSourcePolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("imagecontentsourcepolicies"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched imageContentSourcePolicy. +func (c *imageContentSourcePolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ImageContentSourcePolicy, err error) { + result = &v1alpha1.ImageContentSourcePolicy{} + err = c.client.Patch(pt). + Resource("imagecontentsourcepolicies"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1/operator_client.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1/operator_client.go new file mode 100644 index 0000000000..9d8c791979 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1/operator_client.go @@ -0,0 +1,73 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/openshift/api/operator/v1alpha1" + "github.com/openshift/client-go/operator/clientset/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type OperatorV1alpha1Interface interface { + RESTClient() rest.Interface + ImageContentSourcePoliciesGetter +} + +// OperatorV1alpha1Client is used to interact with features provided by the operator.openshift.io group. +type OperatorV1alpha1Client struct { + restClient rest.Interface +} + +func (c *OperatorV1alpha1Client) ImageContentSourcePolicies() ImageContentSourcePolicyInterface { + return newImageContentSourcePolicies(c) +} + +// NewForConfig creates a new OperatorV1alpha1Client for the given config. 
+func NewForConfig(c *rest.Config) (*OperatorV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &OperatorV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new OperatorV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *OperatorV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new OperatorV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *OperatorV1alpha1Client { + return &OperatorV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *OperatorV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/discovery/fake/discovery.go b/vendor/k8s.io/client-go/discovery/fake/discovery.go new file mode 100644 index 0000000000..f0cc2dbf4f --- /dev/null +++ b/vendor/k8s.io/client-go/discovery/fake/discovery.go @@ -0,0 +1,160 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + "fmt" + + openapi_v2 "github.com/googleapis/gnostic/openapiv2" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/version" + kubeversion "k8s.io/client-go/pkg/version" + restclient "k8s.io/client-go/rest" + "k8s.io/client-go/testing" +) + +// FakeDiscovery implements discovery.DiscoveryInterface and sometimes calls testing.Fake.Invoke with an action, +// but doesn't respect the return value if any. There is a way to fake static values like ServerVersion by using the Faked... fields on the struct. +type FakeDiscovery struct { + *testing.Fake + FakedServerVersion *version.Info +} + +// ServerResourcesForGroupVersion returns the supported resources for a group +// and version. +func (c *FakeDiscovery) ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) { + action := testing.ActionImpl{ + Verb: "get", + Resource: schema.GroupVersionResource{Resource: "resource"}, + } + c.Invokes(action, nil) + for _, resourceList := range c.Resources { + if resourceList.GroupVersion == groupVersion { + return resourceList, nil + } + } + return nil, fmt.Errorf("GroupVersion %q not found", groupVersion) +} + +// ServerResources returns the supported resources for all groups and versions. +// Deprecated: use ServerGroupsAndResources instead. 
+func (c *FakeDiscovery) ServerResources() ([]*metav1.APIResourceList, error) { + _, rs, err := c.ServerGroupsAndResources() + return rs, err +} + +// ServerGroupsAndResources returns the supported groups and resources for all groups and versions. +func (c *FakeDiscovery) ServerGroupsAndResources() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { + sgs, err := c.ServerGroups() + if err != nil { + return nil, nil, err + } + resultGroups := []*metav1.APIGroup{} + for i := range sgs.Groups { + resultGroups = append(resultGroups, &sgs.Groups[i]) + } + + action := testing.ActionImpl{ + Verb: "get", + Resource: schema.GroupVersionResource{Resource: "resource"}, + } + c.Invokes(action, nil) + return resultGroups, c.Resources, nil +} + +// ServerPreferredResources returns the supported resources with the version +// preferred by the server. +func (c *FakeDiscovery) ServerPreferredResources() ([]*metav1.APIResourceList, error) { + return nil, nil +} + +// ServerPreferredNamespacedResources returns the supported namespaced resources +// with the version preferred by the server. +func (c *FakeDiscovery) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) { + return nil, nil +} + +// ServerGroups returns the supported groups, with information like supported +// versions and the preferred version. +func (c *FakeDiscovery) ServerGroups() (*metav1.APIGroupList, error) { + action := testing.ActionImpl{ + Verb: "get", + Resource: schema.GroupVersionResource{Resource: "group"}, + } + c.Invokes(action, nil) + + groups := map[string]*metav1.APIGroup{} + + for _, res := range c.Resources { + gv, err := schema.ParseGroupVersion(res.GroupVersion) + if err != nil { + return nil, err + } + group := groups[gv.Group] + if group == nil { + group = &metav1.APIGroup{ + Name: gv.Group, + PreferredVersion: metav1.GroupVersionForDiscovery{ + GroupVersion: res.GroupVersion, + Version: gv.Version, + }, + } + groups[gv.Group] = group + } + + group.Versions = append(group.Versions, metav1.GroupVersionForDiscovery{ + GroupVersion: res.GroupVersion, + Version: gv.Version, + }) + } + + list := &metav1.APIGroupList{} + for _, apiGroup := range groups { + list.Groups = append(list.Groups, *apiGroup) + } + + return list, nil + +} + +// ServerVersion retrieves and parses the server's version. +func (c *FakeDiscovery) ServerVersion() (*version.Info, error) { + action := testing.ActionImpl{} + action.Verb = "get" + action.Resource = schema.GroupVersionResource{Resource: "version"} + c.Invokes(action, nil) + + if c.FakedServerVersion != nil { + return c.FakedServerVersion, nil + } + + versionInfo := kubeversion.Get() + return &versionInfo, nil +} + +// OpenAPISchema retrieves and parses the swagger API schema the server supports. +func (c *FakeDiscovery) OpenAPISchema() (*openapi_v2.Document, error) { + return &openapi_v2.Document{}, nil +} + +// RESTClient returns a RESTClient that is used to communicate with API server +// by this client implementation. 
+func (c *FakeDiscovery) RESTClient() restclient.Interface {
+	return nil
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 93b1df24da..5a32db47ac 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -253,10 +253,13 @@ github.com/opencontainers/image-spec/specs-go/v1
 github.com/opencontainers/runc/libcontainer/user
 # github.com/openshift/api v0.0.0-20210527122704-efd9d5958e01
 github.com/openshift/api/authorization/v1
+github.com/openshift/api/config/v1
 github.com/openshift/api/image/docker10
 github.com/openshift/api/image/dockerpre012
 github.com/openshift/api/image/v1
 github.com/openshift/api/oauth/v1
+github.com/openshift/api/operator/v1
+github.com/openshift/api/operator/v1alpha1
 github.com/openshift/api/pkg/serialization
 github.com/openshift/api/project/v1
 github.com/openshift/api/route/v1
@@ -271,6 +274,13 @@ github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/fake
 github.com/openshift/client-go/oauth/clientset/versioned
 github.com/openshift/client-go/oauth/clientset/versioned/scheme
 github.com/openshift/client-go/oauth/clientset/versioned/typed/oauth/v1
+github.com/openshift/client-go/operator/clientset/versioned
+github.com/openshift/client-go/operator/clientset/versioned/fake
+github.com/openshift/client-go/operator/clientset/versioned/scheme
+github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1
+github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake
+github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1
+github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1/fake
 github.com/openshift/client-go/project/clientset/versioned/scheme
 github.com/openshift/client-go/project/clientset/versioned/typed/project/v1
 github.com/openshift/client-go/route/clientset/versioned
@@ -576,6 +586,7 @@ k8s.io/client-go/applyconfigurations/storage/v1
 k8s.io/client-go/applyconfigurations/storage/v1alpha1
 k8s.io/client-go/applyconfigurations/storage/v1beta1
 k8s.io/client-go/discovery
+k8s.io/client-go/discovery/fake
 k8s.io/client-go/kubernetes
 k8s.io/client-go/kubernetes/scheme
 k8s.io/client-go/kubernetes/typed/admissionregistration/v1

From bc0514252cb834a386e60974d54c76fd33a59725 Mon Sep 17 00:00:00 2001
From: Ricardo Maraschini
Date: Thu, 3 Jun 2021 10:55:04 +0200
Subject: [PATCH 2/3] ICSP support

Implements ImageContentSourcePolicy (ICSP) support in the image registry:
during pullthrough the registry now consults cluster-wide ICSP rules and
tries the configured mirror repositories before falling back to the
original image source.
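As a rough illustration of the behavior this commit adds (not part of the
patch itself), the sketch below feeds a single made-up ICSP rule to the fake
clientset vendored in the previous commit and asks the new lookup strategy,
added below in pkg/dockerregistry/server/simplelookupicsp.go, for the ordered
list of repositories to try. All registry, namespace, and rule names are
illustrative:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/openshift/api/operator/v1alpha1"
	operatorfake "github.com/openshift/client-go/operator/clientset/versioned/fake"
	reference "github.com/openshift/library-go/pkg/image/reference"

	server "github.com/openshift/image-registry/pkg/dockerregistry/server"
)

func main() {
	// One ICSP rule: pulls from quay.io/ns/app may be served by the
	// mirror mirror.internal/ns/app (illustrative names).
	rule := &v1alpha1.ImageContentSourcePolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "example-rule"},
		Spec: v1alpha1.ImageContentSourcePolicySpec{
			RepositoryDigestMirrors: []v1alpha1.RepositoryDigestMirrors{
				{Source: "quay.io/ns/app", Mirrors: []string{"mirror.internal/ns/app"}},
			},
		},
	}

	// The fake clientset lets this run without a cluster.
	icsp := operatorfake.NewSimpleClientset(rule).OperatorV1alpha1().ImageContentSourcePolicies()

	ref, err := reference.Parse("quay.io/ns/app:latest")
	if err != nil {
		panic(err)
	}

	// FirstRequest returns the configured mirrors first, then the
	// original repository as the final fallback.
	strategy := server.NewSimpleLookupICSPStrategy(icsp)
	refs, _ := strategy.FirstRequest(context.Background(), ref)
	fmt.Println(refs)
}

In the registry itself the ICSP client comes from
registryOSClient.ImageContentSourcePolicy() and is threaded through
getImportContext, as the hunks below show.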
--- pkg/dockerregistry/server/client/client.go | 26 +- .../server/client/interfaces.go | 5 + pkg/dockerregistry/server/client/test.go | 7 +- .../server/pullthroughblobstore_test.go | 7 + .../server/pullthroughmanifestservice.go | 6 +- .../server/pullthroughmanifestservice_test.go | 11 + pkg/dockerregistry/server/remoteblobgetter.go | 8 +- pkg/dockerregistry/server/repository.go | 6 + pkg/dockerregistry/server/simplelookupicsp.go | 95 ++++++ .../server/simplelookupicsp_test.go | 314 ++++++++++++++++++ pkg/dockerregistry/server/util.go | 5 +- pkg/dockerregistry/server/util_test.go | 4 +- .../pullthrough/pullthrough_test.go | 178 ++++++++++ 13 files changed, 657 insertions(+), 15 deletions(-) create mode 100644 pkg/dockerregistry/server/simplelookupicsp.go create mode 100644 pkg/dockerregistry/server/simplelookupicsp_test.go diff --git a/pkg/dockerregistry/server/client/client.go b/pkg/dockerregistry/server/client/client.go index bd2fcbe7fb..04875a2da9 100644 --- a/pkg/dockerregistry/server/client/client.go +++ b/pkg/dockerregistry/server/client/client.go @@ -6,6 +6,7 @@ import ( restclient "k8s.io/client-go/rest" imageclientv1 "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1" + operatorclientv1alpha1 "github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1" userclientv1 "github.com/openshift/client-go/user/clientset/versioned/typed/user/v1" "github.com/openshift/image-registry/pkg/origin-common/clientcmd" ) @@ -32,13 +33,15 @@ type Interface interface { LocalSubjectAccessReviewsNamespacer SelfSubjectAccessReviewsNamespacer UsersInterfacer + ImageContentSourcePolicyInterfacer } type apiClient struct { - kube coreclientv1.CoreV1Interface - auth authclientv1.AuthorizationV1Interface - image imageclientv1.ImageV1Interface - user userclientv1.UserV1Interface + kube coreclientv1.CoreV1Interface + auth authclientv1.AuthorizationV1Interface + image imageclientv1.ImageV1Interface + user userclientv1.UserV1Interface + operator operatorclientv1alpha1.OperatorV1alpha1Interface } func newAPIClient( @@ -46,15 +49,21 @@ func newAPIClient( authClient authclientv1.AuthorizationV1Interface, imageClient imageclientv1.ImageV1Interface, userClient userclientv1.UserV1Interface, + operatorClient operatorclientv1alpha1.OperatorV1alpha1Interface, ) Interface { return &apiClient{ - kube: kc, - auth: authClient, - image: imageClient, - user: userClient, + kube: kc, + auth: authClient, + image: imageClient, + user: userClient, + operator: operatorClient, } } +func (c *apiClient) ImageContentSourcePolicy() operatorclientv1alpha1.ImageContentSourcePolicyInterface { + return c.operator.ImageContentSourcePolicies() +} + func (c *apiClient) Users() UserInterface { return c.user.Users() } @@ -117,6 +126,7 @@ func (c *registryClient) Client() (Interface, error) { authclientv1.NewForConfigOrDie(c.kubeConfig), imageclientv1.NewForConfigOrDie(c.kubeConfig), userclientv1.NewForConfigOrDie(c.kubeConfig), + operatorclientv1alpha1.NewForConfigOrDie(c.kubeConfig), ), nil } diff --git a/pkg/dockerregistry/server/client/interfaces.go b/pkg/dockerregistry/server/client/interfaces.go index 57fb27ebac..074cc74b7f 100644 --- a/pkg/dockerregistry/server/client/interfaces.go +++ b/pkg/dockerregistry/server/client/interfaces.go @@ -12,6 +12,7 @@ import ( authapiv1 "k8s.io/api/authorization/v1" imageclientv1 "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1" + operatorclientv1alpha1 "github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1" 
userclientv1 "github.com/openshift/client-go/user/clientset/versioned/typed/user/v1" authclientv1 "k8s.io/client-go/kubernetes/typed/authorization/v1" @@ -21,6 +22,10 @@ type UsersInterfacer interface { Users() UserInterface } +type ImageContentSourcePolicyInterfacer interface { + ImageContentSourcePolicy() operatorclientv1alpha1.ImageContentSourcePolicyInterface +} + type ImagesInterfacer interface { Images() ImageInterface } diff --git a/pkg/dockerregistry/server/client/test.go b/pkg/dockerregistry/server/client/test.go index c91815f04c..156620d1aa 100644 --- a/pkg/dockerregistry/server/client/test.go +++ b/pkg/dockerregistry/server/client/test.go @@ -4,6 +4,7 @@ import ( coreclientv1 "k8s.io/client-go/kubernetes/typed/core/v1" imageclientv1 "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1" + operatorfake "github.com/openshift/client-go/operator/clientset/versioned/fake" ) type fakeRegistryClient struct { @@ -20,9 +21,11 @@ func NewFakeRegistryClient(imageclient imageclientv1.ImageV1Interface) RegistryC } func (c *fakeRegistryClient) Client() (Interface, error) { - return newAPIClient(nil, nil, c.images, nil), nil + icsp := operatorfake.NewSimpleClientset().OperatorV1alpha1() + return newAPIClient(nil, nil, c.images, nil, icsp), nil } func NewFakeRegistryAPIClient(kc coreclientv1.CoreV1Interface, imageclient imageclientv1.ImageV1Interface) Interface { - return newAPIClient(nil, nil, imageclient, nil) + icsp := operatorfake.NewSimpleClientset().OperatorV1alpha1() + return newAPIClient(nil, nil, imageclient, nil, icsp) } diff --git a/pkg/dockerregistry/server/pullthroughblobstore_test.go b/pkg/dockerregistry/server/pullthroughblobstore_test.go index 6b6c0b9876..ebc4f3d998 100644 --- a/pkg/dockerregistry/server/pullthroughblobstore_test.go +++ b/pkg/dockerregistry/server/pullthroughblobstore_test.go @@ -20,6 +20,7 @@ import ( "github.com/opencontainers/go-digest" imageapiv1 "github.com/openshift/api/image/v1" + operatorfake "github.com/openshift/client-go/operator/clientset/versioned/fake" "github.com/openshift/library-go/pkg/image/registryclient" "github.com/openshift/image-registry/pkg/dockerregistry/server/cache" @@ -33,6 +34,7 @@ import ( ) func TestPullthroughServeBlob(t *testing.T) { + icsp := operatorfake.NewSimpleClientset().OperatorV1alpha1().ImageContentSourcePolicies() ctx := context.Background() ctx = testutil.WithTestLogger(ctx, t) @@ -168,6 +170,7 @@ func TestPullthroughServeBlob(t *testing.T) { imageStream.GetSecrets, cache, metrics.NewNoopMetrics(), + icsp, ) ptbs := &pullthroughBlobStore{ @@ -327,6 +330,7 @@ func TestPullthroughServeNotSeekableBlob(t *testing.T) { } func TestPullthroughServeBlobInsecure(t *testing.T) { + icsp := operatorfake.NewSimpleClientset().OperatorV1alpha1().ImageContentSourcePolicies() namespace := "user" repo1 := "app1" repo2 := "app2" @@ -603,6 +607,7 @@ func TestPullthroughServeBlobInsecure(t *testing.T) { imageStream.GetSecrets, cache, metrics.NewNoopMetrics(), + icsp, ) ptbs := &pullthroughBlobStore{ @@ -669,6 +674,7 @@ func TestPullthroughServeBlobInsecure(t *testing.T) { } func TestPullthroughMetrics(t *testing.T) { + icsp := operatorfake.NewSimpleClientset().OperatorV1alpha1().ImageContentSourcePolicies() ctx := context.Background() ctx = testutil.WithTestLogger(ctx, t) @@ -728,6 +734,7 @@ func TestPullthroughMetrics(t *testing.T) { imageStream.GetSecrets, cache, metrics.NewMetrics(sink), + icsp, ) ptbs := &pullthroughBlobStore{ diff --git a/pkg/dockerregistry/server/pullthroughmanifestservice.go 
b/pkg/dockerregistry/server/pullthroughmanifestservice.go index c97b8ceacf..8476528112 100644 --- a/pkg/dockerregistry/server/pullthroughmanifestservice.go +++ b/pkg/dockerregistry/server/pullthroughmanifestservice.go @@ -8,6 +8,8 @@ import ( dcontext "github.com/docker/distribution/context" "github.com/opencontainers/go-digest" + operatorv1alpha1 "github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1" + "github.com/openshift/image-registry/pkg/dockerregistry/server/cache" "github.com/openshift/image-registry/pkg/dockerregistry/server/metrics" "github.com/openshift/image-registry/pkg/errors" @@ -27,6 +29,7 @@ type pullthroughManifestService struct { mirror bool registryAddr string metrics metrics.Pullthrough + icsp operatorv1alpha1.ImageContentSourcePolicyInterface } var _ distribution.ManifestService = &pullthroughManifestService{} @@ -112,12 +115,13 @@ func (m *pullthroughManifestService) mirrorManifest(ctx context.Context, manifes } func (m *pullthroughManifestService) getRemoteRepositoryClient(ctx context.Context, ref *imageapi.DockerImageReference, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Repository, error) { + dcontext.GetLogger(ctx).Debug("(*pullthroughManifestService).getRemoteRepositoryClient") secrets, err := m.imageStream.GetSecrets() if err != nil { dcontext.GetLogger(ctx).Errorf("error getting secrets: %v", err) } - retriever, impErr := getImportContext(ctx, ref, secrets, m.metrics) + retriever, impErr := getImportContext(ctx, ref, secrets, m.metrics, m.icsp) if impErr != nil { return nil, impErr } diff --git a/pkg/dockerregistry/server/pullthroughmanifestservice_test.go b/pkg/dockerregistry/server/pullthroughmanifestservice_test.go index 70e3531a5b..dd67f6c86d 100644 --- a/pkg/dockerregistry/server/pullthroughmanifestservice_test.go +++ b/pkg/dockerregistry/server/pullthroughmanifestservice_test.go @@ -18,6 +18,7 @@ import ( "github.com/opencontainers/go-digest" imageapiv1 "github.com/openshift/api/image/v1" + operatorfake "github.com/openshift/client-go/operator/clientset/versioned/fake" "github.com/openshift/image-registry/pkg/dockerregistry/server/cache" registryclient "github.com/openshift/image-registry/pkg/dockerregistry/server/client" @@ -54,6 +55,7 @@ func createTestRegistryServer(t *testing.T, ctx context.Context) *httptest.Serve } func TestPullthroughManifests(t *testing.T) { + icsp := operatorfake.NewSimpleClientset().OperatorV1alpha1().ImageContentSourcePolicies() namespace := "fuser" repo := "zapp" repoName := fmt.Sprintf("%s/%s", namespace, repo) @@ -187,6 +189,7 @@ func TestPullthroughManifests(t *testing.T) { cache: cache, registryAddr: "localhost:5000", metrics: metrics.NewNoopMetrics(), + icsp: icsp, } manifestResult, err := ptms.Get(ctx, tc.manifestDigest) @@ -225,6 +228,7 @@ func TestPullthroughManifests(t *testing.T) { } func TestPullthroughManifestInsecure(t *testing.T) { + icsp := operatorfake.NewSimpleClientset().OperatorV1alpha1().ImageContentSourcePolicies() namespace := "fuser" repo := "zapp" repoName := fmt.Sprintf("%s/%s", namespace, repo) @@ -428,6 +432,7 @@ func TestPullthroughManifestInsecure(t *testing.T) { imageStream: imageStream, cache: cache, metrics: metrics.NewNoopMetrics(), + icsp: icsp, } manifestResult, err := ptms.Get(ctx, tc.manifestDigest) @@ -468,6 +473,7 @@ func TestPullthroughManifestInsecure(t *testing.T) { } func TestPullthroughManifestDockerReference(t *testing.T) { + icsp := operatorfake.NewSimpleClientset().OperatorV1alpha1().ImageContentSourcePolicies() 
namespace := "user" repo1 := "repo1" repo2 := "repo2" @@ -567,6 +573,7 @@ func TestPullthroughManifestDockerReference(t *testing.T) { ManifestService: newTestManifestService(tc.repoName, nil), imageStream: imageStream, metrics: metrics.NewNoopMetrics(), + icsp: icsp, } ptms.Get(ctx, digest.Digest(img.Name)) @@ -661,6 +668,7 @@ func (ms *putWaiterManifestService) Put(ctx context.Context, manifest distributi } func TestPullthroughManifestMirroring(t *testing.T) { + icsp := operatorfake.NewSimpleClientset().OperatorV1alpha1().ImageContentSourcePolicies() const timeout = 5 * time.Second namespace := "myproject" @@ -725,6 +733,7 @@ func TestPullthroughManifestMirroring(t *testing.T) { imageStream: imageStream, mirror: true, metrics: metrics.NewNoopMetrics(), + icsp: icsp, } _, err = ptms.Get(ctx, digest.Digest(img.Name)) @@ -740,6 +749,7 @@ func TestPullthroughManifestMirroring(t *testing.T) { } func TestPullthroughManifestMetrics(t *testing.T) { + icsp := operatorfake.NewSimpleClientset().OperatorV1alpha1().ImageContentSourcePolicies() namespace := "myproject" repo := "myapp" repoName := fmt.Sprintf("%s/%s", namespace, repo) @@ -801,6 +811,7 @@ func TestPullthroughManifestMetrics(t *testing.T) { newLocalManifestService: func(ctx context.Context) (distribution.ManifestService, error) { return ms, nil }, imageStream: imageStream, metrics: metrics.NewMetrics(sink), + icsp: icsp, } _, err = ptms.Get(ctx, digest.Digest(img.Name)) diff --git a/pkg/dockerregistry/server/remoteblobgetter.go b/pkg/dockerregistry/server/remoteblobgetter.go index f9b12865d6..79c455f026 100644 --- a/pkg/dockerregistry/server/remoteblobgetter.go +++ b/pkg/dockerregistry/server/remoteblobgetter.go @@ -11,6 +11,7 @@ import ( corev1 "k8s.io/api/core/v1" + operatorv1alpha1 "github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1" "github.com/openshift/library-go/pkg/image/registryclient" "github.com/openshift/image-registry/pkg/dockerregistry/server/cache" @@ -68,6 +69,7 @@ type remoteBlobGetterService struct { cache cache.RepositoryDigest digestToStore *digestBlobStoreCache metrics metrics.Pullthrough + icsp operatorv1alpha1.ImageContentSourcePolicyInterface } var _ BlobGetterService = &remoteBlobGetterService{} @@ -79,6 +81,7 @@ func NewBlobGetterService( secretsGetter secretsGetter, cache cache.RepositoryDigest, m metrics.Pullthrough, + icsp operatorv1alpha1.ImageContentSourcePolicyInterface, ) BlobGetterService { return &remoteBlobGetterService{ imageStream: imageStream, @@ -86,6 +89,7 @@ func NewBlobGetterService( cache: cache, digestToStore: newDigestBlobStoreCache(m), metrics: m, + icsp: icsp, } } @@ -272,7 +276,7 @@ func (rbgs *remoteBlobGetterService) findCandidateRepository( continue } - retriever, impErr := getImportContext(ctx, spec.DockerImageReference, secrets, rbgs.metrics) + retriever, impErr := getImportContext(ctx, spec.DockerImageReference, secrets, rbgs.metrics, rbgs.icsp) if impErr != nil { return distribution.Descriptor{}, nil, impErr } @@ -293,7 +297,7 @@ func (rbgs *remoteBlobGetterService) findCandidateRepository( continue } - retriever, impErr := getImportContext(ctx, spec.DockerImageReference, secrets, rbgs.metrics) + retriever, impErr := getImportContext(ctx, spec.DockerImageReference, secrets, rbgs.metrics, rbgs.icsp) if impErr != nil { return distribution.Descriptor{}, nil, impErr } diff --git a/pkg/dockerregistry/server/repository.go b/pkg/dockerregistry/server/repository.go index 3a37ac12a9..9cdddc2784 100644 --- a/pkg/dockerregistry/server/repository.go +++ 
b/pkg/dockerregistry/server/repository.go
@@ -11,6 +11,8 @@ import (
 	restclient "k8s.io/client-go/rest"
 
+	operatorv1alpha1 "github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1"
+
 	"github.com/openshift/image-registry/pkg/dockerregistry/server/audit"
 	"github.com/openshift/image-registry/pkg/dockerregistry/server/cache"
 	"github.com/openshift/image-registry/pkg/imagestream"
@@ -44,6 +46,7 @@ type repository struct {
 	crossmount bool
 
 	imageStream imagestream.ImageStream
+	icsp        operatorv1alpha1.ImageContentSourcePolicyInterface
 
 	// remoteBlobGetter is used to fetch blobs from remote registries if pullthrough is enabled.
 	remoteBlobGetter BlobGetterService
@@ -71,6 +74,7 @@ func (app *App) Repository(ctx context.Context, repo distribution.Repository, cr
 
 		imageStream: imagestream.New(ctx, namespace, name, registryOSClient),
 		cache:       cache.NewRepositoryDigest(app.cache),
+		icsp:        registryOSClient.ImageContentSourcePolicy(),
 	}
 
 	r.remoteBlobGetter = NewBlobGetterService(
@@ -78,6 +82,7 @@
 		r.imageStream.GetSecrets,
 		r.cache,
 		r.app.metrics,
+		r.icsp,
 	)
 
 	repo = distribution.Repository(r)
@@ -117,6 +122,7 @@ func (r *repository) Manifests(ctx context.Context, options ...distribution.Mani
 		mirror:       r.app.config.Pullthrough.Mirror,
 		registryAddr: r.app.config.Server.Addr,
 		metrics:      r.app.metrics,
+		icsp:         r.icsp,
 	}
 
 	ms = newPendingErrorsManifestService(ms, r)
diff --git a/pkg/dockerregistry/server/simplelookupicsp.go b/pkg/dockerregistry/server/simplelookupicsp.go
new file mode 100644
index 0000000000..a1c0dbc163
--- /dev/null
+++ b/pkg/dockerregistry/server/simplelookupicsp.go
@@ -0,0 +1,95 @@
+package server
+
+import (
+	"context"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/klog/v2"
+
+	operatorv1alpha1 "github.com/openshift/api/operator/v1alpha1"
+	operatorv1alpha1client "github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1"
+	reference "github.com/openshift/library-go/pkg/image/reference"
+	"github.com/openshift/library-go/pkg/image/registryclient"
+)
+
+// simpleLookupICSP looks up alternative image sources from the cluster's
+// ImageContentSourcePolicy objects. It satisfies the
+// registryclient.AlternateBlobSourceStrategy interface.
+type simpleLookupICSP struct {
+	icspClient operatorv1alpha1client.ImageContentSourcePolicyInterface
+}
+
+// NewSimpleLookupICSPStrategy returns a new simpleLookupICSP that uses the
+// provided client to obtain the cluster-wide ICSP configuration.
+func NewSimpleLookupICSPStrategy(
+	cli operatorv1alpha1client.ImageContentSourcePolicyInterface,
+) registryclient.AlternateBlobSourceStrategy {
+	return &simpleLookupICSP{
+		icspClient: cli,
+	}
+}
+
+// FirstRequest returns a list of sources to use when searching for a given repository. Returns
+// the whole list of mirrors followed by the original image reference.
+func (s *simpleLookupICSP) FirstRequest( + ctx context.Context, ref reference.DockerImageReference, +) ([]reference.DockerImageReference, error) { + klog.V(5).Infof("reading ICSP from cluster") + icspList, err := s.icspClient.List(ctx, metav1.ListOptions{}) + if err != nil { + klog.Errorf("unable to list ICSP config: %s", err) + return []reference.DockerImageReference{ref.AsRepository()}, nil + } + + imageRefList, err := s.alternativeImageSources(ref, icspList.Items) + if err != nil { + klog.Errorf("error looking for alternate repositories: %s", err) + return []reference.DockerImageReference{ref.AsRepository()}, nil + } + + imageRefList = append(imageRefList, ref.AsRepository()) + return imageRefList, nil +} + +func (s *simpleLookupICSP) OnFailure( + ctx context.Context, ref reference.DockerImageReference, +) ([]reference.DockerImageReference, error) { + return nil, nil +} + +// alternativeImageSources returns unique list of DockerImageReference objects from list of +// ImageContentSourcePolicy objects +func (s *simpleLookupICSP) alternativeImageSources( + ref reference.DockerImageReference, icspList []operatorv1alpha1.ImageContentSourcePolicy, +) ([]reference.DockerImageReference, error) { + imageSources := []reference.DockerImageReference{} + uniqueMirrors := map[reference.DockerImageReference]bool{} + for _, icsp := range icspList { + for _, rdm := range icsp.Spec.RepositoryDigestMirrors { + rdmSourceRef, err := reference.Parse(rdm.Source) + if err != nil { + return nil, err + } + + if ref.AsRepository() != rdmSourceRef.AsRepository() { + continue + } + + for _, m := range rdm.Mirrors { + mRef, err := reference.Parse(m) + if err != nil { + return nil, err + } + + if _, ok := uniqueMirrors[mRef]; ok { + continue + } + + imageSources = append(imageSources, mRef) + uniqueMirrors[mRef] = true + } + } + } + + klog.V(2).Infof("Found sources: %v for image: %v", imageSources, ref) + return imageSources, nil +} diff --git a/pkg/dockerregistry/server/simplelookupicsp_test.go b/pkg/dockerregistry/server/simplelookupicsp_test.go new file mode 100644 index 0000000000..9bc7799905 --- /dev/null +++ b/pkg/dockerregistry/server/simplelookupicsp_test.go @@ -0,0 +1,314 @@ +package server + +import ( + "context" + "reflect" + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + "github.com/openshift/api/operator/v1alpha1" + "github.com/openshift/client-go/operator/clientset/versioned/fake" + reference "github.com/openshift/library-go/pkg/image/reference" +) + +func TestFirstRequest(t *testing.T) { + for _, tt := range []struct { + name string + rules []runtime.Object + ref string + res []reference.DockerImageReference + }{ + { + name: "multiple mirrors", + ref: "i.do.not.exist/repo/image:latest", + res: []reference.DockerImageReference{ + { + Registry: "i.exist", + Namespace: "ns0", + Name: "img0", + }, + { + Registry: "i.also.exist", + Namespace: "ns1", + Name: "img1", + }, + { + Registry: "me.too", + Namespace: "ns2", + Name: "img2", + }, + { + Registry: "i.do.not.exist", + Namespace: "repo", + Name: "image", + }, + }, + rules: []runtime.Object{ + &v1alpha1.ImageContentSourcePolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "icsp-rule", + }, + Spec: v1alpha1.ImageContentSourcePolicySpec{ + RepositoryDigestMirrors: []v1alpha1.RepositoryDigestMirrors{ + { + Source: "i.do.not.exist/repo/image", + Mirrors: []string{ + "i.exist/ns0/img0", + "i.also.exist/ns1/img1", + "me.too/ns2/img2", + }, + }, + }, + }, + }, + }, + }, + { + name: "happy path", + ref: 
"i.do.not.exist/repo/image:latest", + res: []reference.DockerImageReference{ + { + Registry: "i.exist", + Namespace: "namespace", + Name: "img", + }, + { + Registry: "i.do.not.exist", + Namespace: "repo", + Name: "image", + }, + }, + rules: []runtime.Object{ + &v1alpha1.ImageContentSourcePolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "icsp-rule", + }, + Spec: v1alpha1.ImageContentSourcePolicySpec{ + RepositoryDigestMirrors: []v1alpha1.RepositoryDigestMirrors{ + { + Source: "i.do.not.exist/repo/image", + Mirrors: []string{ + "i.exist/namespace/img", + }, + }, + }, + }, + }, + }, + }, + { + name: "multiple unrelated rules", + ref: "i.do.not.exist/repo/image:latest", + res: []reference.DockerImageReference{ + { + Registry: "i.do.not.exist", + Namespace: "repo", + Name: "image", + }, + }, + rules: []runtime.Object{ + &v1alpha1.ImageContentSourcePolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "icsp-rule", + }, + Spec: v1alpha1.ImageContentSourcePolicySpec{ + RepositoryDigestMirrors: []v1alpha1.RepositoryDigestMirrors{ + { + Source: "unrelated.io/repo/image", + Mirrors: []string{ + "i.exist/namespace/img", + }, + }, + { + Source: "also.unrelated.io/repo/image", + Mirrors: []string{ + "i.exist/namespace/img", + }, + }, + }, + }, + }, + }, + }, + { + name: "multiple related rules", + ref: "i.do.not.exist/repo/image:latest", + res: []reference.DockerImageReference{ + { + Registry: "i.exist", + Namespace: "namespace", + Name: "image", + }, + { + Registry: "i.also.exist", + Namespace: "ns", + Name: "img", + }, + { + Registry: "i.do.not.exist", + Namespace: "repo", + Name: "image", + }, + }, + rules: []runtime.Object{ + &v1alpha1.ImageContentSourcePolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "icsp-rule-0", + }, + Spec: v1alpha1.ImageContentSourcePolicySpec{ + RepositoryDigestMirrors: []v1alpha1.RepositoryDigestMirrors{ + { + Source: "i.do.not.exist/repo/image", + Mirrors: []string{ + "i.exist/namespace/image", + }, + }, + }, + }, + }, + &v1alpha1.ImageContentSourcePolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "icsp-rule-1", + }, + Spec: v1alpha1.ImageContentSourcePolicySpec{ + RepositoryDigestMirrors: []v1alpha1.RepositoryDigestMirrors{ + { + Source: "i.do.not.exist/repo/image", + Mirrors: []string{ + "i.also.exist/ns/img", + }, + }, + }, + }, + }, + }, + }, + { + name: "dedup rules", + ref: "i.do.not.exist/repo/image:latest", + res: []reference.DockerImageReference{ + { + Registry: "i.exist", + Namespace: "namespace", + Name: "image", + }, + { + Registry: "i.do.not.exist", + Namespace: "repo", + Name: "image", + }, + }, + rules: []runtime.Object{ + &v1alpha1.ImageContentSourcePolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "icsp-rule-0", + }, + Spec: v1alpha1.ImageContentSourcePolicySpec{ + RepositoryDigestMirrors: []v1alpha1.RepositoryDigestMirrors{ + { + Source: "i.do.not.exist/repo/image", + Mirrors: []string{ + "i.exist/namespace/image", + }, + }, + }, + }, + }, + &v1alpha1.ImageContentSourcePolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "icsp-rule-1", + }, + Spec: v1alpha1.ImageContentSourcePolicySpec{ + RepositoryDigestMirrors: []v1alpha1.RepositoryDigestMirrors{ + { + Source: "i.do.not.exist/repo/image", + Mirrors: []string{ + "i.exist/namespace/image", + }, + }, + }, + }, + }, + }, + }, + { + name: "invalid mirror source reference", + ref: "i.do.not.exist/repo/image:latest", + res: []reference.DockerImageReference{ + { + Registry: "i.do.not.exist", + Namespace: "repo", + Name: "image", + }, + }, + rules: []runtime.Object{ + &v1alpha1.ImageContentSourcePolicy{ + ObjectMeta: 
metav1.ObjectMeta{
+					Name: "icsp-rule",
+				},
+				Spec: v1alpha1.ImageContentSourcePolicySpec{
+					RepositoryDigestMirrors: []v1alpha1.RepositoryDigestMirrors{
+						{
+							Source: "-92
Date: Tue, 8 Jun 2021 14:11:04 +0200
Subject: [PATCH 3/3] Using vendored packages during integration tests

Adds the "-mod vendor" flag to the go test command through the exported
TESTFLAGS variable, so the integration tests build against the vendor/ tree
populated in the first commit.
---
 Makefile | 1 +
 1 file changed, 1 insertion(+)

diff --git a/Makefile b/Makefile
index 3e37a8132e..d7cb4398ef 100644
--- a/Makefile
+++ b/Makefile
@@ -10,6 +10,7 @@ OUT_DIR = _output
 OS_OUTPUT_GOPATH ?= 1
+TESTFLAGS ?= -mod vendor
 export GOFLAGS
 export TESTFLAGS
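For context, a hedged sketch of how the exported TESTFLAGS variable would
typically be consumed; the consuming target is not part of this diff, so the
target name and package path below are assumptions:

# Hypothetical target, not taken from this Makefile. With
# TESTFLAGS ?= -mod vendor exported above, any go test invocation that
# expands the variable resolves imports from the vendor/ tree instead of
# the module cache.
test-integration:
	go test $(TESTFLAGS) ./test/integration/...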