diff --git a/pkg/operator/controller/ingress/controller.go b/pkg/operator/controller/ingress/controller.go index 108fcad159..c595b31260 100644 --- a/pkg/operator/controller/ingress/controller.go +++ b/pkg/operator/controller/ingress/controller.go @@ -13,6 +13,7 @@ import ( logf "github.com/openshift/cluster-ingress-operator/pkg/log" "github.com/openshift/cluster-ingress-operator/pkg/manifests" operatorcontroller "github.com/openshift/cluster-ingress-operator/pkg/operator/controller" + oputil "github.com/openshift/cluster-ingress-operator/pkg/util" retryable "github.com/openshift/cluster-ingress-operator/pkg/util/retryableerror" "github.com/openshift/cluster-ingress-operator/pkg/util/slice" @@ -255,11 +256,15 @@ func (r *reconciler) Reconcile(ctx context.Context, request reconcile.Request) ( if err := r.client.Get(ctx, types.NamespacedName{Name: "cluster"}, networkConfig); err != nil { return reconcile.Result{}, fmt.Errorf("failed to get network 'cluster': %v", err) } + platformStatus, err := oputil.GetPlatformStatus(r.client, infraConfig) + if err != nil { + return reconcile.Result{}, fmt.Errorf("failed to determine infrastructure platform status for ingresscontroller %s/%s: %w", ingress.Namespace, ingress.Name, err) + } // Admit if necessary. Don't process until admission succeeds. If admission is // successful, immediately re-queue to refresh state. if !isAdmitted(ingress) || needsReadmission(ingress) { - if err := r.admit(ingress, ingressConfig, infraConfig); err != nil { + if err := r.admit(ingress, ingressConfig, platformStatus, dnsConfig); err != nil { switch err := err.(type) { case *admissionRejection: r.recorder.Event(ingress, "Warning", "Rejected", err.Reason) @@ -286,7 +291,7 @@ func (r *reconciler) Reconcile(ctx context.Context, request reconcile.Request) ( } // The ingresscontroller is safe to process, so ensure it. 
- if err := r.ensureIngressController(ingress, dnsConfig, infraConfig, ingressConfig, apiConfig, networkConfig); err != nil { + if err := r.ensureIngressController(ingress, dnsConfig, infraConfig, platformStatus, ingressConfig, apiConfig, networkConfig); err != nil { switch e := err.(type) { case retryable.Error: log.Error(e, "got retryable error; requeueing", "after", e.After()) @@ -302,11 +307,11 @@ func (r *reconciler) Reconcile(ctx context.Context, request reconcile.Request) ( // fields. Returns an error value, which will have a non-nil value of type // admissionRejection if the ingresscontroller was rejected, or a non-nil // value of a different type if the ingresscontroller could not be processed. -func (r *reconciler) admit(current *operatorv1.IngressController, ingressConfig *configv1.Ingress, infraConfig *configv1.Infrastructure) error { +func (r *reconciler) admit(current *operatorv1.IngressController, ingressConfig *configv1.Ingress, platformStatus *configv1.PlatformStatus, dnsConfig *configv1.DNS) error { updated := current.DeepCopy() setDefaultDomain(updated, ingressConfig) - setDefaultPublishingStrategy(updated, infraConfig) + setDefaultPublishingStrategy(updated, platformStatus) // The TLS security profile need not be defaulted. 
If none is set, we // get the default from the APIServer config (which is assumed to be @@ -337,6 +342,11 @@ func (r *reconciler) admit(current *operatorv1.IngressController, ingressConfig Reason: "Valid", }) updated.Status.ObservedGeneration = updated.Generation + + if !manageDNSForDomain(updated.Status.Domain, platformStatus, dnsConfig) { + r.recorder.Eventf(updated, "Warning", "DomainNotMatching", "Domain [%s] of ingresscontroller does not match the baseDomain [%s] of the cluster DNS config, so DNS management is not supported.", updated.Status.Domain, dnsConfig.Spec.BaseDomain) + } + if !IngressStatusesEqual(current.Status, updated.Status) { if err := r.client.Status().Update(context.TODO(), updated); err != nil { return fmt.Errorf("failed to update status: %v", err) @@ -382,11 +392,11 @@ func setDefaultDomain(ic *operatorv1.IngressController, ingressConfig *configv1. return false } -func setDefaultPublishingStrategy(ic *operatorv1.IngressController, infraConfig *configv1.Infrastructure) bool { +func setDefaultPublishingStrategy(ic *operatorv1.IngressController, platformStatus *configv1.PlatformStatus) bool { effectiveStrategy := ic.Spec.EndpointPublishingStrategy.DeepCopy() if effectiveStrategy == nil { var strategyType operatorv1.EndpointPublishingStrategyType - switch infraConfig.Status.Platform { + switch platformStatus.Type { case configv1.AWSPlatformType, configv1.AzurePlatformType, configv1.GCPPlatformType, configv1.IBMCloudPlatformType, configv1.PowerVSPlatformType, configv1.AlibabaCloudPlatformType: strategyType = operatorv1.LoadBalancerServiceStrategyType case configv1.LibvirtPlatformType: @@ -841,7 +851,7 @@ func (r *reconciler) ensureIngressDeleted(ingress *operatorv1.IngressController) // given ingresscontroller. Any error values are collected into either a // retryable.Error value, if any of the error values are retryable, or else an // Aggregate error value. 
-func (r *reconciler) ensureIngressController(ci *operatorv1.IngressController, dnsConfig *configv1.DNS, infraConfig *configv1.Infrastructure, ingressConfig *configv1.Ingress, apiConfig *configv1.APIServer, networkConfig *configv1.Network) error { +func (r *reconciler) ensureIngressController(ci *operatorv1.IngressController, dnsConfig *configv1.DNS, infraConfig *configv1.Infrastructure, platformStatus *configv1.PlatformStatus, ingressConfig *configv1.Ingress, apiConfig *configv1.APIServer, networkConfig *configv1.Network) error { // Before doing anything at all with the controller, ensure it has a finalizer // so we can clean up later. if !slice.ContainsString(ci.Finalizers, manifests.IngressControllerFinalizer) { @@ -892,7 +902,7 @@ func (r *reconciler) ensureIngressController(ci *operatorv1.IngressController, d haveClientCAConfigmap = true } - haveDepl, deployment, err := r.ensureRouterDeployment(ci, infraConfig, ingressConfig, apiConfig, networkConfig, haveClientCAConfigmap, clientCAConfigmap) + haveDepl, deployment, err := r.ensureRouterDeployment(ci, infraConfig, ingressConfig, apiConfig, networkConfig, haveClientCAConfigmap, clientCAConfigmap, platformStatus) if err != nil { errs = append(errs, fmt.Errorf("failed to ensure deployment: %v", err)) return utilerrors.NewAggregate(errs) @@ -912,11 +922,11 @@ func (r *reconciler) ensureIngressController(ci *operatorv1.IngressController, d var lbService *corev1.Service var wildcardRecord *iov1.DNSRecord - if haveLB, lb, err := r.ensureLoadBalancerService(ci, deploymentRef, infraConfig); err != nil { + if haveLB, lb, err := r.ensureLoadBalancerService(ci, deploymentRef, platformStatus); err != nil { errs = append(errs, fmt.Errorf("failed to ensure load balancer service for %s: %v", ci.Name, err)) } else { lbService = lb - if _, record, err := r.ensureWildcardDNSRecord(ci, lbService, haveLB); err != nil { + if _, record, err := r.ensureWildcardDNSRecord(ci, platformStatus, dnsConfig, lbService, haveLB); err != nil { 
errs = append(errs, fmt.Errorf("failed to ensure wildcard dnsrecord for %s: %v", ci.Name, err)) } else { wildcardRecord = record @@ -951,7 +961,7 @@ func (r *reconciler) ensureIngressController(ci *operatorv1.IngressController, d errs = append(errs, fmt.Errorf("failed to list pods in namespace %q: %v", operatorcontroller.DefaultOperatorNamespace, err)) } - syncStatusErr, updated := r.syncIngressControllerStatus(ci, deployment, deploymentRef, pods.Items, lbService, operandEvents.Items, wildcardRecord, dnsConfig, infraConfig) + syncStatusErr, updated := r.syncIngressControllerStatus(ci, deployment, deploymentRef, pods.Items, lbService, operandEvents.Items, wildcardRecord, dnsConfig, platformStatus) errs = append(errs, syncStatusErr) // If syncIngressControllerStatus updated our ingress status, it's important we query for that new object. diff --git a/pkg/operator/controller/ingress/controller_test.go b/pkg/operator/controller/ingress/controller_test.go index fe7e46b8c6..8b03999f36 100644 --- a/pkg/operator/controller/ingress/controller_test.go +++ b/pkg/operator/controller/ingress/controller_test.go @@ -135,96 +135,94 @@ func TestSetDefaultPublishingStrategySetsPlatformDefaults(t *testing.T) { }, }, } - makeInfra = func(platform configv1.PlatformType) *configv1.Infrastructure { - return &configv1.Infrastructure{ - Status: configv1.InfrastructureStatus{ - Platform: platform, - }, + makePlatformStatus = func(platform configv1.PlatformType) *configv1.PlatformStatus { + return &configv1.PlatformStatus{ + Type: platform, } } ) testCases := []struct { - name string - infraConfig *configv1.Infrastructure - expectedIC *operatorv1.IngressController + name string + platformStatus *configv1.PlatformStatus + expectedIC *operatorv1.IngressController }{ { - name: "Alibaba", - infraConfig: makeInfra(configv1.AlibabaCloudPlatformType), - expectedIC: ingressControllerWithLoadBalancer, + name: "Alibaba", + platformStatus: makePlatformStatus(configv1.AlibabaCloudPlatformType), + 
expectedIC: ingressControllerWithLoadBalancer, }, { - name: "AWS", - infraConfig: makeInfra(configv1.AWSPlatformType), - expectedIC: ingressControllerWithLoadBalancer, + name: "AWS", + platformStatus: makePlatformStatus(configv1.AWSPlatformType), + expectedIC: ingressControllerWithLoadBalancer, }, { - name: "Azure", - infraConfig: makeInfra(configv1.AzurePlatformType), - expectedIC: ingressControllerWithLoadBalancer, + name: "Azure", + platformStatus: makePlatformStatus(configv1.AzurePlatformType), + expectedIC: ingressControllerWithLoadBalancer, }, { - name: "Bare metal", - infraConfig: makeInfra(configv1.BareMetalPlatformType), - expectedIC: ingressControllerWithHostNetwork, + name: "Bare metal", + platformStatus: makePlatformStatus(configv1.BareMetalPlatformType), + expectedIC: ingressControllerWithHostNetwork, }, { - name: "Equinix Metal", - infraConfig: makeInfra(configv1.EquinixMetalPlatformType), - expectedIC: ingressControllerWithHostNetwork, + name: "Equinix Metal", + platformStatus: makePlatformStatus(configv1.EquinixMetalPlatformType), + expectedIC: ingressControllerWithHostNetwork, }, { - name: "GCP", - infraConfig: makeInfra(configv1.GCPPlatformType), - expectedIC: ingressControllerWithLoadBalancer, + name: "GCP", + platformStatus: makePlatformStatus(configv1.GCPPlatformType), + expectedIC: ingressControllerWithLoadBalancer, }, { - name: "IBM Cloud", - infraConfig: makeInfra(configv1.IBMCloudPlatformType), - expectedIC: ingressControllerWithLoadBalancer, + name: "IBM Cloud", + platformStatus: makePlatformStatus(configv1.IBMCloudPlatformType), + expectedIC: ingressControllerWithLoadBalancer, }, { - name: "Libvirt", - infraConfig: makeInfra(configv1.LibvirtPlatformType), - expectedIC: ingressControllerWithHostNetwork, + name: "Libvirt", + platformStatus: makePlatformStatus(configv1.LibvirtPlatformType), + expectedIC: ingressControllerWithHostNetwork, }, { - name: "No platform", - infraConfig: makeInfra(configv1.NonePlatformType), - expectedIC: 
ingressControllerWithHostNetwork, + name: "No platform", + platformStatus: makePlatformStatus(configv1.NonePlatformType), + expectedIC: ingressControllerWithHostNetwork, }, { - name: "OpenStack", - infraConfig: makeInfra(configv1.OpenStackPlatformType), - expectedIC: ingressControllerWithHostNetwork, + name: "OpenStack", + platformStatus: makePlatformStatus(configv1.OpenStackPlatformType), + expectedIC: ingressControllerWithHostNetwork, }, { - name: "Power VS", - infraConfig: makeInfra(configv1.PowerVSPlatformType), - expectedIC: ingressControllerWithLoadBalancer, + name: "Power VS", + platformStatus: makePlatformStatus(configv1.PowerVSPlatformType), + expectedIC: ingressControllerWithLoadBalancer, }, { - name: "RHV", - infraConfig: makeInfra(configv1.OvirtPlatformType), - expectedIC: ingressControllerWithHostNetwork, + name: "RHV", + platformStatus: makePlatformStatus(configv1.OvirtPlatformType), + expectedIC: ingressControllerWithHostNetwork, }, { - name: "vSphere", - infraConfig: makeInfra(configv1.VSpherePlatformType), - expectedIC: ingressControllerWithHostNetwork, + name: "vSphere", + platformStatus: makePlatformStatus(configv1.VSpherePlatformType), + expectedIC: ingressControllerWithHostNetwork, }, { - name: "Nutanix", - infraConfig: makeInfra(configv1.NutanixPlatformType), - expectedIC: ingressControllerWithHostNetwork, + name: "Nutanix", + platformStatus: makePlatformStatus(configv1.NutanixPlatformType), + expectedIC: ingressControllerWithHostNetwork, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { ic := &operatorv1.IngressController{} - infraConfig := tc.infraConfig.DeepCopy() - if actualResult := setDefaultPublishingStrategy(ic, infraConfig); actualResult != true { + platformStatus := tc.platformStatus.DeepCopy() + if actualResult := setDefaultPublishingStrategy(ic, platformStatus); actualResult != true { t.Errorf("expected result %v, got %v", true, actualResult) } if diff := cmp.Diff(tc.expectedIC, ic); len(diff) != 0 { @@ 
-506,12 +504,10 @@ func TestSetDefaultPublishingStrategyHandlesUpdates(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { ic := tc.ic.DeepCopy() - infraConfig := &configv1.Infrastructure{ - Status: configv1.InfrastructureStatus{ - Platform: configv1.NonePlatformType, - }, + platformStatus := &configv1.PlatformStatus{ + Type: configv1.NonePlatformType, } - if actualResult := setDefaultPublishingStrategy(ic, infraConfig); actualResult != tc.expectedResult { + if actualResult := setDefaultPublishingStrategy(ic, platformStatus); actualResult != tc.expectedResult { t.Errorf("expected result %v, got %v", tc.expectedResult, actualResult) } if diff := cmp.Diff(tc.expectedIC, ic); len(diff) != 0 { diff --git a/pkg/operator/controller/ingress/deployment.go b/pkg/operator/controller/ingress/deployment.go index 7da1ed0d16..6a9c480d6d 100644 --- a/pkg/operator/controller/ingress/deployment.go +++ b/pkg/operator/controller/ingress/deployment.go @@ -24,8 +24,6 @@ import ( "github.com/openshift/cluster-ingress-operator/pkg/manifests" "github.com/openshift/cluster-ingress-operator/pkg/operator/controller" - oputil "github.com/openshift/cluster-ingress-operator/pkg/util" - appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -109,16 +107,12 @@ const ( // ensureRouterDeployment ensures the router deployment exists for a given // ingresscontroller. 
-func (r *reconciler) ensureRouterDeployment(ci *operatorv1.IngressController, infraConfig *configv1.Infrastructure, ingressConfig *configv1.Ingress, apiConfig *configv1.APIServer, networkConfig *configv1.Network, haveClientCAConfigmap bool, clientCAConfigmap *corev1.ConfigMap) (bool, *appsv1.Deployment, error) { +func (r *reconciler) ensureRouterDeployment(ci *operatorv1.IngressController, infraConfig *configv1.Infrastructure, ingressConfig *configv1.Ingress, apiConfig *configv1.APIServer, networkConfig *configv1.Network, haveClientCAConfigmap bool, clientCAConfigmap *corev1.ConfigMap, platformStatus *configv1.PlatformStatus) (bool, *appsv1.Deployment, error) { haveDepl, current, err := r.currentRouterDeployment(ci) if err != nil { return false, nil, err } - platform, err := oputil.GetPlatformStatus(r.client, infraConfig) - if err != nil { - return false, nil, fmt.Errorf("failed to determine infrastructure platform status for ingresscontroller %s/%s: %v", ci.Namespace, ci.Name, err) - } - proxyNeeded, err := IsProxyProtocolNeeded(ci, platform) + proxyNeeded, err := IsProxyProtocolNeeded(ci, platformStatus) if err != nil { return false, nil, fmt.Errorf("failed to determine if proxy protocol is needed for ingresscontroller %s/%s: %v", ci.Namespace, ci.Name, err) } diff --git a/pkg/operator/controller/ingress/dns.go b/pkg/operator/controller/ingress/dns.go index 78e72691f6..2f66804bd7 100644 --- a/pkg/operator/controller/ingress/dns.go +++ b/pkg/operator/controller/ingress/dns.go @@ -3,16 +3,16 @@ package ingress import ( "context" "fmt" + "strings" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" + configv1 "github.com/openshift/api/config/v1" + operatorv1 "github.com/openshift/api/operator/v1" iov1 "github.com/openshift/api/operatoringress/v1" "github.com/openshift/cluster-ingress-operator/pkg/manifests" "github.com/openshift/cluster-ingress-operator/pkg/operator/controller" - - operatorv1 "github.com/openshift/api/operator/v1" - corev1 
"k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -29,11 +29,15 @@ const defaultRecordTTL int64 = 30 // ensureWildcardDNSRecord will create DNS records for the given LB service. // If service is nil (haveLBS is false), nothing is done. -func (r *reconciler) ensureWildcardDNSRecord(ic *operatorv1.IngressController, service *corev1.Service, haveLBS bool) (bool, *iov1.DNSRecord, error) { +func (r *reconciler) ensureWildcardDNSRecord(ic *operatorv1.IngressController, platformStatus *configv1.PlatformStatus, dnsConfig *configv1.DNS, service *corev1.Service, haveLBS bool) (bool, *iov1.DNSRecord, error) { if !haveLBS { return false, nil, nil } + if !manageDNSForDomain(ic.Status.Domain, platformStatus, dnsConfig) { + return false, nil, nil + } + wantWC, desired := desiredWildcardDNSRecord(ic, service) haveWC, current, err := r.currentWildcardDNSRecord(ic) if err != nil { @@ -189,3 +193,21 @@ func dnsRecordChanged(current, expected *iov1.DNSRecord) (bool, *iov1.DNSRecord) updated.Spec = expected.Spec return true, updated } + +// manageDNSForDomain returns true if the given domain contains the baseDomain +// of the cluster DNS config. It is only used for AWS in the beginning, and will be expanded to other clouds +// once we know there are no users depending on this. +// See https://bugzilla.redhat.com/show_bug.cgi?id=2041616 +func manageDNSForDomain(domain string, status *configv1.PlatformStatus, dnsConfig *configv1.DNS) bool { + if len(domain) == 0 { + return false + } + + mustContain := "." 
+ dnsConfig.Spec.BaseDomain + switch status.Type { + case configv1.AWSPlatformType: + return strings.HasSuffix(domain, mustContain) + default: + return true + } +} diff --git a/pkg/operator/controller/ingress/dns_test.go b/pkg/operator/controller/ingress/dns_test.go index 2edd5edc4a..75a68a1c98 100644 --- a/pkg/operator/controller/ingress/dns_test.go +++ b/pkg/operator/controller/ingress/dns_test.go @@ -7,9 +7,9 @@ import ( "gopkg.in/yaml.v2" - iov1 "github.com/openshift/api/operatoringress/v1" - + configv1 "github.com/openshift/api/config/v1" operatorv1 "github.com/openshift/api/operator/v1" + iov1 "github.com/openshift/api/operatoringress/v1" corev1 "k8s.io/api/core/v1" @@ -109,6 +109,74 @@ func TestDesiredWildcardDNSRecord(t *testing.T) { } } +func TestManageDNSForDomain(t *testing.T) { + tests := []struct { + name string + domain string + baseDomain string + platformType configv1.PlatformType + expected bool + }{ + { + name: "empty domain", + domain: "", + baseDomain: "openshift.example.com", + expected: false, + }, + { + name: "domain matches the baseDomain", + domain: "apps.openshift.example.com", + baseDomain: "openshift.example.com", + platformType: configv1.AWSPlatformType, + expected: true, + }, + { + name: "domain matches single segment baseDomain", + domain: "openshift.example.com", + baseDomain: "example.com", + platformType: configv1.AWSPlatformType, + expected: true, + }, + { + name: "domain does not match the baseDomain on AWS", + domain: "test.local", + baseDomain: "openshift.example.com", + platformType: configv1.AWSPlatformType, + expected: false, + }, + { + name: "domain does not match prematurely", + domain: "testopenshift.example.com", + baseDomain: "openshift.example.com", + platformType: configv1.AWSPlatformType, + expected: false, + }, + { + name: "domain does not match the baseDomain on unsupported platform", + domain: "test.local", + baseDomain: "openshift.example.com", + platformType: configv1.NonePlatformType, + expected: true, + }, + } 
+ + for _, tc := range tests { + status := configv1.PlatformStatus{ + Type: tc.platformType, + } + + dnsConfig := configv1.DNS{ + Spec: configv1.DNSSpec{ + BaseDomain: tc.baseDomain, + }, + } + actual := manageDNSForDomain(tc.domain, &status, &dnsConfig) + if actual != tc.expected { + t.Errorf("%q: expected to be %v, got %v", tc.name, tc.expected, actual) + } + } +} + func toYaml(obj interface{}) string { yml, _ := yaml.Marshal(obj) return string(yml) diff --git a/pkg/operator/controller/ingress/load_balancer_service.go b/pkg/operator/controller/ingress/load_balancer_service.go index 04361230f3..7c43aa86f0 100644 --- a/pkg/operator/controller/ingress/load_balancer_service.go +++ b/pkg/operator/controller/ingress/load_balancer_service.go @@ -13,8 +13,6 @@ import ( "github.com/openshift/cluster-ingress-operator/pkg/manifests" "github.com/openshift/cluster-ingress-operator/pkg/operator/controller" - oputil "github.com/openshift/cluster-ingress-operator/pkg/util" - corev1 "k8s.io/api/core/v1" configv1 "github.com/openshift/api/config/v1" @@ -253,12 +251,8 @@ var ( // ensureLoadBalancerService creates an LB service if one is desired but absent. // Always returns the current LB service if one exists (whether it already // existed or was created during the course of the function). 
-func (r *reconciler) ensureLoadBalancerService(ci *operatorv1.IngressController, deploymentRef metav1.OwnerReference, infraConfig *configv1.Infrastructure) (bool, *corev1.Service, error) { - platform, err := oputil.GetPlatformStatus(r.client, infraConfig) - if err != nil { - return false, nil, fmt.Errorf("failed to determine infrastructure platform status for ingresscontroller %s/%s: %v", ci.Namespace, ci.Name, err) - } - wantLBS, desiredLBService, err := desiredLoadBalancerService(ci, deploymentRef, platform) +func (r *reconciler) ensureLoadBalancerService(ci *operatorv1.IngressController, deploymentRef metav1.OwnerReference, platformStatus *configv1.PlatformStatus) (bool, *corev1.Service, error) { + wantLBS, desiredLBService, err := desiredLoadBalancerService(ci, deploymentRef, platformStatus) if err != nil { return false, nil, err } @@ -303,7 +297,7 @@ func (r *reconciler) ensureLoadBalancerService(ci *operatorv1.IngressController, if _, ok := ci.Annotations[autoDeleteLoadBalancerAnnotation]; ok { deleteIfScopeChanged = true } - if updated, err := r.updateLoadBalancerService(currentLBService, desiredLBService, platform, deleteIfScopeChanged); err != nil { + if updated, err := r.updateLoadBalancerService(currentLBService, desiredLBService, platformStatus, deleteIfScopeChanged); err != nil { return true, currentLBService, fmt.Errorf("failed to update load balancer service: %v", err) } else if updated { return r.currentLoadBalancerService(ci) diff --git a/pkg/operator/controller/ingress/status.go b/pkg/operator/controller/ingress/status.go index 9cd5007fa4..28cb3e477a 100644 --- a/pkg/operator/controller/ingress/status.go +++ b/pkg/operator/controller/ingress/status.go @@ -50,18 +50,13 @@ type expectedCondition struct { // syncIngressControllerStatus computes the current status of ic and // updates status upon any changes since last sync. 
-func (r *reconciler) syncIngressControllerStatus(ic *operatorv1.IngressController, deployment *appsv1.Deployment, deploymentRef metav1.OwnerReference, pods []corev1.Pod, service *corev1.Service, operandEvents []corev1.Event, wildcardRecord *iov1.DNSRecord, dnsConfig *configv1.DNS, infraConfig *configv1.Infrastructure) (error, bool) { +func (r *reconciler) syncIngressControllerStatus(ic *operatorv1.IngressController, deployment *appsv1.Deployment, deploymentRef metav1.OwnerReference, pods []corev1.Pod, service *corev1.Service, operandEvents []corev1.Event, wildcardRecord *iov1.DNSRecord, dnsConfig *configv1.DNS, platformStatus *configv1.PlatformStatus) (error, bool) { updatedIc := false selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector) if err != nil { return fmt.Errorf("deployment has invalid spec.selector: %v", err), updatedIc } - platform, err := oputil.GetPlatformStatus(r.client, infraConfig) - if err != nil { - return fmt.Errorf("failed to determine infrastructure platform status for ingresscontroller %s/%s: %w", ic.Namespace, ic.Name, err), updatedIc - } - secret := &corev1.Secret{} secretName := controller.RouterEffectiveDefaultCertificateSecretName(ic, deployment.Namespace) if err := r.client.Get(context.TODO(), secretName, secret); err != nil && !apierrors.IsNotFound(err) { @@ -79,13 +74,13 @@ func (r *reconciler) syncIngressControllerStatus(ic *operatorv1.IngressControlle updated.Status.Conditions = MergeConditions(updated.Status.Conditions, computeDeploymentReplicasMinAvailableCondition(deployment)) updated.Status.Conditions = MergeConditions(updated.Status.Conditions, computeDeploymentReplicasAllAvailableCondition(deployment)) updated.Status.Conditions = MergeConditions(updated.Status.Conditions, computeLoadBalancerStatus(ic, service, operandEvents)...) - updated.Status.Conditions = MergeConditions(updated.Status.Conditions, computeDNSStatus(ic, wildcardRecord, dnsConfig)...) 
+ updated.Status.Conditions = MergeConditions(updated.Status.Conditions, computeDNSStatus(ic, wildcardRecord, platformStatus, dnsConfig)...) updated.Status.Conditions = MergeConditions(updated.Status.Conditions, computeIngressAvailableCondition(updated.Status.Conditions)) degradedCondition, err := computeIngressDegradedCondition(updated.Status.Conditions, updated.Name) errs = append(errs, err) - updated.Status.Conditions = MergeConditions(updated.Status.Conditions, computeIngressProgressingCondition(updated.Status.Conditions, ic, service, platform)) + updated.Status.Conditions = MergeConditions(updated.Status.Conditions, computeIngressProgressingCondition(updated.Status.Conditions, ic, service, platformStatus)) updated.Status.Conditions = MergeConditions(updated.Status.Conditions, degradedCondition) - updated.Status.Conditions = MergeConditions(updated.Status.Conditions, computeIngressUpgradeableCondition(ic, deploymentRef, service, platform, secret)) + updated.Status.Conditions = MergeConditions(updated.Status.Conditions, computeIngressUpgradeableCondition(ic, deploymentRef, service, platformStatus, secret)) updated.Status.Conditions = PruneConditions(updated.Status.Conditions) @@ -804,7 +799,7 @@ func getEventsByReason(events []corev1.Event, component, reason string) []corev1 return filtered } -func computeDNSStatus(ic *operatorv1.IngressController, wildcardRecord *iov1.DNSRecord, dnsConfig *configv1.DNS) []operatorv1.OperatorCondition { +func computeDNSStatus(ic *operatorv1.IngressController, wildcardRecord *iov1.DNSRecord, status *configv1.PlatformStatus, dnsConfig *configv1.DNS) []operatorv1.OperatorCondition { if dnsConfig.Spec.PublicZone == nil && dnsConfig.Spec.PrivateZone == nil { return []operatorv1.OperatorCondition{ @@ -828,6 +823,17 @@ func computeDNSStatus(ic *operatorv1.IngressController, wildcardRecord *iov1.DNS } } + if !manageDNSForDomain(ic.Status.Domain, status, dnsConfig) { + return []operatorv1.OperatorCondition{ + { + Type: 
operatorv1.DNSManagedIngressConditionType, + Status: operatorv1.ConditionFalse, + Reason: "DomainNotMatching", + Message: "DNS management is not supported for ingresscontrollers with domain not matching the baseDomain of the cluster DNS config.", + }, + } + } + conditions := []operatorv1.OperatorCondition{ { Type: operatorv1.DNSManagedIngressConditionType, diff --git a/pkg/operator/controller/ingress/status_test.go b/pkg/operator/controller/ingress/status_test.go index dc96fba612..f8866f4e90 100644 --- a/pkg/operator/controller/ingress/status_test.go +++ b/pkg/operator/controller/ingress/status_test.go @@ -25,7 +25,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/types" utilclock "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/util/intstr" ) diff --git a/test/e2e/all_test.go b/test/e2e/all_test.go index d1a1f07cdf..df400f959c 100644 --- a/test/e2e/all_test.go +++ b/test/e2e/all_test.go @@ -27,6 +27,7 @@ func TestAll(t *testing.T) { t.Run("TestCreateSecretThenIngressController", TestCreateSecretThenIngressController) t.Run("TestCustomErrorpages", TestCustomErrorpages) t.Run("TestCustomIngressClass", TestCustomIngressClass) + t.Run("TestDomainNotMatchingBase", TestDomainNotMatchingBase) t.Run("TestDynamicConfigManagerUnsupportedConfigOverride", TestDynamicConfigManagerUnsupportedConfigOverride) t.Run("TestForwardedHeaderPolicyAppend", TestForwardedHeaderPolicyAppend) t.Run("TestForwardedHeaderPolicyIfNone", TestForwardedHeaderPolicyIfNone) diff --git a/test/e2e/domain_not_matching_base_test.go b/test/e2e/domain_not_matching_base_test.go new file mode 100644 index 0000000000..8fcb4565b6 --- /dev/null +++ b/test/e2e/domain_not_matching_base_test.go @@ -0,0 +1,61 @@ +//go:build e2e +// +build e2e + +package e2e + +import ( + "context" + configv1 "github.com/openshift/api/config/v1" + operatorv1 "github.com/openshift/api/operator/v1" + iov1 
"github.com/openshift/api/operatoringress/v1" + "github.com/openshift/cluster-ingress-operator/pkg/operator/controller" + ingresscontroller "github.com/openshift/cluster-ingress-operator/pkg/operator/controller/ingress" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "testing" + "time" +) + +func TestDomainNotMatchingBase(t *testing.T) { + t.Parallel() + if infraConfig.Status.Platform != configv1.AWSPlatformType { + t.Skip("test skipped on non-aws platform") + return + } + + icName := types.NamespacedName{Namespace: operatorNamespace, Name: "domain-not-matching"} + domain := icName.Name + ".local" + ic := newLoadBalancerController(icName, domain) + if err := kclient.Create(context.TODO(), ic); err != nil { + t.Fatalf("failed to create ingresscontroller %s: %v", icName, err) + } + defer assertIngressControllerDeleted(t, kclient, ic) + + //Ensure the DNSManaged=False condition + t.Logf("waiting for ingresscontroller %v", icName) + conditions := []operatorv1.OperatorCondition{ + {Type: operatorv1.IngressControllerAvailableConditionType, Status: operatorv1.ConditionTrue}, + {Type: operatorv1.LoadBalancerManagedIngressConditionType, Status: operatorv1.ConditionTrue}, + {Type: operatorv1.LoadBalancerReadyIngressConditionType, Status: operatorv1.ConditionTrue}, + {Type: operatorv1.DNSManagedIngressConditionType, Status: operatorv1.ConditionFalse}, + {Type: ingresscontroller.IngressControllerAdmittedConditionType, Status: operatorv1.ConditionTrue}, + } + if err := waitForIngressControllerCondition(t, kclient, 5*time.Minute, icName, conditions...); err != nil { + t.Fatalf("failed to observe expected conditions: %v", err) + } + + ic, err := getIngressController(t, kclient, icName, 1*time.Minute) + if err != nil { + t.Fatalf("failed to get ingress controller: %v", err) + } + + //Ensure there is no DNS record created + wildcardRecordName := controller.WildcardDNSRecordName(ic) + wildcardRecord := &iov1.DNSRecord{} + if err := 
kclient.Get(context.TODO(), wildcardRecordName, wildcardRecord); err == nil { + t.Fatalf("Expected to find no wildcard dnsrecord, but found one %s", wildcardRecordName) + } else if !errors.IsNotFound(err) { + // else-if keeps the Get error in scope; a separate check would read the stale outer err. + t.Fatalf("Unexpected error while looking for a dnsrecord: %v", err) + } +}