diff --git a/pkg/manifests/manifests.go b/pkg/manifests/manifests.go index 940f8d01bd..f9cfdf6552 100644 --- a/pkg/manifests/manifests.go +++ b/pkg/manifests/manifests.go @@ -34,6 +34,9 @@ const ( // Annotation used to inform the certificate generation service to // generate a cluster-signed certificate and populate the secret. ServingCertSecretAnnotation = "service.alpha.openshift.io/serving-cert-secret-name" + + // Annotation used to enable the proxy protocol on the AWS load balancer. + AWSLBProxyProtocolAnnotation = "service.beta.kubernetes.io/aws-load-balancer-proxy-protocol" ) func MustAssetReader(asset string) io.Reader { @@ -147,6 +150,14 @@ func (f *Factory) RouterDeployment(cr *ingressv1alpha1.ClusterIngress) (*appsv1. env = append(env, corev1.EnvVar{Name: "ROUTER_CANONICAL_HOSTNAME", Value: *cr.Spec.IngressDomain}) } + if cr.Spec.HighAvailability != nil && cr.Spec.HighAvailability.Type == ingressv1alpha1.CloudClusterIngressHA { + // For now, check if we are on AWS. This can really be done + // for any external [cloud] LBs that support the proxy protocol. 
+ if f.config.Platform == configv1.AWSPlatform { + env = append(env, corev1.EnvVar{Name: "ROUTER_USE_PROXY_PROTOCOL", Value: "true"}) + } + } + if cr.Spec.NodePlacement != nil { if cr.Spec.NodePlacement.NodeSelector != nil { nodeSelector, err := metav1.LabelSelectorAsMap(cr.Spec.NodePlacement.NodeSelector) @@ -258,6 +269,13 @@ func (f *Factory) RouterServiceCloud(cr *ingressv1alpha1.ClusterIngress) (*corev } s.Spec.Selector["router"] = name + if f.config.Platform == configv1.AWSPlatform { + if s.Annotations == nil { + s.Annotations = map[string]string{} + } + s.Annotations[AWSLBProxyProtocolAnnotation] = "*" + } + return s, nil } diff --git a/pkg/manifests/manifests_test.go b/pkg/manifests/manifests_test.go index dec738b9cf..c153f16aca 100644 --- a/pkg/manifests/manifests_test.go +++ b/pkg/manifests/manifests_test.go @@ -2,6 +2,7 @@ package manifests import ( "fmt" + "strconv" "testing" "time" @@ -14,7 +15,10 @@ import ( ) func TestManifests(t *testing.T) { - config := operatorconfig.Config{RouterImage: "quay.io/openshift/router:latest"} + config := operatorconfig.Config{ + RouterImage: "quay.io/openshift/router:latest", + Platform: configv1.AWSPlatform, + } f := NewFactory(config) ci := &ingressv1alpha1.ClusterIngress{ @@ -96,6 +100,19 @@ func TestManifests(t *testing.T) { t.Error("router Deployment has no default node selector") } + proxyProtocolEnabled := false + for _, envVar := range deployment.Spec.Template.Spec.Containers[0].Env { + if envVar.Name == "ROUTER_USE_PROXY_PROTOCOL" { + if v, err := strconv.ParseBool(envVar.Value); err == nil { + proxyProtocolEnabled = v + } + break + } + } + if proxyProtocolEnabled { + t.Errorf("router Deployment unexpected proxy protocol") + } + if deployment.Spec.Template.Spec.Volumes[0].Secret == nil { t.Error("router Deployment has no secret volume") } @@ -141,6 +158,19 @@ func TestManifests(t *testing.T) { t.Errorf("expected empty readiness probe host, got %q", 
deployment.Spec.Template.Spec.Containers[0].ReadinessProbe.Handler.HTTPGet.Host) } + proxyProtocolEnabled = false + for _, envVar := range deployment.Spec.Template.Spec.Containers[0].Env { + if envVar.Name == "ROUTER_USE_PROXY_PROTOCOL" { + if v, err := strconv.ParseBool(envVar.Value); err == nil { + proxyProtocolEnabled = v + } + break + } + } + if !proxyProtocolEnabled { + t.Errorf("router Deployment expected proxy protocol") + } + secretName := fmt.Sprintf("secret-%v", time.Now().UnixNano()) ci.Spec.DefaultCertificateSecret = &secretName ci.Spec.HighAvailability.Type = ingressv1alpha1.UserDefinedClusterIngressHA diff --git a/test/e2e/operator_test.go b/test/e2e/operator_test.go index 2c4a320f03..f251c08769 100644 --- a/test/e2e/operator_test.go +++ b/test/e2e/operator_test.go @@ -6,6 +6,7 @@ import ( "context" "fmt" "os" + "strconv" "testing" "time" @@ -157,6 +158,95 @@ func TestRouterServiceInternalEndpoints(t *testing.T) { } } +func TestClusterProxyProtocol(t *testing.T) { + cl, ns, err := getClient() + if err != nil { + t.Fatal(err) + } + + infraConfig := &configv1.Infrastructure{} + err = cl.Get(context.TODO(), types.NamespacedName{Name: "cluster"}, infraConfig) + if err != nil { + t.Fatalf("failed to get infrastructure config: %v", err) + } + + if infraConfig.Status.Platform != configv1.AWSPlatform { + t.Skip("test skipped on non-aws platform") + return + } + + ci := &ingressv1alpha1.ClusterIngress{} + err = wait.PollImmediate(1*time.Second, 10*time.Second, func() (bool, error) { + if err := cl.Get(context.TODO(), types.NamespacedName{Namespace: ns, Name: "default"}, ci); err != nil { + return false, nil + } + return true, nil + }) + if err != nil { + t.Fatalf("failed to get default ClusterIngress: %v", err) + } + + // Wait for the router deployment to exist. 
+ deployment := &appsv1.Deployment{} + err = wait.PollImmediate(1*time.Second, 10*time.Second, func() (bool, error) { + if err := cl.Get(context.TODO(), types.NamespacedName{Namespace: "openshift-ingress", Name: fmt.Sprintf("router-%s", ci.Name)}, deployment); err != nil { + return false, nil + } + return true, nil + }) + if err != nil { + t.Fatalf("failed to get default router deployment: %v", err) + } + + // Ensure proxy protocol is enabled on the router deployment. + proxyProtocolEnabled := false + for _, v := range deployment.Spec.Template.Spec.Containers[0].Env { + if v.Name == "ROUTER_USE_PROXY_PROTOCOL" { + if val, err := strconv.ParseBool(v.Value); err == nil { + proxyProtocolEnabled = val + break + } + } + } + + if !proxyProtocolEnabled { + t.Fatalf("expected router deployment to enable the PROXY protocol") + } + + // Wait for the internal router service to exist. + internalService := &corev1.Service{} + err = wait.PollImmediate(1*time.Second, 10*time.Second, func() (bool, error) { + if err := cl.Get(context.TODO(), types.NamespacedName{Namespace: "openshift-ingress", Name: fmt.Sprintf("router-internal-%s", ci.Name)}, internalService); err != nil { + return false, nil + } + return true, nil + }) + if err != nil { + t.Fatalf("failed to get internal router service: %v", err) + } + + // TODO: Wait for internal router service selector bug to be fixed. + // An alternative to test this would be to use an actual proxy protocol + // request to the internal router service. 
+ // import "net" + // connection, err := net.Dial("tcp", internalService.Spec.ClusterIP) + // if err != nil { + // t.Fatalf("failed to connect to internal router service: %v", err) + // } + // defer connection.Close() + + // req := []byte("LOCAL\r\nGET / HTTP/1.1\r\nHost: non.existent.test\r\n\r\n") + // req = []byte(fmt.Sprintf("PROXY TCP4 10.9.8.7 %s 54321 443\r\nGET / HTTP/1.1\r\nHost: non.existent.test\r\n\r\n", internalService.Spec.ClusterIP)) + // connection.Write(req) + // data := make([]byte, 4096) + // if _, err := connection.Read(data); err != nil { + // t.Fatalf("failed to read response from internal router service: %v", err) + // } else { + // check response is a http response 503. + // } + +} + // TODO: Use manifest factory to build expectations // TODO: Find a way to do this test without mutating the default ingress? func TestClusterIngressUpdate(t *testing.T) {