diff --git a/acceptance/framework/connhelper/connect_helper.go b/acceptance/framework/connhelper/connect_helper.go index 2058fd955c..9db30b0281 100644 --- a/acceptance/framework/connhelper/connect_helper.go +++ b/acceptance/framework/connhelper/connect_helper.go @@ -27,6 +27,8 @@ const ( StaticClientName = "static-client" StaticServerName = "static-server" JobName = "job-client" + + retryTimeout = 120 * time.Second ) // ConnectHelper configures a Consul cluster for connect injection tests. @@ -49,6 +51,7 @@ type ConnectHelper struct { // Ctx is used to deploy Consul Ctx environment.TestContext + // UseAppNamespace is used top optionally deploy applications into a separate namespace. // If unset, the namespace associated with Ctx is used. UseAppNamespace bool @@ -62,6 +65,13 @@ type ConnectHelper struct { ConsulClient *api.Client } +// ConnHelperOpts allows for configuring optional parameters to be passed into the +// conn helper methods. This provides added flexibility, although not every value will be used +// by every method. See documentation for more details. +type ConnHelperOpts struct { + ClientType string +} + // Setup creates a new cluster using the New*Cluster function and assigns it // to the consulCluster field. func (c *ConnectHelper) Setup(t *testing.T) { @@ -90,6 +100,8 @@ func (c *ConnectHelper) Upgrade(t *testing.T) { c.consulCluster.Upgrade(t, c.helmValues()) } +// KubectlOptsForApp returns options using the -apps appended namespace if +// UseAppNamespace is enabled. Otherwise, it returns the ctx options. func (c *ConnectHelper) KubectlOptsForApp(t *testing.T) *terratestK8s.KubectlOptions { opts := c.Ctx.KubectlOptions(t) if !c.UseAppNamespace { @@ -110,7 +122,7 @@ func (c *ConnectHelper) DeployClientAndServer(t *testing.T) { // deployments because golang will execute them in reverse order // (i.e. the last registered cleanup function will be executed first). 
t.Cleanup(func() { - retrier := &retry.Timer{Timeout: 30 * time.Second, Wait: 100 * time.Millisecond} + retrier := &retry.Timer{Timeout: retryTimeout, Wait: 100 * time.Millisecond} retry.RunWith(retrier, t, func(r *retry.R) { tokens, _, err := c.ConsulClient.ACL().TokenList(nil) require.NoError(r, err) @@ -155,7 +167,7 @@ func (c *ConnectHelper) DeployClientAndServer(t *testing.T) { // Check that both static-server and static-client have been injected and // now have 2 containers. retry.RunWith( - &retry.Timer{Timeout: 30 * time.Second, Wait: 100 * time.Millisecond}, t, + &retry.Timer{Timeout: retryTimeout, Wait: 100 * time.Millisecond}, t, func(r *retry.R) { for _, labelSelector := range []string{"app=static-server", "app=static-client"} { podList, err := c.Ctx.KubernetesClient(t).CoreV1(). @@ -171,6 +183,18 @@ func (c *ConnectHelper) DeployClientAndServer(t *testing.T) { }) } +func (c *ConnectHelper) CreateNamespace(t *testing.T, namespace string) { + opts := c.Ctx.KubectlOptions(t) + _, err := k8s.RunKubectlAndGetOutputE(t, opts, "create", "ns", namespace) + if err != nil && strings.Contains(err.Error(), "AlreadyExists") { + return + } + require.NoError(t, err) + helpers.Cleanup(t, c.Cfg.NoCleanupOnFailure, c.Cfg.NoCleanup, func() { + k8s.RunKubectl(t, opts, "delete", "ns", namespace) + }) +} + // DeployJob deploys a job pod to the Kubernetes // cluster which will be used to test service mesh connectivity. If the Secure // flag is true, a pre-check is done to ensure that the ACL tokens for the test @@ -257,14 +281,7 @@ func (c *ConnectHelper) SetupAppNamespace(t *testing.T) { opts := c.KubectlOptsForApp(t) // If we are deploying apps in another namespace, create the namespace. 
- _, err := k8s.RunKubectlAndGetOutputE(t, opts, "create", "ns", opts.Namespace) - if err != nil && strings.Contains(err.Error(), "AlreadyExists") { - return - } - require.NoError(t, err) - helpers.Cleanup(t, c.Cfg.NoCleanupOnFailure, c.Cfg.NoCleanup, func() { - k8s.RunKubectl(t, opts, "delete", "ns", opts.Namespace) - }) + c.CreateNamespace(t, opts.Namespace) if c.Cfg.EnableRestrictedPSAEnforcement { // Allow anything to run in the app namespace. @@ -273,7 +290,6 @@ func (c *ConnectHelper) SetupAppNamespace(t *testing.T) { "pod-security.kubernetes.io/enforce-version=v1.24", ) } - } // CreateResolverRedirect creates a resolver that redirects to a static-server, a corresponding k8s service, @@ -291,15 +307,17 @@ func (c *ConnectHelper) CreateResolverRedirect(t *testing.T) { } // TestConnectionFailureWithoutIntention ensures the connection to the static -// server fails when no intentions are configured. -func (c *ConnectHelper) TestConnectionFailureWithoutIntention(t *testing.T, clientType ...string) { +// server fails when no intentions are configured. When provided with a ClientType option +// the client is overridden, otherwise a default will be used. +func (c *ConnectHelper) TestConnectionFailureWithoutIntention(t *testing.T, connHelperOpts ConnHelperOpts) { logger.Log(t, "checking that the connection is not successful because there's no intention") opts := c.KubectlOptsForApp(t) //Default to deploying static-client. If a client type is passed in (ex. job-client), use that instead. 
client := StaticClientName - if len(clientType) > 0 { - client = clientType[0] + if connHelperOpts.ClientType != "" { + client = connHelperOpts.ClientType } + if c.Cfg.EnableTransparentProxy { k8s.CheckStaticServerConnectionFailing(t, opts, client, "http://static-server") } else { @@ -307,38 +325,63 @@ func (c *ConnectHelper) TestConnectionFailureWithoutIntention(t *testing.T, clie } } +type IntentionOpts struct { + ConnHelperOpts + SourceNamespace string + DestinationNamespace string +} + // CreateIntention creates an intention for the static-server pod to connect to -// the static-client pod. -func (c *ConnectHelper) CreateIntention(t *testing.T, clientType ...string) { +// the static-client pod. opts parameter allows for overriding of some fields. If opts is empty +// then all namespaces and clients use defaults. +func (c *ConnectHelper) CreateIntention(t *testing.T, opts IntentionOpts) { logger.Log(t, "creating intention") //Default to deploying static-client. If a client type is passed in (ex. job-client), use that instead. 
client := StaticClientName - if len(clientType) > 0 { - client = clientType[0] + if opts.ClientType != "" { + client = opts.ClientType } - _, _, err := c.ConsulClient.ConfigEntries().Set(&api.ServiceIntentionsConfigEntry{ - Kind: api.ServiceIntentions, - Name: StaticServerName, - Sources: []*api.SourceIntention{ - { - Name: client, - Action: api.IntentionActionAllow, + + sourceNamespace := c.KubectlOptsForApp(t).Namespace + if opts.SourceNamespace != "" { + sourceNamespace = opts.SourceNamespace + } + + destinationNamespace := c.KubectlOptsForApp(t).Namespace + if opts.DestinationNamespace != "" { + destinationNamespace = opts.DestinationNamespace + } + + retrier := &retry.Timer{Timeout: retryTimeout, Wait: 100 * time.Millisecond} + retry.RunWith(retrier, t, func(r *retry.R) { + _, _, err := c.ConsulClient.ConfigEntries().Set(&api.ServiceIntentionsConfigEntry{ + Kind: api.ServiceIntentions, + Name: StaticServerName, + Namespace: destinationNamespace, + Sources: []*api.SourceIntention{ + { + Namespace: sourceNamespace, + Name: client, + Action: api.IntentionActionAllow, + }, }, - }, - }, nil) - require.NoError(t, err) + }, nil) + require.NoError(r, err) + }) } // TestConnectionSuccess ensures the static-server pod can connect to the -// static-client pod once the intention is set. -func (c *ConnectHelper) TestConnectionSuccess(t *testing.T, clientType ...string) { +// static-client pod once the intention is set. When provided with a ClientType option +// the client is overridden, otherwise a default will be used. +func (c *ConnectHelper) TestConnectionSuccess(t *testing.T, connHelperOpts ConnHelperOpts) { logger.Log(t, "checking that connection is successful") opts := c.KubectlOptsForApp(t) //Default to deploying static-client. If a client type is passed in (ex. job-client), use that instead. 
client := StaticClientName - if len(clientType) > 0 { - client = clientType[0] + if connHelperOpts.ClientType != "" { + client = connHelperOpts.ClientType } + if c.Cfg.EnableTransparentProxy { // todo: add an assertion that the traffic is going through the proxy k8s.CheckStaticServerConnectionSuccessful(t, opts, client, "http://static-server") diff --git a/acceptance/tests/cli/cli_install_test.go b/acceptance/tests/cli/cli_install_test.go index bb497f913f..dc0ec37500 100644 --- a/acceptance/tests/cli/cli_install_test.go +++ b/acceptance/tests/cli/cli_install_test.go @@ -55,8 +55,8 @@ func TestInstall(t *testing.T) { connHelper.Install(t) connHelper.DeployClientAndServer(t) if c.secure { - connHelper.TestConnectionFailureWithoutIntention(t) - connHelper.CreateIntention(t) + connHelper.TestConnectionFailureWithoutIntention(t, connhelper.ConnHelperOpts{}) + connHelper.CreateIntention(t, connhelper.IntentionOpts{}) } // Run proxy list and get the two results. @@ -124,7 +124,7 @@ func TestInstall(t *testing.T) { logger.Log(t, string(proxyOut)) } - connHelper.TestConnectionSuccess(t) + connHelper.TestConnectionSuccess(t, connhelper.ConnHelperOpts{}) connHelper.TestConnectionFailureWhenUnhealthy(t) }) } diff --git a/acceptance/tests/connect/connect_inject_test.go b/acceptance/tests/connect/connect_inject_test.go index 13fb41f562..b44523235c 100644 --- a/acceptance/tests/connect/connect_inject_test.go +++ b/acceptance/tests/connect/connect_inject_test.go @@ -51,11 +51,11 @@ func TestConnectInject(t *testing.T) { connHelper.Install(t) connHelper.DeployClientAndServer(t) if c.secure { - connHelper.TestConnectionFailureWithoutIntention(t) - connHelper.CreateIntention(t) + connHelper.TestConnectionFailureWithoutIntention(t, connhelper.ConnHelperOpts{}) + connHelper.CreateIntention(t, connhelper.IntentionOpts{}) } - connHelper.TestConnectionSuccess(t) + connHelper.TestConnectionSuccess(t, connhelper.ConnHelperOpts{}) connHelper.TestConnectionFailureWhenUnhealthy(t) }) } diff 
--git a/acceptance/tests/connect/connect_proxy_lifecycle_test.go b/acceptance/tests/connect/connect_proxy_lifecycle_test.go index d24a36883b..a94175270e 100644 --- a/acceptance/tests/connect/connect_proxy_lifecycle_test.go +++ b/acceptance/tests/connect/connect_proxy_lifecycle_test.go @@ -120,11 +120,11 @@ func TestConnectInject_ProxyLifecycleShutdown(t *testing.T) { }) if testCfg.secure { - connHelper.TestConnectionFailureWithoutIntention(t) - connHelper.CreateIntention(t) + connHelper.TestConnectionFailureWithoutIntention(t, connhelper.ConnHelperOpts{}) + connHelper.CreateIntention(t, connhelper.IntentionOpts{}) } - connHelper.TestConnectionSuccess(t) + connHelper.TestConnectionSuccess(t, connhelper.ConnHelperOpts{}) // Get static-client pod name ns := ctx.KubectlOptions(t).Namespace @@ -278,7 +278,7 @@ func TestConnectInject_ProxyLifecycleShutdownJob(t *testing.T) { } }) - connHelper.TestConnectionSuccess(t, connhelper.JobName) + connHelper.TestConnectionSuccess(t, connhelper.ConnHelperOpts{ClientType: connhelper.JobName}) // Get job-client pod name ns := ctx.KubectlOptions(t).Namespace diff --git a/acceptance/tests/fixtures/bases/service-resolver/kustomization.yaml b/acceptance/tests/fixtures/bases/service-resolver/kustomization.yaml new file mode 100644 index 0000000000..8e36fe276e --- /dev/null +++ b/acceptance/tests/fixtures/bases/service-resolver/kustomization.yaml @@ -0,0 +1,5 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resources: + - service-resolver.yaml \ No newline at end of file diff --git a/acceptance/tests/fixtures/bases/service-resolver/service-resolver.yaml b/acceptance/tests/fixtures/bases/service-resolver/service-resolver.yaml new file mode 100644 index 0000000000..2e0459e381 --- /dev/null +++ b/acceptance/tests/fixtures/bases/service-resolver/service-resolver.yaml @@ -0,0 +1,7 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +apiVersion: consul.hashicorp.com/v1alpha1 +kind: ServiceResolver +metadata: + name: static-server diff --git a/acceptance/tests/fixtures/cases/wan-federation/dc1-ns2-static-server/kustomization.yaml b/acceptance/tests/fixtures/cases/wan-federation/dc1-ns2-static-server/kustomization.yaml new file mode 100644 index 0000000000..0775282c18 --- /dev/null +++ b/acceptance/tests/fixtures/cases/wan-federation/dc1-ns2-static-server/kustomization.yaml @@ -0,0 +1,8 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resources: + - ../../../bases/static-server + +patchesStrategicMerge: + - patch.yaml \ No newline at end of file diff --git a/acceptance/tests/fixtures/cases/wan-federation/dc1-ns2-static-server/patch.yaml b/acceptance/tests/fixtures/cases/wan-federation/dc1-ns2-static-server/patch.yaml new file mode 100644 index 0000000000..c4f181ce7d --- /dev/null +++ b/acceptance/tests/fixtures/cases/wan-federation/dc1-ns2-static-server/patch.yaml @@ -0,0 +1,41 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: static-server +spec: + template: + metadata: + annotations: + "consul.hashicorp.com/connect-inject": "true" + spec: + containers: + - name: static-server + image: docker.mirror.hashicorp.services/kschoche/http-echo:latest + args: + - -text="ns2" + - -listen=:8080 + ports: + - containerPort: 8080 + name: http + livenessProbe: + httpGet: + port: 8080 + initialDelaySeconds: 1 + failureThreshold: 1 + periodSeconds: 1 + startupProbe: + httpGet: + port: 8080 + initialDelaySeconds: 1 + failureThreshold: 30 + periodSeconds: 1 + readinessProbe: + exec: + command: ['sh', '-c', 'test ! 
-f /tmp/unhealthy'] + initialDelaySeconds: 1 + failureThreshold: 1 + periodSeconds: 1 + serviceAccountName: static-server diff --git a/acceptance/tests/fixtures/cases/wan-federation/dc1-static-server/kustomization.yaml b/acceptance/tests/fixtures/cases/wan-federation/dc1-static-server/kustomization.yaml new file mode 100644 index 0000000000..0775282c18 --- /dev/null +++ b/acceptance/tests/fixtures/cases/wan-federation/dc1-static-server/kustomization.yaml @@ -0,0 +1,8 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resources: + - ../../../bases/static-server + +patchesStrategicMerge: + - patch.yaml \ No newline at end of file diff --git a/acceptance/tests/fixtures/cases/wan-federation/dc1-static-server/patch.yaml b/acceptance/tests/fixtures/cases/wan-federation/dc1-static-server/patch.yaml new file mode 100644 index 0000000000..60c1219e33 --- /dev/null +++ b/acceptance/tests/fixtures/cases/wan-federation/dc1-static-server/patch.yaml @@ -0,0 +1,41 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: static-server +spec: + template: + metadata: + annotations: + "consul.hashicorp.com/connect-inject": "true" + spec: + containers: + - name: static-server + image: docker.mirror.hashicorp.services/kschoche/http-echo:latest + args: + - -text="dc1" + - -listen=:8080 + ports: + - containerPort: 8080 + name: http + livenessProbe: + httpGet: + port: 8080 + initialDelaySeconds: 1 + failureThreshold: 1 + periodSeconds: 1 + startupProbe: + httpGet: + port: 8080 + initialDelaySeconds: 1 + failureThreshold: 30 + periodSeconds: 1 + readinessProbe: + exec: + command: ['sh', '-c', 'test ! 
-f /tmp/unhealthy'] + initialDelaySeconds: 1 + failureThreshold: 1 + periodSeconds: 1 + serviceAccountName: static-server diff --git a/acceptance/tests/fixtures/cases/wan-federation/dc2-static-server/kustomization.yaml b/acceptance/tests/fixtures/cases/wan-federation/dc2-static-server/kustomization.yaml new file mode 100644 index 0000000000..0775282c18 --- /dev/null +++ b/acceptance/tests/fixtures/cases/wan-federation/dc2-static-server/kustomization.yaml @@ -0,0 +1,8 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resources: + - ../../../bases/static-server + +patchesStrategicMerge: + - patch.yaml \ No newline at end of file diff --git a/acceptance/tests/fixtures/cases/wan-federation/dc2-static-server/patch.yaml b/acceptance/tests/fixtures/cases/wan-federation/dc2-static-server/patch.yaml new file mode 100644 index 0000000000..b167f50c9a --- /dev/null +++ b/acceptance/tests/fixtures/cases/wan-federation/dc2-static-server/patch.yaml @@ -0,0 +1,41 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: static-server +spec: + template: + metadata: + annotations: + "consul.hashicorp.com/connect-inject": "true" + spec: + containers: + - name: static-server + image: docker.mirror.hashicorp.services/kschoche/http-echo:latest + args: + - -text="dc2" + - -listen=:8080 + ports: + - containerPort: 8080 + name: http + livenessProbe: + httpGet: + port: 8080 + initialDelaySeconds: 1 + failureThreshold: 1 + periodSeconds: 1 + startupProbe: + httpGet: + port: 8080 + initialDelaySeconds: 1 + failureThreshold: 30 + periodSeconds: 1 + readinessProbe: + exec: + command: ['sh', '-c', 'test ! 
-f /tmp/unhealthy'] + initialDelaySeconds: 1 + failureThreshold: 1 + periodSeconds: 1 + serviceAccountName: static-server diff --git a/acceptance/tests/fixtures/cases/wan-federation/service-resolver/kustomization.yaml b/acceptance/tests/fixtures/cases/wan-federation/service-resolver/kustomization.yaml new file mode 100644 index 0000000000..36e8097f47 --- /dev/null +++ b/acceptance/tests/fixtures/cases/wan-federation/service-resolver/kustomization.yaml @@ -0,0 +1,8 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resources: + - ../../../bases/service-resolver + +patchesStrategicMerge: + - patch.yaml \ No newline at end of file diff --git a/acceptance/tests/fixtures/cases/wan-federation/service-resolver/patch.yaml b/acceptance/tests/fixtures/cases/wan-federation/service-resolver/patch.yaml new file mode 100644 index 0000000000..e89156f605 --- /dev/null +++ b/acceptance/tests/fixtures/cases/wan-federation/service-resolver/patch.yaml @@ -0,0 +1,15 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +apiVersion: consul.hashicorp.com/v1alpha1 +kind: ServiceResolver +metadata: + name: static-server +spec: + connectTimeout: 15s + failover: + '*': + targets: + - datacenter: "dc2" + - namespace: "ns2" + diff --git a/acceptance/tests/fixtures/cases/wan-federation/static-client/kustomization.yaml b/acceptance/tests/fixtures/cases/wan-federation/static-client/kustomization.yaml new file mode 100644 index 0000000000..38bc36bffd --- /dev/null +++ b/acceptance/tests/fixtures/cases/wan-federation/static-client/kustomization.yaml @@ -0,0 +1,8 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +resources: + - ../../../bases/static-client + +patchesStrategicMerge: + - patch.yaml \ No newline at end of file diff --git a/acceptance/tests/fixtures/cases/wan-federation/static-client/patch.yaml b/acceptance/tests/fixtures/cases/wan-federation/static-client/patch.yaml new file mode 100644 index 0000000000..f2f8981601 --- /dev/null +++ b/acceptance/tests/fixtures/cases/wan-federation/static-client/patch.yaml @@ -0,0 +1,22 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: static-client +spec: + template: + metadata: + annotations: + 'consul.hashicorp.com/connect-inject': 'true' + "consul.hashicorp.com/connect-service-upstreams": "static-server:1234" + spec: + containers: + - name: static-client + image: anubhavmishra/tiny-tools:latest + # Just spin & wait forever, we'll use `kubectl exec` to demo + command: ['/bin/sh', '-c', '--'] + args: ['while true; do sleep 30; done;'] + # If ACLs are enabled, the serviceAccountName must match the Consul service name. 
+ serviceAccountName: static-client \ No newline at end of file diff --git a/acceptance/tests/wan-federation/wan_federation_gateway_test.go b/acceptance/tests/wan-federation/wan_federation_gateway_test.go index 6abacda438..ec466c93ec 100644 --- a/acceptance/tests/wan-federation/wan_federation_gateway_test.go +++ b/acceptance/tests/wan-federation/wan_federation_gateway_test.go @@ -9,6 +9,7 @@ import ( "testing" "time" + "github.com/hashicorp/consul-k8s/acceptance/framework/connhelper" "github.com/hashicorp/consul-k8s/acceptance/framework/consul" "github.com/hashicorp/consul-k8s/acceptance/framework/environment" "github.com/hashicorp/consul-k8s/acceptance/framework/helpers" @@ -60,15 +61,6 @@ func TestWANFederation_Gateway(t *testing.T) { primaryConsulCluster := consul.NewHelmCluster(t, primaryHelmValues, primaryContext, cfg, releaseName) primaryConsulCluster.Create(t) - // Get the federation secret from the primary cluster and apply it to secondary cluster - federationSecretName := fmt.Sprintf("%s-consul-federation", releaseName) - logger.Logf(t, "retrieving federation secret %s from the primary cluster and applying to the secondary", federationSecretName) - federationSecret, err := primaryContext.KubernetesClient(t).CoreV1().Secrets(primaryContext.KubectlOptions(t).Namespace).Get(context.Background(), federationSecretName, metav1.GetOptions{}) - require.NoError(t, err) - federationSecret.ResourceVersion = "" - _, err = secondaryContext.KubernetesClient(t).CoreV1().Secrets(secondaryContext.KubectlOptions(t).Namespace).Create(context.Background(), federationSecret, metav1.CreateOptions{}) - require.NoError(t, err) - var k8sAuthMethodHost string // When running on kind, the kube API address in kubeconfig will have a localhost address // which will not work from inside the container. 
That's why we need to use the endpoints address instead @@ -82,6 +74,8 @@ func TestWANFederation_Gateway(t *testing.T) { k8sAuthMethodHost = k8s.KubernetesAPIServerHostFromOptions(t, secondaryContext.KubectlOptions(t)) } + federationSecretName := copyFederationSecret(t, releaseName, primaryContext, secondaryContext) + // Create secondary cluster secondaryHelmValues := map[string]string{ "global.datacenter": "dc2", @@ -217,7 +211,7 @@ func checkConnectivity(t *testing.T, ctx environment.TestContext, client *api.Cl targetAddress := fmt.Sprintf("http://%s/", gatewayAddress) logger.Log(t, "checking that the connection is not successful because there's no intention") - k8s.CheckStaticServerHTTPConnectionFailing(t, ctx.KubectlOptions(t), StaticClientName, targetAddress) + k8s.CheckStaticServerHTTPConnectionFailing(t, ctx.KubectlOptions(t), connhelper.StaticClientName, targetAddress) logger.Log(t, "creating intention") _, _, err := client.ConfigEntries().Set(&api.ServiceIntentionsConfigEntry{ @@ -237,5 +231,5 @@ func checkConnectivity(t *testing.T, ctx environment.TestContext, client *api.Cl }() logger.Log(t, "checking that connection is successful") - k8s.CheckStaticServerConnectionSuccessful(t, ctx.KubectlOptions(t), StaticClientName, targetAddress) + k8s.CheckStaticServerConnectionSuccessful(t, ctx.KubectlOptions(t), connhelper.StaticClientName, targetAddress) } diff --git a/acceptance/tests/wan-federation/wan_federation_test.go b/acceptance/tests/wan-federation/wan_federation_test.go index 8edc1f5d03..41d237113a 100644 --- a/acceptance/tests/wan-federation/wan_federation_test.go +++ b/acceptance/tests/wan-federation/wan_federation_test.go @@ -8,17 +8,35 @@ import ( "fmt" "strconv" "testing" + "time" + terratestK8s "github.com/gruntwork-io/terratest/modules/k8s" "github.com/hashicorp/consul-k8s/acceptance/framework/connhelper" "github.com/hashicorp/consul-k8s/acceptance/framework/consul" + "github.com/hashicorp/consul-k8s/acceptance/framework/environment" 
"github.com/hashicorp/consul-k8s/acceptance/framework/helpers" "github.com/hashicorp/consul-k8s/acceptance/framework/k8s" "github.com/hashicorp/consul-k8s/acceptance/framework/logger" + "github.com/hashicorp/consul/sdk/testutil/retry" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -const StaticClientName = "static-client" +const ( + staticClientDeployment = "deploy/static-client" + staticServerDeployment = "deploy/static-server" + + retryTimeout = 5 * time.Minute + + primaryDatacenter = "dc1" + secondaryDatacenter = "dc2" + + localServerPort = "1234" + + primaryNamespace = "ns1" + secondaryNamespace = "ns2" +) // Test that Connect and wan federation over mesh gateways work in a default installation // i.e. without ACLs because TLS is required for WAN federation over mesh gateways. @@ -47,7 +65,7 @@ func TestWANFederation(t *testing.T) { secondaryContext := env.Context(t, 1) primaryHelmValues := map[string]string{ - "global.datacenter": "dc1", + "global.datacenter": primaryDatacenter, "global.tls.enabled": "true", "global.tls.httpsOnly": strconv.FormatBool(c.secure), @@ -77,31 +95,13 @@ func TestWANFederation(t *testing.T) { primaryConsulCluster.Create(t) // Get the federation secret from the primary cluster and apply it to secondary cluster - federationSecretName := fmt.Sprintf("%s-consul-federation", releaseName) - logger.Logf(t, "retrieving federation secret %s from the primary cluster and applying to the secondary", federationSecretName) - federationSecret, err := primaryContext.KubernetesClient(t).CoreV1().Secrets(primaryContext.KubectlOptions(t).Namespace).Get(context.Background(), federationSecretName, metav1.GetOptions{}) - require.NoError(t, err) - federationSecret.ResourceVersion = "" - federationSecret.Namespace = secondaryContext.KubectlOptions(t).Namespace - _, err = 
secondaryContext.KubernetesClient(t).CoreV1().Secrets(secondaryContext.KubectlOptions(t).Namespace).Create(context.Background(), federationSecret, metav1.CreateOptions{}) - require.NoError(t, err) - - var k8sAuthMethodHost string - // When running on kind, the kube API address in kubeconfig will have a localhost address - // which will not work from inside the container. That's why we need to use the endpoints address instead - // which will point the node IP. - if cfg.UseKind { - // The Kubernetes AuthMethod host is read from the endpoints for the Kubernetes service. - kubernetesEndpoint, err := secondaryContext.KubernetesClient(t).CoreV1().Endpoints("default").Get(context.Background(), "kubernetes", metav1.GetOptions{}) - require.NoError(t, err) - k8sAuthMethodHost = fmt.Sprintf("%s:%d", kubernetesEndpoint.Subsets[0].Addresses[0].IP, kubernetesEndpoint.Subsets[0].Ports[0].Port) - } else { - k8sAuthMethodHost = k8s.KubernetesAPIServerHostFromOptions(t, secondaryContext.KubectlOptions(t)) - } + federationSecretName := copyFederationSecret(t, releaseName, primaryContext, secondaryContext) + + k8sAuthMethodHost := k8s.KubernetesAPIServerHost(t, cfg, secondaryContext) // Create secondary cluster secondaryHelmValues := map[string]string{ - "global.datacenter": "dc2", + "global.datacenter": secondaryDatacenter, "global.tls.enabled": "true", "global.tls.httpsOnly": "false", @@ -130,7 +130,7 @@ func TestWANFederation(t *testing.T) { secondaryHelmValues["global.acls.replicationToken.secretName"] = federationSecretName secondaryHelmValues["global.acls.replicationToken.secretKey"] = "replicationToken" secondaryHelmValues["global.federation.k8sAuthMethodHost"] = k8sAuthMethodHost - secondaryHelmValues["global.federation.primaryDatacenter"] = "dc1" + secondaryHelmValues["global.federation.primaryDatacenter"] = primaryDatacenter } if cfg.UseKind { @@ -190,11 +190,266 @@ func TestWANFederation(t *testing.T) { k8s.DeployKustomize(t, primaryHelper.KubectlOptsForApp(t), 
cfg.NoCleanupOnFailure, cfg.NoCleanup, cfg.DebugDirectory, "../fixtures/cases/static-client-multi-dc") if c.secure { - primaryHelper.CreateIntention(t) + primaryHelper.CreateIntention(t, connhelper.IntentionOpts{}) } logger.Log(t, "checking that connection is successful") - k8s.CheckStaticServerConnectionSuccessful(t, primaryHelper.KubectlOptsForApp(t), StaticClientName, "http://localhost:1234") + k8s.CheckStaticServerConnectionSuccessful(t, primaryHelper.KubectlOptsForApp(t), connhelper.StaticClientName, "http://localhost:1234") }) } } + +// Test failover scenarios with a static-server in dc1 and a static-server +// in dc2. Use the static-client on dc1 to reach static-server on dc1 in the +// nominal scenario, then cause a failure in dc1 static-server to see the static-client failover to +// the static-server in dc2 +/* + dc1-static-client -- nominal -- > dc1-static-server in namespace ns1 + dc1-static-client -- failover --> dc2-static-server in namespace ns1 + dc1-static-client -- failover --> dc1-static-server in namespace ns2 +*/ +func TestWANFederationFailover(t *testing.T) { + cases := []struct { + name string + secure bool + }{ + { + name: "secure", + secure: true, + }, + { + name: "default", + secure: false, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + env := suite.Environment() + cfg := suite.Config() + + if cfg.EnableRestrictedPSAEnforcement { + t.Skip("This test case is not run with enable restricted PSA enforcement enabled") + } + + primaryContext := env.DefaultContext(t) + secondaryContext := env.Context(t, 1) + + primaryHelmValues := map[string]string{ + "global.datacenter": primaryDatacenter, + + "global.tls.enabled": "true", + "global.tls.httpsOnly": strconv.FormatBool(c.secure), + + "global.federation.enabled": "true", + "global.federation.createFederationSecret": "true", + + "global.acls.manageSystemACLs": strconv.FormatBool(c.secure), + "global.acls.createReplicationToken": strconv.FormatBool(c.secure), + + 
"connectInject.enabled": "true", + "connectInject.replicas": "1", + + "meshGateway.enabled": "true", + "meshGateway.replicas": "1", + + "global.enableConsulNamespaces": "true", + "connectInject.consulNamespaces.mirroringK8S": "true", + } + + if cfg.UseKind { + primaryHelmValues["meshGateway.service.type"] = "NodePort" + primaryHelmValues["meshGateway.service.nodePort"] = "30000" + } + + releaseName := helpers.RandomName() + + // Install the primary consul cluster in the default kubernetes context + primaryConsulCluster := consul.NewHelmCluster(t, primaryHelmValues, primaryContext, cfg, releaseName) + primaryConsulCluster.Create(t) + + // Get the federation secret from the primary cluster and apply it to secondary cluster + federationSecretName := copyFederationSecret(t, releaseName, primaryContext, secondaryContext) + + k8sAuthMethodHost := k8s.KubernetesAPIServerHost(t, cfg, secondaryContext) + + // Create secondary cluster + secondaryHelmValues := map[string]string{ + "global.datacenter": secondaryDatacenter, + + "global.tls.enabled": "true", + "global.tls.httpsOnly": "false", + "global.acls.manageSystemACLs": strconv.FormatBool(c.secure), + "global.tls.caCert.secretName": federationSecretName, + "global.tls.caCert.secretKey": "caCert", + "global.tls.caKey.secretName": federationSecretName, + "global.tls.caKey.secretKey": "caKey", + + "global.federation.enabled": "true", + + "server.extraVolumes[0].type": "secret", + "server.extraVolumes[0].name": federationSecretName, + "server.extraVolumes[0].load": "true", + "server.extraVolumes[0].items[0].key": "serverConfigJSON", + "server.extraVolumes[0].items[0].path": "config.json", + + "connectInject.enabled": "true", + "connectInject.replicas": "1", + + "meshGateway.enabled": "true", + "meshGateway.replicas": "1", + + "global.enableConsulNamespaces": "true", + "connectInject.consulNamespaces.mirroringK8S": "true", + } + + if c.secure { + secondaryHelmValues["global.acls.replicationToken.secretName"] = 
federationSecretName + secondaryHelmValues["global.acls.replicationToken.secretKey"] = "replicationToken" + secondaryHelmValues["global.federation.k8sAuthMethodHost"] = k8sAuthMethodHost + secondaryHelmValues["global.federation.primaryDatacenter"] = primaryDatacenter + } + + if cfg.UseKind { + secondaryHelmValues["meshGateway.service.type"] = "NodePort" + secondaryHelmValues["meshGateway.service.nodePort"] = "30000" + } + + // Install the secondary consul cluster in the secondary kubernetes context + secondaryConsulCluster := consul.NewHelmCluster(t, secondaryHelmValues, secondaryContext, cfg, releaseName) + secondaryConsulCluster.Create(t) + + primaryClient, _ := primaryConsulCluster.SetupConsulClient(t, c.secure) + secondaryClient, _ := secondaryConsulCluster.SetupConsulClient(t, c.secure) + + // Verify federation between servers + logger.Log(t, "Verifying federation was successful") + helpers.VerifyFederation(t, primaryClient, secondaryClient, releaseName, c.secure) + + // Create a ProxyDefaults resource to configure services to use the mesh + // gateways. 
+ logger.Log(t, "Creating proxy-defaults config") + kustomizeDir := "../fixtures/bases/mesh-gateway" + k8s.KubectlApplyK(t, secondaryContext.KubectlOptions(t), kustomizeDir) + helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() { + k8s.KubectlDeleteK(t, secondaryContext.KubectlOptions(t), kustomizeDir) + }) + + primaryHelper := connhelper.ConnectHelper{ + Secure: c.secure, + ReleaseName: releaseName, + Ctx: primaryContext, + UseAppNamespace: false, + Cfg: cfg, + ConsulClient: primaryClient, + } + secondaryHelper := connhelper.ConnectHelper{ + Secure: c.secure, + ReleaseName: releaseName, + Ctx: secondaryContext, + UseAppNamespace: false, + Cfg: cfg, + ConsulClient: secondaryClient, + } + + // Create Namespaces + // We create a namespace (ns1) in both the primary and secondary datacenters (dc1, dc2) + // We then create a secondary namespace (ns2) in the primary datacenter (dc1) + primaryNamespaceOpts := primaryHelper.Ctx.KubectlOptionsForNamespace(primaryNamespace) + primaryHelper.CreateNamespace(t, primaryNamespaceOpts.Namespace) + primarySecondaryNamepsaceOpts := primaryHelper.Ctx.KubectlOptionsForNamespace(secondaryNamespace) + primaryHelper.CreateNamespace(t, primarySecondaryNamepsaceOpts.Namespace) + secondaryNamespaceOpts := secondaryHelper.Ctx.KubectlOptionsForNamespace(primaryNamespace) + secondaryHelper.CreateNamespace(t, secondaryNamespaceOpts.Namespace) + + // Create a static-server in dc2 to respond with its own name for checking failover. 
+ logger.Log(t, "Creating static-server in dc2") + k8s.DeployKustomize(t, secondaryNamespaceOpts, cfg.NoCleanupOnFailure, cfg.NoCleanup, cfg.DebugDirectory, "../fixtures/cases/wan-federation/dc2-static-server") + + // Spin up a server on dc1 which will be the primary upstream for our client + logger.Log(t, "Creating static-server in dc1") + k8s.DeployKustomize(t, primaryNamespaceOpts, cfg.NoCleanupOnFailure, cfg.NoCleanup, cfg.DebugDirectory, "../fixtures/cases/wan-federation/dc1-static-server") + logger.Log(t, "Creating static-client in dc1") + k8s.DeployKustomize(t, primaryNamespaceOpts, cfg.NoCleanupOnFailure, cfg.NoCleanup, cfg.DebugDirectory, "../fixtures/cases/wan-federation/static-client") + + // Spin up a second server on dc1 in a separate namespace + logger.Logf(t, "Creating server on dc1 in namespace %s", primarySecondaryNamepsaceOpts.Namespace) + k8s.DeployKustomize(t, primarySecondaryNamepsaceOpts, cfg.NoCleanupOnFailure, cfg.NoCleanup, cfg.DebugDirectory, "../fixtures/cases/wan-federation/dc1-ns2-static-server") + + // There is currently an issue that requires the intentions and resolvers to be created after + // the static-server/clients when using namespaces. 
When created before, Consul gives a "namespace does not exist" + // error + if c.secure { + // Only need to create intentions in the primary datacenter as they will be replicated to the secondary + // ns1 static-client (source) -> ns1 static-server (destination) + primaryHelper.CreateIntention(t, connhelper.IntentionOpts{DestinationNamespace: primaryNamespaceOpts.Namespace, SourceNamespace: primaryNamespaceOpts.Namespace}) + + // ns1 static-client (source) -> ns2 static-server (destination) + primaryHelper.CreateIntention(t, connhelper.IntentionOpts{DestinationNamespace: primarySecondaryNamepsaceOpts.Namespace, SourceNamespace: primaryNamespaceOpts.Namespace}) + } + + // Create a service resolver for failover + logger.Log(t, "Creating service resolver") + k8s.DeployKustomize(t, primaryNamespaceOpts, cfg.NoCleanupOnFailure, cfg.NoCleanup, cfg.DebugDirectory, "../fixtures/cases/wan-federation/service-resolver") + + // Verify that we respond with the static-server in the primary datacenter + logger.Log(t, "Verifying static-server in dc1 responds") + serviceFailoverCheck(t, primaryNamespaceOpts, localServerPort, primaryDatacenter) + + // Scale down the primary datacenter static-server and see the failover + logger.Log(t, "Scale down dc1 static-server") + k8s.KubectlScale(t, primaryNamespaceOpts, staticServerDeployment, 0) + + // Verify that we respond with the static-server in the secondary datacenter + logger.Log(t, "Verifying static-server in dc2 responds") + serviceFailoverCheck(t, primaryNamespaceOpts, localServerPort, secondaryDatacenter) + + // Scale down the secondary datacenter (dc2) static-server and see the failover + logger.Log(t, "Scale down dc2 static-server") + k8s.KubectlScale(t, secondaryNamespaceOpts, staticServerDeployment, 0) + + // Verify that we respond with the static-server in the secondary namespace (ns2) of the primary datacenter + logger.Log(t, "Verifying static-server in secondary namespace (ns2) responds") + serviceFailoverCheck(t, primaryNamespaceOpts, localServerPort,
secondaryNamespace) + }) + } +} + +// serviceFailoverCheck verifies that the server failed over as expected by checking that curling the `static-server` +// using the `static-client` responds with the expected cluster name. Each static-server responds with a unique +// name so that we can verify failover occurred as expected. +func serviceFailoverCheck(t *testing.T, options *terratestK8s.KubectlOptions, port string, expectedName string) { + timer := &retry.Timer{Timeout: retryTimeout, Wait: 5 * time.Second} + var resp string + var err error + + f := func(ft require.TestingT) { + resp, err = k8s.RunKubectlAndGetOutputE(t, options, "exec", "-i", + staticClientDeployment, "-c", connhelper.StaticClientName, "--", "curl", fmt.Sprintf("localhost:%s", port)) + require.NoError(ft, err) + assert.Contains(ft, resp, expectedName) + } + + retry.RunWith(timer, t, func(r *retry.R) { + f(r) + }) + + // Try again to rule out load-balancing + f(t) + + logger.Log(t, resp) +} + +// copyFederationSecret copies the federation secret created by the primary cluster +// into the secondary cluster's namespace and returns the secret's name. The +// ResourceVersion is cleared before Create so the object can be created fresh in +// the secondary cluster. +func copyFederationSecret(t *testing.T, releaseName string, primaryContext, secondaryContext environment.TestContext) string { + // Get the federation secret from the primary cluster and apply it to secondary cluster + federationSecretName := fmt.Sprintf("%s-consul-federation", releaseName) + logger.Logf(t, "Retrieving federation secret %s from the primary cluster and applying to the secondary", federationSecretName) + federationSecret, err := primaryContext.KubernetesClient(t).CoreV1().Secrets(primaryContext.KubectlOptions(t).Namespace).Get(context.Background(), federationSecretName, metav1.GetOptions{}) + require.NoError(t, err) + federationSecret.ResourceVersion = "" + federationSecret.Namespace = secondaryContext.KubectlOptions(t).Namespace + _, err = secondaryContext.KubernetesClient(t).CoreV1().Secrets(secondaryContext.KubectlOptions(t).Namespace).Create(context.Background(), federationSecret, metav1.CreateOptions{}) + require.NoError(t, err) + + return federationSecretName +}