Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 10 additions & 6 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -130,22 +130,26 @@ kind-cni-calico:
kubectl create -f $(CURDIR)/acceptance/framework/environment/cni-kind/custom-resources.yaml
@sleep 20

# Helper target to tear down any local kind clusters from a previous
# acceptance-test run so the create targets below start from a clean slate.
kind-delete:
	kind delete cluster --name dc1
	kind delete cluster --name dc2
	kind delete cluster --name dc3
	kind delete cluster --name dc4


# Helper target for doing local cni acceptance testing
kind-cni: kind-delete
	kind create cluster --config=$(CURDIR)/acceptance/framework/environment/cni-kind/kind.config --name dc1 --image $(KIND_NODE_IMAGE)
	make kind-cni-calico
	kind create cluster --config=$(CURDIR)/acceptance/framework/environment/cni-kind/kind.config --name dc2 --image $(KIND_NODE_IMAGE)
	make kind-cni-calico

# Helper target for doing local acceptance testing; depends on kind-delete
# so stale clusters are removed before the four test clusters are created.
kind: kind-delete
	kind create cluster --name dc1 --image $(KIND_NODE_IMAGE)
	kind create cluster --name dc2 --image $(KIND_NODE_IMAGE)
	kind create cluster --name dc3 --image $(KIND_NODE_IMAGE)
	kind create cluster --name dc4 --image $(KIND_NODE_IMAGE)

# Helper target for loading local dev images (run with `DEV_IMAGE=...` to load non-k8s images)
kind-load:
Expand Down
9 changes: 5 additions & 4 deletions acceptance/ci-inputs/kind_acceptance_test_packages.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,8 @@

# Each entry assigns a CI runner index to the acceptance-test packages it
# executes; runner indices must be unique so no package set runs twice.
- {runner: 0, test-packages: "partitions"}
- {runner: 1, test-packages: "peering"}
- {runner: 2, test-packages: "sameness"}
- {runner: 3, test-packages: "connect snapshot-agent wan-federation"}
- {runner: 4, test-packages: "cli vault metrics"}
- {runner: 5, test-packages: "api-gateway ingress-gateway sync example consul-dns"}
- {runner: 6, test-packages: "config-entries terminating-gateway basic"}
1 change: 1 addition & 0 deletions acceptance/framework/config/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -96,6 +96,7 @@ type TestConfig struct {
VaultServerVersion string

NoCleanupOnFailure bool
NoCleanup bool
DebugDirectory string

UseAKS bool
Expand Down
18 changes: 9 additions & 9 deletions acceptance/framework/connhelper/connect_helper.go
Original file line number Diff line number Diff line change
Expand Up @@ -133,22 +133,22 @@ func (c *ConnectHelper) DeployClientAndServer(t *testing.T) {

// TODO: A base fixture is the wrong place for these files
k8s.KubectlApply(t, opts, "../fixtures/bases/openshift/")
helpers.Cleanup(t, c.Cfg.NoCleanupOnFailure, func() {
helpers.Cleanup(t, c.Cfg.NoCleanupOnFailure, c.Cfg.NoCleanup, func() {
k8s.KubectlDelete(t, opts, "../fixtures/bases/openshift/")
})

k8s.DeployKustomize(t, opts, c.Cfg.NoCleanupOnFailure, c.Cfg.DebugDirectory, "../fixtures/cases/static-server-openshift")
k8s.DeployKustomize(t, opts, c.Cfg.NoCleanupOnFailure, c.Cfg.NoCleanup, c.Cfg.DebugDirectory, "../fixtures/cases/static-server-openshift")
if c.Cfg.EnableTransparentProxy {
k8s.DeployKustomize(t, opts, c.Cfg.NoCleanupOnFailure, c.Cfg.DebugDirectory, "../fixtures/cases/static-client-openshift-tproxy")
k8s.DeployKustomize(t, opts, c.Cfg.NoCleanupOnFailure, c.Cfg.NoCleanup, c.Cfg.DebugDirectory, "../fixtures/cases/static-client-openshift-tproxy")
} else {
k8s.DeployKustomize(t, opts, c.Cfg.NoCleanupOnFailure, c.Cfg.DebugDirectory, "../fixtures/cases/static-client-openshift-inject")
k8s.DeployKustomize(t, opts, c.Cfg.NoCleanupOnFailure, c.Cfg.NoCleanup, c.Cfg.DebugDirectory, "../fixtures/cases/static-client-openshift-inject")
}
} else {
k8s.DeployKustomize(t, c.Ctx.KubectlOptions(t), c.Cfg.NoCleanupOnFailure, c.Cfg.DebugDirectory, "../fixtures/cases/static-server-inject")
k8s.DeployKustomize(t, c.Ctx.KubectlOptions(t), c.Cfg.NoCleanupOnFailure, c.Cfg.NoCleanup, c.Cfg.DebugDirectory, "../fixtures/cases/static-server-inject")
if c.Cfg.EnableTransparentProxy {
k8s.DeployKustomize(t, c.Ctx.KubectlOptions(t), c.Cfg.NoCleanupOnFailure, c.Cfg.DebugDirectory, "../fixtures/cases/static-client-tproxy")
k8s.DeployKustomize(t, c.Ctx.KubectlOptions(t), c.Cfg.NoCleanupOnFailure, c.Cfg.NoCleanup, c.Cfg.DebugDirectory, "../fixtures/cases/static-client-tproxy")
} else {
k8s.DeployKustomize(t, c.Ctx.KubectlOptions(t), c.Cfg.NoCleanupOnFailure, c.Cfg.DebugDirectory, "../fixtures/cases/static-client-inject")
k8s.DeployKustomize(t, c.Ctx.KubectlOptions(t), c.Cfg.NoCleanupOnFailure, c.Cfg.NoCleanup, c.Cfg.DebugDirectory, "../fixtures/cases/static-client-inject")
}
}
// Check that both static-server and static-client have been injected and
Expand Down Expand Up @@ -185,7 +185,7 @@ func (c *ConnectHelper) SetupAppNamespace(t *testing.T) {
return
}
require.NoError(t, err)
helpers.Cleanup(t, c.Cfg.NoCleanupOnFailure, func() {
helpers.Cleanup(t, c.Cfg.NoCleanupOnFailure, c.Cfg.NoCleanup, func() {
k8s.RunKubectl(t, opts, "delete", "ns", opts.Namespace)
})

Expand All @@ -208,7 +208,7 @@ func (c *ConnectHelper) CreateResolverRedirect(t *testing.T) {
kustomizeDir := "../fixtures/cases/resolver-redirect-virtualip"
k8s.KubectlApplyK(t, opts, kustomizeDir)

helpers.Cleanup(t, c.Cfg.NoCleanupOnFailure, func() {
helpers.Cleanup(t, c.Cfg.NoCleanupOnFailure, c.Cfg.NoCleanup, func() {
k8s.KubectlDeleteK(t, opts, kustomizeDir)
})
}
Expand Down
4 changes: 3 additions & 1 deletion acceptance/framework/consul/cli_cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,7 @@ type CLICluster struct {
kubeConfig string
kubeContext string
noCleanupOnFailure bool
noCleanup bool
debugDirectory string
logger terratestLogger.TestLogger
cli cli.CLI
Expand Down Expand Up @@ -109,6 +110,7 @@ func NewCLICluster(
kubeConfig: cfg.GetPrimaryKubeEnv().KubeConfig,
kubeContext: cfg.GetPrimaryKubeEnv().KubeContext,
noCleanupOnFailure: cfg.NoCleanupOnFailure,
noCleanup: cfg.NoCleanup,
debugDirectory: cfg.DebugDirectory,
logger: logger,
cli: *cli,
Expand All @@ -122,7 +124,7 @@ func (c *CLICluster) Create(t *testing.T) {

// Make sure we delete the cluster if we receive an interrupt signal and
// register cleanup so that we delete the cluster when test finishes.
helpers.Cleanup(t, c.noCleanupOnFailure, func() {
helpers.Cleanup(t, c.noCleanupOnFailure, c.noCleanup, func() {
c.Destroy(t)
})

Expand Down
10 changes: 6 additions & 4 deletions acceptance/framework/consul/helm_cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,7 @@ type HelmCluster struct {
runtimeClient client.Client
kubernetesClient kubernetes.Interface
noCleanupOnFailure bool
noCleanup bool
debugDirectory string
logger terratestLogger.TestLogger
}
Expand Down Expand Up @@ -107,6 +108,7 @@ func NewHelmCluster(
runtimeClient: ctx.ControllerRuntimeClient(t),
kubernetesClient: ctx.KubernetesClient(t),
noCleanupOnFailure: cfg.NoCleanupOnFailure,
noCleanup: cfg.NoCleanup,
debugDirectory: cfg.DebugDirectory,
logger: logger,
}
Expand All @@ -117,7 +119,7 @@ func (h *HelmCluster) Create(t *testing.T) {

// Make sure we delete the cluster if we receive an interrupt signal and
// register cleanup so that we delete the cluster when test finishes.
helpers.Cleanup(t, h.noCleanupOnFailure, func() {
helpers.Cleanup(t, h.noCleanupOnFailure, h.noCleanup, func() {
h.Destroy(t)
})

Expand Down Expand Up @@ -508,7 +510,7 @@ func configurePodSecurityPolicies(t *testing.T, client kubernetes.Interface, cfg
}
}

helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() {
helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() {
_ = client.PolicyV1beta1().PodSecurityPolicies().Delete(context.Background(), pspName, metav1.DeleteOptions{})
_ = client.RbacV1().ClusterRoles().Delete(context.Background(), pspName, metav1.DeleteOptions{})
_ = client.RbacV1().RoleBindings(namespace).Delete(context.Background(), pspName, metav1.DeleteOptions{})
Expand Down Expand Up @@ -559,7 +561,7 @@ func configureSCCs(t *testing.T, client kubernetes.Interface, cfg *config.TestCo
}
}

helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() {
helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() {
_ = client.RbacV1().RoleBindings(namespace).Delete(context.Background(), anyuidRoleBinding, metav1.DeleteOptions{})
_ = client.RbacV1().RoleBindings(namespace).Delete(context.Background(), privilegedRoleBinding, metav1.DeleteOptions{})
})
Expand Down Expand Up @@ -601,7 +603,7 @@ func CreateK8sSecret(t *testing.T, client kubernetes.Interface, cfg *config.Test
}
})

helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() {
helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() {
_ = client.CoreV1().Secrets(namespace).Delete(context.Background(), secretName, metav1.DeleteOptions{})
})
}
5 changes: 5 additions & 0 deletions acceptance/framework/flags/flags.go
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,7 @@ type TestFlags struct {
flagHCPResourceID string

flagNoCleanupOnFailure bool
flagNoCleanup bool

flagDebugDirectory string

Expand Down Expand Up @@ -132,6 +133,9 @@ func (t *TestFlags) init() {
"If true, the tests will not cleanup Kubernetes resources they create when they finish running."+
"Note this flag must be run with -failfast flag, otherwise subsequent tests will fail.")

flag.BoolVar(&t.flagNoCleanup, "no-cleanup", false,
"If true, the tests will not cleanup Kubernetes resources for Vault test")

flag.StringVar(&t.flagDebugDirectory, "debug-directory", "", "The directory where to write debug information about failed test runs, "+
"such as logs and pod definitions. If not provided, a temporary directory will be created by the tests.")

Expand Down Expand Up @@ -224,6 +228,7 @@ func (t *TestFlags) TestConfigFromFlags() *config.TestConfig {
HCPResourceID: t.flagHCPResourceID,

NoCleanupOnFailure: t.flagNoCleanupOnFailure,
NoCleanup: t.flagNoCleanup,
DebugDirectory: tempDir,
UseAKS: t.flagUseAKS,
UseEKS: t.flagUseEKS,
Expand Down
4 changes: 2 additions & 2 deletions acceptance/framework/helpers/helpers.go
Original file line number Diff line number Diff line change
Expand Up @@ -87,7 +87,7 @@ func SetupInterruptHandler(cleanup func()) {
// Cleanup will both register a cleanup function with t
// and SetupInterruptHandler to make sure resources get cleaned up
// if an interrupt signal is caught.
func Cleanup(t *testing.T, noCleanupOnFailure bool, cleanup func()) {
func Cleanup(t *testing.T, noCleanupOnFailure bool, noCleanup bool, cleanup func()) {
t.Helper()

// Always clean up when an interrupt signal is caught.
Expand All @@ -97,7 +97,7 @@ func Cleanup(t *testing.T, noCleanupOnFailure bool, cleanup func()) {
// We need to wrap the cleanup function because t that is passed in to this function
// might not have the information on whether the test has failed yet.
wrappedCleanupFunc := func() {
if !(noCleanupOnFailure && t.Failed()) {
if !((noCleanupOnFailure && t.Failed()) || noCleanup) {
logger.Logf(t, "cleaning up resources for %s", t.Name())
cleanup()
} else {
Expand Down
8 changes: 4 additions & 4 deletions acceptance/framework/k8s/deploy.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ import (

// Deploy creates a Kubernetes deployment by applying configuration stored at filepath,
// sets up a cleanup function and waits for the deployment to become available.
func Deploy(t *testing.T, options *k8s.KubectlOptions, noCleanupOnFailure bool, debugDirectory string, filepath string) {
func Deploy(t *testing.T, options *k8s.KubectlOptions, noCleanupOnFailure bool, noCleanup bool, debugDirectory string, filepath string) {
t.Helper()

KubectlApply(t, options, filepath)
Expand All @@ -33,7 +33,7 @@ func Deploy(t *testing.T, options *k8s.KubectlOptions, noCleanupOnFailure bool,
err = yaml.NewYAMLOrJSONDecoder(file, 1024).Decode(&deployment)
require.NoError(t, err)

helpers.Cleanup(t, noCleanupOnFailure, func() {
helpers.Cleanup(t, noCleanupOnFailure, noCleanup, func() {
// Note: this delete command won't wait for pods to be fully terminated.
// This shouldn't cause any test pollution because the underlying
// objects are deployments, and so when other tests create these
Expand All @@ -47,7 +47,7 @@ func Deploy(t *testing.T, options *k8s.KubectlOptions, noCleanupOnFailure bool,

// DeployKustomize creates a Kubernetes deployment by applying the kustomize directory stored at kustomizeDir,
// sets up a cleanup function and waits for the deployment to become available.
func DeployKustomize(t *testing.T, options *k8s.KubectlOptions, noCleanupOnFailure bool, debugDirectory string, kustomizeDir string) {
func DeployKustomize(t *testing.T, options *k8s.KubectlOptions, noCleanupOnFailure bool, noCleanup bool, debugDirectory string, kustomizeDir string) {
t.Helper()

KubectlApplyK(t, options, kustomizeDir)
Expand All @@ -59,7 +59,7 @@ func DeployKustomize(t *testing.T, options *k8s.KubectlOptions, noCleanupOnFailu
err = yaml.NewYAMLOrJSONDecoder(strings.NewReader(output), 1024).Decode(&deployment)
require.NoError(t, err)

helpers.Cleanup(t, noCleanupOnFailure, func() {
helpers.Cleanup(t, noCleanupOnFailure, noCleanup, func() {
// Note: this delete command won't wait for pods to be fully terminated.
// This shouldn't cause any test pollution because the underlying
// objects are deployments, and so when other tests create these
Expand Down
15 changes: 13 additions & 2 deletions acceptance/framework/k8s/kubectl.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
package k8s

import (
"fmt"
"strings"
"testing"
"time"
Expand All @@ -16,6 +17,10 @@ import (
"github.com/stretchr/testify/require"
)

const (
	// kubectlTimeout is the --timeout flag passed to kubectl commands
	// (delete/scale below) so they give up after 120s instead of
	// blocking a test run indefinitely.
	kubectlTimeout = "--timeout=120s"
)

// kubeAPIConnectErrs are errors that sometimes occur when talking to the
// Kubernetes API related to connection issues.
var kubeAPIConnectErrs = []string{
Expand Down Expand Up @@ -97,7 +102,7 @@ func KubectlApplyK(t *testing.T, options *k8s.KubectlOptions, kustomizeDir strin
// deletes it from the cluster by running 'kubectl delete -f'.
// If there's an error deleting the file, fail the test.
// KubectlDelete deletes the resources described by the config file at
// configPath by running 'kubectl delete -f', bounded by kubectlTimeout.
// It fails the test if the delete command returns an error.
func KubectlDelete(t *testing.T, options *k8s.KubectlOptions, configPath string) {
	_, err := RunKubectlAndGetOutputE(t, options, "delete", kubectlTimeout, "-f", configPath)
	require.NoError(t, err)
}

Expand All @@ -107,7 +112,13 @@ func KubectlDelete(t *testing.T, options *k8s.KubectlOptions, configPath string)
// KubectlDeleteK deletes the resources described by the kustomize directory
// at kustomizeDir by running 'kubectl delete -k', bounded by kubectlTimeout.
// It fails the test if the delete command returns an error.
func KubectlDeleteK(t *testing.T, options *k8s.KubectlOptions, kustomizeDir string) {
	// Ignore not found errors because Kubernetes automatically cleans up the kube secrets that we deployed
	// referencing the ServiceAccount when it is deleted.
	_, err := RunKubectlAndGetOutputE(t, options, "delete", kubectlTimeout, "--ignore-not-found", "-k", kustomizeDir)
	require.NoError(t, err)
}

// KubectlScale takes a deployment and scales it to the provided number of
// replicas via 'kubectl scale', bounded by kubectlTimeout. It fails the
// test if the scale command returns an error.
func KubectlScale(t *testing.T, options *k8s.KubectlOptions, deployment string, replicas int) {
	replicasFlag := fmt.Sprintf("--replicas=%d", replicas)
	_, err := RunKubectlAndGetOutputE(t, options, "scale", kubectlTimeout, replicasFlag, deployment)
	require.NoError(t, err)
}

Expand Down
8 changes: 5 additions & 3 deletions acceptance/framework/vault/vault_cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,7 @@ type VaultCluster struct {
kubernetesClient kubernetes.Interface

noCleanupOnFailure bool
noCleanup bool
debugDirectory string
logger terratestLogger.TestLogger
}
Expand Down Expand Up @@ -89,6 +90,7 @@ func NewVaultCluster(t *testing.T, ctx environment.TestContext, cfg *config.Test
kubectlOptions: kopts,
kubernetesClient: ctx.KubernetesClient(t),
noCleanupOnFailure: cfg.NoCleanupOnFailure,
noCleanup: cfg.NoCleanup,
debugDirectory: cfg.DebugDirectory,
logger: logger,
releaseName: releaseName,
Expand Down Expand Up @@ -224,7 +226,7 @@ func (v *VaultCluster) Create(t *testing.T, ctx environment.TestContext, vaultNa

// Make sure we delete the cluster if we receive an interrupt signal and
// register cleanup so that we delete the cluster when test finishes.
helpers.Cleanup(t, v.noCleanupOnFailure, func() {
helpers.Cleanup(t, v.noCleanupOnFailure, v.noCleanup, func() {
v.Destroy(t)
})

Expand Down Expand Up @@ -346,7 +348,7 @@ func (v *VaultCluster) createTLSCerts(t *testing.T) {
require.NoError(t, err)

t.Cleanup(func() {
if !v.noCleanupOnFailure {
if !(v.noCleanupOnFailure || v.noCleanup) {
// We're ignoring error here because secret deletion is best-effort.
_ = v.kubernetesClient.CoreV1().Secrets(namespace).Delete(context.Background(), certSecretName(v.releaseName), metav1.DeleteOptions{})
_ = v.kubernetesClient.CoreV1().Secrets(namespace).Delete(context.Background(), CASecretName(v.releaseName), metav1.DeleteOptions{})
Expand Down Expand Up @@ -419,7 +421,7 @@ func (v *VaultCluster) initAndUnseal(t *testing.T) {
rootTokenSecret := fmt.Sprintf("%s-vault-root-token", v.releaseName)
v.logger.Logf(t, "saving Vault root token to %q Kubernetes secret", rootTokenSecret)

helpers.Cleanup(t, v.noCleanupOnFailure, func() {
helpers.Cleanup(t, v.noCleanupOnFailure, v.noCleanup, func() {
_ = v.kubernetesClient.CoreV1().Secrets(namespace).Delete(context.Background(), rootTokenSecret, metav1.DeleteOptions{})
})
_, err := v.kubernetesClient.CoreV1().Secrets(namespace).Create(context.Background(), &corev1.Secret{
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -60,8 +60,8 @@ func TestAPIGateway_ExternalServers(t *testing.T) {
consulCluster.Create(t)

logger.Log(t, "creating static-server and static-client deployments")
k8s.DeployKustomize(t, ctx.KubectlOptions(t), cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-server-inject")
k8s.DeployKustomize(t, ctx.KubectlOptions(t), cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-inject")
k8s.DeployKustomize(t, ctx.KubectlOptions(t), cfg.NoCleanupOnFailure, cfg.NoCleanup, cfg.DebugDirectory, "../fixtures/cases/static-server-inject")
k8s.DeployKustomize(t, ctx.KubectlOptions(t), cfg.NoCleanupOnFailure, cfg.NoCleanup, cfg.DebugDirectory, "../fixtures/cases/static-client-inject")

// Override the default proxy config settings for this test
consulClient, _ := consulCluster.SetupConsulClient(t, true, serverReleaseName)
Expand All @@ -79,7 +79,7 @@ func TestAPIGateway_ExternalServers(t *testing.T) {
logger.Log(t, "creating api-gateway resources")
out, err := k8s.RunKubectlAndGetOutputE(t, ctx.KubectlOptions(t), "apply", "-k", "../fixtures/bases/api-gateway")
require.NoError(t, err, out)
helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() {
helpers.Cleanup(t, cfg.NoCleanupOnFailure, cfg.NoCleanup, func() {
// Ignore errors here because if the test ran as expected
// the custom resources will have been deleted.
k8s.RunKubectlAndGetOutputE(t, ctx.KubectlOptions(t), "delete", "-k", "../fixtures/bases/api-gateway")
Expand Down
Loading