making tests parallel (#165)
* making more tests parallel

* better name for the namespace
dgkanatsios committed Feb 17, 2022
1 parent 8889523 commit a4a9f70
Showing 17 changed files with 1,342 additions and 979 deletions.
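The Ginkgo suite entry point that drives these specs is not part of this diff. For context, a minimal sketch of a typical Ginkgo v2 bootstrap for this package follows; the file contents, suite name, and the note about enabling parallelism through the ginkgo CLI are illustrative assumptions, not taken from this commit. Ordered containers like the one added in allocation_test.go still run their specs serially within a single worker process.

package main

import (
	"testing"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

// TestEndToEnd wires Gomega failures into Ginkgo and runs the suite.
// Running the suite through the ginkgo CLI, e.g. `ginkgo -p ./cmd/e2e`
// or `ginkgo --procs=N`, distributes top-level containers across
// parallel worker processes.
func TestEndToEnd(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Thundernetes e2e suite")
}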
2 changes: 1 addition & 1 deletion Makefile
@@ -68,7 +68,7 @@ installkind:
 	mv ./kind ./pkg/operator/testbin/bin/kind

 createkindcluster:
-	./pkg/operator/testbin/bin/kind create cluster --config ./e2e/kind-config.yaml
+	./pkg/operator/testbin/bin/kind create cluster --config ./e2e/kind-config.yaml --image kindest/node:v1.22.5

 deletekindcluster:
 	./pkg/operator/testbin/bin/kind delete cluster
114 changes: 114 additions & 0 deletions cmd/e2e/allocation_test.go
@@ -0,0 +1,114 @@
package main

import (
"context"
"crypto/tls"
"fmt"

"github.com/google/uuid"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
mpsv1alpha1 "github.com/playfab/thundernetes/pkg/operator/api/v1alpha1"
"k8s.io/client-go/kubernetes"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
)

var _ = Describe("test GameServerBuild with allocation tests", Ordered, func() {
testBuildAllocationName := "testbuildallocation"
testBuildAllocationID := "85ffe8da-c82f-4035-86c5-9d2b5f42d6aa"
var cert tls.Certificate
ctx := context.Background()
var kubeClient client.Client
var coreClient *kubernetes.Clientset
BeforeAll(func() {
var err error
cert, err = tls.LoadX509KeyPair(certFile, keyFile)
Expect(err).ToNot(HaveOccurred())
kubeConfig := ctrl.GetConfigOrDie()
kubeClient, err = createKubeClient(kubeConfig)
Expect(err).ToNot(HaveOccurred())
err = kubeClient.Create(ctx, createE2eBuild(testBuildAllocationName, testBuildAllocationID, img))
Expect(err).ToNot(HaveOccurred())
coreClient, err = kubernetes.NewForConfig(kubeConfig)
Expect(err).ToNot(HaveOccurred())

Eventually(func(g Gomega) {
state := buildState{
buildName: testBuildAllocationName,
buildID: testBuildAllocationID,
standingByCount: 2,
podRunningCount: 2,
gsbHealth: mpsv1alpha1.BuildHealthy,
}
g.Expect(verifyGameServerBuildOverall(ctx, kubeClient, state)).To(Succeed())
}, timeout, interval).Should(Succeed())
})
It("should return 400 with a non-GUID sessionID", func() {
// allocating with a non-Guid sessionID, expecting 400
sessionID1_5 := "notAGuid"
Expect(allocate(testBuildAllocationID, sessionID1_5, cert)).To(Equal(fmt.Errorf("%s 400", invalidStatusCode)))
Eventually(func(g Gomega) {
state := buildState{
buildName: testBuildAllocationName,
buildID: testBuildAllocationID,
standingByCount: 2,
podRunningCount: 2,
gsbHealth: mpsv1alpha1.BuildHealthy,
}
g.Expect(verifyGameServerBuildOverall(ctx, kubeClient, state)).To(Succeed())
}, timeout, interval).Should(Succeed())

})
It("should return 400 with a non-GUID BuildID", func() {
// allocating with a non-Guid BuildID, expecting 400
sessionID1_6 := uuid.New().String()
Expect(allocate("not_a_guid", sessionID1_6, cert)).To(Equal(fmt.Errorf("%s 400", invalidStatusCode)))
Eventually(func(g Gomega) {
state := buildState{
buildName: testBuildAllocationName,
buildID: testBuildAllocationID,
standingByCount: 2,
podRunningCount: 2,
gsbHealth: mpsv1alpha1.BuildHealthy,
}
g.Expect(verifyGameServerBuildOverall(ctx, kubeClient, state)).To(Succeed())
}, timeout, interval).Should(Succeed())
})

It("should return 404 with a non-existent BuildID", func() {
// allocating on non-existent BuildID, expecting 404
sessionID1_7 := uuid.New().String()
Expect(allocate(uuid.New().String(), sessionID1_7, cert)).To(Equal(fmt.Errorf("%s 404", invalidStatusCode)))
Eventually(func(g Gomega) {
state := buildState{
buildName: testBuildAllocationName,
buildID: testBuildAllocationID,
standingByCount: 2,
podRunningCount: 2,
gsbHealth: mpsv1alpha1.BuildHealthy,
}
g.Expect(verifyGameServerBuildOverall(ctx, kubeClient, state)).To(Succeed())
}, timeout, interval).Should(Succeed())
})

It("should allocate properly and get one active", func() {
// allocating correctly, expecting one active
sessionID1_2 := uuid.New().String()
Expect(allocate(testBuildAllocationID, sessionID1_2, cert)).To(Succeed())
Eventually(func(g Gomega) {
state := buildState{
buildName: testBuildAllocationName,
buildID: testBuildAllocationID,
standingByCount: 2,
activeCount: 1,
podRunningCount: 3,
gsbHealth: mpsv1alpha1.BuildHealthy,
}
g.Expect(verifyGameServerBuildOverall(ctx, kubeClient, state)).To(Succeed())
}, timeout, interval).Should(Succeed())

Expect(validateThatAllocatedServersHaveReadyForPlayersUnblocked(ctx, kubeClient, coreClient, testBuildAllocationID, 1)).To(Succeed())

})
})
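The allocate helper these specs call is defined elsewhere in the e2e package and is not part of this diff. As a rough sketch only, assuming the allocation API is reached over HTTPS with the client certificate loaded in BeforeAll (the endpoint URL, request payload shape, and the local constant below are assumptions, not taken from this commit), it could look roughly like this:

package main

import (
	"bytes"
	"crypto/tls"
	"encoding/json"
	"fmt"
	"net/http"
)

// invalidStatusCode mirrors the package-level constant the specs above compare
// against; its real value is defined elsewhere in the package (assumed here).
const invalidStatusCode = "invalid status code returned"

// allocateSketch posts an allocation request for the given build and session.
// The endpoint and payload shape are assumptions about the allocation API.
func allocateSketch(buildID, sessionID string, cert tls.Certificate) error {
	httpClient := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				Certificates:       []tls.Certificate{cert},
				InsecureSkipVerify: true, // the e2e cluster uses a self-signed cert (assumption)
			},
		},
	}
	body, err := json.Marshal(map[string]string{"buildID": buildID, "sessionID": sessionID})
	if err != nil {
		return err
	}
	// hypothetical address; the real one comes from the test environment
	resp, err := httpClient.Post("https://127.0.0.1:5000/api/v1/allocate", "application/json", bytes.NewReader(body))
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		// this is the error shape the specs above assert on, e.g. "<invalidStatusCode> 400"
		return fmt.Errorf("%s %d", invalidStatusCode, resp.StatusCode)
	}
	return nil
}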
124 changes: 124 additions & 0 deletions cmd/e2e/build_crashing_test.go
@@ -0,0 +1,124 @@
package main

import (
"context"
"time"

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
mpsv1alpha1 "github.com/playfab/thundernetes/pkg/operator/api/v1alpha1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
)

var _ = Describe("Crashing Build", func() {
testBuildCrashingName := "crashing"
testCrashingBuildID := "85ffe8da-c82f-4035-86c5-9d2b5f42d6f7"
It("should become unhealthy", func() {
ctx := context.Background()
kubeConfig := ctrl.GetConfigOrDie()
kubeClient, err := createKubeClient(kubeConfig)
Expect(err).ToNot(HaveOccurred())
err = kubeClient.Create(ctx, createCrashingBuild(testBuildCrashingName, testCrashingBuildID, img))
Expect(err).ToNot(HaveOccurred())

Eventually(func(g Gomega) {
gsb := &mpsv1alpha1.GameServerBuild{}
err := kubeClient.Get(ctx, client.ObjectKey{Name: testBuildCrashingName, Namespace: testNamespace}, gsb)
g.Expect(err).ToNot(HaveOccurred())
crashesEqualOrLargerThan5 := gsb.Status.CrashesCount >= 5
g.Expect(crashesEqualOrLargerThan5).To(BeTrue())
state := buildState{
buildName: testBuildCrashingName,
buildID: testCrashingBuildID,
initializingCount: 0,
standingByCount: 0,
podRunningCount: 0,
gsbHealth: mpsv1alpha1.BuildUnhealthy,
}
g.Expect(verifyGameServerBuildOverall(ctx, kubeClient, state)).To(Succeed())
}, 45*time.Second, interval).Should(Succeed()) // bigger timeout because of the time it takes for crashes to occur and be captured by the controller

// we update the GameServerBuild so that more crashes are required before it is marked Unhealthy
gsb := &mpsv1alpha1.GameServerBuild{}
err = kubeClient.Get(ctx, client.ObjectKey{Name: testBuildCrashingName, Namespace: testNamespace}, gsb)
Expect(err).ToNot(HaveOccurred())
patch := client.MergeFrom(gsb.DeepCopy())
gsb.Spec.CrashesToMarkUnhealthy = 10

err = kubeClient.Patch(ctx, gsb, patch)
Expect(err).ToNot(HaveOccurred())

// so we expect it to be healthy again
Eventually(func(g Gomega) {
gsb := &mpsv1alpha1.GameServerBuild{}
err = kubeClient.Get(ctx, client.ObjectKey{Name: testBuildCrashingName, Namespace: testNamespace}, gsb)
g.Expect(err).ToNot(HaveOccurred())
state := buildState{
buildName: testBuildCrashingName,
buildID: testCrashingBuildID,
initializingCount: 0,
standingByCount: 0,
podRunningCount: 0,
gsbHealth: mpsv1alpha1.BuildHealthy,
}
g.Expect(verifyGameServerBuildOverall(ctx, kubeClient, state)).To(Succeed())
}, 10*time.Second, interval).Should(Succeed())

// but only temporarily, since the game servers will continue to crash
Eventually(func(g Gomega) {
gsb := &mpsv1alpha1.GameServerBuild{}
err = kubeClient.Get(ctx, client.ObjectKey{Name: testBuildCrashingName, Namespace: testNamespace}, gsb)
g.Expect(err).ToNot(HaveOccurred())
crashesEqualOrLargerThan10 := gsb.Status.CrashesCount >= 10
g.Expect(crashesEqualOrLargerThan10).To(BeTrue())
state := buildState{
buildName: testBuildCrashingName,
buildID: testCrashingBuildID,
initializingCount: 0,
standingByCount: 0,
podRunningCount: 0,
gsbHealth: mpsv1alpha1.BuildUnhealthy,
}
g.Expect(verifyGameServerBuildOverall(ctx, kubeClient, state)).To(Succeed())
}, 30*time.Second, interval).Should(Succeed())
})
})

// createCrashingBuild creates a build which contains game servers that will crash on start
func createCrashingBuild(buildName, buildID, img string) *mpsv1alpha1.GameServerBuild {
return &mpsv1alpha1.GameServerBuild{
ObjectMeta: metav1.ObjectMeta{
Name: buildName,
Namespace: testNamespace,
},
Spec: mpsv1alpha1.GameServerBuildSpec{
BuildID: buildID,
TitleID: "1E03",
PortsToExpose: []mpsv1alpha1.PortToExpose{{ContainerName: containerName, PortName: portKey}},
StandingBy: 2,
Max: 4,
CrashesToMarkUnhealthy: 5,
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Image: img,
Name: containerName,
ImagePullPolicy: corev1.PullIfNotPresent,
Command: []string{"/bin/sh", "-c", "sleep 2 && command_that_does_not_exist"},
Ports: []corev1.ContainerPort{
{
Name: portKey,
ContainerPort: 80,
},
},
},
},
},
},
},
}
}
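createE2eBuild, used by the allocation spec in cmd/e2e/allocation_test.go above, is also defined outside this excerpt. A plausible minimal version mirrors createCrashingBuild with a long-running container instead of one that crashes; the omission of a Command and of CrashesToMarkUnhealthy is an assumption, not taken from this commit.

// sketch of createE2eBuild: the real implementation lives elsewhere in the package
func createE2eBuild(buildName, buildID, img string) *mpsv1alpha1.GameServerBuild {
	return &mpsv1alpha1.GameServerBuild{
		ObjectMeta: metav1.ObjectMeta{
			Name:      buildName,
			Namespace: testNamespace,
		},
		Spec: mpsv1alpha1.GameServerBuildSpec{
			BuildID:       buildID,
			TitleID:       "1E03",
			PortsToExpose: []mpsv1alpha1.PortToExpose{{ContainerName: containerName, PortName: portKey}},
			StandingBy:    2, // matches the standingByCount the allocation spec waits for
			Max:           4,
			Template: corev1.PodTemplateSpec{
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{
						{
							Image:           img,
							Name:            containerName,
							ImagePullPolicy: corev1.PullIfNotPresent,
							// no Command override: the game server image runs normally (assumption)
							Ports: []corev1.ContainerPort{
								{
									Name:          portKey,
									ContainerPort: 80,
								},
							},
						},
					},
				},
			},
		},
	}
}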