-
Notifications
You must be signed in to change notification settings - Fork 47
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
* Making more tests parallel
* Better name for the namespace
- Loading branch information
1 parent
8889523
commit a4a9f70
Showing
17 changed files
with
1,342 additions
and
979 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,114 @@ | ||
package main | ||
|
||
import ( | ||
"context" | ||
"crypto/tls" | ||
"fmt" | ||
|
||
"github.com/google/uuid" | ||
. "github.com/onsi/ginkgo/v2" | ||
. "github.com/onsi/gomega" | ||
mpsv1alpha1 "github.com/playfab/thundernetes/pkg/operator/api/v1alpha1" | ||
"k8s.io/client-go/kubernetes" | ||
ctrl "sigs.k8s.io/controller-runtime" | ||
"sigs.k8s.io/controller-runtime/pkg/client" | ||
) | ||
|
||
var _ = Describe("test GameServerBuild with allocation tests", Ordered, func() { | ||
testBuildAllocationName := "testbuildallocation" | ||
testBuildAllocationID := "85ffe8da-c82f-4035-86c5-9d2b5f42d6aa" | ||
var cert tls.Certificate | ||
ctx := context.Background() | ||
var kubeClient client.Client | ||
var coreClient *kubernetes.Clientset | ||
BeforeAll(func() { | ||
var err error | ||
cert, err = tls.LoadX509KeyPair(certFile, keyFile) | ||
Expect(err).ToNot(HaveOccurred()) | ||
kubeConfig := ctrl.GetConfigOrDie() | ||
kubeClient, err = createKubeClient(kubeConfig) | ||
Expect(err).ToNot(HaveOccurred()) | ||
err = kubeClient.Create(ctx, createE2eBuild(testBuildAllocationName, testBuildAllocationID, img)) | ||
Expect(err).ToNot(HaveOccurred()) | ||
coreClient, err = kubernetes.NewForConfig(kubeConfig) | ||
Expect(err).ToNot(HaveOccurred()) | ||
|
||
Eventually(func(g Gomega) { | ||
state := buildState{ | ||
buildName: testBuildAllocationName, | ||
buildID: testBuildAllocationID, | ||
standingByCount: 2, | ||
podRunningCount: 2, | ||
gsbHealth: mpsv1alpha1.BuildHealthy, | ||
} | ||
g.Expect(verifyGameServerBuildOverall(ctx, kubeClient, state)).To(Succeed()) | ||
}, timeout, interval).Should(Succeed()) | ||
}) | ||
It("should return 400 with a non-GUID sessionID", func() { | ||
// allocating with a non-Guid sessionID, expecting 400 | ||
sessionID1_5 := "notAGuid" | ||
Expect(allocate(testBuildAllocationID, sessionID1_5, cert)).To(Equal(fmt.Errorf("%s 400", invalidStatusCode))) | ||
Eventually(func(g Gomega) { | ||
state := buildState{ | ||
buildName: testBuildAllocationName, | ||
buildID: testBuildAllocationID, | ||
standingByCount: 2, | ||
podRunningCount: 2, | ||
gsbHealth: mpsv1alpha1.BuildHealthy, | ||
} | ||
g.Expect(verifyGameServerBuildOverall(ctx, kubeClient, state)).To(Succeed()) | ||
}, timeout, interval).Should(Succeed()) | ||
|
||
}) | ||
It("should return 400 with a non-GUID BuildID", func() { | ||
// allocating with a non-Guid BuildID, expecting 400 | ||
sessionID1_6 := uuid.New().String() | ||
Expect(allocate("not_a_guid", sessionID1_6, cert)).To(Equal(fmt.Errorf("%s 400", invalidStatusCode))) | ||
Eventually(func(g Gomega) { | ||
state := buildState{ | ||
buildName: testBuildAllocationName, | ||
buildID: testBuildAllocationID, | ||
standingByCount: 2, | ||
podRunningCount: 2, | ||
gsbHealth: mpsv1alpha1.BuildHealthy, | ||
} | ||
g.Expect(verifyGameServerBuildOverall(ctx, kubeClient, state)).To(Succeed()) | ||
}, timeout, interval).Should(Succeed()) | ||
}) | ||
|
||
It("should return 404 with a non-existent BuildID", func() { | ||
// allocating on non-existent BuildID, expecting 404 | ||
sessionID1_7 := uuid.New().String() | ||
Expect(allocate(uuid.New().String(), sessionID1_7, cert)).To(Equal(fmt.Errorf("%s 404", invalidStatusCode))) | ||
Eventually(func(g Gomega) { | ||
state := buildState{ | ||
buildName: testBuildAllocationName, | ||
buildID: testBuildAllocationID, | ||
standingByCount: 2, | ||
podRunningCount: 2, | ||
gsbHealth: mpsv1alpha1.BuildHealthy, | ||
} | ||
g.Expect(verifyGameServerBuildOverall(ctx, kubeClient, state)).To(Succeed()) | ||
}, timeout, interval).Should(Succeed()) | ||
}) | ||
|
||
It("should allocate properly and get one active", func() { | ||
// allocating correctly, expecting one active | ||
sessionID1_2 := uuid.New().String() | ||
Expect(allocate(testBuildAllocationID, sessionID1_2, cert)).To(Succeed()) | ||
Eventually(func(g Gomega) { | ||
state := buildState{ | ||
buildName: testBuildAllocationName, | ||
buildID: testBuildAllocationID, | ||
standingByCount: 2, | ||
activeCount: 1, | ||
podRunningCount: 3, | ||
gsbHealth: mpsv1alpha1.BuildHealthy, | ||
} | ||
g.Expect(verifyGameServerBuildOverall(ctx, kubeClient, state)).To(Succeed()) | ||
}, timeout, interval).Should(Succeed()) | ||
|
||
Expect(validateThatAllocatedServersHaveReadyForPlayersUnblocked(ctx, kubeClient, coreClient, testBuildAllocationID, 1)).To(Succeed()) | ||
|
||
}) | ||
}) |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,124 @@ | ||
package main | ||
|
||
import ( | ||
"context" | ||
"time" | ||
|
||
. "github.com/onsi/ginkgo/v2" | ||
. "github.com/onsi/gomega" | ||
mpsv1alpha1 "github.com/playfab/thundernetes/pkg/operator/api/v1alpha1" | ||
corev1 "k8s.io/api/core/v1" | ||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||
ctrl "sigs.k8s.io/controller-runtime" | ||
"sigs.k8s.io/controller-runtime/pkg/client" | ||
) | ||
|
||
var _ = Describe("Crashing Build", func() { | ||
testBuildCrashingName := "crashing" | ||
testCrashingBuildID := "85ffe8da-c82f-4035-86c5-9d2b5f42d6f7" | ||
It("should become unhealthy", func() { | ||
ctx := context.Background() | ||
kubeConfig := ctrl.GetConfigOrDie() | ||
kubeClient, err := createKubeClient(kubeConfig) | ||
Expect(err).ToNot(HaveOccurred()) | ||
err = kubeClient.Create(ctx, createCrashingBuild(testBuildCrashingName, testCrashingBuildID, img)) | ||
Expect(err).ToNot(HaveOccurred()) | ||
|
||
Eventually(func(g Gomega) { | ||
gsb := &mpsv1alpha1.GameServerBuild{} | ||
err := kubeClient.Get(ctx, client.ObjectKey{Name: testBuildCrashingName, Namespace: testNamespace}, gsb) | ||
g.Expect(err).ToNot(HaveOccurred()) | ||
crashesEqualOrLargerThan5 := gsb.Status.CrashesCount >= 5 | ||
g.Expect(crashesEqualOrLargerThan5).To(BeTrue()) | ||
state := buildState{ | ||
buildName: testBuildCrashingName, | ||
buildID: testCrashingBuildID, | ||
initializingCount: 0, | ||
standingByCount: 0, | ||
podRunningCount: 0, | ||
gsbHealth: mpsv1alpha1.BuildUnhealthy, | ||
} | ||
g.Expect(verifyGameServerBuildOverall(ctx, kubeClient, state)).To(Succeed()) | ||
}, 45*time.Second, interval).Should(Succeed()) // bigger timeout because of the time crashes take to occur and captured by the controller | ||
|
||
// we are updating the GameServerBuild to be able to have more crashes for it to become Unhealthy | ||
gsb := &mpsv1alpha1.GameServerBuild{} | ||
err = kubeClient.Get(ctx, client.ObjectKey{Name: testBuildCrashingName, Namespace: testNamespace}, gsb) | ||
Expect(err).ToNot(HaveOccurred()) | ||
patch := client.MergeFrom(gsb.DeepCopy()) | ||
gsb.Spec.CrashesToMarkUnhealthy = 10 | ||
|
||
err = kubeClient.Patch(ctx, gsb, patch) | ||
Expect(err).ToNot(HaveOccurred()) | ||
|
||
// so we expect it to be healthy again | ||
Eventually(func(g Gomega) { | ||
gsb := &mpsv1alpha1.GameServerBuild{} | ||
err = kubeClient.Get(ctx, client.ObjectKey{Name: testBuildCrashingName, Namespace: testNamespace}, gsb) | ||
Expect(err).ToNot(HaveOccurred()) | ||
state := buildState{ | ||
buildName: testBuildCrashingName, | ||
buildID: testCrashingBuildID, | ||
initializingCount: 0, | ||
standingByCount: 0, | ||
podRunningCount: 0, | ||
gsbHealth: mpsv1alpha1.BuildHealthy, | ||
} | ||
g.Expect(verifyGameServerBuildOverall(ctx, kubeClient, state)).To(Succeed()) | ||
}, 10*time.Second, interval).Should(Succeed()) | ||
|
||
// but only temporarily, since the game servers will continue to crash | ||
Eventually(func(g Gomega) { | ||
gsb := &mpsv1alpha1.GameServerBuild{} | ||
err = kubeClient.Get(ctx, client.ObjectKey{Name: testBuildCrashingName, Namespace: testNamespace}, gsb) | ||
Expect(err).ToNot(HaveOccurred()) | ||
var crashesEqualOrLargerThan10 bool = gsb.Status.CrashesCount >= 10 | ||
g.Expect(crashesEqualOrLargerThan10).To(BeTrue()) | ||
state := buildState{ | ||
buildName: testBuildCrashingName, | ||
buildID: testCrashingBuildID, | ||
initializingCount: 0, | ||
standingByCount: 0, | ||
podRunningCount: 0, | ||
gsbHealth: mpsv1alpha1.BuildUnhealthy, | ||
} | ||
g.Expect(verifyGameServerBuildOverall(ctx, kubeClient, state)).To(Succeed()) | ||
}, 30*time.Second, interval).Should(Succeed()) | ||
}) | ||
}) | ||
|
||
// createCrashingBuild creates a build which contains game servers that will crash on start | ||
func createCrashingBuild(buildName, buildID, img string) *mpsv1alpha1.GameServerBuild { | ||
return &mpsv1alpha1.GameServerBuild{ | ||
ObjectMeta: metav1.ObjectMeta{ | ||
Name: buildName, | ||
Namespace: testNamespace, | ||
}, | ||
Spec: mpsv1alpha1.GameServerBuildSpec{ | ||
BuildID: buildID, | ||
TitleID: "1E03", | ||
PortsToExpose: []mpsv1alpha1.PortToExpose{{ContainerName: containerName, PortName: portKey}}, | ||
StandingBy: 2, | ||
Max: 4, | ||
CrashesToMarkUnhealthy: 5, | ||
Template: corev1.PodTemplateSpec{ | ||
Spec: corev1.PodSpec{ | ||
Containers: []corev1.Container{ | ||
{ | ||
Image: img, | ||
Name: containerName, | ||
ImagePullPolicy: corev1.PullIfNotPresent, | ||
Command: []string{"/bin/sh", "-c", "sleep 2 && command_that_does_not_exist"}, | ||
Ports: []corev1.ContainerPort{ | ||
{ | ||
Name: portKey, | ||
ContainerPort: 80, | ||
}, | ||
}, | ||
}, | ||
}, | ||
}, | ||
}, | ||
}, | ||
} | ||
} |
Oops, something went wrong.