diff --git a/test/performance/framework/networkpolicy/scale_up.go b/test/performance/framework/networkpolicy/scale_up.go
index cab599a7a09..b121ac55ce7 100644
--- a/test/performance/framework/networkpolicy/scale_up.go
+++ b/test/performance/framework/networkpolicy/scale_up.go
@@ -143,10 +143,11 @@ func ScaleUp(ctx context.Context, kubeConfig *rest.Config, cs kubernetes.Interfa
 			}
 			actualCheckNum++
 			if fromPod.Namespace != client_pod.ClientPodsNamespace {
-				if err = workload_pod.Update(ctx, cs, fromPod.Namespace, fromPod.Name, []string{fmt.Sprintf("%s:%d", ip, 80)}, workload_pod.ScaleClientPodProbeContainerName); err != nil {
+				clientPod, err := workload_pod.CreateClientPod(ctx, cs, fromPod.Namespace, fromPod.Name, []string{fmt.Sprintf("%s:%d", ip, 80)}, workload_pod.ScaleClientPodProbeContainerName)
+				if err != nil {
 					klog.ErrorS(err, "Update test Pod failed")
 				}
-				klog.InfoS("Update test Pod to check NetworkPolicy")
+				klog.InfoS("Create test Pod to check NetworkPolicy", "Name", clientPod.Name, "Namespace", clientPod.Namespace)
 			}
 			go func() {
 				if err := utils.WaitUntil(ctx, ch, kubeConfig, cs, fromPod.Namespace, fromPod.Name, ip, true); err != nil {
diff --git a/test/performance/framework/pods.go b/test/performance/framework/pods.go
index f6a4cfa8503..74713710e6e 100644
--- a/test/performance/framework/pods.go
+++ b/test/performance/framework/pods.go
@@ -120,7 +120,7 @@ func ScaleUpWorkloadPods(ctx context.Context, ch chan time.Duration, data *Scale
 			time.Sleep(time.Duration(utils.GenRandInt()%100) * time.Millisecond)
 			labelNum := i/2 + 1
 			gErr.Go(func() error {
-				podName := fmt.Sprintf("antrea-scale-test-pod-%s", uuid.New().String())
+				podName := fmt.Sprintf("antrea-scale-test-pod-server-%s", uuid.New().String()[:8])
 				pod := newWorkloadPod(podName, ns, true, labelNum)
 				// if !data.Specification.RealNode {
 				// 	onRealNode := (index % data.nodesNum) >= data.simulateNodesNum
diff --git a/test/performance/framework/service/scale_up.go b/test/performance/framework/service/scale_up.go
index b27f502b693..eb031e5d0e2 100644
--- a/test/performance/framework/service/scale_up.go
+++ b/test/performance/framework/service/scale_up.go
@@ -147,18 +147,6 @@ func ScaleUp(ctx context.Context, provider providers.ProviderInterface, controlP
 			return fmt.Errorf("allocate IP from ServiceCIDR error: %+v", err)
 		}
 
-		var fromPod *corev1.Pod
-		if testPodIndex < len(podList.Items) && actualCheckNum < cap(ch) {
-			fromPod = &podList.Items[testPodIndex]
-			testPodIndex++
-
-			if err = workload_pod.Update(ctx, cs, fromPod.Namespace, fromPod.Name, []string{fmt.Sprintf("%s:%d", clusterIP, 80)}, workload_pod.ScaleTestPodProbeContainerName); err != nil {
-				klog.ErrorS(err, "Update test Pod failed")
-			}
-			klog.InfoS("Update test Pod to check Service", "ClusterIP", clusterIP)
-			time.Sleep(1 * time.Second)
-		}
-
 		var newSvc *corev1.Service
 		var err error
 		svc.Spec.ClusterIP = clusterIP.String()
@@ -177,19 +165,20 @@ func ScaleUp(ctx context.Context, provider providers.ProviderInterface, controlP
 		klog.InfoS("Create Service", "Name", newSvc.Name, "ClusterIP", newSvc.Spec.ClusterIP, "Namespace", ns)
 		svcs = append(svcs, ServiceInfo{Name: newSvc.Name, IP: newSvc.Spec.ClusterIP, NameSpace: newSvc.Namespace})
 
-		// ip := newSvc.Spec.ClusterIP
 		klog.InfoS("go FetchTimestampFromLog", "actualCheckNum", actualCheckNum, "cap(ch)", cap(ch))
-		if fromPod != nil && actualCheckNum < cap(ch) {
-			// k := int(utils.GenRandInt()) % len(clientPods)
-			// clientPod := clientPods[k]
-			// klog.V(2).InfoS("Check service", "svc", svc, "Pod", clientPod.Name)
+		if actualCheckNum < cap(ch) {
 			actualCheckNum++
 			go func() {
-				// if err := utils.WaitUntil(ctx, ch, kubeConfig, cs, clientPod.Namespace, clientPod.Name, ip, false); err != nil {
-				// 	klog.ErrorS(err, "Check readiness of service error", "ClientPodName", clientPod.Name, "svc", svc)
-				// }
-				if err := utils.FetchTimestampFromLog(ctx, cs, fromPod.Namespace, fromPod.Name, workload_pod.ScaleTestPodProbeContainerName, ch, startTimeStamp); err != nil {
-					klog.ErrorS(err, "Check readiness of service error", "ClientPodName", fromPod.Name, "svc", svc)
+				fromPod := &podList.Items[testPodIndex%len(podList.Items)]
+				testPodIndex++
+
+				clientPod, err := workload_pod.CreateClientPod(ctx, cs, fromPod.Namespace, fromPod.Name, []string{fmt.Sprintf("%s:%d", clusterIP, 80)}, workload_pod.ScaleTestPodProbeContainerName)
+				if err != nil {
+					klog.ErrorS(err, "Create client test Pod failed")
+				}
+				klog.InfoS("Create client test Pod to check Service", "ClusterIP", clusterIP)
+				if err := utils.FetchTimestampFromLog(ctx, cs, clientPod.Namespace, clientPod.Name, workload_pod.ScaleTestPodProbeContainerName, ch, startTimeStamp); err != nil {
+					klog.ErrorS(err, "Check readiness of service error", "ClientPodName", clientPod.Name, "svc", svc)
 				}
 			}()
 		}
diff --git a/test/performance/framework/workload_pod/update.go b/test/performance/framework/workload_pod/update.go
index f27b2a49e41..e7c0e5a6b3f 100644
--- a/test/performance/framework/workload_pod/update.go
+++ b/test/performance/framework/workload_pod/update.go
@@ -19,9 +19,10 @@ const (
 	ScaleTestPodProbeContainerName = "antrea-scale-test-pod-probe"
 )
 
-func Update(ctx context.Context, kClient kubernetes.Interface, namespace, podName string, probes []string, containerName string) error {
+func CreateClientPod(ctx context.Context, kClient kubernetes.Interface, namespace, podName string, probes []string, containerName string) (*corev1.Pod, error) {
 	var err error
 	expectContainerNum := 0
+	var newPod *corev1.Pod
 	err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
 		pod, err := kClient.CoreV1().Pods(namespace).Get(ctx, podName, metav1.GetOptions{})
 		if err != nil {
@@ -57,22 +58,22 @@ func Update(ctx context.Context, kClient kubernetes.Interface, namespace, podNam
 		pod.Spec.Containers = append(pod.Spec.Containers, containers...)
 		expectContainerNum = len(pod.Spec.Containers)
 
-		err = kClient.CoreV1().Pods(namespace).Delete(ctx, podName, metav1.DeleteOptions{})
-		if err != nil {
-			return err
-		}
-
-		err = wait.PollImmediate(2*time.Second, 60*time.Second, func() (done bool, err error) {
-			_, err = kClient.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
-			return err != nil, nil
-		})
-
-		if err != nil {
-			return fmt.Errorf("error waiting for Pod termination: %v", err)
-		}
-		newPod := &corev1.Pod{
+		// err = kClient.CoreV1().Pods(namespace).Delete(ctx, podName, metav1.DeleteOptions{})
+		// if err != nil {
+		// 	return err
+		// }
+		//
+		// err = wait.PollImmediate(2*time.Second, 60*time.Second, func() (done bool, err error) {
+		// 	_, err = kClient.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
+		// 	return err != nil, nil
+		// })
+		//
+		// if err != nil {
+		// 	return fmt.Errorf("error waiting for Pod termination: %v", err)
+		// }
+		newPod = &corev1.Pod{
 			ObjectMeta: metav1.ObjectMeta{
-				Name:      pod.Name,
+				Name:      strings.Replace(pod.Name, "server", "client", 1),
 				Namespace: pod.Namespace,
 				Labels:    pod.Labels,
 			},
@@ -83,11 +84,11 @@ func Update(ctx context.Context, kClient kubernetes.Interface, namespace, podNam
 		return err
 	})
 	if err != nil {
-		return err
+		return nil, err
 	}
 
 	err = wait.PollImmediate(time.Second, 30, func() (bool, error) {
-		pod, err := kClient.CoreV1().Pods(namespace).Get(ctx, podName, metav1.GetOptions{})
+		pod, err := kClient.CoreV1().Pods(namespace).Get(ctx, newPod.Name, metav1.GetOptions{})
 		if err != nil {
 			return false, err
 		}
@@ -99,9 +100,9 @@ func Update(ctx context.Context, kClient kubernetes.Interface, namespace, podNam
 		return false, nil
 	})
 	if err != nil {
-		return err
+		return nil, err
 	}
-	klog.InfoS("Container added successfully!")
-	return nil
+	klog.InfoS("Created client Pod successfully")
+	return newPod, nil
 }
 
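For reference, a minimal self-contained sketch (not part of the patch) of the server-to-client Pod naming round trip these changes rely on: pods.go now names each workload Pod antrea-scale-test-pod-server-<short-uuid>, and CreateClientPod derives the probe Pod's name via strings.Replace(pod.Name, "server", "client", 1). The github.com/google/uuid import is an assumption inferred from the uuid.New() call in pods.go.

package main

import (
	"fmt"
	"strings"

	"github.com/google/uuid" // assumed: matches the uuid.New() usage in pods.go
)

func main() {
	// pods.go tags each workload Pod with a "server" marker plus a short UUID suffix.
	serverName := fmt.Sprintf("antrea-scale-test-pod-server-%s", uuid.New().String()[:8])

	// CreateClientPod swaps the first "server" for "client", keeping the UUID
	// suffix so the probe Pod stays correlated with the Pod it was derived from.
	clientName := strings.Replace(serverName, "server", "client", 1)

	fmt.Println(serverName, "->", clientName)
	// e.g. antrea-scale-test-pod-server-1a2b3c4d -> antrea-scale-test-pod-client-1a2b3c4d
}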