Skip to content

Commit

Permalink
record response time
Browse files Browse the repository at this point in the history
Signed-off-by: Wenqi Qiu <[email protected]>
  • Loading branch information
wenqiq committed Jan 26, 2024
1 parent 5bad1ba commit b879466
Show file tree
Hide file tree
Showing 10 changed files with 130 additions and 100 deletions.
40 changes: 29 additions & 11 deletions test/performance/framework/case.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ import (
"antrea.io/antrea/test/performance/framework/table"
)

type RunFunc func(ctx context.Context, ch chan ResponseTime, data *ScaleData) error
type RunFunc func(ctx context.Context, ch chan time.Duration, data *ScaleData) error

var cases = make(map[string]RunFunc, 128)

Expand Down Expand Up @@ -64,26 +64,44 @@ type ResponseTime struct {

func (c *ScaleTestCase) Run(ctx context.Context, testData *ScaleData) error {
ctx = wrapScaleTestName(ctx, c.name)
done := make(chan interface{}, 1)

startTime := time.Now()
caseName := ctx.Value(CtxScaleCaseName).(string)
ch := make(chan ResponseTime, 1)
testData.maxCheckNum = testData.nodesNum * 2
ress := make(chan time.Duration, testData.maxCheckNum)
res := "failed"
defer func() {
var rows [][]string
select {
case respTime := <-ch:
rows = append(rows, table.GenerateRow(caseName, res, time.Since(startTime),
respTime.avg.String(), respTime.max.String(), respTime.min.String()))
table.ShowResult(os.Stdout, rows)
case <-time.After(testData.checkTimeout):
klog.InfoS("wait timeout", "check time duration", testData.checkTimeout)

var total, minRes, maxRes, avg time.Duration
count := 0
for i := 0; i < testData.maxCheckNum; i++ {
res := <-ress
total += res
count++

if count == 1 || res < minRes {
minRes = res
}

if res > maxRes {
maxRes = res
}
}

avg = total / time.Duration(count)

rows = append(rows, table.GenerateRow(caseName, res, time.Since(startTime).String(),
avg.String(), maxRes.String(), minRes.String()))
table.ShowResult(os.Stdout, rows)

close(ress)
close(done)
}()

done := make(chan interface{}, 1)
go func() {
done <- c.run(ctx, ch, testData)
done <- c.run(ctx, ress, testData)
}()

select {
Expand Down
73 changes: 32 additions & 41 deletions test/performance/framework/networkpolicy.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,58 +19,49 @@ import (
"fmt"
"time"

"k8s.io/klog/v2"

"antrea.io/antrea/test/performance/framework/networkpolicy"
"antrea.io/antrea/test/performance/utils"
)

func init() {
RegisterFunc("ScaleNetworkPolicy", ScaleNetworkPolicy)
}

func ScaleNetworkPolicy(ctx context.Context, ch chan ResponseTime, data *ScaleData) error {
nps, err := networkpolicy.ScaleUp(ctx, data.kubeconfig, data.kubernetesClientSet, data.namespaces,
data.Specification.NpNumPerNs, data.clientPods, data.Specification.IPv6)
func ScaleNetworkPolicy(ctx context.Context, ch chan time.Duration, data *ScaleData) error {
_, err := networkpolicy.ScaleUp(ctx, data.kubeconfig, data.kubernetesClientSet, data.namespaces,
data.Specification.NpNumPerNs, data.clientPods, data.Specification.IPv6, data.maxCheckNum, ch)
if err != nil {
return fmt.Errorf("scale up NetworkPolicies error: %v", err)
}

maxNPCheckedCount := data.nodesNum

start := time.Now()
for i, np := range nps {
if utils.CheckTimeout(start, data.checkTimeout) || i > maxNPCheckedCount {
klog.InfoS("NetworkPolicies check deadline exceeded", "count", i)
break
}

// Check connection of Pods in NetworkPolicies, workload Pods
fromPod, ip, err := networkpolicy.SelectConnectPod(ctx, data.kubernetesClientSet, np.Namespace, &nps[i])
if err != nil || fromPod == nil || ip == "" {
continue
}
if err := PingIP(ctx, data.kubeconfig, data.kubernetesClientSet, fromPod.Namespace, fromPod.Name, ip); err != nil {
return fmt.Errorf("the connection should be success, NetworkPolicyName: %s, FromPod: %s, ToPod: %s",
np.Name, fromPod.Name, ip)
}

// Check isolation of Pods in NetworkPolicies, client Pods to workload Pods
fromPod, ip, err = networkpolicy.SelectIsoPod(ctx, data.kubernetesClientSet, np.Namespace, np, data.clientPods)
if err != nil || fromPod == nil || ip == "" {
continue
}
if err := PingIP(ctx, data.kubeconfig, data.kubernetesClientSet, fromPod.Namespace, fromPod.Name, ip); err == nil {
return fmt.Errorf("the connection should not be success, NetworkPolicyName: %s, FromPod: %s, ToPodIP: %s", np.Name, fromPod.Name, ip)
}
klog.InfoS("Checked networkPolicy", "Name", np.Name, "Namespace", np.Namespace, "count", i, "maxNum", maxNPCheckedCount)
}
respTime := ResponseTime{
max: 1,
min: 5,
avg: 3,
}
ch <- respTime
// maxNPCheckedCount := data.nodesNum
//
// start := time.Now()
// for i, np := range nps {
// if utils.CheckTimeout(start, data.checkTimeout) || i > maxNPCheckedCount {
// klog.InfoS("NetworkPolicies check deadline exceeded", "count", i)
// break
// }
//
// // Check connection of Pods in NetworkPolicies, workload Pods
// fromPod, ip, err := networkpolicy.SelectConnectPod(ctx, data.kubernetesClientSet, np.Namespace, &nps[i])
// if err != nil || fromPod == nil || ip == "" {
// continue
// }
// if err := PingIP(ctx, ch, data.kubeconfig, data.kubernetesClientSet, fromPod.Namespace, fromPod.Name, ip); err != nil {
// return fmt.Errorf("the connection should be success, NetworkPolicyName: %s, FromPod: %s, ToPod: %s",
// np.Name, fromPod.Name, ip)
// }
//
// // Check isolation of Pods in NetworkPolicies, client Pods to workload Pods
// fromPod, ip, err = networkpolicy.SelectIsoPod(ctx, data.kubernetesClientSet, np.Namespace, np, data.clientPods)
// if err != nil || fromPod == nil || ip == "" {
// continue
// }
// if err := PingIP(ctx, ch, data.kubeconfig, data.kubernetesClientSet, fromPod.Namespace, fromPod.Name, ip); err == nil {
// return fmt.Errorf("the connection should not be success, NetworkPolicyName: %s, FromPod: %s, ToPodIP: %s", np.Name, fromPod.Name, ip)
// }
// klog.InfoS("Checked networkPolicy", "Name", np.Name, "Namespace", np.Namespace, "count", i, "maxNum", maxNPCheckedCount)
// }
if err := networkpolicy.ScaleDown(ctx, data.namespaces, data.kubernetesClientSet); err != nil {
return err
}
Expand Down
59 changes: 33 additions & 26 deletions test/performance/framework/networkpolicy/scale_up.go
Original file line number Diff line number Diff line change
Expand Up @@ -94,16 +94,21 @@ type NetworkPolicyInfo struct {
toIP string
}

func ScaleUp(ctx context.Context, kubeConfig *rest.Config, cs kubernetes.Interface, nss []string, numPerNs int, clientPods []corev1.Pod, ipv6 bool) (nps []NetworkPolicyInfo, err error) {
func ScaleUp(ctx context.Context, kubeConfig *rest.Config, cs kubernetes.Interface, nss []string, numPerNs int, clientPods []corev1.Pod, ipv6 bool, maxCheckNum int, ch chan time.Duration) (nps []NetworkPolicyInfo, err error) {
// ScaleUp networkPolicies
start := time.Now()
checkCount := 0
for _, ns := range nss {
npsData, err := generateNetworkPolicies(ns, numPerNs)
if err != nil {
return nil, fmt.Errorf("error when generating network policies: %w", err)
}
klog.InfoS("Scale up NetworkPolicies", "Num", len(npsData), "Namespace", ns)
for _, np := range npsData {
shouldCheck := true
if checkCount > maxCheckNum {
shouldCheck = false
}
if err := utils.DefaultRetry(func() error {
newNP, err := cs.NetworkingV1().NetworkPolicies(ns).Create(ctx, np, metav1.CreateOptions{})
if err != nil {
Expand All @@ -114,31 +119,33 @@ func ScaleUp(ctx context.Context, kubeConfig *rest.Config, cs kubernetes.Interfa
}
}
npInfo := NetworkPolicyInfo{Name: newNP.Name, Namespace: newNP.Namespace, Spec: newNP.Spec}

// // Check connection of Pods in NetworkPolicies, workload Pods
// fromPod, ip, err := SelectConnectPod(ctx, cs, newNP.Namespace, &npInfo)
// if err != nil {
// return err
// }
// npInfo.fromPodName = fromPod.Name
// npInfo.fromPodNamespace = fromPod.Namespace
// npInfo.toIP = ip
// go func() {
// if err := framework.PingIP(ctx, kubeConfig, cs, fromPod.Namespace, fromPod.Name, ip); err != nil {
// klog.ErrorS(err, "the connection should be success", "NetworkPolicyName", np.Name, "FromPodName", fromPod.Name, "ToIP", ip)
// }
// }()
//
// // Check isolation of Pods in NetworkPolicies, client Pods to workload Pods
// fromPod, ip, err = SelectIsoPod(ctx, cs, np.Namespace, npInfo, clientPods)
// if err != nil {
// return err
// }
// go func() {
// if err := framework.PingIP(ctx, kubeConfig, cs, fromPod.Namespace, fromPod.Name, ip); err == nil {
// klog.ErrorS(err, "the connection should not be success", "NetworkPolicyName", np.Name, "FromPodName", fromPod.Name, "ToIP", ip)
// }
// }()
if shouldCheck {
// Check connection of Pods in NetworkPolicies, workload Pods
fromPod, ip, err := SelectConnectPod(ctx, cs, newNP.Namespace, &npInfo)
if err != nil {
return err
}
npInfo.fromPodName = fromPod.Name
npInfo.fromPodNamespace = fromPod.Namespace
npInfo.toIP = ip
go func() {
if err := utils.WaitUntil(ctx, ch, kubeConfig, cs, fromPod.Namespace, fromPod.Name, ip, false); err != nil {
klog.ErrorS(err, "the connection should be success", "NetworkPolicyName", np.Name, "FromPodName", fromPod.Name, "ToIP", ip)
}
}()

// Check isolation of Pods in NetworkPolicies, client Pods to workload Pods
fromPod, ip, err = SelectIsoPod(ctx, cs, np.Namespace, npInfo, clientPods)
if err != nil {
return err
}
go func() {
if err := utils.WaitUntil(ctx, ch, kubeConfig, cs, fromPod.Namespace, fromPod.Name, ip, true); err != nil {
klog.ErrorS(err, "the connection should not be success", "NetworkPolicyName", np.Name, "FromPodName", fromPod.Name, "ToIP", ip)
}
}()
checkCount += 2
}

nps = append(nps, npInfo)
return nil
Expand Down
2 changes: 1 addition & 1 deletion test/performance/framework/pods.go
Original file line number Diff line number Diff line change
Expand Up @@ -98,7 +98,7 @@ func newWorkloadPod(podName, ns string, onRealNode bool, labelNum int) *corev1.P
return workloadPodTemplate(podName, ns, labels, onRealNode)
}

func ScaleUpWorkloadPods(ctx context.Context, ch chan ResponseTime, data *ScaleData) error {
func ScaleUpWorkloadPods(ctx context.Context, ch chan time.Duration, data *ScaleData) error {
if data.Specification.SkipDeployWorkload {
klog.V(2).InfoS("Skip creating workload Pods", "SkipDeployWorkload", data.Specification.SkipDeployWorkload)
return nil
Expand Down
7 changes: 4 additions & 3 deletions test/performance/framework/restart.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ package framework
//goland:noinspection ALL
import (
"context"
"time"

appv1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
Expand All @@ -33,7 +34,7 @@ func init() {
RegisterFunc("RestartOVSContainer", RestartOVSContainer)
}

func ScaleRestartAgent(ctx context.Context, ch chan ResponseTime, data *ScaleData) error {
func ScaleRestartAgent(ctx context.Context, ch chan time.Duration, data *ScaleData) error {
err := data.kubernetesClientSet.CoreV1().Pods(metav1.NamespaceSystem).
DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "app=antrea,component=antrea-agent"})
if err != nil {
Expand All @@ -56,7 +57,7 @@ func ScaleRestartAgent(ctx context.Context, ch chan ResponseTime, data *ScaleDat
}, ctx.Done())
}

func RestartController(ctx context.Context, ch chan ResponseTime, data *ScaleData) error {
func RestartController(ctx context.Context, ch chan time.Duration, data *ScaleData) error {
err := data.kubernetesClientSet.CoreV1().Pods(metav1.NamespaceSystem).
DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "app=antrea,component=antrea-controller"})
if err != nil {
Expand All @@ -75,6 +76,6 @@ func RestartController(ctx context.Context, ch chan ResponseTime, data *ScaleDat
}, ctx.Done())
}

func RestartOVSContainer(ctx context.Context, ch chan ResponseTime, data *ScaleData) error {
func RestartOVSContainer(ctx context.Context, ch chan time.Duration, data *ScaleData) error {
return ScaleRestartAgent(ctx, ch, data)
}
1 change: 1 addition & 0 deletions test/performance/framework/scale_up.go
Original file line number Diff line number Diff line change
Expand Up @@ -142,6 +142,7 @@ type ScaleData struct {
namespaces []string
Specification *config.ScaleList
nodesNum int
maxCheckNum int
simulateNodesNum int
podsNumPerNs int
checkTimeout time.Duration
Expand Down
6 changes: 3 additions & 3 deletions test/performance/framework/service.go
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ func init() {
RegisterFunc("ScaleServiceDemo", ScaleServiceDemo)
}

func ScaleService(ctx context.Context, ch chan ResponseTime, data *ScaleData) error {
func ScaleService(ctx context.Context, ch chan time.Duration, data *ScaleData) error {
svcs, err := service.ScaleUp(ctx, data.kubernetesClientSet, data.namespaces, data.Specification.SvcNumPerNs, data.Specification.IPv6)
if err != nil {
return fmt.Errorf("scale up services error: %v", err)
Expand All @@ -49,7 +49,7 @@ func ScaleService(ctx context.Context, ch chan ResponseTime, data *ScaleData) er
k := int(utils.GenRandInt()) % len(data.clientPods)
clientPod := data.clientPods[k]
svc := svcs[i]
if err := PingIP(ctx, data.kubeconfig, data.kubernetesClientSet, clientPod.Namespace, clientPod.Name, svc.IP); err != nil {
if err := utils.PingIP(ctx, data.kubeconfig, data.kubernetesClientSet, clientPod.Namespace, clientPod.Name, svc.IP); err != nil {
klog.ErrorS(err, "Check readiness of service error", "ClientPodName", clientPod.Name, "svc", svc)
return err
}
Expand All @@ -62,7 +62,7 @@ func ScaleService(ctx context.Context, ch chan ResponseTime, data *ScaleData) er
return nil
}

func ScaleServiceDemo(ctx context.Context, ch chan ResponseTime, data *ScaleData) error {
func ScaleServiceDemo(ctx context.Context, ch chan time.Duration, data *ScaleData) error {
list, err := data.kubernetesClientSet.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
if err != nil {
return err
Expand Down
8 changes: 3 additions & 5 deletions test/performance/framework/table/table.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,11 +15,9 @@
package table

import (
"github.com/olekukonko/tablewriter"
"io"
"strings"
"time"

"github.com/olekukonko/tablewriter"
)

const (
Expand All @@ -28,9 +26,9 @@ const (
CtxMinResponseTime = "MinResponseTime"
)

// GenerateRow assembles one result-table row from a test-case outcome.
// Spaces in the case name are replaced with dashes so the name renders as a
// single column token; the duration and the avg/max/min response times are
// passed through as pre-formatted strings (callers format with
// time.Duration.String()).
func GenerateRow(name, result string, duration, avgTime, maxTime, minTime string) []string {
	name = strings.ReplaceAll(name, " ", "-")
	return []string{name, result, duration, avgTime, maxTime, minTime}
}

func ShowResult(w io.Writer, rows [][]string) {
Expand Down
4 changes: 2 additions & 2 deletions test/performance/framework/table/table_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ import (
// TestTableName renders two representative rows (one with spaces in the case
// name, one without) through GenerateRow and ShowResult to verify the table
// output pipeline runs without error. Timing columns use the pre-formatted
// string form expected by GenerateRow; avg/max/min are left empty here.
func TestTableName(t *testing.T) {
	startTime := time.Now()
	var rows [][]string
	rows = append(rows, GenerateRow("BenchmarkInitXLargeScaleWithNetpolPerPod-2 123", "success", time.Since(startTime).String(), "", "", ""))
	rows = append(rows, GenerateRow("caseName1", "fail", time.Since(startTime).String(), "", "", ""))
	ShowResult(os.Stdout, rows)
}
Loading

0 comments on commit b879466

Please sign in to comment.