diff --git a/pkg/builder/builder.go b/pkg/builder/builder.go index 168b3a4..4356ea3 100644 --- a/pkg/builder/builder.go +++ b/pkg/builder/builder.go @@ -40,10 +40,12 @@ func NewBuilder(ctx *pulumi.Context, args BuilderComponentArgs, opts ...pulumi.R } // Create service account - sa, err := corev1.NewServiceAccount(ctx, fmt.Sprintf("%s-sa", args.Name), &corev1.ServiceAccountArgs{ + serviceAccountName := fmt.Sprintf("%s-sa", args.Name) + sa, err := corev1.NewServiceAccount(ctx, serviceAccountName, &corev1.ServiceAccountArgs{ Metadata: &metav1.ObjectMetaArgs{ - Name: pulumi.String(fmt.Sprintf("%s-sa", args.Name)), + Name: pulumi.String(serviceAccountName), Namespace: pulumi.String(args.Namespace), + Labels: utils.CreateResourceLabels(args.Name, serviceAccountName, args.Name, nil), }, }, pulumi.Parent(component)) if err != nil { @@ -52,15 +54,12 @@ func NewBuilder(ctx *pulumi.Context, args BuilderComponentArgs, opts ...pulumi.R component.ServiceAccount = sa // Create ConfigMap for environment variables + configMapName := fmt.Sprintf("%s-env", args.Name) configMap, err := utils.CreateConfigMap( ctx, - fmt.Sprintf("%s-configmap", args.Name), + configMapName, pulumi.String(args.Namespace), - pulumi.StringMap{ - "app": pulumi.String(args.Name), - "app.kubernetes.io/name": pulumi.String(args.Name), - "app.kubernetes.io/part-of": pulumi.String(args.Name), - }, + utils.CreateResourceLabels(args.Name, configMapName, args.Name, nil), args.BuilderEnv, ) if err != nil { @@ -68,23 +67,29 @@ func NewBuilder(ctx *pulumi.Context, args BuilderComponentArgs, opts ...pulumi.R } component.ConfigMap = configMap + // Create pod labels with app label for routing + podLabels := utils.CreateResourceLabels(args.Name, args.Name, args.Name, args.AppLabels.Labels) + podLabels["app"] = pulumi.String(args.Name) + // Create deployment - deployment, err := appsv1.NewDeployment(ctx, fmt.Sprintf("%s-deployment", args.Name), &appsv1.DeploymentArgs{ + deploymentName := fmt.Sprintf("%s-deployment", 
args.Name) + deployment, err := appsv1.NewDeployment(ctx, deploymentName, &appsv1.DeploymentArgs{ Metadata: &metav1.ObjectMetaArgs{ - Name: pulumi.String(fmt.Sprintf("%s-deployment", args.Name)), + Name: pulumi.String(deploymentName), Namespace: pulumi.String(args.Namespace), + Labels: utils.CreateResourceLabels(args.Name, deploymentName, args.Name, nil), }, Spec: &appsv1.DeploymentSpecArgs{ Replicas: pulumi.Int(DefaultReplicas), Selector: &metav1.LabelSelectorArgs{ - MatchLabels: args.AppLabels.Labels, + MatchLabels: podLabels, }, Template: &corev1.PodTemplateSpecArgs{ Metadata: &metav1.ObjectMetaArgs{ - Labels: args.AppLabels.Labels, + Labels: podLabels, }, Spec: &corev1.PodSpecArgs{ - ServiceAccountName: pulumi.String(fmt.Sprintf("%s-sa", args.Name)), + ServiceAccountName: pulumi.String(serviceAccountName), Containers: corev1.ContainerArray{ &corev1.ContainerArgs{ Name: pulumi.String(args.Name), @@ -144,10 +149,12 @@ func NewBuilder(ctx *pulumi.Context, args BuilderComponentArgs, opts ...pulumi.R component.Deployment = deployment // Create service - service, err := corev1.NewService(ctx, fmt.Sprintf("%s-service", args.Name), &corev1.ServiceArgs{ + serviceName := fmt.Sprintf("%s-service", args.Name) + service, err := corev1.NewService(ctx, serviceName, &corev1.ServiceArgs{ Metadata: &metav1.ObjectMetaArgs{ - Name: pulumi.String(fmt.Sprintf("%s-service", args.Name)), + Name: pulumi.String(serviceName), Namespace: pulumi.String(args.Namespace), + Labels: utils.CreateResourceLabels(args.Name, serviceName, args.Name, nil), Annotations: pulumi.StringMap{ "prometheus.io/scrape": pulumi.String("true"), "prometheus.io/port": pulumi.Sprintf("%d", MetricsPort), @@ -155,7 +162,7 @@ func NewBuilder(ctx *pulumi.Context, args BuilderComponentArgs, opts ...pulumi.R }, }, Spec: &corev1.ServiceSpecArgs{ - Selector: args.AppLabels.Labels, + Selector: podLabels, Ports: corev1.ServicePortArray{ &corev1.ServicePortArgs{ Port: parseBuilderPort(args.BuilderEnv.BuilderPort), @@ -176,17 
+183,19 @@ func NewBuilder(ctx *pulumi.Context, args BuilderComponentArgs, opts ...pulumi.R component.Service = service // Create pod monitor + podMonitorName := fmt.Sprintf("%s-pod-monitor", args.Name) _, err = crd.NewCustomResource(ctx, fmt.Sprintf("%s-svcmon", args.Name), &crd.CustomResourceArgs{ ApiVersion: pulumi.String("monitoring.coreos.com/v1"), Kind: pulumi.String("PodMonitor"), Metadata: &metav1.ObjectMetaArgs{ - Name: pulumi.String(fmt.Sprintf("%s-pod-monitor", args.Name)), + Name: pulumi.String(podMonitorName), Namespace: pulumi.String(args.Namespace), + Labels: utils.CreateResourceLabels(args.Name, podMonitorName, args.Name, nil), }, OtherFields: map[string]interface{}{ "spec": map[string]interface{}{ "selector": map[string]interface{}{ - "matchLabels": args.AppLabels.Labels, + "matchLabels": podLabels, }, "namespaceSelector": map[string]interface{}{ "any": true, diff --git a/pkg/builder/helpers.go b/pkg/builder/helpers.go index e647fb2..1af4e7e 100644 --- a/pkg/builder/helpers.go +++ b/pkg/builder/helpers.go @@ -1 +1,13 @@ package builder + +import ( + "github.com/init4tech/signet-infra-components/pkg/utils" + "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +) + +// NewAppLabels creates a new AppLabels instance with consistent Kubernetes labels +func NewAppLabels(app, name, partOf string, additionalLabels pulumi.StringMap) AppLabels { + return AppLabels{ + Labels: utils.CreateResourceLabels(app, name, partOf, additionalLabels), + } +} diff --git a/pkg/signet_node/helpers.go b/pkg/signet_node/helpers.go index b18f9d9..6fa193d 100644 --- a/pkg/signet_node/helpers.go +++ b/pkg/signet_node/helpers.go @@ -52,15 +52,6 @@ func CreatePersistentVolumeClaim( return pvc, nil } -// CreateResourceLabels creates a consistent set of Kubernetes labels for resources -func CreateResourceLabels(name string) pulumi.StringMap { - return pulumi.StringMap{ - "app": pulumi.String(name), - "app.kubernetes.io/name": pulumi.String(name), - "app.kubernetes.io/part-of": 
pulumi.String(name), - } -} - // ResourceRequirements returns consistent resource requirements for pods func NewResourceRequirements(cpuLimit, memoryLimit, cpuRequest, memoryRequest string) *corev1.ResourceRequirementsArgs { if cpuLimit == "" { diff --git a/pkg/signet_node/helpers_test.go b/pkg/signet_node/helpers_test.go index b4f2a5b..979ad7e 100644 --- a/pkg/signet_node/helpers_test.go +++ b/pkg/signet_node/helpers_test.go @@ -7,14 +7,7 @@ import ( "github.com/stretchr/testify/assert" ) -func TestCreateResourceLabels(t *testing.T) { - name := "test-resource" - labels := CreateResourceLabels(name) - - assert.Equal(t, pulumi.String(name), labels["app"]) - assert.Equal(t, pulumi.String(name), labels["app.kubernetes.io/name"]) - assert.Equal(t, pulumi.String(name), labels["app.kubernetes.io/part-of"]) -} +// TestCreateResourceLabels moved to pkg/utils/labels_test.go since the function was moved there func TestResourceRequirements(t *testing.T) { // Test with default values diff --git a/pkg/signet_node/signet_node.go b/pkg/signet_node/signet_node.go index 89ea5ae..a40a7aa 100644 --- a/pkg/signet_node/signet_node.go +++ b/pkg/signet_node/signet_node.go @@ -26,59 +26,39 @@ func NewSignetNode(ctx *pulumi.Context, args SignetNodeComponentArgs, opts ...pu storageSize := pulumi.String("150Gi") - _, err = corev1.NewPersistentVolumeClaim(ctx, "signet-node-db-data", &corev1.PersistentVolumeClaimArgs{ - Metadata: &metav1.ObjectMetaArgs{ - Name: pulumi.String("signet-node-data"), - Labels: pulumi.StringMap{ - "app.kubernetes.io/name": pulumi.String("signet-node-data"), - "app.kubernetes.io/part-of": pulumi.String("signet-node-data"), - }, - Namespace: args.Namespace, - }, - Spec: &corev1.PersistentVolumeClaimSpecArgs{ - AccessModes: pulumi.StringArray{pulumi.String("ReadWriteOnce")}, - Resources: &corev1.VolumeResourceRequirementsArgs{ - Requests: pulumi.StringMap{ - "storage": storageSize, - }, - }, - StorageClassName: pulumi.String("aws-gp3"), - }, - }, 
pulumi.Parent(component)) + _, err = CreatePersistentVolumeClaim( + ctx, + "signet-node-data", + args.Namespace, + storageSize, + "aws-gp3", + component, + ) if err != nil { return nil, fmt.Errorf("failed to create signet node db data pvc: %w", err) } - _, err = corev1.NewPersistentVolumeClaim(ctx, "rollup-data", &corev1.PersistentVolumeClaimArgs{ - Metadata: &metav1.ObjectMetaArgs{ - Name: pulumi.String("rollup-data"), - Labels: pulumi.StringMap{ - "app.kubernetes.io/name": pulumi.String("rollup-data"), - "app.kubernetes.io/part-of": pulumi.String("rollup-data"), - }, - Namespace: args.Namespace, - }, - Spec: &corev1.PersistentVolumeClaimSpecArgs{ - AccessModes: pulumi.StringArray{pulumi.String("ReadWriteOnce")}, - Resources: &corev1.VolumeResourceRequirementsArgs{ - Requests: pulumi.StringMap{ - "storage": storageSize, - }, - }, - StorageClassName: pulumi.String("aws-gp3"), - }, - }, pulumi.Parent(component)) + _, err = CreatePersistentVolumeClaim( + ctx, + "rollup-data", + args.Namespace, + storageSize, + "aws-gp3", + component, + ) if err != nil { return nil, fmt.Errorf("failed to create rollup data pvc: %w", err) } - secret, err := corev1.NewSecret(ctx, "execution-jwt", &corev1.SecretArgs{ + secretName := "execution-jwt" + secret, err := corev1.NewSecret(ctx, secretName, &corev1.SecretArgs{ StringData: pulumi.StringMap{ "jwt.hex": args.ExecutionJwt, }, Metadata: &metav1.ObjectMetaArgs{ - Name: pulumi.String("execution-jwt"), + Name: pulumi.String(secretName), Namespace: args.Namespace, + Labels: utils.CreateResourceLabels(args.Name, secretName, args.Name, nil), }, }) if err != nil { @@ -86,14 +66,12 @@ func NewSignetNode(ctx *pulumi.Context, args SignetNodeComponentArgs, opts ...pu } // Create ConfigMap for execution environment variables + executionConfigMapName := "exex-configmap" executionConfigMap, err := utils.CreateConfigMap( ctx, - "exex-configmap", + executionConfigMapName, args.Namespace, - pulumi.StringMap{ - "app.kubernetes.io/name": 
pulumi.String("exex-configmap"), - "app.kubernetes.io/part-of": pulumi.String("exex-configmap"), - }, + utils.CreateResourceLabels(args.Name, executionConfigMapName, args.Name, nil), args.Env, ) if err != nil { @@ -102,10 +80,10 @@ func NewSignetNode(ctx *pulumi.Context, args SignetNodeComponentArgs, opts ...pu component.SignetNodeConfigMap = executionConfigMap // SERVICE - executionClientName := "signet-node" + executionClientServiceName := fmt.Sprintf("%s-service", executionClientName) - hostExecutionClientService, err := corev1.NewService(ctx, fmt.Sprintf("%s-service", executionClientName), &corev1.ServiceArgs{ + hostExecutionClientService, err := corev1.NewService(ctx, executionClientServiceName, &corev1.ServiceArgs{ Spec: &corev1.ServiceSpecArgs{ Selector: pulumi.StringMap{"app": pulumi.String("signet-node-execution-set")}, Type: pulumi.String("ClusterIP"), @@ -145,12 +123,9 @@ func NewSignetNode(ctx *pulumi.Context, args SignetNodeComponentArgs, opts ...pu }, }, Metadata: &metav1.ObjectMetaArgs{ - Name: pulumi.Sprintf("%s-service", executionClientName), + Name: pulumi.String(executionClientServiceName), Namespace: args.Namespace, - Labels: pulumi.StringMap{ - "app.kubernetes.io/name": pulumi.Sprintf("%s-service", executionClientName), - "app.kubernetes.io/part-of": pulumi.Sprintf("%s", executionClientName), - }, + Labels: utils.CreateResourceLabels(args.Name, executionClientServiceName, args.Name, nil), }, }, pulumi.Parent(component)) if err != nil { @@ -160,40 +135,36 @@ func NewSignetNode(ctx *pulumi.Context, args SignetNodeComponentArgs, opts ...pu executionServiceIpString := hostExecutionClientService.Spec.ClusterIP().Elem() // STATEFUL SET - hostStatefulSetName := "signet-node-execution-set" + hostStatefulSetResourceName := fmt.Sprintf("%s-set", hostStatefulSetName) + + // Create pod labels with app label for stateful set + executionPodLabels := utils.CreateResourceLabels(args.Name, hostStatefulSetName, args.Name, nil) + executionPodLabels["app"] = 
pulumi.String(hostStatefulSetName) // Define the StatefulSet for the 'reth' container with a configmap volume and a data persistent volume - _, err = appsv1.NewStatefulSet(ctx, fmt.Sprintf("%s-set", hostStatefulSetName), &appsv1.StatefulSetArgs{ + _, err = appsv1.NewStatefulSet(ctx, hostStatefulSetResourceName, &appsv1.StatefulSetArgs{ Metadata: &metav1.ObjectMetaArgs{ - Name: pulumi.Sprintf("%s", hostStatefulSetName), - Labels: pulumi.StringMap{ - "app": pulumi.Sprintf("%s-set", hostStatefulSetName), - "app.kubernetes.io/name": pulumi.Sprintf("%s-set", hostStatefulSetName), - "app.kubernetes.io/part-of": pulumi.Sprintf("%s", hostStatefulSetName), - }, + Name: pulumi.String(hostStatefulSetName), + Labels: utils.CreateResourceLabels(args.Name, hostStatefulSetResourceName, args.Name, nil), Namespace: args.Namespace, }, Spec: &appsv1.StatefulSetSpecArgs{ Replicas: pulumi.Int(1), Selector: &metav1.LabelSelectorArgs{ MatchLabels: pulumi.StringMap{ - "app": pulumi.Sprintf("%s", hostStatefulSetName), + "app": pulumi.String(hostStatefulSetName), }, }, Template: &corev1.PodTemplateSpecArgs{ Metadata: &metav1.ObjectMetaArgs{ - Labels: pulumi.StringMap{ - "app": pulumi.Sprintf("%s", hostStatefulSetName), - "app.kubernetes.io/name": pulumi.Sprintf("%s", hostStatefulSetName), - "app.kubernetes.io/part-of": pulumi.Sprintf("%s", hostStatefulSetName), - }, + Labels: executionPodLabels, Namespace: args.Namespace, }, Spec: &corev1.PodSpecArgs{ Containers: corev1.ContainerArray{ corev1.ContainerArgs{ - Name: pulumi.Sprintf("%s", hostStatefulSetName), + Name: pulumi.String(hostStatefulSetName), Image: args.ExecutionClientImage, ImagePullPolicy: pulumi.String("Always"), Command: pulumi.StringArray{ @@ -274,16 +245,7 @@ func NewSignetNode(ctx *pulumi.Context, args SignetNodeComponentArgs, opts ...pu MountPath: pulumi.String("/etc/reth/execution-jwt"), }, }, - Resources: &corev1.ResourceRequirementsArgs{ - Limits: pulumi.StringMap{ - "cpu": pulumi.String("2"), - "memory": 
pulumi.String("16Gi"), - }, - Requests: pulumi.StringMap{ - "cpu": pulumi.String("2"), - "memory": pulumi.String("4Gi"), - }, - }, + Resources: NewResourceRequirements("2", "16Gi", "2", "4Gi"), }, }, Volumes: corev1.VolumeArray{ @@ -317,25 +279,14 @@ func NewSignetNode(ctx *pulumi.Context, args SignetNodeComponentArgs, opts ...pu // LIGHTHOUSE consensusClientName := "lighthouse" - _, err = corev1.NewPersistentVolumeClaim(ctx, fmt.Sprintf("%s-data", "real-lighthouse-db"), &corev1.PersistentVolumeClaimArgs{ - Metadata: &metav1.ObjectMetaArgs{ - Name: pulumi.String("real-lighthouse-data"), - Labels: pulumi.StringMap{ - "app.kubernetes.io/name": pulumi.String("real-lighthouse-data"), - "app.kubernetes.io/part-of": pulumi.Sprintf("%s", "real-lighthouse-db"), - }, - Namespace: args.Namespace, - }, - Spec: &corev1.PersistentVolumeClaimSpecArgs{ - AccessModes: pulumi.StringArray{pulumi.String("ReadWriteOnce")}, - Resources: &corev1.VolumeResourceRequirementsArgs{ - Requests: pulumi.StringMap{ - "storage": storageSize, - }, - }, - StorageClassName: pulumi.String("aws-gp3"), - }, - }, pulumi.Parent(component)) + _, err = CreatePersistentVolumeClaim( + ctx, + "real-lighthouse-data", + args.Namespace, + storageSize, + "aws-gp3", + component, + ) if err != nil { return nil, fmt.Errorf("failed to create lighthouse data pvc: %w", err) } @@ -345,14 +296,12 @@ func NewSignetNode(ctx *pulumi.Context, args SignetNodeComponentArgs, opts ...pu "EXAMPLE": pulumi.String("example"), } + consensusConfigMapName := "consensus-configmap-env-config" consensusConfigMap, err := utils.CreateConfigMap( ctx, - "consensus-configmap-env-config", + consensusConfigMapName, args.Namespace, - pulumi.StringMap{ - "app.kubernetes.io/name": pulumi.String("consensus-configmap-env-config"), - "app.kubernetes.io/part-of": pulumi.String("consensus-configmap"), - }, + utils.CreateResourceLabels(args.Name, consensusConfigMapName, args.Name, nil), consensusEnv, ) if err != nil { @@ -360,9 +309,10 @@ func 
NewSignetNode(ctx *pulumi.Context, args SignetNodeComponentArgs, opts ...pu } component.LighthouseConfigMap = consensusConfigMap - lighthouseInternalService, err := corev1.NewService(ctx, fmt.Sprintf("%s-service", consensusClientName), &corev1.ServiceArgs{ + lighthouseServiceName := fmt.Sprintf("%s-service", consensusClientName) + lighthouseInternalService, err := corev1.NewService(ctx, lighthouseServiceName, &corev1.ServiceArgs{ Spec: &corev1.ServiceSpecArgs{ - Selector: pulumi.StringMap{"app": pulumi.Sprintf("%s", consensusClientName)}, + Selector: pulumi.StringMap{"app": pulumi.String(consensusClientName)}, Type: pulumi.String("ClusterIP"), Ports: corev1.ServicePortArray{ corev1.ServicePortArgs{ @@ -399,12 +349,9 @@ func NewSignetNode(ctx *pulumi.Context, args SignetNodeComponentArgs, opts ...pu }, }, Metadata: &metav1.ObjectMetaArgs{ - Name: pulumi.Sprintf("%s-service", consensusClientName), + Name: pulumi.String(lighthouseServiceName), Namespace: args.Namespace, - Labels: pulumi.StringMap{ - "app.kubernetes.io/name": pulumi.Sprintf("%s-service", consensusClientName), - "app.kubernetes.io/part-of": pulumi.Sprintf("%s", consensusClientName), - }, + Labels: utils.CreateResourceLabels(args.Name, lighthouseServiceName, args.Name, nil), }, }, pulumi.Parent(component)) if err != nil { @@ -414,36 +361,34 @@ func NewSignetNode(ctx *pulumi.Context, args SignetNodeComponentArgs, opts ...pu lighthouseServiceIpString := lighthouseInternalService.Spec.ClusterIP().Elem() lighthouseStatefulSet := "lighthouse" + lighthouseStatefulSetResourceName := fmt.Sprintf("%s-set", lighthouseStatefulSet) - _, err = appsv1.NewStatefulSet(ctx, fmt.Sprintf("%s-set", lighthouseStatefulSet), &appsv1.StatefulSetArgs{ + // Create pod labels with app label for stateful set + lighthousePodLabels := utils.CreateResourceLabels(args.Name, lighthouseStatefulSet, args.Name, nil) + lighthousePodLabels["app"] = pulumi.String(lighthouseStatefulSet) + + _, err = appsv1.NewStatefulSet(ctx, 
lighthouseStatefulSetResourceName, &appsv1.StatefulSetArgs{ Metadata: &metav1.ObjectMetaArgs{ - Name: pulumi.Sprintf("%s", lighthouseStatefulSet), - Labels: pulumi.StringMap{ - "app.kubernetes.io/name": pulumi.Sprintf("%s-set", lighthouseStatefulSet), - "app.kubernetes.io/part-of": pulumi.String("lighthouse"), - }, + Name: pulumi.String(lighthouseStatefulSet), + Labels: utils.CreateResourceLabels(args.Name, lighthouseStatefulSetResourceName, args.Name, nil), Namespace: args.Namespace, }, Spec: &appsv1.StatefulSetSpecArgs{ Replicas: pulumi.Int(1), Selector: &metav1.LabelSelectorArgs{ MatchLabels: pulumi.StringMap{ - "app": pulumi.Sprintf("%s", lighthouseStatefulSet), + "app": pulumi.String(lighthouseStatefulSet), }, }, Template: &corev1.PodTemplateSpecArgs{ Metadata: &metav1.ObjectMetaArgs{ Namespace: args.Namespace, - Labels: pulumi.StringMap{ - "app": pulumi.Sprintf("%s", lighthouseStatefulSet), - "app.kubernetes.io/name": pulumi.Sprintf("%s", lighthouseStatefulSet), - "app.kubernetes.io/part-of": pulumi.String("lighthouse"), - }, + Labels: lighthousePodLabels, }, Spec: &corev1.PodSpecArgs{ Containers: corev1.ContainerArray{ corev1.ContainerArgs{ - Name: pulumi.Sprintf("%s", lighthouseStatefulSet), + Name: pulumi.String(lighthouseStatefulSet), Image: args.ConsensusClientImage, Command: pulumi.StringArray{ pulumi.String("lighthouse"), @@ -512,16 +457,7 @@ func NewSignetNode(ctx *pulumi.Context, args SignetNodeComponentArgs, opts ...pu MountPath: pulumi.String("/secrets"), }, }, - Resources: &corev1.ResourceRequirementsArgs{ - Limits: pulumi.StringMap{ - "cpu": pulumi.String("2"), - "memory": pulumi.String("16Gi"), - }, - Requests: pulumi.StringMap{ - "cpu": pulumi.String("2"), - "memory": pulumi.String("4Gi"), - }, - }, + Resources: NewResourceRequirements("2", "16Gi", "2", "4Gi"), }, }, DnsPolicy: pulumi.String("ClusterFirst"), @@ -557,12 +493,14 @@ func NewSignetNode(ctx *pulumi.Context, args SignetNodeComponentArgs, opts ...pu // This enables the service mesh to 
route traffic from rpc.havarti.signet.sh // to the signet-rpc service in the cluster // VirtualService spec definition: https://istio.io/latest/docs/reference/config/networking/virtual-service/ + virtualServiceName := "signet-rpc" _, err = crd.NewCustomResource(ctx, "signet-rpc-vservice", &crd.CustomResourceArgs{ ApiVersion: pulumi.String("networking.istio.io/v1alpha3"), Kind: pulumi.String("VirtualService"), Metadata: &metav1.ObjectMetaArgs{ - Name: pulumi.String("signet-rpc"), + Name: pulumi.String(virtualServiceName), Namespace: args.Namespace, + Labels: utils.CreateResourceLabels(args.Name, virtualServiceName, args.Name, nil), }, OtherFields: map[string]interface{}{ "spec": map[string]interface{}{ diff --git a/pkg/utils/labels.go b/pkg/utils/labels.go new file mode 100644 index 0000000..a6c9c20 --- /dev/null +++ b/pkg/utils/labels.go @@ -0,0 +1,22 @@ +package utils + +import ( + "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +) + +// CreateResourceLabels returns the standard Kubernetes labels (app.kubernetes.io/name from name, app.kubernetes.io/part-of from partOf) merged with any additionalLabels; NOTE(review): the app parameter is currently unused — confirm whether it should set an "app" label or be removed. +func CreateResourceLabels(app, name, partOf string, additionalLabels pulumi.StringMap) pulumi.StringMap { + labels := pulumi.StringMap{ + "app.kubernetes.io/name": pulumi.String(name), + "app.kubernetes.io/part-of": pulumi.String(partOf), + } + + // Merge additional labels if provided + if additionalLabels != nil { + for k, v := range additionalLabels { + labels[k] = v + } + } + + return labels +} diff --git a/pkg/utils/labels_test.go b/pkg/utils/labels_test.go new file mode 100644 index 0000000..a747e6a --- /dev/null +++ b/pkg/utils/labels_test.go @@ -0,0 +1,53 @@ +package utils + +import ( + "testing" + + "github.com/pulumi/pulumi/sdk/v3/go/pulumi" + "github.com/stretchr/testify/assert" +) + +func TestCreateResourceLabels(t *testing.T) { + // Test with standard labels + appName := "test-app" + resourceName := "test-resource" + partOf := "test-system" + + labels := CreateResourceLabels(appName, resourceName, partOf, nil) + + // The "app" label is no
longer included in standard labels per requirements + assert.Equal(t, pulumi.String(resourceName), labels["app.kubernetes.io/name"]) + assert.Equal(t, pulumi.String(partOf), labels["app.kubernetes.io/part-of"]) + + // Test with additional labels + additionalLabels := pulumi.StringMap{ + "app": pulumi.String(appName), // Now app should be explicitly provided as an additional label + "environment": pulumi.String("production"), + "tier": pulumi.String("backend"), + "custom-label": pulumi.String("custom-value"), + } + + mergedLabels := CreateResourceLabels(appName, resourceName, partOf, additionalLabels) + + // Verify standard labels + assert.Equal(t, pulumi.String(appName), mergedLabels["app"]) // Now comes from additional labels + assert.Equal(t, pulumi.String(resourceName), mergedLabels["app.kubernetes.io/name"]) + assert.Equal(t, pulumi.String(partOf), mergedLabels["app.kubernetes.io/part-of"]) + + // Verify additional labels were merged + assert.Equal(t, pulumi.String("production"), mergedLabels["environment"]) + assert.Equal(t, pulumi.String("backend"), mergedLabels["tier"]) + assert.Equal(t, pulumi.String("custom-value"), mergedLabels["custom-label"]) + + // Test override behavior + overrideLabels := pulumi.StringMap{ + "app": pulumi.String("override-app"), + } + + overriddenLabels := CreateResourceLabels(appName, resourceName, partOf, overrideLabels) + + // Verify the override behavior + assert.Equal(t, pulumi.String("override-app"), overriddenLabels["app"]) // Comes from overrideLabels + assert.Equal(t, pulumi.String(resourceName), overriddenLabels["app.kubernetes.io/name"]) + assert.Equal(t, pulumi.String(partOf), overriddenLabels["app.kubernetes.io/part-of"]) +}