From 7de33ff1276fa5de2c3ab00a81ebe84672ceb8fd Mon Sep 17 00:00:00 2001
From: Matthias Bertschy
Date: Mon, 11 Aug 2025 12:26:51 +0200
Subject: [PATCH] allow specifying application profiles in labels

Signed-off-by: Matthias Bertschy
---
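Note (below the scissors line, not part of the commit message): this change
lets a workload opt out of profile learning and pin a pre-created
ApplicationProfile in its own namespace via a pod label; the label key is the
helpersv1.UserDefinedProfileMetadataKey constant from k8s-interface, which the
dependency bump below presumably ships. A minimal, standalone sketch of the
lookup semantics follows; the literal label key used here is a guess, the
authoritative value is the constant:

	package main

	import "fmt"

	// Hypothetical literal; the real key is the
	// helpersv1.UserDefinedProfileMetadataKey constant in kubescape/k8s-interface.
	const userDefinedProfileKey = "kubescape.io/user-defined-profile"

	func main() {
		podLabels := map[string]string{
			"app":                 "nginx",
			userDefinedProfileKey: "nginx-profile",
		}
		// Same check as SetContainerInfo below: the label must be present
		// and non-empty for the container to use a user-defined profile.
		if profile, ok := podLabels[userDefinedProfileKey]; ok && profile != "" {
			fmt.Printf("container pinned to user-defined profile %q\n", profile)
		}
	}
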
return fmt.Errorf("failed to get shared data for container %s: %w", containerID, err) } + // Check if the container should use a user-defined profile + if sharedData.UserDefinedProfile != "" { + logger.L().Debug("ignoring container with a user-defined profile", + helpers.String("containerID", containerID), + helpers.String("containerName", container.Runtime.ContainerName), + helpers.String("podName", container.K8s.PodName), + helpers.String("namespace", container.K8s.Namespace), + helpers.String("userDefinedProfile", sharedData.UserDefinedProfile)) + cpm.removeContainerEntry(containerID) + return nil + } + // Ignore ephemeral containers if sharedData.ContainerType == objectcache.EphemeralContainer { logger.L().Debug("ignoring ephemeral container", diff --git a/pkg/objectcache/applicationprofilecache/applicationprofilecache.go b/pkg/objectcache/applicationprofilecache/applicationprofilecache.go index 05c6ce508..f9059185a 100644 --- a/pkg/objectcache/applicationprofilecache/applicationprofilecache.go +++ b/pkg/objectcache/applicationprofilecache/applicationprofilecache.go @@ -31,6 +31,7 @@ type ContainerInfo struct { InstanceTemplateHash string Namespace string Name string + UserDefinedProfile string } // ContainerCallStackIndex maintains call stack search trees for a container @@ -153,7 +154,7 @@ func (apc *ApplicationProfileCacheImpl) updateAllProfiles(ctx context.Context) { // Get the workload ID from profile workloadID := apc.wlidKey(profile.Annotations[helpersv1.WlidMetadataKey], profile.Labels[helpersv1.TemplateHashKey]) if workloadID == "" { - continue + continue // this is the case for user-defined profiles } // Update profile state regardless of whether we'll update the full profile @@ -435,6 +436,11 @@ func (apc *ApplicationProfileCacheImpl) addContainer(container *containercollect return nil } + // Create workload ID to state mapping + if _, exists := apc.workloadIDToProfileState.Load(workloadID); !exists { + apc.workloadIDToProfileState.Set(workloadID, nil) + } + // Create container info containerInfo := &ContainerInfo{ ContainerID: containerID, @@ -444,14 +450,41 @@ func (apc *ApplicationProfileCacheImpl) addContainer(container *containercollect Name: container.Runtime.ContainerName, } + // Check for user-defined profile + if userDefinedProfile, ok := container.K8s.PodLabels[helpersv1.UserDefinedProfileMetadataKey]; ok { + if userDefinedProfile != "" { + // Set the user-defined profile in container info + containerInfo.UserDefinedProfile = userDefinedProfile + // Fetch the profile from storage + // TODO should we cache user-defined profiles separately? 
- it could allow deduplication + fullProfile, err := apc.storageClient.ApplicationProfiles(container.K8s.Namespace).Get(ctx, userDefinedProfile, metav1.GetOptions{}) + if err != nil { + logger.L().Error("failed to get user-defined profile", + helpers.String("containerID", containerID), + helpers.String("workloadID", workloadID), + helpers.String("namespace", container.K8s.Namespace), + helpers.String("profileName", userDefinedProfile), + helpers.Error(err)) + // Update the profile state to indicate an error + profileState := &objectcache.ProfileState{ + Error: err, + } + apc.workloadIDToProfileState.Set(workloadID, profileState) + return nil + } + // Update the profile in the cache + apc.workloadIDToProfile.Set(workloadID, fullProfile) + logger.L().Debug("added user-defined profile to cache", + helpers.String("containerID", containerID), + helpers.String("workloadID", workloadID), + helpers.String("namespace", container.K8s.Namespace), + helpers.String("profileName", userDefinedProfile)) + } + } + // Add to container info map apc.containerIDToInfo.Set(containerID, containerInfo) - // Create workload ID to state mapping - if _, exists := apc.workloadIDToProfileState.Load(workloadID); !exists { - apc.workloadIDToProfileState.Set(workloadID, nil) - } - logger.L().Debug("container added to cache", helpers.String("containerID", containerID), helpers.String("workloadID", workloadID), diff --git a/pkg/objectcache/shared_container_data.go b/pkg/objectcache/shared_container_data.go index 64509543b..f8cb7de5e 100644 --- a/pkg/objectcache/shared_container_data.go +++ b/pkg/objectcache/shared_container_data.go @@ -77,6 +77,7 @@ type WatchedContainerData struct { SeriesID string PreviousReportTimestamp time.Time CurrentReportTimestamp time.Time + UserDefinedProfile string } type ContainerInfo struct { @@ -136,6 +137,17 @@ func (watchedContainer *WatchedContainerData) SetCompletionStatus(newStatus Watc } func (watchedContainer *WatchedContainerData) SetContainerInfo(wl workloadinterface.IWorkload, containerName string) error { + labels := wl.GetPodLabels() + // check for user defined profile + if userDefinedProfile, ok := labels[helpersv1.UserDefinedProfileMetadataKey]; ok { + if userDefinedProfile != "" { + logger.L().Info("container has a user defined profile", + helpers.String("profile", userDefinedProfile), + helpers.String("container", containerName), + helpers.String("workload", wl.GetName())) + watchedContainer.UserDefinedProfile = userDefinedProfile + } + } podSpec, err := wl.GetPodSpec() if err != nil { return fmt.Errorf("failed to get pod spec: %w", err)
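
Aside (not part of the patch): on a failed Get, addContainer above records the
error in the profile-state map rather than leaving the entry nil, so a reader
of the cache can tell "profile not loaded yet" (nil state) from "profile
unavailable" (state with Error set). A standalone sketch of that pattern, with
a plain map and a simplified stand-in for objectcache.ProfileState:

	package main

	import (
		"errors"
		"fmt"
	)

	// Simplified stand-in for objectcache.ProfileState from the diff;
	// the real type may carry more fields.
	type ProfileState struct {
		Error error
	}

	func main() {
		states := map[string]*ProfileState{
			"wlid-a": nil,                                         // entry created, profile not loaded yet
			"wlid-b": {Error: errors.New("profile fetch failed")}, // user-defined profile Get failed
			"wlid-c": {},                                          // profile loaded successfully
		}
		for wlid, state := range states {
			switch {
			case state == nil:
				fmt.Println(wlid, "-> no profile yet")
			case state.Error != nil:
				fmt.Println(wlid, "-> profile unavailable:", state.Error)
			default:
				fmt.Println(wlid, "-> profile ready")
			}
		}
	}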