diff --git a/.github/workflows/bypass.yaml b/.github/workflows/bypass.yaml index 1676281cb..288e16b2c 100644 --- a/.github/workflows/bypass.yaml +++ b/.github/workflows/bypass.yaml @@ -26,7 +26,7 @@ jobs: CGO_ENABLED: 0 GO111MODULE: "on" BUILD_PLATFORM: linux/amd64,linux/arm64 - GO_VERSION: "1.24" + GO_VERSION: "1.25" REQUIRED_TESTS: '' FORCE: true COSIGN: true diff --git a/.github/workflows/component-tests.yaml b/.github/workflows/component-tests.yaml index d009e9a13..db540dad9 100644 --- a/.github/workflows/component-tests.yaml +++ b/.github/workflows/component-tests.yaml @@ -104,7 +104,7 @@ jobs: CGO_ENABLED: 0 uses: actions/setup-go@v4 with: - go-version: "1.24" + go-version: "1.25" - name: Run test run: | cd tests && go test -v ./... -run ${{ matrix.test }} --timeout=20m --tags=component diff --git a/.github/workflows/pr-created.yaml b/.github/workflows/pr-created.yaml index 3616b1d08..645016f9f 100644 --- a/.github/workflows/pr-created.yaml +++ b/.github/workflows/pr-created.yaml @@ -15,6 +15,6 @@ jobs: pr-created: uses: kubescape/workflows/.github/workflows/incluster-comp-pr-created.yaml@main with: - GO_VERSION: "1.24" + GO_VERSION: "1.25" CGO_ENABLED: 0 secrets: inherit diff --git a/.github/workflows/pr-merged.yaml b/.github/workflows/pr-merged.yaml index ee6362489..37f427a8d 100644 --- a/.github/workflows/pr-merged.yaml +++ b/.github/workflows/pr-merged.yaml @@ -35,7 +35,7 @@ jobs: CGO_ENABLED: 0 GO111MODULE: "on" BUILD_PLATFORM: linux/amd64,linux/arm64 - GO_VERSION: "1.24" + GO_VERSION: "1.25" REQUIRED_TESTS: '[ "relevantCVEs", "relevancy_enabled_stop_sniffing", diff --git a/build/Dockerfile b/build/Dockerfile index 1d84a688b..3893f2ed5 100644 --- a/build/Dockerfile +++ b/build/Dockerfile @@ -1,4 +1,4 @@ -FROM --platform=$BUILDPLATFORM golang:1.24-bookworm AS builder +FROM --platform=$BUILDPLATFORM golang:1.25-bookworm AS builder ENV GO111MODULE=on CGO_ENABLED=0 WORKDIR /work diff --git a/cmd/main.go b/cmd/main.go index ff128fc93..81d5c03c8 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -13,7 +13,10 @@ import ( apitypes "github.com/armosec/armoapi-go/armotypes" utilsmetadata "github.com/armosec/utils-k8s-go/armometadata" "github.com/cilium/ebpf/rlimit" + mapset "github.com/deckarep/golang-set/v2" + "github.com/goradd/maps" "github.com/grafana/pyroscope-go" + igconfig "github.com/inspektor-gadget/inspektor-gadget/pkg/config" containercollection "github.com/inspektor-gadget/inspektor-gadget/pkg/container-collection" beUtils "github.com/kubescape/backend/pkg/utils" @@ -24,6 +27,7 @@ import ( "github.com/kubescape/node-agent/pkg/config" "github.com/kubescape/node-agent/pkg/containerprofilemanager" containerprofilemanagerv1 "github.com/kubescape/node-agent/pkg/containerprofilemanager/v1" + "github.com/kubescape/node-agent/pkg/containerwatcher" containerwatcherv2 "github.com/kubescape/node-agent/pkg/containerwatcher/v2" "github.com/kubescape/node-agent/pkg/dnsmanager" "github.com/kubescape/node-agent/pkg/exporters" @@ -48,8 +52,11 @@ import ( rulebinding "github.com/kubescape/node-agent/pkg/rulebindingmanager" rulebindingcachev1 "github.com/kubescape/node-agent/pkg/rulebindingmanager/cache" "github.com/kubescape/node-agent/pkg/rulemanager" - rulemanagerv1 "github.com/kubescape/node-agent/pkg/rulemanager/v1" - "github.com/kubescape/node-agent/pkg/rulemanager/v1/rulecooldown" + "github.com/kubescape/node-agent/pkg/rulemanager/cel" + "github.com/kubescape/node-agent/pkg/rulemanager/ruleadapters" + "github.com/kubescape/node-agent/pkg/rulemanager/rulecooldown" + 
"github.com/kubescape/node-agent/pkg/rulemanager/rulecreator" + "github.com/kubescape/node-agent/pkg/rulemanager/ruleswatcher" "github.com/kubescape/node-agent/pkg/sbommanager" sbommanagerv1 "github.com/kubescape/node-agent/pkg/sbommanager/v1" "github.com/kubescape/node-agent/pkg/seccompmanager" @@ -167,7 +174,6 @@ func main() { // Create watchers dWatcher := dynamicwatcher.NewWatchHandler(k8sClient, storageClient.StorageClient, cfg.SkipNamespace) - // create k8sObject cache k8sObjectCache, err := k8scache.NewK8sObjectCache(cfg.NodeName, k8sClient) if err != nil { logger.L().Ctx(ctx).Fatal("error creating K8sObjectCache", helpers.Error(err)) @@ -189,7 +195,12 @@ func main() { var ruleBindingCache *rulebindingcachev1.RBCache if cfg.EnableRuntimeDetection { - ruleBindingCache = rulebindingcachev1.NewCache(cfg.NodeName, k8sClient) + ruleCreator := rulecreator.NewRuleCreator() + ruleBindingCache = rulebindingcachev1.NewCache(cfg, k8sClient, ruleCreator) + rulesWatcher := ruleswatcher.NewRulesWatcher(k8sClient, ruleCreator, func() { + ruleBindingCache.RefreshRuleBindingsRules() + }) + dWatcher.AddAdaptor(rulesWatcher) } // Create and DNS managers @@ -268,8 +279,15 @@ func main() { ruleCooldown := rulecooldown.NewRuleCooldown(cfg.RuleCoolDown) + adapterFactory := ruleadapters.NewEventRuleAdapterFactory() + + celEvaluator, err := cel.NewCEL(objCache, cfg) + if err != nil { + logger.L().Ctx(ctx).Fatal("error creating CEL evaluator", helpers.Error(err)) + } + // create runtimeDetection managers - ruleManager, err = rulemanagerv1.CreateRuleManager(ctx, cfg, k8sClient, ruleBindingCache, objCache, exporter, prometheusExporter, cfg.NodeName, clusterData.ClusterName, processTreeManager, dnsResolver, nil, ruleCooldown) + ruleManager, err = rulemanager.CreateRuleManager(ctx, cfg, k8sClient, ruleBindingCache, objCache, exporter, prometheusExporter, processTreeManager, dnsResolver, nil, ruleCooldown, adapterFactory, celEvaluator) if err != nil { logger.L().Ctx(ctx).Fatal("error creating RuleManager", helpers.Error(err)) } @@ -339,11 +357,16 @@ func main() { sbomManager = sbommanager.CreateSbomManagerMock() } + thirdPartyTracers := containerwatcher.ThirdPartyTracers{ + ThirdPartyTracersInitializers: mapset.NewSet[containerwatcher.CustomTracerInitializer](), + ThirdPartyEventReceivers: maps.NewSafeMap[utils.EventType, mapset.Set[containerwatcher.GenericEventReceiver]](), + } + // Create the container handler mainHandler, err := containerwatcherv2.CreateIGContainerWatcher(cfg, containerProfileManager, k8sClient, igK8sClient, dnsManagerClient, prometheusExporter, ruleManager, - malwareManager, sbomManager, &ruleBindingNotify, igK8sClient.RuntimeConfig, nil, nil, - processTreeManager, clusterData.ClusterName, objCache, networkStreamClient, containerProcessTree) + malwareManager, sbomManager, &ruleBindingNotify, igK8sClient.RuntimeConfig, nil, + processTreeManager, clusterData.ClusterName, objCache, networkStreamClient, containerProcessTree, thirdPartyTracers) if err != nil { logger.L().Ctx(ctx).Fatal("error creating the container watcher", helpers.Error(err)) } diff --git a/go.mod b/go.mod index 3a8971d11..a5da31825 100644 --- a/go.mod +++ b/go.mod @@ -1,11 +1,10 @@ module github.com/kubescape/node-agent -go 1.24.0 - -toolchain go1.24.3 +go 1.25.0 require ( github.com/DmitriyVTitov/size v1.5.0 + github.com/Masterminds/semver/v3 v3.3.1 github.com/anchore/syft v1.18.1 github.com/aquilax/truncate v1.0.0 github.com/armosec/armoapi-go v0.0.605 @@ -23,6 +22,7 @@ require ( github.com/dutchcoders/go-clamd 
v0.0.0-20170520113014-b970184f4d9e github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb github.com/go-openapi/strfmt v0.23.0 + github.com/google/cel-go v0.23.2 github.com/google/go-containerregistry v0.20.3 github.com/google/uuid v1.6.0 github.com/goradd/maps v1.0.0 @@ -41,6 +41,7 @@ require ( github.com/opencontainers/go-digest v1.0.0 github.com/opencontainers/image-spec v1.1.1 github.com/panjf2000/ants/v2 v2.11.3 + github.com/picatz/xcel v0.0.0-20250816143731-885b5f678a12 github.com/prometheus/alertmanager v0.27.0 github.com/prometheus/client_golang v1.22.0 github.com/prometheus/procfs v0.16.1 @@ -66,6 +67,7 @@ require ( ) require ( + cel.dev/expr v0.20.0 // indirect dario.cat/mergo v1.0.1 // indirect git.sr.ht/~sbinet/gg v0.5.0 // indirect github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect @@ -76,7 +78,6 @@ require ( github.com/DataDog/zstd v1.5.5 // indirect github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver v1.5.0 // indirect - github.com/Masterminds/semver/v3 v3.3.1 // indirect github.com/Masterminds/sprig/v3 v3.3.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/Microsoft/hcsshim v0.12.9 // indirect @@ -97,6 +98,7 @@ require ( github.com/anchore/packageurl-go v0.1.1-0.20241018175412-5c22e6360c4f // indirect github.com/anchore/stereoscope v0.0.11 // indirect github.com/andybalholm/brotli v1.1.1 // indirect + github.com/antlr4-go/antlr/v4 v4.13.0 // indirect github.com/aquasecurity/go-pep440-version v0.0.0-20210121094942-22b2f8951d46 // indirect github.com/aquasecurity/go-version v0.0.0-20210121072130-637058cfe492 // indirect github.com/armosec/gojay v1.2.17 // indirect @@ -283,6 +285,7 @@ require ( github.com/spf13/cast v1.7.1 // indirect github.com/spf13/cobra v1.9.1 // indirect github.com/spf13/pflag v1.0.6 // indirect + github.com/stoewer/go-strcase v1.3.0 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/stripe/stripe-go/v74 v74.30.0 // indirect github.com/subosito/gotenv v1.6.0 // indirect @@ -375,3 +378,5 @@ replace github.com/vishvananda/netns => github.com/inspektor-gadget/netns v0.0.5 replace github.com/mholt/archiver/v3 v3.5.1 => github.com/anchore/archiver/v3 v3.5.2 replace github.com/inspektor-gadget/inspektor-gadget => github.com/amirmalka/inspektor-gadget v0.40.1-0.20250814111737-3a58864c8d86 + +replace github.com/picatz/xcel => github.com/matthyx/xcel v0.0.0-20250820140400-f8fabef4e2af diff --git a/go.sum b/go.sum index 89317c5c6..d47889e96 100644 --- a/go.sum +++ b/go.sum @@ -1,3 +1,5 @@ +cel.dev/expr v0.20.0 h1:OunBvVCfvpWlt4dN7zg3FM6TDkzOePe1+foGJ9AXeeI= +cel.dev/expr v0.20.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= @@ -144,6 +146,8 @@ github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYU github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI= +github.com/antlr4-go/antlr/v4 v4.13.0/go.mod 
h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g= github.com/aquasecurity/go-pep440-version v0.0.0-20210121094942-22b2f8951d46 h1:vmXNl+HDfqqXgr0uY1UgK1GAhps8nbAAtqHNBcgyf+4= github.com/aquasecurity/go-pep440-version v0.0.0-20210121094942-22b2f8951d46/go.mod h1:olhPNdiiAAMiSujemd1O/sc6GcyePr23f/6uGKtthNg= github.com/aquasecurity/go-version v0.0.0-20210121072130-637058cfe492 h1:rcEG5HI490FF0a7zuvxOxen52ddygCfNVjP0XOCMl+M= @@ -515,6 +519,8 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/cel-go v0.23.2 h1:UdEe3CvQh3Nv+E/j9r1Y//WO0K0cSyD7/y0bzyLIMI4= +github.com/google/cel-go v0.23.2/go.mod h1:52Pb6QsDbC5kvgxvZhiL9QX1oZEkcUF/ZqaPx1J5Wwo= github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -732,6 +738,8 @@ github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= +github.com/matthyx/xcel v0.0.0-20250820140400-f8fabef4e2af h1:rnyCjEsGq/kiMPFwuvj6o/09wgNXqUAAcft9g/2nJXw= +github.com/matthyx/xcel v0.0.0-20250820140400-f8fabef4e2af/go.mod h1:C07puiFpDU4BthRjPfwHYimZm9P072JH6qWAaqPVyQY= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= @@ -1024,6 +1032,8 @@ github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM= github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4= github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4= +github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= +github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= diff --git a/pkg/config/config.go b/pkg/config/config.go index 72873b472..38d3c4695 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -6,10 +6,10 @@ import ( "strings" "time" - "github.com/kubescape/node-agent/pkg/containerwatcher" "github.com/kubescape/node-agent/pkg/exporters" processtreecreator "github.com/kubescape/node-agent/pkg/processtree/config" - "github.com/kubescape/node-agent/pkg/rulemanager/v1/rulecooldown" + "github.com/kubescape/node-agent/pkg/rulemanager/cel/libraries/cache" + "github.com/kubescape/node-agent/pkg/rulemanager/rulecooldown" "github.com/spf13/viper" ) @@ -18,68 +18,70 @@ const PodNameEnvVar = "POD_NAME" const NamespaceEnvVar = 
"NAMESPACE_NAME" type Config struct { - Exporters exporters.ExportersConfig `mapstructure:"exporters"` - InitialDelay time.Duration `mapstructure:"initialDelay"` - MaxSniffingTime time.Duration `mapstructure:"maxSniffingTimePerContainer"` - UpdateDataPeriod time.Duration `mapstructure:"updateDataPeriod"` - MaxDelaySeconds int `mapstructure:"maxDelaySeconds"` - MaxJitterPercentage int `mapstructure:"maxJitterPercentage"` - MaxImageSize int64 `mapstructure:"maxImageSize"` - MaxSBOMSize int `mapstructure:"maxSBOMSize"` - MaxTsProfileSize int64 `mapstructure:"maxTsProfileSize"` - EnableFullPathTracing bool `mapstructure:"fullPathTracingEnabled"` - EnableApplicationProfile bool `mapstructure:"applicationProfileServiceEnabled"` - EnableMalwareDetection bool `mapstructure:"malwareDetectionEnabled"` - EnablePrometheusExporter bool `mapstructure:"prometheusExporterEnabled"` - EnableRuntimeDetection bool `mapstructure:"runtimeDetectionEnabled"` - EnableHttpDetection bool `mapstructure:"httpDetectionEnabled"` - EnableNetworkTracing bool `mapstructure:"networkServiceEnabled"` - EnableNetworkStreaming bool `mapstructure:"networkStreamingEnabled"` - EnableNodeProfile bool `mapstructure:"nodeProfileServiceEnabled"` - EnableHostMalwareSensor bool `mapstructure:"hostMalwareSensorEnabled"` - EnableHostNetworkSensor bool `mapstructure:"hostNetworkSensorEnabled"` - NodeProfileInterval time.Duration `mapstructure:"nodeProfileInterval"` - EnableSeccomp bool `mapstructure:"seccompServiceEnabled"` - ExcludeLabels map[string]string `mapstructure:"excludeLabels"` - ExcludeNamespaces []string `mapstructure:"excludeNamespaces"` - IncludeNamespaces []string `mapstructure:"includeNamespaces"` - EnableSbomGeneration bool `mapstructure:"sbomGenerationEnabled"` - EnableEmbeddedSboms bool `mapstructure:"enableEmbeddedSBOMs"` - NamespaceName string `mapstructure:"namespaceName"` - NodeName string `mapstructure:"nodeName"` - PodName string `mapstructure:"podName"` - KubernetesMode bool `mapstructure:"kubernetesMode"` - NetworkStreamingInterval time.Duration `mapstructure:"networkStreamingInterval"` - WorkerPoolSize int `mapstructure:"workerPoolSize"` - WorkerChannelSize int `mapstructure:"workerChannelSize"` - BlockEvents bool `mapstructure:"blockEvents"` - EventBatchSize int `mapstructure:"eventBatchSize"` - TestMode bool `mapstructure:"testMode"` - ExcludeJsonPaths []string `mapstructure:"excludeJsonPaths"` - ProfilesCacheRefreshRate time.Duration `mapstructure:"profilesCacheRefreshRate"` - RuleCoolDown rulecooldown.RuleCooldownConfig `mapstructure:"ruleCooldown"` - EnablePartialProfileGeneration bool `mapstructure:"partialProfileGenerationEnabled"` - ProcfsScanInterval time.Duration `mapstructure:"procfsScanInterval"` - ProcfsPidScanInterval time.Duration `mapstructure:"procfsPidScanInterval"` - OrderedEventQueue containerwatcher.OrderedEventQueueConfig `mapstructure:"orderedEventQueue"` - ExitCleanup processtreecreator.ExitCleanupConfig `mapstructure:"exitCleanup"` - DNSCacheSize int `mapstructure:"dnsCacheSize"` - DCapSys bool `mapstructure:"dCapSys"` - DDns bool `mapstructure:"dDns"` - DExec bool `mapstructure:"dExec"` - DExit bool `mapstructure:"dExit"` - DFork bool `mapstructure:"dFork"` - DHardlink bool `mapstructure:"dHardlink"` - DHttp bool `mapstructure:"dHttp"` - DIouring bool `mapstructure:"dIouring"` - DNetwork bool `mapstructure:"dNetwork"` - DOpen bool `mapstructure:"dOpen"` - DPtrace bool `mapstructure:"dPtrace"` - DRandomx bool `mapstructure:"dRandomx"` - DSeccomp bool `mapstructure:"dSeccomp"` - DSsh bool 
`mapstructure:"dSsh"` - DSymlink bool `mapstructure:"dSymlink"` - DTop bool `mapstructure:"dTop"` + Exporters exporters.ExportersConfig `mapstructure:"exporters"` + InitialDelay time.Duration `mapstructure:"initialDelay"` + MaxSniffingTime time.Duration `mapstructure:"maxSniffingTimePerContainer"` + UpdateDataPeriod time.Duration `mapstructure:"updateDataPeriod"` + MaxDelaySeconds int `mapstructure:"maxDelaySeconds"` + MaxJitterPercentage int `mapstructure:"maxJitterPercentage"` + MaxImageSize int64 `mapstructure:"maxImageSize"` + MaxSBOMSize int `mapstructure:"maxSBOMSize"` + MaxTsProfileSize int64 `mapstructure:"maxTsProfileSize"` + EnableFullPathTracing bool `mapstructure:"fullPathTracingEnabled"` + EnableApplicationProfile bool `mapstructure:"applicationProfileServiceEnabled"` + EnableMalwareDetection bool `mapstructure:"malwareDetectionEnabled"` + EnablePrometheusExporter bool `mapstructure:"prometheusExporterEnabled"` + EnableRuntimeDetection bool `mapstructure:"runtimeDetectionEnabled"` + EnableHttpDetection bool `mapstructure:"httpDetectionEnabled"` + EnableNetworkTracing bool `mapstructure:"networkServiceEnabled"` + EnableNetworkStreaming bool `mapstructure:"networkStreamingEnabled"` + EnableNodeProfile bool `mapstructure:"nodeProfileServiceEnabled"` + EnableHostMalwareSensor bool `mapstructure:"hostMalwareSensorEnabled"` + EnableHostNetworkSensor bool `mapstructure:"hostNetworkSensorEnabled"` + NodeProfileInterval time.Duration `mapstructure:"nodeProfileInterval"` + EnableSeccomp bool `mapstructure:"seccompServiceEnabled"` + ExcludeLabels map[string]string `mapstructure:"excludeLabels"` + ExcludeNamespaces []string `mapstructure:"excludeNamespaces"` + IncludeNamespaces []string `mapstructure:"includeNamespaces"` + EnableSbomGeneration bool `mapstructure:"sbomGenerationEnabled"` + EnableEmbeddedSboms bool `mapstructure:"enableEmbeddedSBOMs"` + NamespaceName string `mapstructure:"namespaceName"` + NodeName string `mapstructure:"nodeName"` + PodName string `mapstructure:"podName"` + KubernetesMode bool `mapstructure:"kubernetesMode"` + NetworkStreamingInterval time.Duration `mapstructure:"networkStreamingInterval"` + WorkerPoolSize int `mapstructure:"workerPoolSize"` + WorkerChannelSize int `mapstructure:"workerChannelSize"` + BlockEvents bool `mapstructure:"blockEvents"` + EventBatchSize int `mapstructure:"eventBatchSize"` + TestMode bool `mapstructure:"testMode"` + ExcludeJsonPaths []string `mapstructure:"excludeJsonPaths"` + ProfilesCacheRefreshRate time.Duration `mapstructure:"profilesCacheRefreshRate"` + RuleCoolDown rulecooldown.RuleCooldownConfig `mapstructure:"ruleCooldown"` + EnablePartialProfileGeneration bool `mapstructure:"partialProfileGenerationEnabled"` + ProcfsScanInterval time.Duration `mapstructure:"procfsScanInterval"` + ProcfsPidScanInterval time.Duration `mapstructure:"procfsPidScanInterval"` + OrderedEventQueue OrderedEventQueueConfig `mapstructure:"orderedEventQueue"` + ExitCleanup processtreecreator.ExitCleanupConfig `mapstructure:"exitCleanup"` + CelConfigCache cache.FunctionCacheConfig `mapstructure:"celConfigCache"` + IgnoreRuleBindings bool `mapstructure:"ignoreRuleBindings"` + DNSCacheSize int `mapstructure:"dnsCacheSize"` + DCapSys bool `mapstructure:"dCapSys"` + DDns bool `mapstructure:"dDns"` + DExec bool `mapstructure:"dExec"` + DExit bool `mapstructure:"dExit"` + DFork bool `mapstructure:"dFork"` + DHardlink bool `mapstructure:"dHardlink"` + DHttp bool `mapstructure:"dHttp"` + DIouring bool `mapstructure:"dIouring"` + DNetwork bool 
`mapstructure:"dNetwork"` + DOpen bool `mapstructure:"dOpen"` + DPtrace bool `mapstructure:"dPtrace"` + DRandomx bool `mapstructure:"dRandomx"` + DSeccomp bool `mapstructure:"dSeccomp"` + DSsh bool `mapstructure:"dSsh"` + DSymlink bool `mapstructure:"dSymlink"` + DTop bool `mapstructure:"dTop"` } // LoadConfig reads configuration from file or environment variables. @@ -112,7 +114,7 @@ func LoadConfig(path string) (Config, error) { viper.SetDefault("profilesCacheRefreshRate", 1*time.Minute) viper.SetDefault("ruleCooldown::ruleCooldownDuration", 1*time.Hour) viper.SetDefault("ruleCooldown::ruleCooldownAfterCount", 1) - viper.SetDefault("ruleCooldown::ruleCooldownOnProfileFailure", true) + viper.SetDefault("ruleCooldown::ruleCooldownOnProfileFailure", true) // NOTE: this is deprecated. viper.SetDefault("ruleCooldown::ruleCooldownMaxSize", 10000) viper.SetDefault("partialProfileGenerationEnabled", true) viper.SetDefault("procfsScanInterval", 30*time.Second) @@ -124,6 +126,10 @@ func LoadConfig(path string) (Config, error) { viper.SetDefault("exitCleanup::cleanupDelay", 5*time.Minute) viper.SetDefault("workerChannelSize", 750000) viper.SetDefault("blockEvents", false) + viper.SetDefault("celConfigCache::maxSize", 100000) + viper.SetDefault("celConfigCache::ttl", 1*time.Minute) + viper.SetDefault("ignoreRuleBindings", false) + viper.SetDefault("dnsCacheSize", 50000) viper.AutomaticEnv() @@ -177,3 +183,8 @@ func (c *Config) SkipNamespace(ns string) bool { } return false } + +type OrderedEventQueueConfig struct { + Size int `mapstructure:"size"` + CollectionDelay time.Duration `mapstructure:"collectionDelay"` +} diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go index d26db4e1b..b3b7ee930 100644 --- a/pkg/config/config_test.go +++ b/pkg/config/config_test.go @@ -5,10 +5,10 @@ import ( "testing" "time" - "github.com/kubescape/node-agent/pkg/containerwatcher" "github.com/kubescape/node-agent/pkg/exporters" processtreecreator "github.com/kubescape/node-agent/pkg/processtree/config" - "github.com/kubescape/node-agent/pkg/rulemanager/v1/rulecooldown" + "github.com/kubescape/node-agent/pkg/rulemanager/cel/libraries/cache" + "github.com/kubescape/node-agent/pkg/rulemanager/rulecooldown" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -76,7 +76,7 @@ func TestLoadConfig(t *testing.T) { OnProfileFailure: true, MaxSize: 10000, }, - OrderedEventQueue: containerwatcher.OrderedEventQueueConfig{ + OrderedEventQueue: OrderedEventQueueConfig{ Size: 100000, CollectionDelay: 50 * time.Millisecond, }, @@ -85,6 +85,10 @@ func TestLoadConfig(t *testing.T) { CleanupInterval: 30 * time.Second, CleanupDelay: 5 * time.Minute, }, + CelConfigCache: cache.FunctionCacheConfig{ + MaxSize: 100000, + TTL: 1 * time.Minute, + }, DNSCacheSize: 50000, }, wantErr: false, diff --git a/pkg/containerprofilemanager/v1/event_reporting.go b/pkg/containerprofilemanager/v1/event_reporting.go index c3f691947..33d137eed 100644 --- a/pkg/containerprofilemanager/v1/event_reporting.go +++ b/pkg/containerprofilemanager/v1/event_reporting.go @@ -17,7 +17,6 @@ import ( tracerhardlinktype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/hardlink/types" tracerhttptype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/http/types" tracersymlinktype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/symlink/types" - "github.com/kubescape/node-agent/pkg/ruleengine/v1" "github.com/kubescape/node-agent/pkg/utils" "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" 
"github.com/kubescape/storage/pkg/registry/file/dynamicpathdetector" @@ -78,7 +77,7 @@ func (cpm *ContainerProfileManager) ReportFileOpen(containerID string, event eve path = procRegex.ReplaceAllString(path, "/proc/"+dynamicpathdetector.DynamicIdentifier) } - isSensitive := utils.IsSensitivePath(path, ruleengine.SensitiveFiles) + isSensitive := utils.IsSensitivePath(path, []string{}) if cpm.enricher != nil && isSensitive { openIdentifier := utils.CalculateSHA256FileOpenHash(path) go cpm.enricher.EnrichEvent(containerID, &event, openIdentifier) diff --git a/pkg/containerwatcher/container_watcher_interface.go b/pkg/containerwatcher/container_watcher_interface.go index d35402e2d..701751faf 100644 --- a/pkg/containerwatcher/container_watcher_interface.go +++ b/pkg/containerwatcher/container_watcher_interface.go @@ -2,8 +2,9 @@ package containerwatcher import ( "context" - "time" + mapset "github.com/deckarep/golang-set/v2" + "github.com/goradd/maps" containercollection "github.com/inspektor-gadget/inspektor-gadget/pkg/container-collection" "github.com/inspektor-gadget/inspektor-gadget/pkg/socketenricher" tracercollection "github.com/inspektor-gadget/inspektor-gadget/pkg/tracer-collection" @@ -21,16 +22,20 @@ type ContainerWatcher interface { GetContainerCollection() *containercollection.ContainerCollection GetSocketEnricher() *socketenricher.SocketEnricher GetContainerSelector() *containercollection.ContainerSelector - RegisterCustomTracer(tracer CustomTracer) error - UnregisterCustomTracer(tracer CustomTracer) error RegisterContainerReceiver(receiver ContainerReceiver) UnregisterContainerReceiver(receiver ContainerReceiver) } -type CustomTracer interface { - Start() error - Stop() error - Name() string +type CustomTracerInitializer interface { + NewTracer(containerCollection *containercollection.ContainerCollection, + tracerCollection *tracercollection.TracerCollection, + containerSelector containercollection.ContainerSelector, + eventCallback ResultCallback, + thirdPartyEnricher TaskBasedEnricher, + ) (TracerInterface, error) +} + +type GenericEventReceiver interface { // TODO: either EventReceiver or EnrichedEventReceiver } type EventReceiver interface { @@ -49,7 +54,7 @@ type TaskBasedEnricher interface { SubmitEnrichmentTask(event utils.EnrichEvent, syscalls []uint64, callback ResultCallback, containerID string, processID uint32) } -type OrderedEventQueueConfig struct { - Size int `mapstructure:"size"` - CollectionDelay time.Duration `mapstructure:"collectionDelay"` +type ThirdPartyTracers struct { + ThirdPartyTracersInitializers mapset.Set[CustomTracerInitializer] + ThirdPartyEventReceivers *maps.SafeMap[utils.EventType, mapset.Set[GenericEventReceiver]] // TODO: either EventReceiver or EnrichedEventReceiver } diff --git a/pkg/containerwatcher/container_watcher_mock.go b/pkg/containerwatcher/container_watcher_mock.go index 66989e116..8e8a5c675 100644 --- a/pkg/containerwatcher/container_watcher_mock.go +++ b/pkg/containerwatcher/container_watcher_mock.go @@ -20,11 +20,11 @@ func (c ContainerWatcherMock) Start(_ context.Context) error { func (c ContainerWatcherMock) Stop() {} -func (c ContainerWatcherMock) RegisterCustomTracer(_ CustomTracer) error { +func (c ContainerWatcherMock) RegisterCustomTracer(_ TracerInterface) error { return nil } -func (c ContainerWatcherMock) UnregisterCustomTracer(_ CustomTracer) error { +func (c ContainerWatcherMock) UnregisterCustomTracer(_ TracerInterface) error { return nil } @@ -63,5 +63,3 @@ func (c CustomTracerMock) Stop() error { func (c 
CustomTracerMock) Name() string { return "" } - -var _ CustomTracer = (*CustomTracerMock)(nil) diff --git a/pkg/containerwatcher/tracer_factory_interface.go b/pkg/containerwatcher/tracer_factory_interface.go index bc89f8f7d..e23ab9b2b 100644 --- a/pkg/containerwatcher/tracer_factory_interface.go +++ b/pkg/containerwatcher/tracer_factory_interface.go @@ -1,12 +1,9 @@ package containerwatcher -// TracerRegistrer defines the interface for registering tracers type TracerRegistrer interface { RegisterTracer(tracer TracerInterface) } -// TracerFactoryInterface defines the interface for creating tracers type TracerFactoryInterface interface { CreateAllTracers(manager TracerRegistrer) - GetThirdPartyTracers() []CustomTracer } diff --git a/pkg/containerwatcher/tracer_interface.go b/pkg/containerwatcher/tracer_interface.go index a250c189b..47a698fd2 100644 --- a/pkg/containerwatcher/tracer_interface.go +++ b/pkg/containerwatcher/tracer_interface.go @@ -3,6 +3,7 @@ package containerwatcher import ( "context" + "github.com/kubescape/node-agent/pkg/config" "github.com/kubescape/node-agent/pkg/utils" ) @@ -21,5 +22,5 @@ type TracerInterface interface { GetEventType() utils.EventType // IsEnabled checks if this tracer should be enabled based on configuration - IsEnabled(cfg interface{}) bool + IsEnabled(cfg config.Config) bool } diff --git a/pkg/containerwatcher/v2/container_watcher.go b/pkg/containerwatcher/v2/container_watcher.go index c541405c5..ddefcc4ab 100644 --- a/pkg/containerwatcher/v2/container_watcher.go +++ b/pkg/containerwatcher/v2/container_watcher.go @@ -7,7 +7,6 @@ import ( "time" mapset "github.com/deckarep/golang-set/v2" - "github.com/goradd/maps" containercollection "github.com/inspektor-gadget/inspektor-gadget/pkg/container-collection" containerutilsTypes "github.com/inspektor-gadget/inspektor-gadget/pkg/container-utils/types" "github.com/inspektor-gadget/inspektor-gadget/pkg/socketenricher" @@ -31,7 +30,6 @@ import ( "github.com/kubescape/node-agent/pkg/rulebindingmanager" "github.com/kubescape/node-agent/pkg/rulemanager" "github.com/kubescape/node-agent/pkg/sbommanager" - "github.com/kubescape/node-agent/pkg/utils" "github.com/kubescape/workerpool" "github.com/panjf2000/ants/v2" ) @@ -76,9 +74,9 @@ type ContainerWatcher struct { workerChan chan *events.EnrichedEvent // Channel for worker pool invocation // Third party components - thirdPartyTracers mapset.Set[containerwatcher.CustomTracer] - thirdPartyContainerReceivers mapset.Set[containerwatcher.ContainerReceiver] - thirdPartyEnricher containerwatcher.TaskBasedEnricher + thirdPartyTracersInitializers mapset.Set[containerwatcher.CustomTracerInitializer] + thirdPartyEnricher containerwatcher.TaskBasedEnricher + thirdPartyContainerReceivers mapset.Set[containerwatcher.ContainerReceiver] // Cache and state objectCache objectcache.ObjectCache @@ -111,13 +109,13 @@ func CreateContainerWatcher( sbomManager sbommanager.SbomManagerClient, ruleBindingPodNotify *chan rulebindingmanager.RuleBindingNotify, runtime *containerutilsTypes.RuntimeConfig, - thirdPartyEventReceivers *maps.SafeMap[utils.EventType, mapset.Set[containerwatcher.EventReceiver]], thirdPartyEnricher containerwatcher.TaskBasedEnricher, processTreeManager processtree.ProcessTreeManager, clusterName string, objectCache objectcache.ObjectCache, networkStreamClient networkstream.NetworkStreamClient, containerProcessTree containerprocesstree.ContainerProcessTree, + thirdPartyTracers containerwatcher.ThirdPartyTracers, ) (*ContainerWatcher, error) { // Create container collection 
@@ -144,7 +142,7 @@ func CreateContainerWatcher( malwareManager, networkStreamClient, metrics, - thirdPartyEventReceivers, + thirdPartyTracers.ThirdPartyEventReceivers, thirdPartyEnricher, rulePolicyReporter, ) @@ -192,9 +190,9 @@ func CreateContainerWatcher( workerChan: make(chan *events.EnrichedEvent, cfg.WorkerChannelSize), // Third party components - thirdPartyTracers: mapset.NewSet[containerwatcher.CustomTracer](), - thirdPartyContainerReceivers: mapset.NewSet[containerwatcher.ContainerReceiver](), - thirdPartyEnricher: thirdPartyEnricher, + thirdPartyTracersInitializers: thirdPartyTracers.ThirdPartyTracersInitializers, + thirdPartyEnricher: thirdPartyEnricher, + thirdPartyContainerReceivers: mapset.NewSet[containerwatcher.ContainerReceiver](), // Cache and state objectCache: objectCache, @@ -223,13 +221,13 @@ func CreateIGContainerWatcher( sbomManager sbommanager.SbomManagerClient, ruleBindingPodNotify *chan rulebindingmanager.RuleBindingNotify, runtime *containerutilsTypes.RuntimeConfig, - thirdPartyEventReceivers *maps.SafeMap[utils.EventType, mapset.Set[containerwatcher.EventReceiver]], thirdPartyEnricher containerwatcher.TaskBasedEnricher, processTreeManager processtree.ProcessTreeManager, clusterName string, objectCache objectcache.ObjectCache, networkStreamClient networkstream.NetworkStreamClient, containerProcessTree containerprocesstree.ContainerProcessTree, + thirdPartyTracers containerwatcher.ThirdPartyTracers, ) (containerwatcher.ContainerWatcher, error) { return CreateContainerWatcher( @@ -244,13 +242,13 @@ func CreateIGContainerWatcher( sbomManager, ruleBindingPodNotify, runtime, - thirdPartyEventReceivers, thirdPartyEnricher, processTreeManager, clusterName, objectCache, networkStreamClient, containerProcessTree, + thirdPartyTracers, ) } @@ -291,7 +289,6 @@ func (cw *ContainerWatcher) Start(ctx context.Context) error { }) // Start ordered event queue BEFORE tracers - // No need to start queue anymore - it's just a data structure // Start event processing loop go cw.eventProcessingLoop() @@ -308,7 +305,7 @@ func (cw *ContainerWatcher) Start(ctx context.Context) error { cw.socketEnricher, cw.containerProfileManager, cw.ruleManager, - cw.thirdPartyTracers, + cw.thirdPartyTracersInitializers, cw.thirdPartyEnricher, cw.cfg, cw.processTreeManager, @@ -345,8 +342,6 @@ func (cw *ContainerWatcher) Stop() { cw.tracerManagerV2.StopAllTracers() } - // No need to stop queue - it's just a data structure - // Close worker channel to signal worker goroutine to stop if cw.workerChan != nil { close(cw.workerChan) @@ -388,19 +383,6 @@ func (cw *ContainerWatcher) GetContainerSelector() *containercollection.Containe return &cw.containerSelector } -// RegisterCustomTracer registers a custom tracer -func (cw *ContainerWatcher) RegisterCustomTracer(tracer containerwatcher.CustomTracer) error { - cw.thirdPartyTracers.Add(tracer) - return nil -} - -// UnregisterCustomTracer unregisters a custom tracer -func (cw *ContainerWatcher) UnregisterCustomTracer(tracer containerwatcher.CustomTracer) error { - cw.thirdPartyTracers.Remove(tracer) - return nil -} - -// RegisterContainerReceiver registers a container receiver func (cw *ContainerWatcher) RegisterContainerReceiver(receiver containerwatcher.ContainerReceiver) { cw.thirdPartyContainerReceivers.Add(receiver) } diff --git a/pkg/containerwatcher/v2/event_handler_factory.go b/pkg/containerwatcher/v2/event_handler_factory.go index 1e45457aa..fcb0a448e 100644 --- a/pkg/containerwatcher/v2/event_handler_factory.go +++ 
b/pkg/containerwatcher/v2/event_handler_factory.go @@ -18,7 +18,6 @@ import ( tracersymlinktype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/symlink/types" "github.com/kubescape/node-agent/pkg/eventreporters/rulepolicy" "github.com/kubescape/node-agent/pkg/malwaremanager" - "github.com/kubescape/node-agent/pkg/metricsmanager" "github.com/kubescape/node-agent/pkg/networkstream" "github.com/kubescape/node-agent/pkg/rulemanager" @@ -27,7 +26,9 @@ import ( // Manager represents a component that can receive events type Manager interface { - ReportEvent(eventType utils.EventType, event utils.K8sEvent) + // TODO: Find a better way to handle this + // containerwatcher.EventReceiver + // containerwatcher.EnrichedEventReceiver } // ManagerAdapter adapts different manager interfaces to the common Manager interface @@ -50,7 +51,7 @@ func (ma *ManagerAdapter) ReportEvent(eventType utils.EventType, event utils.K8s // EventHandlerFactory manages the mapping of event types to their managers type EventHandlerFactory struct { handlers map[utils.EventType][]Manager - thirdPartyEventReceivers *maps.SafeMap[utils.EventType, mapset.Set[containerwatcher.EventReceiver]] + thirdPartyEventReceivers *maps.SafeMap[utils.EventType, mapset.Set[containerwatcher.GenericEventReceiver]] thirdPartyEnricher containerwatcher.TaskBasedEnricher cfg config.Config containerCollection *containercollection.ContainerCollection @@ -67,7 +68,7 @@ func NewEventHandlerFactory( malwareManager malwaremanager.MalwareManagerClient, networkStreamClient networkstream.NetworkStreamClient, metrics metricsmanager.MetricsManager, - thirdPartyEventReceivers *maps.SafeMap[utils.EventType, mapset.Set[containerwatcher.EventReceiver]], + thirdPartyEventReceivers *maps.SafeMap[utils.EventType, mapset.Set[containerwatcher.GenericEventReceiver]], thirdPartyEnricher containerwatcher.TaskBasedEnricher, rulePolicyReporter *rulepolicy.RulePolicyReporter, ) *EventHandlerFactory { @@ -119,6 +120,7 @@ func NewEventHandlerFactory( rulePolicyAdapter := NewManagerAdapter(func(eventType utils.EventType, event utils.K8sEvent) { switch eventType { + // Won't work for 3rd party tracers, we need to extract comm and containerID from the event by interface case utils.ExecveEventType: if execEvent, ok := event.(*events.ExecEvent); ok { rulePolicyReporter.ReportEvent(eventType, event, execEvent.Runtime.ContainerID, execEvent.Comm) @@ -195,13 +197,13 @@ func (ehf *EventHandlerFactory) ProcessEvent(enrichedEvent *events.EnrichedEvent for _, handler := range handlers { if enrichedHandler, ok := handler.(containerwatcher.EnrichedEventReceiver); ok { enrichedHandler.ReportEnrichedEvent(enrichedEvent) - } else { + } else if handler, ok := handler.(containerwatcher.EventReceiver); ok { handler.ReportEvent(enrichedEvent.EventType, enrichedEvent.Event) } } // Report to third-party event receivers - ehf.reportEventToThirdPartyTracers(enrichedEvent.EventType, enrichedEvent.Event) + ehf.reportEventToThirdPartyTracers(enrichedEvent) } // registerHandlers registers all handlers for different event types @@ -255,11 +257,15 @@ func (ehf *EventHandlerFactory) registerHandlers( } // reportEventToThirdPartyTracers reports events to third-party tracers -func (ehf *EventHandlerFactory) reportEventToThirdPartyTracers(eventType utils.EventType, event utils.K8sEvent) { +func (ehf *EventHandlerFactory) reportEventToThirdPartyTracers(enrichedEvent *events.EnrichedEvent) { if ehf.thirdPartyEventReceivers != nil { - if eventReceivers, ok := ehf.thirdPartyEventReceivers.Load(eventType); ok { + if 
eventReceivers, ok := ehf.thirdPartyEventReceivers.Load(enrichedEvent.EventType); ok { for receiver := range eventReceivers.Iter() { - receiver.ReportEvent(eventType, event) + if enrichedHandler, ok := receiver.(containerwatcher.EnrichedEventReceiver); ok { + enrichedHandler.ReportEnrichedEvent(enrichedEvent) + } else if handler, ok := receiver.(containerwatcher.EventReceiver); ok { + handler.ReportEvent(enrichedEvent.EventType, enrichedEvent.Event) + } } } } diff --git a/pkg/containerwatcher/v2/tracer_manager.go b/pkg/containerwatcher/v2/tracer_manager.go index f318656ac..09883ae18 100644 --- a/pkg/containerwatcher/v2/tracer_manager.go +++ b/pkg/containerwatcher/v2/tracer_manager.go @@ -16,7 +16,7 @@ type TracerManager struct { cfg config.Config tracers map[utils.EventType]containerwatcher.TracerInterface tracerFactory containerwatcher.TracerFactoryInterface - thirdPartyTracers []containerwatcher.CustomTracer + thirdPartyTracers []containerwatcher.TracerInterface } func NewTracerManager(cfg config.Config, tracerFactory containerwatcher.TracerFactoryInterface) *TracerManager { @@ -24,7 +24,7 @@ func NewTracerManager(cfg config.Config, tracerFactory containerwatcher.TracerFa cfg: cfg, tracers: make(map[utils.EventType]containerwatcher.TracerInterface), tracerFactory: tracerFactory, - thirdPartyTracers: make([]containerwatcher.CustomTracer, 0), + thirdPartyTracers: make([]containerwatcher.TracerInterface, 0), } } @@ -53,15 +53,11 @@ func (tm *TracerManager) StartAllTracers(ctx context.Context) error { if err := tracer.Start(ctx); err != nil { return err } + logger.L().Info("Started tracer", helpers.String("tracer", tracer.GetName())) } } - tm.thirdPartyTracers = tm.tracerFactory.GetThirdPartyTracers() - if err := tm.startThirdPartyTracers(); err != nil { - return err - } - return nil } @@ -97,23 +93,11 @@ func (tm *TracerManager) startProcfsTracer(ctx context.Context) error { return nil } -// startThirdPartyTracers starts all registered third-party tracers -func (tm *TracerManager) startThirdPartyTracers() error { - for _, tracer := range tm.thirdPartyTracers { - if err := tracer.Start(); err != nil { - logger.L().Error("error starting custom tracer", helpers.String("tracer", tracer.Name()), helpers.Error(err)) - return fmt.Errorf("starting custom tracer %s: %w", tracer.Name(), err) - } - logger.L().Info("started custom tracer", helpers.String("tracer", tracer.Name())) - } - return nil -} - // stopThirdPartyTracers stops all registered third-party tracers func (tm *TracerManager) stopThirdPartyTracers() { for _, tracer := range tm.thirdPartyTracers { if err := tracer.Stop(); err != nil { - logger.L().Error("error stopping custom tracer", helpers.String("tracer", tracer.Name()), helpers.Error(err)) + logger.L().Error("error stopping custom tracer", helpers.String("tracer", tracer.GetName()), helpers.Error(err)) } } } diff --git a/pkg/containerwatcher/v2/tracers/capabilities.go b/pkg/containerwatcher/v2/tracers/capabilities.go index ddb43812b..1f20ea212 100644 --- a/pkg/containerwatcher/v2/tracers/capabilities.go +++ b/pkg/containerwatcher/v2/tracers/capabilities.go @@ -23,7 +23,7 @@ type CapabilitiesTracer struct { containerCollection *containercollection.ContainerCollection tracerCollection *tracercollection.TracerCollection containerSelector containercollection.ContainerSelector - eventCallback func(utils.K8sEvent, string, uint32) + eventCallback containerwatcher.ResultCallback tracer *tracercapabilities.Tracer } @@ -32,7 +32,7 @@ func NewCapabilitiesTracer( containerCollection 
*containercollection.ContainerCollection, tracerCollection *tracercollection.TracerCollection, containerSelector containercollection.ContainerSelector, - eventCallback func(utils.K8sEvent, string, uint32), + eventCallback containerwatcher.ResultCallback, ) *CapabilitiesTracer { return &CapabilitiesTracer{ containerCollection: containerCollection, @@ -91,11 +91,8 @@ func (ct *CapabilitiesTracer) GetEventType() utils.EventType { } // IsEnabled checks if this tracer should be enabled based on configuration -func (ct *CapabilitiesTracer) IsEnabled(cfg interface{}) bool { - if config, ok := cfg.(config.Config); ok { - return !config.DCapSys && config.EnableRuntimeDetection - } - return false +func (ct *CapabilitiesTracer) IsEnabled(cfg config.Config) bool { + return !cfg.DCapSys && cfg.EnableRuntimeDetection } // capabilitiesEventCallback handles capabilities events from the tracer diff --git a/pkg/containerwatcher/v2/tracers/dns.go b/pkg/containerwatcher/v2/tracers/dns.go index 9c43c0990..507eaa442 100644 --- a/pkg/containerwatcher/v2/tracers/dns.go +++ b/pkg/containerwatcher/v2/tracers/dns.go @@ -27,7 +27,7 @@ type DNSTracer struct { containerCollection *containercollection.ContainerCollection tracerCollection *tracercollection.TracerCollection containerSelector containercollection.ContainerSelector - eventCallback func(utils.K8sEvent, string, uint32) + eventCallback containerwatcher.ResultCallback tracer *tracerdns.Tracer socketEnricher *socketenricher.SocketEnricher } @@ -37,7 +37,7 @@ func NewDNSTracer( containerCollection *containercollection.ContainerCollection, tracerCollection *tracercollection.TracerCollection, containerSelector containercollection.ContainerSelector, - eventCallback func(utils.K8sEvent, string, uint32), + eventCallback containerwatcher.ResultCallback, socketEnricher *socketenricher.SocketEnricher, ) *DNSTracer { return &DNSTracer{ @@ -114,14 +114,11 @@ func (dt *DNSTracer) GetEventType() utils.EventType { } // IsEnabled checks if this tracer should be enabled based on configuration -func (dt *DNSTracer) IsEnabled(cfg interface{}) bool { - if config, ok := cfg.(config.Config); ok { - if config.DDns { - return false - } - return config.EnableNetworkTracing || config.EnableRuntimeDetection +func (dt *DNSTracer) IsEnabled(cfg config.Config) bool { + if cfg.DDns { + return false } - return false + return cfg.EnableNetworkTracing || cfg.EnableRuntimeDetection } // dnsEventCallback handles DNS events from the tracer diff --git a/pkg/containerwatcher/v2/tracers/exec.go b/pkg/containerwatcher/v2/tracers/exec.go index d53b73b6b..fa0b7ed30 100644 --- a/pkg/containerwatcher/v2/tracers/exec.go +++ b/pkg/containerwatcher/v2/tracers/exec.go @@ -24,7 +24,7 @@ type ExecTracer struct { containerCollection *containercollection.ContainerCollection tracerCollection *tracercollection.TracerCollection containerSelector containercollection.ContainerSelector - eventCallback func(utils.K8sEvent, string, uint32) + eventCallback containerwatcher.ResultCallback tracer *tracerexec.Tracer thirdPartyEnricher containerwatcher.TaskBasedEnricher } @@ -34,7 +34,7 @@ func NewExecTracer( containerCollection *containercollection.ContainerCollection, tracerCollection *tracercollection.TracerCollection, containerSelector containercollection.ContainerSelector, - eventCallback func(utils.K8sEvent, string, uint32), + eventCallback containerwatcher.ResultCallback, thirdPartyEnricher containerwatcher.TaskBasedEnricher, ) *ExecTracer { return &ExecTracer{ @@ -95,14 +95,11 @@ func (et *ExecTracer) GetEventType() 
utils.EventType { } // IsEnabled checks if this tracer should be enabled based on configuration -func (et *ExecTracer) IsEnabled(cfg interface{}) bool { - if config, ok := cfg.(config.Config); ok { - if config.DExec { - return false - } - return config.EnableApplicationProfile || config.EnableRuntimeDetection +func (et *ExecTracer) IsEnabled(cfg config.Config) bool { + if cfg.DExec { + return false } - return false + return cfg.EnableApplicationProfile || cfg.EnableRuntimeDetection } // execEventCallback handles exec events from the tracer diff --git a/pkg/containerwatcher/v2/tracers/exit.go b/pkg/containerwatcher/v2/tracers/exit.go index d7d671cf9..391d81115 100644 --- a/pkg/containerwatcher/v2/tracers/exit.go +++ b/pkg/containerwatcher/v2/tracers/exit.go @@ -23,7 +23,7 @@ type ExitTracer struct { containerCollection *containercollection.ContainerCollection tracerCollection *tracercollection.TracerCollection containerSelector containercollection.ContainerSelector - eventCallback func(utils.K8sEvent, string, uint32) + eventCallback containerwatcher.ResultCallback tracer *tracerexit.Tracer } @@ -32,7 +32,7 @@ func NewExitTracer( containerCollection *containercollection.ContainerCollection, tracerCollection *tracercollection.TracerCollection, containerSelector containercollection.ContainerSelector, - eventCallback func(utils.K8sEvent, string, uint32), + eventCallback containerwatcher.ResultCallback, ) *ExitTracer { return &ExitTracer{ containerCollection: containerCollection, @@ -91,14 +91,11 @@ func (et *ExitTracer) GetEventType() utils.EventType { } // IsEnabled checks if this tracer should be enabled based on configuration -func (et *ExitTracer) IsEnabled(cfg interface{}) bool { - if config, ok := cfg.(config.Config); ok { - if config.DExit { - return false - } - return config.EnableRuntimeDetection || config.EnableApplicationProfile +func (et *ExitTracer) IsEnabled(cfg config.Config) bool { + if cfg.DExit { + return false } - return false + return cfg.EnableRuntimeDetection || cfg.EnableApplicationProfile } // exitEventCallback handles exit events from the tracer diff --git a/pkg/containerwatcher/v2/tracers/fork.go b/pkg/containerwatcher/v2/tracers/fork.go index e97a0d2f9..68b0009c0 100644 --- a/pkg/containerwatcher/v2/tracers/fork.go +++ b/pkg/containerwatcher/v2/tracers/fork.go @@ -23,7 +23,7 @@ type ForkTracer struct { containerCollection *containercollection.ContainerCollection tracerCollection *tracercollection.TracerCollection containerSelector containercollection.ContainerSelector - eventCallback func(utils.K8sEvent, string, uint32) + eventCallback containerwatcher.ResultCallback tracer *tracerfork.Tracer } @@ -32,7 +32,7 @@ func NewForkTracer( containerCollection *containercollection.ContainerCollection, tracerCollection *tracercollection.TracerCollection, containerSelector containercollection.ContainerSelector, - eventCallback func(utils.K8sEvent, string, uint32), + eventCallback containerwatcher.ResultCallback, ) *ForkTracer { return &ForkTracer{ containerCollection: containerCollection, @@ -91,14 +91,11 @@ func (ft *ForkTracer) GetEventType() utils.EventType { } // IsEnabled checks if this tracer should be enabled based on configuration -func (ft *ForkTracer) IsEnabled(cfg interface{}) bool { - if config, ok := cfg.(config.Config); ok { - if config.DFork { - return false - } - return config.EnableApplicationProfile || config.EnableRuntimeDetection +func (ft *ForkTracer) IsEnabled(cfg config.Config) bool { + if cfg.DFork { + return false } - return false + return 
cfg.EnableApplicationProfile || cfg.EnableRuntimeDetection } // forkEventCallback handles fork events from the tracer diff --git a/pkg/containerwatcher/v2/tracers/hardlink.go b/pkg/containerwatcher/v2/tracers/hardlink.go index 2824defa7..76f4d2078 100644 --- a/pkg/containerwatcher/v2/tracers/hardlink.go +++ b/pkg/containerwatcher/v2/tracers/hardlink.go @@ -26,7 +26,7 @@ type HardlinkTracer struct { containerCollection *containercollection.ContainerCollection tracerCollection *tracercollection.TracerCollection containerSelector containercollection.ContainerSelector - eventCallback func(utils.K8sEvent, string, uint32) + eventCallback containerwatcher.ResultCallback tracer *tracerhardlink.Tracer thirdPartyEnricher containerwatcher.TaskBasedEnricher } @@ -36,7 +36,7 @@ func NewHardlinkTracer( containerCollection *containercollection.ContainerCollection, tracerCollection *tracercollection.TracerCollection, containerSelector containercollection.ContainerSelector, - eventCallback func(utils.K8sEvent, string, uint32), + eventCallback containerwatcher.ResultCallback, thirdPartyEnricher containerwatcher.TaskBasedEnricher, ) *HardlinkTracer { return &HardlinkTracer{ @@ -97,11 +97,8 @@ func (ht *HardlinkTracer) GetEventType() utils.EventType { } // IsEnabled checks if this tracer should be enabled based on configuration -func (ht *HardlinkTracer) IsEnabled(cfg interface{}) bool { - if config, ok := cfg.(config.Config); ok { - return !config.DHardlink && config.EnableRuntimeDetection - } - return false +func (ht *HardlinkTracer) IsEnabled(cfg config.Config) bool { + return !cfg.DHardlink && cfg.EnableRuntimeDetection } // hardlinkEventCallback handles hardlink events from the tracer diff --git a/pkg/containerwatcher/v2/tracers/http.go b/pkg/containerwatcher/v2/tracers/http.go index 7f34dee0d..8942e7a5a 100644 --- a/pkg/containerwatcher/v2/tracers/http.go +++ b/pkg/containerwatcher/v2/tracers/http.go @@ -29,7 +29,7 @@ type HTTPTracer struct { containerCollection *containercollection.ContainerCollection tracerCollection *tracercollection.TracerCollection containerSelector containercollection.ContainerSelector - eventCallback func(utils.K8sEvent, string, uint32) + eventCallback containerwatcher.ResultCallback tracer *tracerhttp.Tracer } @@ -38,7 +38,7 @@ func NewHTTPTracer( containerCollection *containercollection.ContainerCollection, tracerCollection *tracercollection.TracerCollection, containerSelector containercollection.ContainerSelector, - eventCallback func(utils.K8sEvent, string, uint32), + eventCallback containerwatcher.ResultCallback, ) *HTTPTracer { return &HTTPTracer{ containerCollection: containerCollection, @@ -97,11 +97,8 @@ func (ht *HTTPTracer) GetEventType() utils.EventType { } // IsEnabled checks if this tracer should be enabled based on configuration -func (ht *HTTPTracer) IsEnabled(cfg interface{}) bool { - if config, ok := cfg.(config.Config); ok { - return !config.DHttp && config.EnableHttpDetection - } - return false +func (ht *HTTPTracer) IsEnabled(cfg config.Config) bool { + return !cfg.DHttp && cfg.EnableHttpDetection } // httpEventCallback handles HTTP events from the tracer diff --git a/pkg/containerwatcher/v2/tracers/iouring.go b/pkg/containerwatcher/v2/tracers/iouring.go index d8750b936..adbf9e573 100644 --- a/pkg/containerwatcher/v2/tracers/iouring.go +++ b/pkg/containerwatcher/v2/tracers/iouring.go @@ -25,7 +25,7 @@ type IoUringTracer struct { containerCollection *containercollection.ContainerCollection tracerCollection *tracercollection.TracerCollection containerSelector 
containercollection.ContainerSelector - eventCallback func(utils.K8sEvent, string, uint32) + eventCallback containerwatcher.ResultCallback tracer *traceriouring.Tracer } @@ -34,7 +34,7 @@ func NewIoUringTracer( containerCollection *containercollection.ContainerCollection, tracerCollection *tracercollection.TracerCollection, containerSelector containercollection.ContainerSelector, - eventCallback func(utils.K8sEvent, string, uint32), + eventCallback containerwatcher.ResultCallback, ) *IoUringTracer { return &IoUringTracer{ containerCollection: containerCollection, @@ -94,11 +94,8 @@ func (it *IoUringTracer) GetEventType() utils.EventType { } // IsEnabled checks if this tracer should be enabled based on configuration -func (it *IoUringTracer) IsEnabled(cfg interface{}) bool { - if conf, ok := cfg.(config.Config); ok { - return !conf.DIouring && conf.EnableRuntimeDetection - } - return false +func (it *IoUringTracer) IsEnabled(cfg config.Config) bool { + return !cfg.DIouring && cfg.EnableRuntimeDetection } // iouringEventCallback handles io_uring events from the tracer diff --git a/pkg/containerwatcher/v2/tracers/network.go b/pkg/containerwatcher/v2/tracers/network.go index 625bef76e..18eec1364 100644 --- a/pkg/containerwatcher/v2/tracers/network.go +++ b/pkg/containerwatcher/v2/tracers/network.go @@ -29,7 +29,7 @@ type NetworkTracer struct { containerCollection *containercollection.ContainerCollection tracerCollection *tracercollection.TracerCollection containerSelector containercollection.ContainerSelector - eventCallback func(utils.K8sEvent, string, uint32) + eventCallback containerwatcher.ResultCallback tracer *tracernetwork.Tracer socketEnricher *socketenricher.SocketEnricher kubeIPInstance operators.OperatorInstance @@ -41,7 +41,7 @@ func NewNetworkTracer( containerCollection *containercollection.ContainerCollection, tracerCollection *tracercollection.TracerCollection, containerSelector containercollection.ContainerSelector, - eventCallback func(utils.K8sEvent, string, uint32), + eventCallback containerwatcher.ResultCallback, socketEnricher *socketenricher.SocketEnricher, ) *NetworkTracer { return &NetworkTracer{ @@ -123,14 +123,11 @@ func (nt *NetworkTracer) GetEventType() utils.EventType { } // IsEnabled checks if this tracer should be enabled based on configuration -func (nt *NetworkTracer) IsEnabled(cfg interface{}) bool { - if config, ok := cfg.(config.Config); ok { - if config.DNetwork { - return false - } - return config.EnableNetworkTracing || config.EnableRuntimeDetection - } - return false +func (nt *NetworkTracer) IsEnabled(cfg config.Config) bool { + if cfg.DNetwork { + return false + } + return cfg.EnableNetworkTracing || cfg.EnableRuntimeDetection } // networkEventCallback handles network events from the tracer diff --git a/pkg/containerwatcher/v2/tracers/open.go b/pkg/containerwatcher/v2/tracers/open.go index 7e3bdf4b1..a93243225 100644 --- a/pkg/containerwatcher/v2/tracers/open.go +++ b/pkg/containerwatcher/v2/tracers/open.go @@ -25,7 +25,7 @@ type OpenTracer struct { containerCollection *containercollection.ContainerCollection tracerCollection *tracercollection.TracerCollection containerSelector containercollection.ContainerSelector - eventCallback func(utils.K8sEvent, string, uint32) + eventCallback containerwatcher.ResultCallback tracer *traceropen.Tracer cfg config.Config thirdPartyEnricher containerwatcher.TaskBasedEnricher @@ -36,7 +36,7 @@ func NewOpenTracer( containerCollection *containercollection.ContainerCollection, tracerCollection 
*tracercollection.TracerCollection, containerSelector containercollection.ContainerSelector, - eventCallback func(utils.K8sEvent, string, uint32), + eventCallback containerwatcher.ResultCallback, thirdPartyEnricher containerwatcher.TaskBasedEnricher, ) *OpenTracer { return &OpenTracer{ @@ -97,15 +97,12 @@ func (ot *OpenTracer) GetEventType() utils.EventType { } // IsEnabled checks if this tracer should be enabled based on configuration -func (ot *OpenTracer) IsEnabled(cfg interface{}) bool { - if config, ok := cfg.(config.Config); ok { - ot.cfg = config - if config.DOpen { - return false - } - return config.EnableApplicationProfile || config.EnableRuntimeDetection +func (ot *OpenTracer) IsEnabled(cfg config.Config) bool { + ot.cfg = cfg + if cfg.DOpen { + return false } - return false + return cfg.EnableApplicationProfile || cfg.EnableRuntimeDetection } // openEventCallback handles open events from the tracer diff --git a/pkg/containerwatcher/v2/tracers/procfs.go b/pkg/containerwatcher/v2/tracers/procfs.go index b6f07f275..43c57ffd5 100644 --- a/pkg/containerwatcher/v2/tracers/procfs.go +++ b/pkg/containerwatcher/v2/tracers/procfs.go @@ -43,7 +43,7 @@ func NewProcfsTracer( tracerCollection *tracercollection.TracerCollection, containerSelector containercollection.ContainerSelector, procfsEventCallback func(utils.K8sEvent, string, uint32), - exitEventCallback func(utils.K8sEvent, string, uint32), + exitEventCallback containerwatcher.ResultCallback, cfg config.Config, processTreeManager processtree.ProcessTreeManager, ) *ProcfsTracer { @@ -108,11 +108,8 @@ func (pt *ProcfsTracer) GetEventType() utils.EventType { } // IsEnabled checks if this tracer should be enabled based on configuration -func (pt *ProcfsTracer) IsEnabled(cfg interface{}) bool { - if config, ok := cfg.(config.Config); ok { - return config.EnableRuntimeDetection - } - return false +func (pt *ProcfsTracer) IsEnabled(cfg config.Config) bool { + return cfg.EnableRuntimeDetection } // processEvents processes events from the procfs feeder diff --git a/pkg/containerwatcher/v2/tracers/ptrace.go b/pkg/containerwatcher/v2/tracers/ptrace.go index 6a6ba4e7a..228a67f94 100644 --- a/pkg/containerwatcher/v2/tracers/ptrace.go +++ b/pkg/containerwatcher/v2/tracers/ptrace.go @@ -23,7 +23,7 @@ type PtraceTracer struct { containerCollection *containercollection.ContainerCollection tracerCollection *tracercollection.TracerCollection containerSelector containercollection.ContainerSelector - eventCallback func(utils.K8sEvent, string, uint32) + eventCallback containerwatcher.ResultCallback tracer *tracerptrace.Tracer } @@ -32,7 +32,7 @@ func NewPtraceTracer( containerCollection *containercollection.ContainerCollection, tracerCollection *tracercollection.TracerCollection, containerSelector containercollection.ContainerSelector, - eventCallback func(utils.K8sEvent, string, uint32), + eventCallback containerwatcher.ResultCallback, ) *PtraceTracer { return &PtraceTracer{ containerCollection: containerCollection, @@ -91,11 +91,8 @@ func (pt *PtraceTracer) GetEventType() utils.EventType { } // IsEnabled checks if this tracer should be enabled based on configuration -func (pt *PtraceTracer) IsEnabled(cfg interface{}) bool { - if config, ok := cfg.(config.Config); ok { - return !config.DPtrace && config.EnableRuntimeDetection - } - return false +func (pt *PtraceTracer) IsEnabled(cfg config.Config) bool { + return !cfg.DPtrace && cfg.EnableRuntimeDetection } // ptraceEventCallback handles ptrace events from the tracer diff --git 
a/pkg/containerwatcher/v2/tracers/randomx.go b/pkg/containerwatcher/v2/tracers/randomx.go index b2a31ce67..4f22020a1 100644 --- a/pkg/containerwatcher/v2/tracers/randomx.go +++ b/pkg/containerwatcher/v2/tracers/randomx.go @@ -26,7 +26,7 @@ type RandomXTracer struct { containerCollection *containercollection.ContainerCollection tracerCollection *tracercollection.TracerCollection containerSelector containercollection.ContainerSelector - eventCallback func(utils.K8sEvent, string, uint32) + eventCallback containerwatcher.ResultCallback tracer *tracerandomx.Tracer } @@ -35,7 +35,7 @@ func NewRandomXTracer( containerCollection *containercollection.ContainerCollection, tracerCollection *tracercollection.TracerCollection, containerSelector containercollection.ContainerSelector, - eventCallback func(utils.K8sEvent, string, uint32), + eventCallback containerwatcher.ResultCallback, ) *RandomXTracer { return &RandomXTracer{ containerCollection: containerCollection, @@ -100,11 +100,8 @@ func (rt *RandomXTracer) GetEventType() utils.EventType { } // IsEnabled checks if this tracer should be enabled based on configuration -func (rt *RandomXTracer) IsEnabled(cfg interface{}) bool { - if config, ok := cfg.(config.Config); ok { - return !config.DRandomx && config.EnableRuntimeDetection && runtime.GOARCH == "amd64" - } - return false +func (rt *RandomXTracer) IsEnabled(cfg config.Config) bool { + return !cfg.DRandomx && cfg.EnableRuntimeDetection && runtime.GOARCH == "amd64" } // randomxEventCallback handles RandomX events from the tracer diff --git a/pkg/containerwatcher/v2/tracers/ssh.go b/pkg/containerwatcher/v2/tracers/ssh.go index c7a31428a..124f0b27a 100644 --- a/pkg/containerwatcher/v2/tracers/ssh.go +++ b/pkg/containerwatcher/v2/tracers/ssh.go @@ -27,7 +27,7 @@ type SSHTracer struct { containerCollection *containercollection.ContainerCollection tracerCollection *tracercollection.TracerCollection containerSelector containercollection.ContainerSelector - eventCallback func(utils.K8sEvent, string, uint32) + eventCallback containerwatcher.ResultCallback tracer *tracerssh.Tracer socketEnricher *socketenricher.SocketEnricher } @@ -37,7 +37,7 @@ func NewSSHTracer( containerCollection *containercollection.ContainerCollection, tracerCollection *tracercollection.TracerCollection, containerSelector containercollection.ContainerSelector, - eventCallback func(utils.K8sEvent, string, uint32), + eventCallback containerwatcher.ResultCallback, socketEnricher *socketenricher.SocketEnricher, ) *SSHTracer { return &SSHTracer{ @@ -114,11 +114,8 @@ func (st *SSHTracer) GetEventType() utils.EventType { } // IsEnabled checks if this tracer should be enabled based on configuration -func (st *SSHTracer) IsEnabled(cfg interface{}) bool { - if config, ok := cfg.(config.Config); ok { - return !config.DSsh && config.EnableRuntimeDetection - } - return false +func (st *SSHTracer) IsEnabled(cfg config.Config) bool { + return !cfg.DSsh && cfg.EnableRuntimeDetection } // sshEventCallback handles SSH events from the tracer diff --git a/pkg/containerwatcher/v2/tracers/symlink.go b/pkg/containerwatcher/v2/tracers/symlink.go index ebf7ba30a..c94b8a5bc 100644 --- a/pkg/containerwatcher/v2/tracers/symlink.go +++ b/pkg/containerwatcher/v2/tracers/symlink.go @@ -26,7 +26,7 @@ type SymlinkTracer struct { containerCollection *containercollection.ContainerCollection tracerCollection *tracercollection.TracerCollection containerSelector containercollection.ContainerSelector - eventCallback func(utils.K8sEvent, string, uint32) + eventCallback 
containerwatcher.ResultCallback tracer *tracersymlink.Tracer thirdPartyEnricher containerwatcher.TaskBasedEnricher } @@ -36,7 +36,7 @@ func NewSymlinkTracer( containerCollection *containercollection.ContainerCollection, tracerCollection *tracercollection.TracerCollection, containerSelector containercollection.ContainerSelector, - eventCallback func(utils.K8sEvent, string, uint32), + eventCallback containerwatcher.ResultCallback, thirdPartyEnricher containerwatcher.TaskBasedEnricher, ) *SymlinkTracer { return &SymlinkTracer{ @@ -97,11 +97,8 @@ func (st *SymlinkTracer) GetEventType() utils.EventType { } // IsEnabled checks if this tracer should be enabled based on configuration -func (st *SymlinkTracer) IsEnabled(cfg interface{}) bool { - if config, ok := cfg.(config.Config); ok { - return !config.DSymlink && config.EnableRuntimeDetection - } - return false +func (st *SymlinkTracer) IsEnabled(cfg config.Config) bool { + return !cfg.DSymlink && cfg.EnableRuntimeDetection } // symlinkEventCallback handles symlink events from the tracer diff --git a/pkg/containerwatcher/v2/tracers/syscall.go b/pkg/containerwatcher/v2/tracers/syscall.go index 91067c9f5..89b2f32e2 100644 --- a/pkg/containerwatcher/v2/tracers/syscall.go +++ b/pkg/containerwatcher/v2/tracers/syscall.go @@ -66,14 +66,11 @@ func (st *SyscallTracer) GetEventType() utils.EventType { } // IsEnabled checks if this tracer should be enabled based on configuration -func (st *SyscallTracer) IsEnabled(cfg interface{}) bool { - if config, ok := cfg.(config.Config); ok { - if config.DSeccomp { - return false - } - return config.EnableRuntimeDetection || config.EnableSeccomp +func (st *SyscallTracer) IsEnabled(cfg config.Config) bool { + if cfg.DSeccomp { + return false } - return false + return cfg.EnableRuntimeDetection || cfg.EnableSeccomp } // Peek provides the peek function for other components diff --git a/pkg/containerwatcher/v2/tracers/top.go b/pkg/containerwatcher/v2/tracers/top.go index 9456532fd..a771146c7 100644 --- a/pkg/containerwatcher/v2/tracers/top.go +++ b/pkg/containerwatcher/v2/tracers/top.go @@ -27,7 +27,7 @@ type TopTracer struct { containerCollection *containercollection.ContainerCollection tracerCollection *tracercollection.TracerCollection containerSelector containercollection.ContainerSelector - eventCallback func(utils.K8sEvent, string, uint32) + eventCallback containerwatcher.ResultCallback tracer *toptracer.Tracer } @@ -36,7 +36,7 @@ func NewTopTracer( containerCollection *containercollection.ContainerCollection, tracerCollection *tracercollection.TracerCollection, containerSelector containercollection.ContainerSelector, - eventCallback func(utils.K8sEvent, string, uint32), + eventCallback containerwatcher.ResultCallback, ) *TopTracer { return &TopTracer{ containerCollection: containerCollection, @@ -89,11 +89,8 @@ func (tt *TopTracer) GetEventType() utils.EventType { } // IsEnabled checks if this tracer should be enabled based on configuration -func (tt *TopTracer) IsEnabled(cfg interface{}) bool { - if config, ok := cfg.(config.Config); ok { - return !config.DTop && config.EnablePrometheusExporter - } - return false +func (tt *TopTracer) IsEnabled(cfg config.Config) bool { + return !cfg.DTop && cfg.EnablePrometheusExporter } // topEventCallback handles top events from the tracer diff --git a/pkg/containerwatcher/v2/tracers/tracer_factory.go b/pkg/containerwatcher/v2/tracers/tracer_factory.go index 9590c059d..e8925890c 100644 --- a/pkg/containerwatcher/v2/tracers/tracer_factory.go +++ 
b/pkg/containerwatcher/v2/tracers/tracer_factory.go @@ -5,6 +5,8 @@ import ( containercollection "github.com/inspektor-gadget/inspektor-gadget/pkg/container-collection" "github.com/inspektor-gadget/inspektor-gadget/pkg/socketenricher" tracercollection "github.com/inspektor-gadget/inspektor-gadget/pkg/tracer-collection" + "github.com/kubescape/go-logger" + "github.com/kubescape/go-logger/helpers" "github.com/kubescape/node-agent/pkg/config" "github.com/kubescape/node-agent/pkg/containerprofilemanager" "github.com/kubescape/node-agent/pkg/containerwatcher" @@ -27,7 +29,7 @@ type TracerFactory struct { socketEnricher *socketenricher.SocketEnricher containerProfileManager containerprofilemanager.ContainerProfileManagerClient ruleManager rulemanager.RuleManagerClient - thirdPartyTracers mapset.Set[containerwatcher.CustomTracer] + thirdPartyTracersInit mapset.Set[containerwatcher.CustomTracerInitializer] thirdPartyEnricher containerwatcher.TaskBasedEnricher cfg config.Config processTreeManager processtree.ProcessTreeManager @@ -42,7 +44,7 @@ func NewTracerFactory( socketEnricher *socketenricher.SocketEnricher, containerProfileManager containerprofilemanager.ContainerProfileManagerClient, ruleManager rulemanager.RuleManagerClient, - thirdPartyTracers mapset.Set[containerwatcher.CustomTracer], + thirdPartyTracers mapset.Set[containerwatcher.CustomTracerInitializer], thirdPartyEnricher containerwatcher.TaskBasedEnricher, cfg config.Config, processTreeManager processtree.ProcessTreeManager, @@ -55,7 +57,7 @@ func NewTracerFactory( socketEnricher: socketEnricher, containerProfileManager: containerProfileManager, ruleManager: ruleManager, - thirdPartyTracers: thirdPartyTracers, + thirdPartyTracersInit: thirdPartyTracers, thirdPartyEnricher: thirdPartyEnricher, cfg: cfg, processTreeManager: processTreeManager, @@ -221,19 +223,22 @@ func (tf *TracerFactory) CreateAllTracers(manager containerwatcher.TracerRegistr tf.createEventCallback(utils.AllEventType), ) manager.RegisterTracer(topTracer) -} -// GetThirdPartyTracers returns all registered third-party tracers -func (tf *TracerFactory) GetThirdPartyTracers() []containerwatcher.CustomTracer { - tracers := make([]containerwatcher.CustomTracer, 0, tf.thirdPartyTracers.Cardinality()) - for tracer := range tf.thirdPartyTracers.Iter() { - tracers = append(tracers, tracer) + // Create third-party tracers + for tracerInit := range tf.thirdPartyTracersInit.Iter() { + tracer, err := tracerInit.NewTracer(tf.containerCollection, tf.tracerCollection, tf.containerSelector, tf.createEventCallback(utils.AllEventType), tf.thirdPartyEnricher) + if err != nil { + logger.L().Error("error creating third-party tracer", helpers.Error(err)) + continue + } + + manager.RegisterTracer(tracer) } - return tracers + } // createEventCallback creates a simple callback that sends events directly to the ordered event queue -func (tf *TracerFactory) createEventCallback(eventType utils.EventType) func(utils.K8sEvent, string, uint32) { +func (tf *TracerFactory) createEventCallback(eventType utils.EventType) containerwatcher.ResultCallback { return func(event utils.K8sEvent, containerID string, processID uint32) { tf.orderedEventQueue.AddEventDirect(eventType, event, containerID, processID) } diff --git a/pkg/exporters/alert_manager.go b/pkg/exporters/alert_manager.go index 4fdc0d11d..ef761ec93 100644 --- a/pkg/exporters/alert_manager.go +++ b/pkg/exporters/alert_manager.go @@ -8,6 +8,7 @@ import ( "encoding/json" "fmt" "os" + "strconv" "time" apitypes 
"github.com/armosec/armoapi-go/armotypes" @@ -15,7 +16,7 @@ import ( "github.com/kubescape/go-logger" "github.com/kubescape/go-logger/helpers" "github.com/kubescape/node-agent/pkg/malwaremanager" - "github.com/kubescape/node-agent/pkg/ruleengine" + "github.com/kubescape/node-agent/pkg/rulemanager/types" "github.com/kubescape/node-agent/pkg/utils" "github.com/prometheus/alertmanager/api/v2/client" "github.com/prometheus/alertmanager/api/v2/client/alert" @@ -46,7 +47,7 @@ func InitAlertManagerExporter(alertManagerURL string) *AlertManagerExporter { } } -func (ame *AlertManagerExporter) SendRuleAlert(failedRule ruleengine.RuleFailure) { +func (ame *AlertManagerExporter) SendRuleAlert(failedRule types.RuleFailure) { profileMetadata := failedRule.GetBaseRuntimeAlert().ProfileMetadata failOnProfile := false completedStatus := "" @@ -98,9 +99,9 @@ func (ame *AlertManagerExporter) SendRuleAlert(failedRule ruleengine.RuleFailure "container_name": failedRule.GetRuntimeAlertK8sDetails().ContainerName, "namespace": failedRule.GetRuntimeAlertK8sDetails().Namespace, "pod_name": failedRule.GetRuntimeAlertK8sDetails().PodName, - "severity": PriorityToStatus(failedRule.GetBaseRuntimeAlert().Severity), "host": ame.Host, "node_name": ame.NodeName, + "severity": strconv.Itoa(failedRule.GetBaseRuntimeAlert().Severity), "pid": fmt.Sprintf("%d", process.PID), "ppid": fmt.Sprintf("%d", process.PPID), "pcomm": process.Pcomm, diff --git a/pkg/exporters/alert_manager_test.go b/pkg/exporters/alert_manager_test.go index 374dd2c4e..48a49f9db 100644 --- a/pkg/exporters/alert_manager_test.go +++ b/pkg/exporters/alert_manager_test.go @@ -12,7 +12,7 @@ import ( apitypes "github.com/armosec/armoapi-go/armotypes" igtypes "github.com/inspektor-gadget/inspektor-gadget/pkg/types" mmtypes "github.com/kubescape/node-agent/pkg/malwaremanager/v1/types" - "github.com/kubescape/node-agent/pkg/ruleengine/v1" + "github.com/kubescape/node-agent/pkg/rulemanager/types" "github.com/stretchr/testify/assert" ) @@ -36,7 +36,7 @@ func TestSendAlert(t *testing.T) { } // Call SendAlert - exporter.SendRuleAlert(&ruleengine.GenericRuleFailure{ + exporter.SendRuleAlert(&types.GenericRuleFailure{ BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ AlertName: "testrule", }, @@ -67,7 +67,7 @@ func TestSendAlert(t *testing.T) { assert.Equal(t, "testnamespace", alertLabels["namespace"]) assert.Equal(t, "testpodname", alertLabels["pod_name"]) assert.Equal(t, "", alertLabels["node_name"]) - assert.Equal(t, "none", alertLabels["severity"]) + assert.Equal(t, "0", alertLabels["severity"]) assert.Equal(t, "Rule 'testrule' in 'testpodname' namespace 'testnamespace' failed", alert["annotations"].(map[string]interface{})["summary"]) assert.Equal(t, "Application profile is missing", alert["annotations"].(map[string]interface{})["message"]) assert.Equal(t, strings.HasPrefix(fmt.Sprint(alert["generatorURL"]), "https://armosec.github.io/kubecop/alertviewer/"), true) diff --git a/pkg/exporters/csv_exporter.go b/pkg/exporters/csv_exporter.go index 9bf703d31..f316a2634 100644 --- a/pkg/exporters/csv_exporter.go +++ b/pkg/exporters/csv_exporter.go @@ -6,7 +6,7 @@ import ( "os" "github.com/kubescape/node-agent/pkg/malwaremanager" - "github.com/kubescape/node-agent/pkg/ruleengine" + "github.com/kubescape/node-agent/pkg/rulemanager/types" "github.com/sirupsen/logrus" ) @@ -51,7 +51,7 @@ func InitCsvExporter(csvRulePath, csvMalwarePath string) *CsvExporter { } // SendRuleAlert sends an alert to csv -func (ce *CsvExporter) SendRuleAlert(failedRule ruleengine.RuleFailure) { +func 
(ce *CsvExporter) SendRuleAlert(failedRule types.RuleFailure) { csvFile, err := os.OpenFile(ce.CsvRulePath, os.O_APPEND|os.O_WRONLY, 0644) if err != nil { logrus.Errorf("failed to initialize csv exporter: %v", err) diff --git a/pkg/exporters/csv_exporter_test.go b/pkg/exporters/csv_exporter_test.go index 25dcfcea4..ab05a89d0 100644 --- a/pkg/exporters/csv_exporter_test.go +++ b/pkg/exporters/csv_exporter_test.go @@ -6,7 +6,7 @@ import ( "testing" mmtypes "github.com/kubescape/node-agent/pkg/malwaremanager/v1/types" - "github.com/kubescape/node-agent/pkg/ruleengine/v1" + "github.com/kubescape/node-agent/pkg/rulemanager/types" apitypes "github.com/armosec/armoapi-go/armotypes" igtypes "github.com/inspektor-gadget/inspektor-gadget/pkg/types" @@ -18,7 +18,7 @@ func TestCsvExporter(t *testing.T) { t.Fatalf("Expected csvExporter to not be nil") } - csvExporter.SendRuleAlert(&ruleengine.GenericRuleFailure{ + csvExporter.SendRuleAlert(&types.GenericRuleFailure{ BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ AlertName: "testrule", }, diff --git a/pkg/exporters/exporter.go b/pkg/exporters/exporter.go index a32af2001..8521aa761 100644 --- a/pkg/exporters/exporter.go +++ b/pkg/exporters/exporter.go @@ -2,13 +2,13 @@ package exporters import ( "github.com/kubescape/node-agent/pkg/malwaremanager" - "github.com/kubescape/node-agent/pkg/ruleengine" + "github.com/kubescape/node-agent/pkg/rulemanager/types" ) // generic exporter interface type Exporter interface { // SendRuleAlert sends an alert on failed rule to the exporter - SendRuleAlert(failedRule ruleengine.RuleFailure) + SendRuleAlert(failedRule types.RuleFailure) // SendMalwareAlert sends an alert on malware detection to the exporter. SendMalwareAlert(malwareResult malwaremanager.MalwareResult) } @@ -17,7 +17,7 @@ var _ Exporter = (*ExporterMock)(nil) type ExporterMock struct{} -func (e *ExporterMock) SendRuleAlert(_ ruleengine.RuleFailure) { +func (e *ExporterMock) SendRuleAlert(_ types.RuleFailure) { } func (e *ExporterMock) SendMalwareAlert(_ malwaremanager.MalwareResult) { diff --git a/pkg/exporters/exporters_bus.go b/pkg/exporters/exporters_bus.go index 08f4c718e..dc43fab03 100644 --- a/pkg/exporters/exporters_bus.go +++ b/pkg/exporters/exporters_bus.go @@ -5,7 +5,7 @@ import ( "github.com/armosec/armoapi-go/armotypes" "github.com/kubescape/node-agent/pkg/malwaremanager" - "github.com/kubescape/node-agent/pkg/ruleengine" + "github.com/kubescape/node-agent/pkg/rulemanager/types" "github.com/kubescape/go-logger" "github.com/kubescape/go-logger/helpers" @@ -71,7 +71,7 @@ func InitExporters(exportersConfig ExportersConfig, clusterName string, nodeName return &ExporterBus{exporters: exporters} } -func (e *ExporterBus) SendRuleAlert(failedRule ruleengine.RuleFailure) { +func (e *ExporterBus) SendRuleAlert(failedRule types.RuleFailure) { for _, exporter := range e.exporters { exporter.SendRuleAlert(failedRule) } diff --git a/pkg/exporters/http_exporter.go b/pkg/exporters/http_exporter.go index c352dfded..568505353 100644 --- a/pkg/exporters/http_exporter.go +++ b/pkg/exporters/http_exporter.go @@ -13,8 +13,7 @@ import ( "time" "github.com/kubescape/node-agent/pkg/malwaremanager" - "github.com/kubescape/node-agent/pkg/ruleengine" - ruleenginev1 "github.com/kubescape/node-agent/pkg/ruleengine/v1" + "github.com/kubescape/node-agent/pkg/rulemanager/types" "github.com/kubescape/go-logger" "github.com/kubescape/go-logger/helpers" @@ -131,7 +130,7 @@ func (config *HTTPExporterConfig) Validate() error { } // SendRuleAlert implements the Exporter interface -func 
(e *HTTPExporter) SendRuleAlert(failedRule ruleengine.RuleFailure) { +func (e *HTTPExporter) SendRuleAlert(failedRule types.RuleFailure) { ctx, cancel := context.WithTimeout(context.Background(), time.Duration(e.config.TimeoutSeconds)*time.Second) defer cancel() @@ -151,7 +150,7 @@ func (e *HTTPExporter) SendMalwareAlert(malwareResult malwaremanager.MalwareResu } // Internal methods with context support -func (e *HTTPExporter) sendRuleAlertWithContext(ctx context.Context, failedRule ruleengine.RuleFailure) error { +func (e *HTTPExporter) sendRuleAlertWithContext(ctx context.Context, failedRule types.RuleFailure) error { if e.shouldSendLimitAlert() { return e.sendAlertLimitReached(ctx) } @@ -169,7 +168,7 @@ func (e *HTTPExporter) sendMalwareAlertWithContext(ctx context.Context, result m return e.sendAlert(ctx, alert, result.GetRuntimeProcessDetails(), nil) } -func (e *HTTPExporter) createRuleAlert(failedRule ruleengine.RuleFailure) apitypes.RuntimeAlert { +func (e *HTTPExporter) createRuleAlert(failedRule types.RuleFailure) apitypes.RuntimeAlert { k8sDetails := failedRule.GetRuntimeAlertK8sDetails() k8sDetails.NodeName = e.nodeName k8sDetails.ClusterName = e.clusterName @@ -332,8 +331,8 @@ func (e *HTTPExporter) sendAlertLimitReached(ctx context.Context) error { HostName: e.host, AlertType: apitypes.AlertTypeRule, BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ - AlertName: string(AlertTypeLimitReached), - Severity: ruleenginev1.RulePrioritySystemIssue, + AlertName: string(AlertTypeLimitReached), + // Severity: ruleengine.RulePrioritySystemIssue, FixSuggestions: "Check logs for more information", }, RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ diff --git a/pkg/exporters/http_exporter_test.go b/pkg/exporters/http_exporter_test.go index f326117d6..791bc7bba 100644 --- a/pkg/exporters/http_exporter_test.go +++ b/pkg/exporters/http_exporter_test.go @@ -9,8 +9,7 @@ import ( "time" mmtypes "github.com/kubescape/node-agent/pkg/malwaremanager/v1/types" - "github.com/kubescape/node-agent/pkg/ruleengine" - ruleenginev1 "github.com/kubescape/node-agent/pkg/ruleengine/v1" + "github.com/kubescape/node-agent/pkg/rulemanager/types" apitypes "github.com/armosec/armoapi-go/armotypes" igtypes "github.com/inspektor-gadget/inspektor-gadget/pkg/types" @@ -37,10 +36,9 @@ func TestSendRuleAlert(t *testing.T) { assert.NoError(t, err) // Create a mock rule failure - failedRule := &ruleenginev1.GenericRuleFailure{ + failedRule := &types.GenericRuleFailure{ BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ AlertName: "testrule", - Severity: ruleengine.RulePriorityCritical, }, RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ ContainerID: "testcontainerid", @@ -72,7 +70,6 @@ func TestSendRuleAlert(t *testing.T) { assert.Equal(t, "kubescape.io/v1", alertsList.APIVersion) assert.Equal(t, 1, len(alertsList.Spec.Alerts)) alert := alertsList.Spec.Alerts[0] - assert.Equal(t, ruleengine.RulePriorityCritical, alert.Severity) assert.Equal(t, "testrule", alert.AlertName) assert.Equal(t, "testcontainerid", alert.ContainerID) assert.Equal(t, "testcontainer", alert.ContainerName) @@ -100,7 +97,7 @@ func TestSendRuleAlertRateReached(t *testing.T) { assert.NoError(t, err) // Create a mock rule failure - failedRule := &ruleenginev1.GenericRuleFailure{ + failedRule := &types.GenericRuleFailure{ BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ AlertName: "testrule", }, diff --git a/pkg/exporters/stdout_exporter.go b/pkg/exporters/stdout_exporter.go index 233bff6ee..0226e6ce2 100644 --- a/pkg/exporters/stdout_exporter.go +++ 
b/pkg/exporters/stdout_exporter.go @@ -6,7 +6,7 @@ import ( apitypes "github.com/armosec/armoapi-go/armotypes" "github.com/kubescape/node-agent/pkg/malwaremanager" - "github.com/kubescape/node-agent/pkg/ruleengine" + "github.com/kubescape/node-agent/pkg/rulemanager/types" "github.com/kubescape/node-agent/pkg/utils" log "github.com/sirupsen/logrus" @@ -36,7 +36,7 @@ func InitStdoutExporter(useStdout *bool, cloudmetadata *apitypes.CloudMetadata) } } -func (exporter *StdoutExporter) SendRuleAlert(failedRule ruleengine.RuleFailure) { +func (exporter *StdoutExporter) SendRuleAlert(failedRule types.RuleFailure) { processTree := failedRule.GetRuntimeProcessDetails().ProcessTree exporter.logger.WithFields(log.Fields{ "message": failedRule.GetRuleAlert().RuleDescription, diff --git a/pkg/exporters/stdout_exporter_test.go b/pkg/exporters/stdout_exporter_test.go index bf29d853c..b06e18f11 100644 --- a/pkg/exporters/stdout_exporter_test.go +++ b/pkg/exporters/stdout_exporter_test.go @@ -6,7 +6,7 @@ import ( "time" igtypes "github.com/inspektor-gadget/inspektor-gadget/pkg/types" - "github.com/kubescape/node-agent/pkg/ruleengine/v1" + "github.com/kubescape/node-agent/pkg/rulemanager/types" apitypes "github.com/armosec/armoapi-go/armotypes" "github.com/stretchr/testify/assert" @@ -53,7 +53,7 @@ func TestStdoutExporter_SendAlert(t *testing.T) { exporter := InitStdoutExporter(nil, nil) assert.NotNil(t, exporter) - exporter.SendRuleAlert(&ruleengine.GenericRuleFailure{ + exporter.SendRuleAlert(&types.GenericRuleFailure{ BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ AlertName: "testrule", }, diff --git a/pkg/exporters/syslog_exporter.go b/pkg/exporters/syslog_exporter.go index 6bb580e77..8f09e8ec6 100644 --- a/pkg/exporters/syslog_exporter.go +++ b/pkg/exporters/syslog_exporter.go @@ -7,7 +7,7 @@ import ( "time" "github.com/kubescape/node-agent/pkg/malwaremanager" - "github.com/kubescape/node-agent/pkg/ruleengine" + "github.com/kubescape/node-agent/pkg/rulemanager/types" "github.com/kubescape/go-logger" "github.com/kubescape/go-logger/helpers" @@ -46,7 +46,7 @@ func InitSyslogExporter(syslogHost string) *SyslogExporter { } // SendRuleAlert sends an alert to syslog (RFC 5424) - https://tools.ietf.org/html/rfc5424 -func (se *SyslogExporter) SendRuleAlert(failedRule ruleengine.RuleFailure) { +func (se *SyslogExporter) SendRuleAlert(failedRule types.RuleFailure) { message := rfc5424.Message{ Priority: rfc5424.Error, Timestamp: failedRule.GetBaseRuntimeAlert().Timestamp, diff --git a/pkg/exporters/syslog_exporter_test.go b/pkg/exporters/syslog_exporter_test.go index d96591b62..109cd1f2d 100644 --- a/pkg/exporters/syslog_exporter_test.go +++ b/pkg/exporters/syslog_exporter_test.go @@ -7,7 +7,7 @@ import ( "time" mmtypes "github.com/kubescape/node-agent/pkg/malwaremanager/v1/types" - ruleenginev1 "github.com/kubescape/node-agent/pkg/ruleengine/v1" + "github.com/kubescape/node-agent/pkg/rulemanager/types" "gopkg.in/mcuadros/go-syslog.v2" @@ -65,7 +65,7 @@ func TestSyslogExporter(t *testing.T) { } // Send an alert - syslogExp.SendRuleAlert(&ruleenginev1.GenericRuleFailure{ + syslogExp.SendRuleAlert(&types.GenericRuleFailure{ BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ AlertName: "testrule", }, @@ -80,7 +80,7 @@ func TestSyslogExporter(t *testing.T) { }, }) - syslogExp.SendRuleAlert(&ruleenginev1.GenericRuleFailure{ + syslogExp.SendRuleAlert(&types.GenericRuleFailure{ BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ AlertName: "testrule", }, diff --git a/pkg/exporters/utils.go b/pkg/exporters/utils.go deleted file mode 
100644 index 524908409..000000000 --- a/pkg/exporters/utils.go +++ /dev/null @@ -1,31 +0,0 @@ -package exporters - -import ( - "github.com/kubescape/node-agent/pkg/ruleengine/v1" -) - -func PriorityToStatus(priority int) string { - switch priority { - case ruleengine.RulePriorityNone: - return "none" - case ruleengine.RulePriorityLow: - return "low" - case ruleengine.RulePriorityMed: - return "medium" - case ruleengine.RulePriorityHigh: - return "high" - case ruleengine.RulePriorityCritical: - return "critical" - case ruleengine.RulePrioritySystemIssue: - return "system_issue" - default: - if priority < ruleengine.RulePriorityMed { - return "low" - } else if priority < ruleengine.RulePriorityHigh { - return "medium" - } else if priority < ruleengine.RulePriorityCritical { - return "high" - } - return "unknown" - } -} diff --git a/pkg/exporters/utils_test.go b/pkg/exporters/utils_test.go deleted file mode 100644 index a2ba9bd2b..000000000 --- a/pkg/exporters/utils_test.go +++ /dev/null @@ -1,74 +0,0 @@ -package exporters - -import ( - "testing", - - "github.com/kubescape/node-agent/pkg/ruleengine/v1" -) - -func TestPriorityToStatus(t *testing.T) { - tests := []struct { - name string - priority int - want string - }{ - { - name: "none", - priority: ruleengine.RulePriorityNone, - want: "none", - }, - { - name: "low", - priority: ruleengine.RulePriorityLow, - want: "low", - }, - { - name: "medium", - priority: ruleengine.RulePriorityMed, - want: "medium", - }, - { - name: "high", - priority: ruleengine.RulePriorityHigh, - want: "high", - }, - { - name: "critical", - priority: ruleengine.RulePriorityCritical, - want: "critical", - }, - { - name: "system_issue", - priority: ruleengine.RulePrioritySystemIssue, - want: "system_issue", - }, - { - name: "unknown", - priority: 100, - want: "unknown", - }, - { - name: "low2", - priority: ruleengine.RulePriorityMed - 1, - want: "low", - }, - { - name: "medium2", - priority: ruleengine.RulePriorityHigh - 1, - want: "medium", - }, - { - name: "high2", - priority: ruleengine.RulePriorityCritical - 1, - want: "high", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := PriorityToStatus(tt.priority); got != tt.want { - t.Errorf("PriorityToStatus() = %v, want %v", got, tt.want) - } - }) - } - -} diff --git a/pkg/metricsmanager/metrics_manager_interface.go b/pkg/metricsmanager/metrics_manager_interface.go index baee01ae7..dc1b5907b 100644 --- a/pkg/metricsmanager/metrics_manager_interface.go +++ b/pkg/metricsmanager/metrics_manager_interface.go @@ -1,6 +1,8 @@ package metricsmanager import ( + "time" + "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/top" toptypes "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/top/ebpf/types" "github.com/kubescape/node-agent/pkg/utils" @@ -14,6 +16,7 @@ type MetricsManager interface { ReportFailedEvent() ReportRuleProcessed(ruleID string) ReportRuleAlert(ruleID string) + ReportRuleEvaluationTime(ruleID string, eventType utils.EventType, duration time.Duration) ReportEbpfStats(stats *top.Event[toptypes.Stats]) ReportContainerStart() ReportContainerStop() diff --git a/pkg/metricsmanager/metrics_manager_mock.go b/pkg/metricsmanager/metrics_manager_mock.go index a0f6b3f93..3d8b4ddc0 100644 --- a/pkg/metricsmanager/metrics_manager_mock.go +++ b/pkg/metricsmanager/metrics_manager_mock.go @@ -2,6 +2,7 @@ package metricsmanager import ( "sync/atomic" + "time" "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/top" toptypes
"github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/top/ebpf/types" @@ -17,6 +18,7 @@ type MetricsMock struct { RuleProcessedCounter maps.SafeMap[string, int] RuleAlertCounter maps.SafeMap[string, int] EventCounter maps.SafeMap[utils.EventType, int] + RuleEvaluationTime maps.SafeMap[string, time.Duration] // key: "ruleID:eventType" } func NewMetricsMock() *MetricsMock { @@ -33,6 +35,7 @@ func (m *MetricsMock) Destroy() { m.RuleProcessedCounter.Clear() m.RuleAlertCounter.Clear() m.EventCounter.Clear() + m.RuleEvaluationTime.Clear() } func (m *MetricsMock) ReportFailedEvent() { @@ -51,6 +54,11 @@ func (m *MetricsMock) ReportRuleAlert(ruleID string) { m.RuleAlertCounter.Set(ruleID, m.RuleAlertCounter.Get(ruleID)+1) } +func (m *MetricsMock) ReportRuleEvaluationTime(ruleID string, eventType utils.EventType, duration time.Duration) { + key := ruleID + ":" + string(eventType) + m.RuleEvaluationTime.Set(key, duration) +} + func (m *MetricsMock) ReportEbpfStats(stats *top.Event[toptypes.Stats]) { } diff --git a/pkg/metricsmanager/prometheus/prometheus.go b/pkg/metricsmanager/prometheus/prometheus.go index 9add46988..29dd01bb1 100644 --- a/pkg/metricsmanager/prometheus/prometheus.go +++ b/pkg/metricsmanager/prometheus/prometheus.go @@ -3,6 +3,7 @@ package metricsmanager import ( "net/http" "sync" + "time" "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/top" toptypes "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/top/ebpf/types" @@ -19,6 +20,7 @@ const ( prometheusRuleIdLabel = "rule_id" programTypeLabel = "program_type" programNameLabel = "program_name" + eventTypeLabel = "event_type" ) var _ metricsmanager.MetricsManager = (*PrometheusMetric)(nil) @@ -40,6 +42,7 @@ type PrometheusMetric struct { ebpfIoUringCounter prometheus.Counter ruleCounter *prometheus.CounterVec alertCounter *prometheus.CounterVec + ruleEvaluationTime *prometheus.HistogramVec // Program ID metrics programRuntimeGauge *prometheus.GaugeVec @@ -127,6 +130,11 @@ func NewPrometheusMetric() *PrometheusMetric { Name: "node_agent_alert_counter", Help: "The total number of alerts sent by the engine", }, []string{prometheusRuleIdLabel}), + ruleEvaluationTime: promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "node_agent_rule_evaluation_time_seconds", + Help: "Time taken to evaluate a rule by rule ID and event type", + Buckets: prometheus.ExponentialBuckets(0.001, 2, 10), // 1ms to ~512ms + }, []string{prometheusRuleIdLabel, eventTypeLabel}), // Program ID metrics programRuntimeGauge: promauto.NewGaugeVec(prometheus.GaugeOpts{ @@ -205,6 +213,7 @@ func (p *PrometheusMetric) Destroy() { prometheus.Unregister(p.ebpfFailedCounter) prometheus.Unregister(p.ruleCounter) prometheus.Unregister(p.alertCounter) + prometheus.Unregister(p.ruleEvaluationTime) prometheus.Unregister(p.ebpfSymlinkCounter) prometheus.Unregister(p.ebpfHardlinkCounter) prometheus.Unregister(p.ebpfSSHCounter) @@ -315,6 +324,14 @@ func (p *PrometheusMetric) ReportRuleAlert(ruleID string) { p.getCachedAlertCounter(ruleID).Inc() } +func (p *PrometheusMetric) ReportRuleEvaluationTime(ruleID string, eventType utils.EventType, duration time.Duration) { + labels := prometheus.Labels{ + prometheusRuleIdLabel: ruleID, + eventTypeLabel: string(eventType), + } + p.ruleEvaluationTime.With(labels).Observe(duration.Seconds()) + } + func (p *PrometheusMetric) ReportEbpfStats(stats *top.Event[toptypes.Stats]) { logger.L().Debug("reporting ebpf stats", helpers.Int("stats_count", len(stats.Stats))) diff --git a/pkg/ruleengine/v1/mock.go 
b/pkg/objectcache/v1/mock.go similarity index 86% rename from pkg/ruleengine/v1/mock.go rename to pkg/objectcache/v1/mock.go index 673170ebb..c6cdeeb94 100644 --- a/pkg/ruleengine/v1/mock.go +++ b/pkg/objectcache/v1/mock.go @@ -1,30 +1,27 @@ -package ruleengine +package objectcache import ( "context" + corev1 "k8s.io/api/core/v1" + "github.com/goradd/maps" containercollection "github.com/inspektor-gadget/inspektor-gadget/pkg/container-collection" "github.com/kubescape/node-agent/pkg/objectcache" "github.com/kubescape/node-agent/pkg/objectcache/applicationprofilecache/callstackcache" "github.com/kubescape/node-agent/pkg/watcher" "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" - corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" ) -var _ objectcache.ApplicationProfileCache = (*RuleObjectCacheMock)(nil) -var _ objectcache.K8sObjectCache = (*RuleObjectCacheMock)(nil) -var _ objectcache.NetworkNeighborhoodCache = (*RuleObjectCacheMock)(nil) -var _ objectcache.DnsCache = (*RuleObjectCacheMock)(nil) - +// RuleObjectCacheMock implementation as provided type RuleObjectCacheMock struct { profile *v1beta1.ApplicationProfile podSpec *corev1.PodSpec podStatus *corev1.PodStatus nn *v1beta1.NetworkNeighborhood dnsCache map[string]string - containerIDToSharedData maps.SafeMap[string, *objectcache.WatchedContainerData] + ContainerIDToSharedData *maps.SafeMap[string, *objectcache.WatchedContainerData] } func (r *RuleObjectCacheMock) GetApplicationProfile(string) *v1beta1.ApplicationProfile { @@ -46,9 +43,11 @@ func (r *RuleObjectCacheMock) ApplicationProfileCache() objectcache.ApplicationP func (r *RuleObjectCacheMock) GetPodSpec(_, _ string) *corev1.PodSpec { return r.podSpec } + func (r *RuleObjectCacheMock) GetPodStatus(_, _ string) *corev1.PodStatus { return r.podStatus } + func (r *RuleObjectCacheMock) SetPodSpec(podSpec *corev1.PodSpec) { r.podSpec = podSpec } @@ -70,17 +69,18 @@ func (r *RuleObjectCacheMock) GetPods() []*corev1.Pod { } func (r *RuleObjectCacheMock) SetSharedContainerData(containerID string, data *objectcache.WatchedContainerData) { - r.containerIDToSharedData.Set(containerID, data) + r.ContainerIDToSharedData.Set(containerID, data) } + func (r *RuleObjectCacheMock) GetSharedContainerData(containerID string) *objectcache.WatchedContainerData { - if data, ok := r.containerIDToSharedData.Load(containerID); ok { + if data, ok := r.ContainerIDToSharedData.Load(containerID); ok { return data } - return nil } + func (r *RuleObjectCacheMock) DeleteSharedContainerData(containerID string) { - r.containerIDToSharedData.Delete(containerID) + r.ContainerIDToSharedData.Delete(containerID) } func (r *RuleObjectCacheMock) K8sObjectCache() objectcache.K8sObjectCache { @@ -108,10 +108,9 @@ func (r *RuleObjectCacheMock) SetDnsCache(dnsCache map[string]string) { } func (r *RuleObjectCacheMock) ResolveIpToDomain(ip string) string { - if _, ok := r.dnsCache[ip]; ok { - return r.dnsCache[ip] + if domain, ok := r.dnsCache[ip]; ok { + return domain } - return "" } diff --git a/pkg/processtree/creator/exit_manager.go b/pkg/processtree/creator/exit_manager.go index d1b478729..5d645f8a3 100644 --- a/pkg/processtree/creator/exit_manager.go +++ b/pkg/processtree/creator/exit_manager.go @@ -174,5 +174,4 @@ func (pt *processTreeCreatorImpl) exitByPid(pid uint32) { pt.processMap.Delete(pid) delete(pt.pendingExits, pid) - } diff --git a/pkg/processtree/creator/processtree_creator.go b/pkg/processtree/creator/processtree_creator.go index 3c5793318..3e1bc1d5f 100644 --- 
a/pkg/processtree/creator/processtree_creator.go +++ b/pkg/processtree/creator/processtree_creator.go @@ -12,7 +12,7 @@ import ( containerprocesstree "github.com/kubescape/node-agent/pkg/processtree/container" "github.com/kubescape/node-agent/pkg/processtree/conversion" "github.com/kubescape/node-agent/pkg/processtree/reparenting" -) +) type processTreeCreatorImpl struct { processMap maps.SafeMap[uint32, *apitypes.Process] // PID -> Process diff --git a/pkg/rulebindingmanager/cache/cache.go b/pkg/rulebindingmanager/cache/cache.go index fd0b7aed6..01fa6dcf2 100644 --- a/pkg/rulebindingmanager/cache/cache.go +++ b/pkg/rulebindingmanager/cache/cache.go @@ -3,16 +3,20 @@ package cache import ( "context" "strings" + "sync" + "time" mapset "github.com/deckarep/golang-set/v2" "github.com/goradd/maps" + "github.com/hashicorp/golang-lru/v2/expirable" "github.com/kubescape/go-logger" "github.com/kubescape/go-logger/helpers" + "github.com/kubescape/node-agent/pkg/config" "github.com/kubescape/node-agent/pkg/k8sclient" "github.com/kubescape/node-agent/pkg/rulebindingmanager" typesv1 "github.com/kubescape/node-agent/pkg/rulebindingmanager/types/v1" - "github.com/kubescape/node-agent/pkg/ruleengine" - ruleenginev1 "github.com/kubescape/node-agent/pkg/ruleengine/v1" + "github.com/kubescape/node-agent/pkg/rulemanager/rulecreator" + rulemanagertypesv1 "github.com/kubescape/node-agent/pkg/rulemanager/types/v1" "github.com/kubescape/node-agent/pkg/utils" "github.com/kubescape/node-agent/pkg/watcher" corev1 "k8s.io/api/core/v1" @@ -26,47 +30,71 @@ var _ rulebindingmanager.RuleBindingCache = (*RBCache)(nil) var _ watcher.Adaptor = (*RBCache)(nil) type RBCache struct { + config config.Config nodeName string k8sClient k8sclient.K8sClientInterface allPods mapset.Set[string] // set of all pods (also pods without rules) podToRBNames maps.SafeMap[string, mapset.Set[string]] // podID -> []rule binding names rbNameToRB maps.SafeMap[string, typesv1.RuntimeAlertRuleBinding] // rule binding name -> rule binding - rbNameToRules maps.SafeMap[string, []ruleengine.RuleEvaluator] // rule binding name -> []created rules + rbNameToRules maps.SafeMap[string, []rulemanagertypesv1.Rule] // rule binding name -> []created rules rbNameToPods maps.SafeMap[string, mapset.Set[string]] // rule binding name -> podIDs - ruleCreator ruleengine.RuleCreator + ruleCreator rulecreator.RuleCreator watchResources []watcher.WatchResource notifiers []*chan rulebindingmanager.RuleBindingNotify + mutex sync.RWMutex + rulesForPod *expirable.LRU[string, []rulemanagertypesv1.Rule] } -func NewCache(nodeName string, k8sClient k8sclient.K8sClientInterface) *RBCache { +func NewCache(config config.Config, k8sClient k8sclient.K8sClientInterface, ruleCreator rulecreator.RuleCreator) *RBCache { return &RBCache{ - nodeName: nodeName, + config: config, + nodeName: config.NodeName, k8sClient: k8sClient, - ruleCreator: ruleenginev1.NewRuleCreator(), + ruleCreator: ruleCreator, allPods: mapset.NewSet[string](), rbNameToRB: maps.SafeMap[string, typesv1.RuntimeAlertRuleBinding]{}, podToRBNames: maps.SafeMap[string, mapset.Set[string]]{}, rbNameToPods: maps.SafeMap[string, mapset.Set[string]]{}, - watchResources: resourcesToWatch(nodeName), + watchResources: resourcesToWatch(config.NodeName), + rulesForPod: expirable.NewLRU[string, []rulemanagertypesv1.Rule](1000, nil, 5*time.Second), } } // ----------------- watcher.WatchResources methods ----------------- func (c *RBCache) WatchResources() []watcher.WatchResource { + c.mutex.RLock() + defer c.mutex.RUnlock() return 
c.watchResources } // ------------------ rulebindingmanager.RuleBindingCache methods ----------------------- -func (c *RBCache) ListRulesForPod(namespace, name string) []ruleengine.RuleEvaluator { - var rulesSlice []ruleengine.RuleEvaluator +func (c *RBCache) ListRulesForPod(namespace, name string) []rulemanagertypesv1.Rule { + if c.config.IgnoreRuleBindings { + podID := utils.CreateK8sPodID(namespace, name) + rules, ok := c.rulesForPod.Get(podID) + if ok { + return rules + } + rules = c.getRules() + c.rulesForPod.Add(podID, rules) + return rules + } podID := utils.CreateK8sPodID(namespace, name) - if !c.podToRBNames.Has(podID) { + + var rulesSlice []rulemanagertypesv1.Rule + + rulesSlice, ok := c.rulesForPod.Get(podID) + if ok { return rulesSlice } + if !c.podToRBNames.Has(podID) { + return nil + } + //append rules for pod rbNames := c.podToRBNames.Get(podID) for _, i := range rbNames.ToSlice() { @@ -75,16 +103,23 @@ func (c *RBCache) ListRulesForPod(namespace, name string) []ruleengine.RuleEvalu } } + c.rulesForPod.Add(podID, rulesSlice) + return rulesSlice } func (c *RBCache) AddNotifier(n *chan rulebindingmanager.RuleBindingNotify) { + c.mutex.Lock() + defer c.mutex.Unlock() c.notifiers = append(c.notifiers, n) } // ------------------ watcher.Watcher methods ----------------------- func (c *RBCache) AddHandler(ctx context.Context, obj runtime.Object) { + c.mutex.Lock() + defer c.mutex.Unlock() + var rbs []rulebindingmanager.RuleBindingNotify if pod, ok := obj.(*corev1.Pod); ok { @@ -106,6 +141,9 @@ func (c *RBCache) AddHandler(ctx context.Context, obj runtime.Object) { } func (c *RBCache) ModifyHandler(ctx context.Context, obj runtime.Object) { + c.mutex.Lock() + defer c.mutex.Unlock() + var rbs []rulebindingmanager.RuleBindingNotify if pod, ok := obj.(*corev1.Pod); ok { @@ -127,6 +165,9 @@ func (c *RBCache) ModifyHandler(ctx context.Context, obj runtime.Object) { } func (c *RBCache) DeleteHandler(_ context.Context, obj runtime.Object) { + c.mutex.Lock() + defer c.mutex.Unlock() + var rbs []rulebindingmanager.RuleBindingNotify if pod, ok := obj.(*corev1.Pod); ok { @@ -143,6 +184,16 @@ func (c *RBCache) DeleteHandler(_ context.Context, obj runtime.Object) { } } +func (c *RBCache) RefreshRuleBindingsRules() { + c.mutex.Lock() + defer c.mutex.Unlock() + for _, rbName := range c.rbNameToRB.Keys() { + rb := c.rbNameToRB.Get(rbName) + c.rbNameToRules.Set(rbName, c.createRules(rb.Spec.Rules)) + } + logger.L().Info("RBCache - refreshed rule bindings rules", helpers.Int("ruleBindings", len(c.rbNameToRB.Keys()))) +} + // ----------------- RuleBinding manager methods ----------------- // AddRuleBinding adds a rule binding to the cache @@ -338,47 +389,35 @@ func (c *RBCache) deletePod(uniqueName string) { c.podToRBNames.Delete(uniqueName) } -func (c *RBCache) createRules(rulesForPod []typesv1.RuntimeAlertRuleBindingRule) []ruleengine.RuleEvaluator { - var rules []ruleengine.RuleEvaluator +func (c *RBCache) createRules(rulesForPod []typesv1.RuntimeAlertRuleBindingRule) []rulemanagertypesv1.Rule { + var rules []rulemanagertypesv1.Rule // Get the rules that are bound to the container for _, ruleParams := range rulesForPod { rules = append(rules, c.createRule(&ruleParams)...) 
} return rules } -func (c *RBCache) createRule(r *typesv1.RuntimeAlertRuleBindingRule) []ruleengine.RuleEvaluator { - +func (c *RBCache) createRule(r *typesv1.RuntimeAlertRuleBindingRule) []rulemanagertypesv1.Rule { if r.RuleID != "" { - if ruleDesc := c.ruleCreator.CreateRuleByID(r.RuleID); ruleDesc != nil { - if r.Parameters != nil { - ruleDesc.SetParameters(r.Parameters) - } - return []ruleengine.RuleEvaluator{ruleDesc} - } + rule := c.ruleCreator.CreateRuleByID(r.RuleID) + return []rulemanagertypesv1.Rule{rule} } if r.RuleName != "" { - if ruleDesc := c.ruleCreator.CreateRuleByName(r.RuleName); ruleDesc != nil { - if r.Parameters != nil { - ruleDesc.SetParameters(r.Parameters) - } - return []ruleengine.RuleEvaluator{ruleDesc} - } + rule := c.ruleCreator.CreateRuleByName(r.RuleName) + return []rulemanagertypesv1.Rule{rule} } if len(r.RuleTags) > 0 { - if ruleTagsDescs := c.ruleCreator.CreateRulesByTags(r.RuleTags); ruleTagsDescs != nil { - for _, ruleDesc := range ruleTagsDescs { - if r.Parameters != nil { - ruleDesc.SetParameters(r.Parameters) - } - } - return ruleTagsDescs - } + rules := c.ruleCreator.CreateRulesByTags(r.RuleTags) + return rules } - return []ruleengine.RuleEvaluator{} + + return []rulemanagertypesv1.Rule{} } // Expose the rule creator to be able to create rules from third party. -func (c *RBCache) GetRuleCreator() ruleengine.RuleCreator { +func (c *RBCache) GetRuleCreator() rulecreator.RuleCreator { + c.mutex.RLock() + defer c.mutex.RUnlock() return c.ruleCreator } @@ -405,3 +444,7 @@ func diff(a, b []rulebindingmanager.RuleBindingNotify) []rulebindingmanager.Rule return diff } + +func (c *RBCache) getRules() []rulemanagertypesv1.Rule { + return c.ruleCreator.CreateAllRules() +} diff --git a/pkg/rulebindingmanager/cache/cache_test.go b/pkg/rulebindingmanager/cache/cache_test.go index 68b9ab907..ed0516009 100644 --- a/pkg/rulebindingmanager/cache/cache_test.go +++ b/pkg/rulebindingmanager/cache/cache_test.go @@ -12,7 +12,7 @@ import ( "github.com/kubescape/node-agent/mocks" "github.com/kubescape/node-agent/pkg/rulebindingmanager" typesv1 "github.com/kubescape/node-agent/pkg/rulebindingmanager/types/v1" - "github.com/kubescape/node-agent/pkg/ruleengine" + rulemanagertypesv1 "github.com/kubescape/node-agent/pkg/rulemanager/types/v1" "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -166,7 +166,7 @@ func TestRuntimeObjAddHandler(t *testing.T) { r := tt.args.c.ListRulesForPod(tt.args.pod.GetNamespace(), tt.args.pod.GetName()) assert.Equal(t, len(tt.expectedRules), len(r)) for i := range r { - assert.Equal(t, tt.expectedRules[i].ruleID, r[i].ID()) + assert.Equal(t, tt.expectedRules[i].ruleID, r[i].ID) } }) @@ -223,56 +223,6 @@ func TestDeletePod(t *testing.T) { }) } } -func TestCreateRule(t *testing.T) { - c := NewCacheMock("") - tests := []struct { - name string - rule *typesv1.RuntimeAlertRuleBindingRule - expected []ruleengine.RuleEvaluator - }{ - { - name: "Test with RuleID", - rule: &typesv1.RuntimeAlertRuleBindingRule{ - RuleID: "rule-1", - Parameters: map[string]interface{}{"param1": "value1"}, - }, - expected: []ruleengine.RuleEvaluator{&ruleengine.RuleMock{RuleID: "rule-1", RuleParameters: map[string]interface{}{"param1": "value1"}}}, - }, - { - name: "Test with RuleName", - rule: &typesv1.RuntimeAlertRuleBindingRule{ - RuleName: "rule-1", - Parameters: map[string]interface{}{"param1": "value1"}, - }, - expected: []ruleengine.RuleEvaluator{&ruleengine.RuleMock{RuleName: "rule-1", RuleParameters: 
map[string]interface{}{"param1": "value1"}}}, - }, - { - name: "Test with RuleTags", - rule: &typesv1.RuntimeAlertRuleBindingRule{ - RuleTags: []string{"tag1", "tag2"}, - Parameters: map[string]interface{}{"param1": "value1"}, - }, - expected: []ruleengine.RuleEvaluator{&ruleengine.RuleMock{RuleName: "tag1", RuleParameters: map[string]interface{}{"param1": "value1"}}, &ruleengine.RuleMock{RuleName: "tag2", RuleParameters: map[string]interface{}{"param1": "value1"}}}, - }, - { - name: "Test with no RuleID, RuleName, or RuleTags", - rule: &typesv1.RuntimeAlertRuleBindingRule{}, - expected: []ruleengine.RuleEvaluator{}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := c.createRule(tt.rule) - assert.Equal(t, len(tt.expected), len(result)) - for i := range result { - assert.Equal(t, tt.expected[i].Name(), result[i].Name()) - assert.Equal(t, tt.expected[i].ID(), result[i].ID()) - assert.Equal(t, tt.expected[i].GetParameters(), result[i].GetParameters()) - } - }) - } -} func TestDeleteHandler(t *testing.T) { type expected struct { @@ -613,7 +563,7 @@ func TestDeleteRuleBinding(t *testing.T) { for k, v := range tt.podToRBNames { for _, s := range v { c.rbNameToRB.Set(s, typesv1.RuntimeAlertRuleBinding{}) - c.rbNameToRules.Set(s, []ruleengine.RuleEvaluator{&ruleengine.RuleMock{}}) + c.rbNameToRules.Set(s, []rulemanagertypesv1.Rule{rulemanagertypesv1.Rule{}}) if !c.rbNameToPods.Has(s) { c.rbNameToPods.Set(s, mapset.NewSet[string]()) diff --git a/pkg/rulebindingmanager/cache/mock.go b/pkg/rulebindingmanager/cache/mock.go index 1b5cbe11a..892043c23 100644 --- a/pkg/rulebindingmanager/cache/mock.go +++ b/pkg/rulebindingmanager/cache/mock.go @@ -1,10 +1,14 @@ package cache import ( + "time" + mapset "github.com/deckarep/golang-set/v2" "github.com/goradd/maps" + "github.com/hashicorp/golang-lru/v2/expirable" "github.com/kubescape/k8s-interface/k8sinterface" - "github.com/kubescape/node-agent/pkg/ruleengine" + "github.com/kubescape/node-agent/pkg/rulemanager/rulecreator" + rulemanagertypesv1 "github.com/kubescape/node-agent/pkg/rulemanager/types/v1" ) func NewCacheMock(nodeName string) *RBCache { @@ -12,8 +16,9 @@ func NewCacheMock(nodeName string) *RBCache { nodeName: nodeName, allPods: mapset.NewSet[string](), k8sClient: k8sinterface.NewKubernetesApiMock(), - ruleCreator: &ruleengine.RuleCreatorMock{}, + ruleCreator: &rulecreator.RuleCreatorMock{}, podToRBNames: maps.SafeMap[string, mapset.Set[string]]{}, rbNameToPods: maps.SafeMap[string, mapset.Set[string]]{}, + rulesForPod: expirable.NewLRU[string, []rulemanagertypesv1.Rule](1000, nil, 60*time.Second), } } diff --git a/pkg/rulebindingmanager/rulebindingmanager_interface.go b/pkg/rulebindingmanager/rulebindingmanager_interface.go index 993dad242..5caeabb92 100644 --- a/pkg/rulebindingmanager/rulebindingmanager_interface.go +++ b/pkg/rulebindingmanager/rulebindingmanager_interface.go @@ -1,11 +1,13 @@ package rulebindingmanager import ( - "github.com/kubescape/node-agent/pkg/ruleengine" + "github.com/kubescape/node-agent/pkg/rulemanager/rulecreator" + typesv1 "github.com/kubescape/node-agent/pkg/rulemanager/types/v1" ) type RuleBindingCache interface { - ListRulesForPod(namespace, name string) []ruleengine.RuleEvaluator + ListRulesForPod(namespace, name string) []typesv1.Rule AddNotifier(*chan RuleBindingNotify) - GetRuleCreator() ruleengine.RuleCreator + GetRuleCreator() rulecreator.RuleCreator + RefreshRuleBindingsRules() } diff --git a/pkg/rulebindingmanager/rulebindingmanager_mock.go 
b/pkg/rulebindingmanager/rulebindingmanager_mock.go index fa6579868..1ea227f34 100644 --- a/pkg/rulebindingmanager/rulebindingmanager_mock.go +++ b/pkg/rulebindingmanager/rulebindingmanager_mock.go @@ -1,18 +1,24 @@ package rulebindingmanager -import "github.com/kubescape/node-agent/pkg/ruleengine" +import ( + "github.com/kubescape/node-agent/pkg/rulemanager/rulecreator" + typesv1 "github.com/kubescape/node-agent/pkg/rulemanager/types/v1" +) var _ RuleBindingCache = (*RuleBindingCacheMock)(nil) type RuleBindingCacheMock struct { } -func (r *RuleBindingCacheMock) ListRulesForPod(_, _ string) []ruleengine.RuleEvaluator { - return []ruleengine.RuleEvaluator{} +func (r *RuleBindingCacheMock) ListRulesForPod(_, _ string) []typesv1.Rule { + return []typesv1.Rule{} } func (r *RuleBindingCacheMock) AddNotifier(_ *chan RuleBindingNotify) { } -func (r *RuleBindingCacheMock) GetRuleCreator() ruleengine.RuleCreator { +func (r *RuleBindingCacheMock) GetRuleCreator() rulecreator.RuleCreator { return nil } + +func (r *RuleBindingCacheMock) RefreshRuleBindingsRules() { +} diff --git a/pkg/ruleengine/ruleengine_interface.go b/pkg/ruleengine/ruleengine_interface.go deleted file mode 100644 index 825838ab6..000000000 --- a/pkg/ruleengine/ruleengine_interface.go +++ /dev/null @@ -1,158 +0,0 @@ -package ruleengine - -import ( - "github.com/kubescape/node-agent/pkg/objectcache" - "github.com/kubescape/node-agent/pkg/utils" - - apitypes "github.com/armosec/armoapi-go/armotypes" - igtypes "github.com/inspektor-gadget/inspektor-gadget/pkg/types" -) - -const ( - RulePriorityNone = 0 - RulePriorityLow = 1 - RulePriorityMed = 5 - RulePriorityHigh = 8 - RulePriorityCritical = 10 - RulePrioritySystemIssue = 1000 -) - -type DetectionInfo interface{} - -type DetectionResult struct { - IsFailure bool - Payload DetectionInfo -} - -type RuleDescriptor struct { - // Rule ID - ID string - // Rule Name - Name string - // Rule Description - Description string - // Priority - Priority int - // Tags - Tags []string - // Rule requirements - Requirements RuleSpec - // Create a rule function - RuleCreationFunc func() RuleEvaluator - // RulePolicySupport indicates if the rule supports policy - RulePolicySupport bool -} - -// ProfileRequirement indicates how a rule uses profiles -type ProfileRequirement struct { - // ProfileDependency indicates if the rule requires a profile - ProfileDependency apitypes.ProfileDependency - - // ProfileType indicates what type of profile is needed (Application, Network, etc) - ProfileType apitypes.ProfileType -} - -func (r *RuleDescriptor) HasTags(tags []string) bool { - for _, tag := range tags { - for _, ruleTag := range r.Tags { - if tag == ruleTag { - return true - } - } - } - return false -} - -// RuleCreator is an interface for creating rules by tags, IDs, and names -type RuleCreator interface { - CreateRulesByTags(tags []string) []RuleEvaluator - CreateRuleByID(id string) RuleEvaluator - CreateRuleByName(name string) RuleEvaluator - RegisterRule(rule RuleDescriptor) - CreateRulesByEventType(eventType utils.EventType) []RuleEvaluator - CreateRulePolicyRulesByEventType(eventType utils.EventType) []RuleEvaluator - CreateAllRules() []RuleEvaluator - GetAllRuleIDs() []string -} - -type RuleEvaluator interface { - // Rule ID - this is the rules unique identifier - ID() string - - // Rule Name - Name() string - - // EvaluateRule evaluates the rule without profile - EvaluateRule(eventType utils.EventType, event utils.K8sEvent, k8sObjCache objectcache.K8sObjectCache) DetectionResult - - // 
EvaluateRuleWithProfile evaluates the rule with profile - EvaluateRuleWithProfile(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache) (DetectionResult, error) - - // CreateRuleFailure creates a rule failure - CreateRuleFailure(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache, payload DetectionResult) RuleFailure - - // Rule requirements - Requirements() RuleSpec - - // Set rule parameters - SetParameters(parameters map[string]interface{}) - - // Get rule parameters - GetParameters() map[string]interface{} -} - -type RuleCondition interface { - EvaluateRule(eventType utils.EventType, event utils.K8sEvent, k8sObjCache objectcache.K8sObjectCache) DetectionResult - ID() string -} - -// RuleSpec is an interface for rule requirements -type RuleSpec interface { - // Event types required for the rule - RequiredEventTypes() []utils.EventType - - // Profile requirements - GetProfileRequirements() ProfileRequirement -} - -type RuleFailure interface { - // Get Base Runtime Alert - GetBaseRuntimeAlert() apitypes.BaseRuntimeAlert - // Get Alert Type - GetAlertType() apitypes.AlertType - // Get Runtime Process Details - GetRuntimeProcessDetails() apitypes.ProcessTree - // Get Trigger Event - GetTriggerEvent() igtypes.Event - // Get Rule Description - GetRuleAlert() apitypes.RuleAlert - // Get K8s Runtime Details - GetRuntimeAlertK8sDetails() apitypes.RuntimeAlertK8sDetails - // Get Rule ID - GetRuleId() string - // Get Cloud Services - GetCloudServices() []string - // Get Http Details - GetHttpRuleAlert() apitypes.HttpRuleAlert - // Get Alert Platform - GetAlertPlatform() apitypes.AlertSourcePlatform - // Get Extra - GetExtra() interface{} - - // Set Workload Details - SetWorkloadDetails(workloadDetails string) - // Set Base Runtime Alert - SetBaseRuntimeAlert(baseRuntimeAlert apitypes.BaseRuntimeAlert) - // Set Runtime Process Details - SetRuntimeProcessDetails(runtimeProcessDetails apitypes.ProcessTree) - // Set Trigger Event - SetTriggerEvent(triggerEvent igtypes.Event) - // Set Rule Description - SetRuleAlert(ruleAlert apitypes.RuleAlert) - // Set K8s Runtime Details - SetRuntimeAlertK8sDetails(runtimeAlertK8sDetails apitypes.RuntimeAlertK8sDetails) - // Set Cloud Services - SetCloudServices(cloudServices []string) - // Set Alert Platform - SetAlertPlatform(alertPlatform apitypes.AlertSourcePlatform) -} diff --git a/pkg/ruleengine/ruleengine_mock.go b/pkg/ruleengine/ruleengine_mock.go deleted file mode 100644 index 14abfe31b..000000000 --- a/pkg/ruleengine/ruleengine_mock.go +++ /dev/null @@ -1,101 +0,0 @@ -package ruleengine - -import ( - "github.com/kubescape/node-agent/pkg/objectcache" - "github.com/kubescape/node-agent/pkg/utils" -) - -var _ RuleCreator = (*RuleCreatorMock)(nil) - -type RuleCreatorMock struct { -} - -func (r *RuleCreatorMock) CreateRulesByTags(tags []string) []RuleEvaluator { - var rl []RuleEvaluator - for _, t := range tags { - rl = append(rl, &RuleMock{RuleName: t}) - } - return rl -} - -func (r *RuleCreatorMock) CreateRuleByID(id string) RuleEvaluator { - return &RuleMock{RuleID: id} -} - -func (r *RuleCreatorMock) CreateRuleByName(name string) RuleEvaluator { - return &RuleMock{RuleName: name} -} - -func (r *RuleCreatorMock) RegisterRule(rule RuleDescriptor) { -} - -func (r *RuleCreatorMock) CreateRulesByEventType(eventType utils.EventType) []RuleEvaluator { - return []RuleEvaluator{} -} - -func (r *RuleCreatorMock) CreateRulePolicyRulesByEventType(eventType utils.EventType) []RuleEvaluator { - return 
[]RuleEvaluator{} -} - -func (r *RuleCreatorMock) CreateAllRules() []RuleEvaluator { - return []RuleEvaluator{} -} - -func (r *RuleCreatorMock) GetAllRuleIDs() []string { - return []string{} -} - -var _ RuleEvaluator = (*RuleMock)(nil) - -type RuleMock struct { - RuleRequirements RuleSpec - RuleParameters map[string]interface{} - RuleName string - RuleID string -} - -func (rule *RuleMock) Name() string { - return rule.RuleName -} - -func (rule *RuleMock) ID() string { - return rule.RuleID -} - -func (rule *RuleMock) DeleteRule() { -} - -func (rule *RuleMock) EvaluateRule(eventType utils.EventType, event utils.K8sEvent, k8sObjCache objectcache.K8sObjectCache) DetectionResult { - return DetectionResult{IsFailure: false, Payload: nil} -} - -func (rule *RuleMock) EvaluateRuleWithProfile(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache) (DetectionResult, error) { - return DetectionResult{IsFailure: false, Payload: nil}, nil -} - -func (rule *RuleMock) CreateRuleFailure(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache, payload DetectionResult) RuleFailure { - return nil -} - -func (rule *RuleMock) Requirements() RuleSpec { - return rule.RuleRequirements -} -func (rule *RuleMock) GetParameters() map[string]interface{} { - return rule.RuleParameters -} -func (rule *RuleMock) SetParameters(p map[string]interface{}) { - rule.RuleParameters = p -} - -var _ RuleSpec = (*RuleSpecMock)(nil) - -type RuleSpecMock struct { -} - -func (ruleSpec *RuleSpecMock) RequiredEventTypes() []utils.EventType { - return []utils.EventType{} -} - -func (ruleSpec *RuleSpecMock) GetProfileRequirements() ProfileRequirement { - return ProfileRequirement{} -} diff --git a/pkg/ruleengine/v1/_factory_test.go b/pkg/ruleengine/v1/_factory_test.go deleted file mode 100644 index f7d48d340..000000000 --- a/pkg/ruleengine/v1/_factory_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package ruleengine - -import ( - "testing" -) - -// @david - Fix the test cases. 
- -// Test CreateRulesByTags -func TestCreateRulesByTags(t *testing.T) { - // Create a new rule - rules := CreateRulesByTags([]string{"exec"}) - // Assert r is not nil - if rules == nil { - t.Errorf("Expected rules to not be nil") - } -} - -// Test CreateRulesByNames -func TestCreateRulesByNames(t *testing.T) { - // Create a new rule - rules := CreateRulesByNames([]string{R0001UnexpectedProcessLaunchedRuleName}) - // Assert r is not nil - if rules == nil || len(rules) != 1 { - t.Errorf("Expected rules to not be nil") - } -} - -// Test CreateRuleByName -func TestCreateRuleByName(t *testing.T) { - // Create a new rule - rule := CreateRuleByName(R0001UnexpectedProcessLaunchedRuleName) - // Assert r is not nil - if rule == nil { - t.Errorf("Expected rule to not be nil") - } - // not exist - rule = CreateRuleByName("not exist") - // Assert r is not nil - if rule != nil { - t.Errorf("Expected rule to be nil") - } -} - -// Test CreateRuleByID -func TestCreateRuleByID(t *testing.T) { - rule := CreateRuleByID(R0001ID) - // Assert r is not nil - if rule == nil { - t.Errorf("Expected rule to not be nil") - } - // not exist - rule = CreateRuleByID("not exist") - // Assert r is not nil - if rule != nil { - t.Errorf("Expected rule to be nil") - } -} diff --git a/pkg/ruleengine/v1/factory.go b/pkg/ruleengine/v1/factory.go deleted file mode 100644 index f6dbaad5c..000000000 --- a/pkg/ruleengine/v1/factory.go +++ /dev/null @@ -1,132 +0,0 @@ -package ruleengine - -import ( - "github.com/kubescape/node-agent/pkg/ruleengine" - "github.com/kubescape/node-agent/pkg/utils" -) - -var _ ruleengine.RuleCreator = (*RuleCreatorImpl)(nil) - -type RuleCreatorImpl struct { - RuleDescriptions []ruleengine.RuleDescriptor -} - -func NewRuleCreator() *RuleCreatorImpl { - return &RuleCreatorImpl{ - RuleDescriptions: []ruleengine.RuleDescriptor{ - R0001UnexpectedProcessLaunchedRuleDescriptor, - R0002UnexpectedFileAccessRuleDescriptor, - R0003UnexpectedSystemCallRuleDescriptor, - R0004UnexpectedCapabilityUsedRuleDescriptor, - R0005UnexpectedDomainRequestRuleDescriptor, - R0006UnexpectedServiceAccountTokenAccessRuleDescriptor, - R0007KubernetesClientExecutedDescriptor, - R0008ReadEnvironmentVariablesProcFSRuleDescriptor, - R0009EbpfProgramLoadRuleDescriptor, - R0010UnexpectedSensitiveFileAccessRuleDescriptor, - R0011UnexpectedEgressNetworkTrafficRuleDescriptor, - R1000ExecFromMaliciousSourceDescriptor, - R1001ExecBinaryNotInBaseImageRuleDescriptor, - R1002LoadKernelModuleRuleDescriptor, - R1003MaliciousSSHConnectionRuleDescriptor, - R1004ExecFromMountRuleDescriptor, - R1005FilelessExecutionRuleDescriptor, - R1006UnshareSyscallRuleDescriptor, - R1007XMRCryptoMiningRuleDescriptor, - R1008CryptoMiningDomainCommunicationRuleDescriptor, - R1009CryptoMiningRelatedPortRuleDescriptor, - R1010SymlinkCreatedOverSensitiveFileRuleDescriptor, - R1011LdPreloadHookRuleDescriptor, - R1012HardlinkCreatedOverSensitiveFileRuleDescriptor, - R1015MaliciousPtraceUsageRuleDescriptor, - R1030UnexpectedIouringOperationRuleDescriptor, - }, - } -} - -func (r *RuleCreatorImpl) CreateRulesByTags(tags []string) []ruleengine.RuleEvaluator { - var rules []ruleengine.RuleEvaluator - for _, rule := range r.RuleDescriptions { - if rule.HasTags(tags) { - rules = append(rules, rule.RuleCreationFunc()) - } - } - return rules -} - -func (r *RuleCreatorImpl) CreateRuleByID(id string) ruleengine.RuleEvaluator { - for _, rule := range r.RuleDescriptions { - if rule.ID == id { - return rule.RuleCreationFunc() - } - } - return nil -} - -func (r *RuleCreatorImpl) 
CreateRuleByName(name string) ruleengine.RuleEvaluator { - for _, rule := range r.RuleDescriptions { - if rule.Name == name { - return rule.RuleCreationFunc() - } - } - return nil -} - -func (r *RuleCreatorImpl) GetAllRuleDescriptors() []ruleengine.RuleDescriptor { - return r.RuleDescriptions -} - -func (r *RuleCreatorImpl) RegisterRule(rule ruleengine.RuleDescriptor) { - r.RuleDescriptions = append(r.RuleDescriptions, rule) -} - -func (r *RuleCreatorImpl) CreateRulesByEventType(eventType utils.EventType) []ruleengine.RuleEvaluator { - var rules []ruleengine.RuleEvaluator - for _, rule := range r.RuleDescriptions { - if containsEventType(rule.Requirements.RequiredEventTypes(), eventType) { - rules = append(rules, rule.RuleCreationFunc()) - } - } - return rules -} - -func (r *RuleCreatorImpl) CreateRulePolicyRulesByEventType(eventType utils.EventType) []ruleengine.RuleEvaluator { - ruleCreator := NewRuleCreator() - rules := ruleCreator.CreateRulesByEventType(eventType) - for _, rule := range rules { - for _, descriptor := range r.RuleDescriptions { - if descriptor.ID == rule.ID() { - if descriptor.RulePolicySupport { - rules = append(rules, rule) - } - } - } - } - - return rules -} - -func (r *RuleCreatorImpl) GetAllRuleIDs() []string { - var ruleIDs []string - for _, rule := range r.RuleDescriptions { - ruleIDs = append(ruleIDs, rule.ID) - } - return ruleIDs -} - -func (r *RuleCreatorImpl) CreateAllRules() []ruleengine.RuleEvaluator { - var rules []ruleengine.RuleEvaluator - for _, rule := range r.RuleDescriptions { - rules = append(rules, rule.RuleCreationFunc()) - } - return rules -} - -func containsEventType(eventTypes []utils.EventType, eventType utils.EventType) bool { - for _, et := range eventTypes { - if et == eventType { - return true - } - } - return false -} diff --git a/pkg/ruleengine/v1/helpers.go b/pkg/ruleengine/v1/helpers.go deleted file mode 100644 index 452e89837..000000000 --- a/pkg/ruleengine/v1/helpers.go +++ /dev/null @@ -1,144 +0,0 @@ -package ruleengine - -import ( - "crypto/md5" - "errors" - "fmt" - "path/filepath" - "strings" - - events "github.com/kubescape/node-agent/pkg/ebpf/events" - - "github.com/kubescape/node-agent/pkg/objectcache" - - "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" -) - -// SensitiveFiles is a list of sensitive files that should not be accessed by the application unexpectedly. 
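The factory removed above keeps every rule as a descriptor (ID, name, tags, requirements, creation func) and answers every lookup by scanning that slice. A self-contained sketch of the same registry idea, with hypothetical local types standing in for RuleDescriptor and RuleEvaluator:

```go
package main

import (
	"fmt"
	"slices"
)

// Descriptor is a hypothetical stand-in for RuleDescriptor: metadata plus a constructor.
type Descriptor struct {
	ID     string
	Name   string
	Tags   []string
	Create func() string // returns a rule instance; a string keeps the sketch minimal
}

// Registry mirrors the lookup style of RuleCreatorImpl: linear scans over descriptors.
type Registry struct {
	descriptors []Descriptor
}

func (r *Registry) Register(d Descriptor) { r.descriptors = append(r.descriptors, d) }

func (r *Registry) CreateByID(id string) (string, bool) {
	for _, d := range r.descriptors {
		if d.ID == id {
			return d.Create(), true
		}
	}
	return "", false
}

func (r *Registry) CreateByTag(tag string) []string {
	var out []string
	for _, d := range r.descriptors {
		if slices.Contains(d.Tags, tag) {
			out = append(out, d.Create())
		}
	}
	return out
}

func main() {
	reg := &Registry{}
	reg.Register(Descriptor{ID: "R0001", Name: "Unexpected process launched", Tags: []string{"exec"},
		Create: func() string { return "R0001 instance" }})
	if rule, ok := reg.CreateByID("R0001"); ok {
		fmt.Println(rule)
	}
	fmt.Println(reg.CreateByTag("exec"))
}
```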
-var SensitiveFiles = []string{ - "/etc/shadow", - "/etc/sudoers", - "/etc/ssh/ssh_config", - "/etc/ssh/sshd_config", -} - -var ( - ContainerNotFound = errors.New("container not found") - ProfileNotFound = errors.New("application profile not found") -) - -func GetExecPathFromEvent(event *events.ExecEvent) string { - if len(event.Args) > 0 { - if event.Args[0] != "" { - return event.Args[0] - } - } - return event.Comm -} - -func GetExecFullPathFromEvent(event *events.ExecEvent) string { - execPath := GetExecPathFromEvent(event) - if strings.HasPrefix(execPath, "./") || strings.HasPrefix(execPath, "../") { - execPath = filepath.Join(event.Cwd, execPath) - } else if !strings.HasPrefix(execPath, "/") { - execPath = "/" + execPath - } - return execPath -} - -func GetContainerFromApplicationProfile(ap *v1beta1.ApplicationProfile, containerName string) (v1beta1.ApplicationProfileContainer, error) { - for _, s := range ap.Spec.Containers { - if s.Name == containerName { - return s, nil - } - } - for _, s := range ap.Spec.InitContainers { - if s.Name == containerName { - return s, nil - } - } - for _, s := range ap.Spec.EphemeralContainers { - if s.Name == containerName { - return s, nil - } - } - return v1beta1.ApplicationProfileContainer{}, ContainerNotFound -} - -func GetContainerFromNetworkNeighborhood(nn *v1beta1.NetworkNeighborhood, containerName string) (v1beta1.NetworkNeighborhoodContainer, error) { - for _, c := range nn.Spec.Containers { - if c.Name == containerName { - return c, nil - } - } - for _, c := range nn.Spec.InitContainers { - if c.Name == containerName { - return c, nil - } - } - for _, c := range nn.Spec.EphemeralContainers { - if c.Name == containerName { - return c, nil - } - } - return v1beta1.NetworkNeighborhoodContainer{}, ContainerNotFound -} - -func GetContainerMountPaths(namespace, podName, containerName string, k8sObjCache objectcache.K8sObjectCache) ([]string, error) { - if k8sObjCache == nil { - return []string{}, fmt.Errorf("k8sObjCache is nil") - } - - podSpec := k8sObjCache.GetPodSpec(namespace, podName) - if podSpec == nil { - return []string{}, fmt.Errorf("pod spec not available for %s/%s", namespace, podName) - } - - var mountPaths []string - for _, container := range podSpec.Containers { - if container.Name == containerName { - for _, volumeMount := range container.VolumeMounts { - mountPaths = append(mountPaths, volumeMount.MountPath) - } - } - } - - for _, container := range podSpec.InitContainers { - if container.Name == containerName { - for _, volumeMount := range container.VolumeMounts { - mountPaths = append(mountPaths, volumeMount.MountPath) - } - } - } - - for _, container := range podSpec.EphemeralContainers { - if container.Name == containerName { - for _, volumeMount := range container.VolumeMounts { - mountPaths = append(mountPaths, volumeMount.MountPath) - } - } - } - - return mountPaths, nil -} - -func InterfaceToStringSlice(val interface{}) ([]string, bool) { - sliceOfInterfaces, ok := val.([]interface{}) - if ok { - sliceOfStrings := []string{} - for _, interfaceVal := range sliceOfInterfaces { - sliceOfStrings = append(sliceOfStrings, fmt.Sprintf("%v", interfaceVal)) - } - return sliceOfStrings, true - } - return nil, false -} - -func HashStringToMD5(str string) string { - // Create an md5 hash of the string - hash := md5.Sum([]byte(str)) - // Convert the hash to a hexadecimal string - hashString := fmt.Sprintf("%x", hash) - // Return the hash string - return hashString -} diff --git a/pkg/ruleengine/v1/profile.go 
b/pkg/ruleengine/v1/profile.go deleted file mode 100644 index 14915f71d..000000000 --- a/pkg/ruleengine/v1/profile.go +++ /dev/null @@ -1,80 +0,0 @@ -package ruleengine - -import ( - "slices" - - eventtypes "github.com/inspektor-gadget/inspektor-gadget/pkg/types" - events "github.com/kubescape/node-agent/pkg/ebpf/events" - "github.com/kubescape/node-agent/pkg/objectcache" - "github.com/kubescape/node-agent/pkg/rulemanager/v1/ruleprocess" - "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" -) - -func IsExecEventInProfile(execEvent *events.ExecEvent, objectCache objectcache.ObjectCache, compareArgs bool) (bool, error) { - // Check if the exec is whitelisted, if so, return nil - execPath := GetExecPathFromEvent(execEvent) - - ap, err := GetApplicationProfile(execEvent.Runtime.ContainerID, objectCache) - if err != nil { - return false, err - } - - appProfileExecList, err := GetContainerFromApplicationProfile(ap, execEvent.GetContainer()) - if err != nil { - return false, err - } - - for _, exec := range appProfileExecList.Execs { - if exec.Path == execPath { - // Either compare args false or args match - if !compareArgs || slices.Compare(exec.Args, execEvent.Args) == 0 { - return true, nil - } - } - } - return false, nil -} - -func IsAllowed(event *eventtypes.Event, objCache objectcache.ObjectCache, process string, ruleId string) (bool, error) { - if objCache == nil { - return false, nil - } - ap, err := GetApplicationProfile(event.Runtime.ContainerID, objCache) - if err != nil { - return false, err - } - - appProfile, err := GetContainerFromApplicationProfile(ap, event.GetContainer()) - if err != nil { - return false, err - } - - // rule policy does not exists, allowed by default - if _, ok := appProfile.PolicyByRuleId[ruleId]; !ok { - return true, nil - } - - if policy, ok := appProfile.PolicyByRuleId[ruleId]; ok { - if policy.AllowedContainer || slices.Contains(policy.AllowedProcesses, process) { - return true, nil - } - } - - return false, nil -} - -func GetApplicationProfile(containerID string, objectCache objectcache.ObjectCache) (*v1beta1.ApplicationProfile, error) { - ap := objectCache.ApplicationProfileCache().GetApplicationProfile(containerID) - if ap == nil { - return nil, ruleprocess.NoProfileAvailable - } - return ap, nil -} - -func GetNetworkNeighborhood(containerID string, objectCache objectcache.ObjectCache) (*v1beta1.NetworkNeighborhood, error) { - nn := objectCache.NetworkNeighborhoodCache().GetNetworkNeighborhood(containerID) - if nn == nil { - return nil, ruleprocess.NoProfileAvailable - } - return nn, nil -} diff --git a/pkg/ruleengine/v1/r0001_unexpected_process_launched.go b/pkg/ruleengine/v1/r0001_unexpected_process_launched.go deleted file mode 100644 index d725c9032..000000000 --- a/pkg/ruleengine/v1/r0001_unexpected_process_launched.go +++ /dev/null @@ -1,172 +0,0 @@ -package ruleengine - -import ( - "fmt" - "path/filepath" - "slices" - "strings" - - apitypes "github.com/armosec/armoapi-go/armotypes" - "github.com/armosec/armoapi-go/armotypes/common" - events "github.com/kubescape/node-agent/pkg/ebpf/events" - "github.com/kubescape/node-agent/pkg/objectcache" - "github.com/kubescape/node-agent/pkg/ruleengine" - "github.com/kubescape/node-agent/pkg/utils" -) - -const ( - R0001ID = "R0001" - R0001Name = "Unexpected process launched" -) - -var R0001UnexpectedProcessLaunchedRuleDescriptor = ruleengine.RuleDescriptor{ - ID: R0001ID, - Name: R0001Name, - Description: "Detecting exec calls that are not whitelisted by application profile", - Tags: 
[]string{"exec", "whitelisted"}, - Priority: RulePriorityMed, - Requirements: &RuleRequirements{ - EventTypes: []utils.EventType{utils.ExecveEventType}, - }, - RuleCreationFunc: func() ruleengine.RuleEvaluator { - return CreateRuleR0001UnexpectedProcessLaunched() - }, -} -var _ ruleengine.RuleEvaluator = (*R0001UnexpectedProcessLaunched)(nil) - -type R0001UnexpectedProcessLaunched struct { - BaseRule - enforceArgs bool -} - -func (rule *R0001UnexpectedProcessLaunched) SetParameters(params map[string]interface{}) { - if enforceArgs, ok := params["enforceArgs"].(bool); ok { - rule.enforceArgs = enforceArgs - } else { - rule.enforceArgs = false - } -} - -func (rule *R0001UnexpectedProcessLaunched) Name() string { - return R0001Name -} -func (rule *R0001UnexpectedProcessLaunched) ID() string { - return R0001ID -} - -func CreateRuleR0001UnexpectedProcessLaunched() *R0001UnexpectedProcessLaunched { - return &R0001UnexpectedProcessLaunched{enforceArgs: false} -} - -func (rule *R0001UnexpectedProcessLaunched) EvaluateRule(eventType utils.EventType, event utils.K8sEvent, k8sObjCache objectcache.K8sObjectCache) ruleengine.DetectionResult { - if eventType != utils.ExecveEventType { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - execEvent, ok := event.(*events.ExecEvent) - if !ok { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - execPath := GetExecPathFromEvent(execEvent) - return ruleengine.DetectionResult{IsFailure: true, Payload: execPath} -} - -func (rule *R0001UnexpectedProcessLaunched) EvaluateRuleWithProfile(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache) (ruleengine.DetectionResult, error) { - // First do basic evaluation - detectionResult := rule.EvaluateRule(eventType, event, objCache.K8sObjectCache()) - if !detectionResult.IsFailure { - return detectionResult, nil - } - - execEvent, _ := event.(*events.ExecEvent) - ap, err := GetApplicationProfile(execEvent.Runtime.ContainerID, objCache) - if err != nil { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, err - } - - appProfileExecList, err := GetContainerFromApplicationProfile(ap, execEvent.GetContainer()) - if err != nil { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, err - } - - for _, execCall := range appProfileExecList.Execs { - if execCall.Path == detectionResult.Payload { - // if enforceArgs is set to true, we need to compare the arguments as well - // if not set, we only compare the path - if !rule.enforceArgs || slices.Compare(execCall.Args, execEvent.Args) == 0 { - return ruleengine.DetectionResult{IsFailure: false, Payload: execCall.Path}, nil - } - } - } - - return ruleengine.DetectionResult{IsFailure: true, Payload: nil}, nil -} - -func (rule *R0001UnexpectedProcessLaunched) CreateRuleFailure(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache, payload ruleengine.DetectionResult) ruleengine.RuleFailure { - execEvent, _ := event.(*events.ExecEvent) - execPath := GetExecPathFromEvent(execEvent) - - // If the parent process is in the upper layer, the child process is also in the upper layer. 
- upperLayer := execEvent.UpperLayer || execEvent.PupperLayer - - return &GenericRuleFailure{ - BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ - UniqueID: HashStringToMD5(fmt.Sprintf("%s%s%s", execEvent.Comm, execEvent.ExePath, execEvent.Pcomm)), - AlertName: rule.Name(), - InfectedPID: execEvent.Pid, - Arguments: map[string]interface{}{ - "retval": execEvent.Retval, - "exec": execPath, - "args": execEvent.Args, - }, - Severity: R0001UnexpectedProcessLaunchedRuleDescriptor.Priority, - Identifiers: &common.Identifiers{ - Process: &common.ProcessEntity{ - Name: execEvent.Comm, - CommandLine: fmt.Sprintf("%s %s", execPath, strings.Join(utils.GetExecArgsFromEvent(&execEvent.Event), " ")), - }, - File: &common.FileEntity{ - Name: filepath.Base(GetExecFullPathFromEvent(execEvent)), - Directory: filepath.Dir(GetExecFullPathFromEvent(execEvent)), - }, - }, - }, - RuntimeProcessDetails: apitypes.ProcessTree{ - ProcessTree: apitypes.Process{ - Comm: execEvent.Comm, - Gid: &execEvent.Gid, - PID: execEvent.Pid, - Uid: &execEvent.Uid, - UpperLayer: &upperLayer, - PPID: execEvent.Ppid, - Pcomm: execEvent.Pcomm, - Cwd: execEvent.Cwd, - Hardlink: execEvent.ExePath, - Path: GetExecFullPathFromEvent(execEvent), - Cmdline: fmt.Sprintf("%s %s", execPath, strings.Join(utils.GetExecArgsFromEvent(&execEvent.Event), " ")), - }, - ContainerID: execEvent.Runtime.ContainerID, - }, - TriggerEvent: execEvent.Event.Event, - RuleAlert: apitypes.RuleAlert{ - RuleDescription: fmt.Sprintf("Unexpected process launched: %s", execPath), - }, - RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ - PodName: execEvent.GetPod(), - PodLabels: execEvent.K8s.PodLabels, - }, - RuleID: rule.ID(), - Extra: execEvent.GetExtra(), - } -} - -func (rule *R0001UnexpectedProcessLaunched) Requirements() ruleengine.RuleSpec { - return &RuleRequirements{ - EventTypes: R0001UnexpectedProcessLaunchedRuleDescriptor.Requirements.RequiredEventTypes(), - ProfileRequirements: ruleengine.ProfileRequirement{ - ProfileDependency: apitypes.Required, - ProfileType: apitypes.ApplicationProfile, - }, - } -} diff --git a/pkg/ruleengine/v1/r0001_unexpected_process_launched_test.go b/pkg/ruleengine/v1/r0001_unexpected_process_launched_test.go deleted file mode 100644 index 73c9dec0d..000000000 --- a/pkg/ruleengine/v1/r0001_unexpected_process_launched_test.go +++ /dev/null @@ -1,166 +0,0 @@ -package ruleengine - -import ( - "testing" - - events "github.com/kubescape/node-agent/pkg/ebpf/events" - "github.com/kubescape/node-agent/pkg/rulemanager/v1/ruleprocess" - - "github.com/kubescape/node-agent/pkg/objectcache" - "github.com/kubescape/node-agent/pkg/utils" - - "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" - - tracerexectype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/exec/types" - eventtypes "github.com/inspektor-gadget/inspektor-gadget/pkg/types" -) - -func TestR0001UnexpectedProcessLaunched(t *testing.T) { - // Create a new rule - r := CreateRuleR0001UnexpectedProcessLaunched() - // Assert r is not nil - if r == nil { - t.Errorf("Expected r to not be nil") - } - - e := &events.ExecEvent{ - Event: tracerexectype.Event{ - Event: eventtypes.Event{ - CommonData: eventtypes.CommonData{ - K8s: eventtypes.K8sMetadata{ - BasicK8sMetadata: eventtypes.BasicK8sMetadata{ - ContainerName: "test", - }, - }, - }, - }, - Comm: "/test", - Args: []string{"test"}, - }, - } - - // Test with nil appProfileAccess - ruleResult := ruleprocess.ProcessRule(r, utils.ExecveEventType, e, &objectcache.ObjectCacheMock{}) - if ruleResult != nil { 
- t.Errorf("Expected ruleResult to be nil must have an appProfile") - } - - objCache := RuleObjectCacheMock{} - profile := objCache.ApplicationProfileCache().GetApplicationProfile("test") - if profile == nil { - profile = &v1beta1.ApplicationProfile{} - profile.Spec.Containers = append(profile.Spec.Containers, v1beta1.ApplicationProfileContainer{ - Name: "test", - Execs: []v1beta1.ExecCalls{ - { - Path: "test", - Args: []string{"test"}, - }, - }, - }) - - objCache.SetApplicationProfile(profile) - } - - // Test with whitelisted exec - ruleResult = ruleprocess.ProcessRule(r, utils.ExecveEventType, e, &objCache) - if ruleResult != nil { - t.Errorf("Expected ruleResult to be nil since exec is whitelisted") - } - - // Test with non-whitelisted exec - e = &events.ExecEvent{ - Event: tracerexectype.Event{ - Event: eventtypes.Event{ - CommonData: eventtypes.CommonData{ - K8s: eventtypes.K8sMetadata{ - BasicK8sMetadata: eventtypes.BasicK8sMetadata{ - ContainerName: "test", - }, - }, - }, - }, - Comm: "/asdasd", - Args: []string{"asdasd"}, - }, - } - ruleResult = ruleprocess.ProcessRule(r, utils.ExecveEventType, e, &objCache) - if ruleResult == nil { - t.Errorf("Expected ruleResult to not be nil since exec is not whitelisted") - } - - // Test /bin/sh - profile.Spec.Containers[0].Execs = append(profile.Spec.Containers[0].Execs, v1beta1.ExecCalls{ - Path: "/bin/sh", - Args: []string{"/bin/sh", "-s", "unix:cmd"}, - }) - objCache.SetApplicationProfile(profile) - - e.Comm = "sh" - e.Args = []string{"/bin/sh", "-s", "unix:cmd"} - ruleResult = ruleprocess.ProcessRule(r, utils.ExecveEventType, e, &objCache) - if ruleResult != nil { - t.Errorf("Expected ruleResult to be nil since exec is whitelisted") - } -} - -func TestR0001UnexpectedProcessLaunchedArgCompare(t *testing.T) { - // Create a new rule - r := CreateRuleR0001UnexpectedProcessLaunched() - // Assert r is not nil - if r == nil { - t.Errorf("Expected r to not be nil") - } - - r.SetParameters(map[string]interface{}{"enforceArgs": false}) - - objCache := RuleObjectCacheMock{} - profile := objCache.ApplicationProfileCache().GetApplicationProfile("test") - if profile == nil { - profile = &v1beta1.ApplicationProfile{} - profile.Spec.Containers = append(profile.Spec.Containers, v1beta1.ApplicationProfileContainer{ - Name: "test", - Execs: []v1beta1.ExecCalls{ - { - Path: "/test", - Args: []string{"test"}, - }, - }, - }) - - objCache.SetApplicationProfile(profile) - } - - e := &events.ExecEvent{ - Event: tracerexectype.Event{ - Event: eventtypes.Event{ - CommonData: eventtypes.CommonData{ - K8s: eventtypes.K8sMetadata{ - BasicK8sMetadata: eventtypes.BasicK8sMetadata{ - ContainerName: "test", - }, - }, - }, - }, - ExePath: "/test", - Args: []string{"/test", "something"}, - }, - } - - // Test with whitelisted exec - ruleResult := ruleprocess.ProcessRule(r, utils.ExecveEventType, e, &objCache) - if ruleResult != nil { - t.Errorf("Expected ruleResult to be nil since exec is whitelisted and args are not enforced") - } - - // Create a new rule with enforceArgs set to true - r = CreateRuleR0001UnexpectedProcessLaunched() - r.SetParameters(map[string]interface{}{"enforceArgs": true}) - - // Test with whitelisted exec and enforceArgs set to true - ruleResult = ruleprocess.ProcessRule(r, utils.ExecveEventType, e, &objCache) - if ruleResult == nil { - t.Errorf("Expected ruleResult to not be nil since exec is whitelisted but args are enforced") - } - -} diff --git a/pkg/ruleengine/v1/r0002_unexpected_file_access.go b/pkg/ruleengine/v1/r0002_unexpected_file_access.go 
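The CreateRuleFailure implementations above derive each alert's UniqueID by MD5-hashing a few stable event fields (for R0001: comm, exe path and parent comm) via the HashStringToMD5 helper, so identical findings collapse to one identifier. A stdlib-only sketch of that dedup-key construction, assuming stability across repeats is the goal; this is not the node-agent helper itself:

```go
package main

import (
	"crypto/md5"
	"fmt"
)

// alertUniqueID mirrors how the deleted CreateRuleFailure methods build a
// dedup key from fields that identify the finding, rather than from volatile
// values such as PIDs or timestamps (stdlib-only sketch).
func alertUniqueID(comm, exePath, pcomm string) string {
	sum := md5.Sum([]byte(comm + exePath + pcomm))
	return fmt.Sprintf("%x", sum)
}

func main() {
	fmt.Println(alertUniqueID("sh", "/bin/sh", "bash"))
	// Same inputs always yield the same ID, so repeated identical findings collapse.
	fmt.Println(alertUniqueID("sh", "/bin/sh", "bash"))
}
```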
deleted file mode 100644 index f8c9dc596..000000000 --- a/pkg/ruleengine/v1/r0002_unexpected_file_access.go +++ /dev/null @@ -1,235 +0,0 @@ -package ruleengine - -import ( - "fmt" - "path/filepath" - "strings" - - "github.com/kubescape/node-agent/pkg/ebpf/events" - "github.com/kubescape/node-agent/pkg/ruleengine" - "github.com/kubescape/node-agent/pkg/utils" - "github.com/kubescape/storage/pkg/registry/file/dynamicpathdetector" - - "github.com/kubescape/node-agent/pkg/objectcache" - - apitypes "github.com/armosec/armoapi-go/armotypes" - "github.com/armosec/armoapi-go/armotypes/common" - "github.com/kubescape/go-logger" - "github.com/kubescape/go-logger/helpers" -) - -const ( - R0002ID = "R0002" - R0002Name = "Unexpected file access" -) - -var R0002UnexpectedFileAccessRuleDescriptor = ruleengine.RuleDescriptor{ - ID: R0002ID, - Name: R0002Name, - Description: "Detecting file access that are not whitelisted by application profile. File access is defined by the combination of path and flags", - Tags: []string{"open", "whitelisted"}, - Priority: RulePriorityLow, - Requirements: &RuleRequirements{ - EventTypes: []utils.EventType{utils.OpenEventType}, - }, - RuleCreationFunc: func() ruleengine.RuleEvaluator { - return CreateRuleR0002UnexpectedFileAccess() - }, -} -var _ ruleengine.RuleEvaluator = (*R0002UnexpectedFileAccess)(nil) - -type R0002UnexpectedFileAccess struct { - BaseRule - shouldIgnoreMounts bool - ignorePrefixes []string - includePrefixes []string -} - -func CreateRuleR0002UnexpectedFileAccess() *R0002UnexpectedFileAccess { - return &R0002UnexpectedFileAccess{ - shouldIgnoreMounts: false, - ignorePrefixes: []string{}, - includePrefixes: []string{}, - } -} - -func (rule *R0002UnexpectedFileAccess) Name() string { - return R0002Name -} -func (rule *R0002UnexpectedFileAccess) ID() string { - return R0002ID -} - -func (rule *R0002UnexpectedFileAccess) SetParameters(parameters map[string]interface{}) { - rule.BaseRule.SetParameters(parameters) - - rule.shouldIgnoreMounts = fmt.Sprintf("%v", rule.GetParameters()["ignoreMounts"]) == "true" - - ignorePrefixesInterface := rule.GetParameters()["ignorePrefixes"] - if ignorePrefixesInterface != nil { - ignorePrefixes, ok := InterfaceToStringSlice(ignorePrefixesInterface) - if ok { - rule.ignorePrefixes = ignorePrefixes - } else { - logger.L().Warning("failed to convert ignorePrefixes to []string", helpers.String("ruleID", rule.ID())) - } - } - - includePrefixesInterface := rule.GetParameters()["includePrefixes"] - if includePrefixesInterface != nil { - includePrefixes, ok := InterfaceToStringSlice(includePrefixesInterface) - if ok { - rule.includePrefixes = includePrefixes - } else { - logger.L().Warning("failed to convert includePrefixes to []string", helpers.String("ruleID", rule.ID())) - } - } - -} - -func (rule *R0002UnexpectedFileAccess) DeleteRule() { -} - -func (rule *R0002UnexpectedFileAccess) EvaluateRule(eventType utils.EventType, event utils.K8sEvent, k8sObjCache objectcache.K8sObjectCache) ruleengine.DetectionResult { - if eventType != utils.OpenEventType { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - fullEvent, ok := event.(*events.OpenEvent) - if !ok { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - openEvent := fullEvent.Event - - // Check if we have include prefixes and if the path is not in the include prefixes, return nil - if len(rule.includePrefixes) > 0 { - include := false - for _, prefix := range rule.includePrefixes { - if strings.HasPrefix(openEvent.FullPath, 
prefix) { - include = true - } - } - if !include { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - } - - // Check if path is ignored - for _, prefix := range rule.ignorePrefixes { - if strings.HasPrefix(openEvent.FullPath, prefix) { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - } - - if rule.shouldIgnoreMounts { - mounts, err := GetContainerMountPaths(openEvent.GetNamespace(), openEvent.GetPod(), openEvent.GetContainer(), k8sObjCache) - if err != nil { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - for _, mount := range mounts { - if isPathContained(mount, openEvent.FullPath) { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - } - } - - return ruleengine.DetectionResult{IsFailure: true, Payload: openEvent.FullPath} -} - -func (rule *R0002UnexpectedFileAccess) EvaluateRuleWithProfile(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache) (ruleengine.DetectionResult, error) { - // First do basic evaluation - detectionResult := rule.EvaluateRule(eventType, event, objCache.K8sObjectCache()) - if !detectionResult.IsFailure { - return detectionResult, nil - } - - openEventTyped, _ := event.(*events.OpenEvent) - ap, err := GetApplicationProfile(openEventTyped.Runtime.ContainerID, objCache) - if err != nil { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, err - } - - appProfileOpenList, err := GetContainerFromApplicationProfile(ap, openEventTyped.GetContainer()) - if err != nil { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, err - } - - for _, open := range appProfileOpenList.Opens { - if dynamicpathdetector.CompareDynamic(open.Path, openEventTyped.FullPath) { - found := 0 - for _, eventOpenFlag := range openEventTyped.Flags { - // Check that event open flag is in the open.Flags - for _, profileOpenFlag := range open.Flags { - if eventOpenFlag == profileOpenFlag { - found += 1 - } - } - } - if found == len(openEventTyped.Flags) { - return ruleengine.DetectionResult{IsFailure: false, Payload: open.Path}, nil - } - } - } - - return detectionResult, nil -} - -func (rule *R0002UnexpectedFileAccess) CreateRuleFailure(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache, payload ruleengine.DetectionResult) ruleengine.RuleFailure { - openEvent, _ := event.(*events.OpenEvent) - openEventTyped := openEvent.Event - - return &GenericRuleFailure{ - BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ - UniqueID: HashStringToMD5(fmt.Sprintf("%s%s", openEventTyped.Comm, openEventTyped.FullPath)), - AlertName: rule.Name(), - InfectedPID: openEventTyped.Pid, - Arguments: map[string]interface{}{ - "flags": openEventTyped.Flags, - "path": openEventTyped.FullPath, - }, - Severity: R0002UnexpectedFileAccessRuleDescriptor.Priority, - Identifiers: &common.Identifiers{ - Process: &common.ProcessEntity{ - Name: openEventTyped.Comm, - }, - File: &common.FileEntity{ - Name: filepath.Base(openEventTyped.FullPath), - Directory: filepath.Dir(openEventTyped.FullPath), - }, - }, - }, - RuntimeProcessDetails: apitypes.ProcessTree{ - ProcessTree: apitypes.Process{ - Comm: openEventTyped.Comm, - Gid: &openEventTyped.Gid, - PID: openEventTyped.Pid, - Uid: &openEventTyped.Uid, - }, - ContainerID: openEventTyped.Runtime.ContainerID, - }, - TriggerEvent: openEventTyped.Event, - RuleAlert: apitypes.RuleAlert{ - RuleDescription: fmt.Sprintf("Unexpected file access: %s with flags %s", openEventTyped.FullPath, strings.Join(openEventTyped.Flags, 
",")), - }, - RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ - PodName: openEventTyped.GetPod(), - }, - RuleID: rule.ID(), - Extra: openEvent.GetExtra(), - } -} - -func isPathContained(basepath, targetpath string) bool { - return strings.HasPrefix(targetpath, basepath) -} - -func (rule *R0002UnexpectedFileAccess) Requirements() ruleengine.RuleSpec { - return &RuleRequirements{ - EventTypes: R0002UnexpectedFileAccessRuleDescriptor.Requirements.RequiredEventTypes(), - ProfileRequirements: ruleengine.ProfileRequirement{ - ProfileDependency: apitypes.Required, - ProfileType: apitypes.ApplicationProfile, - }, - } -} diff --git a/pkg/ruleengine/v1/r0002_unexpected_file_access_test.go b/pkg/ruleengine/v1/r0002_unexpected_file_access_test.go deleted file mode 100644 index ed80bc6ad..000000000 --- a/pkg/ruleengine/v1/r0002_unexpected_file_access_test.go +++ /dev/null @@ -1,201 +0,0 @@ -package ruleengine - -import ( - "testing" - - corev1 "k8s.io/api/core/v1" - - "github.com/kubescape/node-agent/pkg/ebpf/events" - "github.com/kubescape/node-agent/pkg/objectcache" - "github.com/kubescape/node-agent/pkg/rulemanager/v1/ruleprocess" - "github.com/kubescape/node-agent/pkg/utils" - - "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" - - traceropentype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/open/types" - eventtypes "github.com/inspektor-gadget/inspektor-gadget/pkg/types" -) - -func TestR0002UnexpectedFileAccess(t *testing.T) { - // Create a new rule - r := CreateRuleR0002UnexpectedFileAccess() - // Assert r is not nil - if r == nil { - t.Errorf("Expected r to not be nil") - } - - // Create a file access event - e := &events.OpenEvent{ - Event: traceropentype.Event{ - Event: eventtypes.Event{ - CommonData: eventtypes.CommonData{ - K8s: eventtypes.K8sMetadata{ - BasicK8sMetadata: eventtypes.BasicK8sMetadata{ - ContainerName: "test", - }, - }, - }, - }, - Path: "/test", - FullPath: "/test", - Flags: []string{"O_RDONLY"}, - }, - } - // Test with nil appProfileAccess - ruleResult := ruleprocess.ProcessRule(r, utils.OpenEventType, e, &objectcache.ObjectCacheMock{}) - if ruleResult != nil { - t.Errorf("Expected ruleResult to not be nil since no appProfile") - } - - // Test with whitelisted file - objCache := RuleObjectCacheMock{} - profile := objCache.ApplicationProfileCache().GetApplicationProfile("test") - if profile == nil { - profile = &v1beta1.ApplicationProfile{ - Spec: v1beta1.ApplicationProfileSpec{ - Containers: []v1beta1.ApplicationProfileContainer{ - { - Name: "test", - Opens: []v1beta1.OpenCalls{ - { - Path: "/test", - Flags: []string{"O_RDONLY"}, - }, - }, - }, - }, - }, - } - objCache.SetApplicationProfile(profile) - } - ruleResult = ruleprocess.ProcessRule(r, utils.OpenEventType, e, &objCache) - if ruleResult != nil { - t.Errorf("Expected ruleResult to be nil since file is whitelisted") - } - - e.FullPath = "/var/log/app123.log" - profile = &v1beta1.ApplicationProfile{ - Spec: v1beta1.ApplicationProfileSpec{ - Containers: []v1beta1.ApplicationProfileContainer{ - { - Name: "test", - Opens: []v1beta1.OpenCalls{ - { - Path: "/var/log/\u22ef", - Flags: []string{"O_RDONLY"}, - }, - }, - }, - }, - }, - } - objCache.SetApplicationProfile(profile) - r.SetParameters(map[string]interface{}{"ignoreMounts": false, "ignorePrefixes": []interface{}{}}) - ruleResult = ruleprocess.ProcessRule(r, utils.OpenEventType, e, &objCache) - if ruleResult != nil { - t.Errorf("Expected ruleResult to be nil since file matches dynamic path in profile") - } - - // Test with 
dynamic path but different flags - e.Flags = []string{"O_WRONLY"} - ruleResult = ruleprocess.ProcessRule(r, utils.OpenEventType, e, &objCache) - if ruleResult == nil { - t.Errorf("Expected ruleResult to not be nil since flag is not whitelisted for dynamic path") - } - - // Test with dynamic path but non-matching file - e.FullPath = "/var/log/different_directory/app123.log" - e.Flags = []string{"O_RDONLY"} - ruleResult = ruleprocess.ProcessRule(r, utils.OpenEventType, e, &objCache) - if ruleResult == nil { - t.Errorf("Expected ruleResult to not be nil since file does not match dynamic path structure") - } - - // Test with multiple dynamic segments - e.FullPath = "/var/log/user123/app456.log" - profile.Spec.Containers[0].Opens = []v1beta1.OpenCalls{ - { - Path: "/var/log/\u22ef/\u22ef", - Flags: []string{"O_RDONLY"}, - }, - } - objCache.SetApplicationProfile(profile) - ruleResult = ruleprocess.ProcessRule(r, utils.OpenEventType, e, &objCache) - if ruleResult != nil { - t.Errorf("Expected ruleResult to be nil since file matches multiple dynamic segments in profile") - } - - // Test with whitelisted file, but different flags - e.Flags = []string{"O_WRONLY"} - ruleResult = ruleprocess.ProcessRule(r, utils.OpenEventType, e, &objCache) - if ruleResult == nil { - t.Errorf("Expected ruleResult to not be nil since flag is not whitelisted") - } - - // Test with mounted file - e.Flags = []string{"O_RDONLY"} - e.FullPath = "/var/test1" - objCache.SetPodSpec(&corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "test", - VolumeMounts: []corev1.VolumeMount{ - { - Name: "test", - MountPath: "/var", - }, - }, - }, - }, - Volumes: []corev1.Volume{ - { - Name: "test", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/var", - }, - }, - }, - }, - }) - r.SetParameters(map[string]interface{}{"ignoreMounts": true}) - ruleResult = ruleprocess.ProcessRule(r, utils.OpenEventType, e, &objCache) - - if ruleResult != nil { - t.Errorf("Expected ruleResult to be nil since file is mounted") - } - - // Test with ignored prefix - e.FullPath = "/var/test1" - ignorePrefixes := []interface{}{"/var"} - r.SetParameters(map[string]interface{}{"ignoreMounts": false, "ignorePrefixes": ignorePrefixes}) - ruleResult = ruleprocess.ProcessRule(r, utils.OpenEventType, e, &objCache) - if ruleResult != nil { - t.Errorf("Expected ruleResult to be nil since file is ignored") - } - - // Test with include prefixes - e.FullPath = "/var/test1" - includePrefixes := []interface{}{"/etc"} - r.SetParameters(map[string]interface{}{"ignoreMounts": false, "ignorePrefixes": ignorePrefixes, "includePrefixes": includePrefixes}) - ruleResult = ruleprocess.ProcessRule(r, utils.OpenEventType, e, &objCache) - if ruleResult != nil { - t.Errorf("Expected ruleResult to be nil since file is not included") - } - - // Test the case where the path is included - e.FullPath = "/etc/passwd" - ruleResult = ruleprocess.ProcessRule(r, utils.OpenEventType, e, &objCache) - if ruleResult == nil { - t.Errorf("Expected ruleResult to not be nil since file is included") - } - - // Test the case where the path is included but ignored - e.FullPath = "/etc/some/random/path/passwd" - ignorePrefixes = []interface{}{"/etc/some"} - r.SetParameters(map[string]interface{}{"ignoreMounts": false, "ignorePrefixes": ignorePrefixes, "includePrefixes": includePrefixes}) - ruleResult = ruleprocess.ProcessRule(r, utils.OpenEventType, e, &objCache) - if ruleResult != nil { - t.Errorf("Expected ruleResult to be nil since file is ignored") - } -} 
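R0002 matches the opened path against profile entries with dynamicpathdetector.CompareDynamic (so a profiled `/var/log/⋯` covers concrete filenames, as the tests above show) and then requires every flag on the event to appear among the flags recorded for that entry. The counting loop in the deleted code is effectively the subset test sketched here; a self-contained approximation, not the node-agent implementation:

```go
package main

import (
	"fmt"
	"slices"
)

// flagsWhitelisted mirrors the R0002 check: an open is allowed only if every
// flag on the event also appears in the profiled flags for the matched path.
func flagsWhitelisted(profileFlags, eventFlags []string) bool {
	for _, f := range eventFlags {
		if !slices.Contains(profileFlags, f) {
			return false
		}
	}
	return true
}

func main() {
	profile := []string{"O_RDONLY", "O_CLOEXEC"}

	fmt.Println(flagsWhitelisted(profile, []string{"O_RDONLY"}))            // true: subset of profiled flags
	fmt.Println(flagsWhitelisted(profile, []string{"O_WRONLY"}))            // false: write flag never profiled
	fmt.Println(flagsWhitelisted(profile, []string{"O_RDONLY", "O_CREAT"})) // false: O_CREAT not profiled
}
```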
diff --git a/pkg/ruleengine/v1/r0003_unexpected_system_call.go b/pkg/ruleengine/v1/r0003_unexpected_system_call.go deleted file mode 100644 index e4898182c..000000000 --- a/pkg/ruleengine/v1/r0003_unexpected_system_call.go +++ /dev/null @@ -1,147 +0,0 @@ -package ruleengine - -import ( - "fmt" - - "github.com/kubescape/node-agent/pkg/objectcache" - "github.com/kubescape/node-agent/pkg/ruleengine" - "github.com/kubescape/node-agent/pkg/utils" - - ruleenginetypes "github.com/kubescape/node-agent/pkg/ruleengine/types" - - apitypes "github.com/armosec/armoapi-go/armotypes" - mapset "github.com/deckarep/golang-set/v2" -) - -const ( - R0003ID = "R0003" - R0003Name = "Unexpected system call" -) - -var R0003UnexpectedSystemCallRuleDescriptor = ruleengine.RuleDescriptor{ - ID: R0003ID, - Name: R0003Name, - Description: "Detecting unexpected system calls that are not whitelisted by application profile.", - Tags: []string{"syscall", "whitelisted"}, - Priority: RulePriorityLow, - Requirements: &RuleRequirements{ - EventTypes: []utils.EventType{ - utils.SyscallEventType, - }, - }, - RuleCreationFunc: func() ruleengine.RuleEvaluator { - return CreateRuleR0003UnexpectedSystemCall() - }, -} - -var _ ruleengine.RuleEvaluator = (*R0003UnexpectedSystemCall)(nil) - -type R0003UnexpectedSystemCall struct { - BaseRule - listOfAlertedSyscalls mapset.Set[string] -} - -func CreateRuleR0003UnexpectedSystemCall() *R0003UnexpectedSystemCall { - return &R0003UnexpectedSystemCall{ - listOfAlertedSyscalls: mapset.NewSet[string](), - } -} - -func (rule *R0003UnexpectedSystemCall) Name() string { - return R0003Name -} - -func (rule *R0003UnexpectedSystemCall) ID() string { - return R0003ID -} - -func (rule *R0003UnexpectedSystemCall) DeleteRule() { -} - -func (rule *R0003UnexpectedSystemCall) EvaluateRule(eventType utils.EventType, event utils.K8sEvent, k8sObjCache objectcache.K8sObjectCache) ruleengine.DetectionResult { - if eventType != utils.SyscallEventType { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - syscallEvent, ok := event.(*ruleenginetypes.SyscallEvent) - if !ok { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - // We have already alerted for this syscall - if rule.listOfAlertedSyscalls.ContainsOne(syscallEvent.SyscallName) { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - return ruleengine.DetectionResult{IsFailure: true, Payload: syscallEvent} -} - -func (rule *R0003UnexpectedSystemCall) EvaluateRuleWithProfile(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache) (ruleengine.DetectionResult, error) { - // First do basic evaluation - detectionResult := rule.EvaluateRule(eventType, event, objCache.K8sObjectCache()) - if !detectionResult.IsFailure { - return detectionResult, nil - } - - syscallEventTyped, _ := event.(*ruleenginetypes.SyscallEvent) - ap, err := GetApplicationProfile(syscallEventTyped.Runtime.ContainerID, objCache) - if err != nil { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, err - } - - container, err := GetContainerFromApplicationProfile(ap, syscallEventTyped.GetContainer()) - if err != nil { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, err - } - - // If the syscall is whitelisted, return nil - for _, syscall := range container.Syscalls { - if syscall == syscallEventTyped.SyscallName { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, nil - } - } - - return ruleengine.DetectionResult{IsFailure: true, Payload: nil}, 
nil -} - -func (rule *R0003UnexpectedSystemCall) CreateRuleFailure(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache, payload ruleengine.DetectionResult) ruleengine.RuleFailure { - syscallEvent, _ := event.(*ruleenginetypes.SyscallEvent) - - rule.listOfAlertedSyscalls.Add(syscallEvent.SyscallName) - - return &GenericRuleFailure{ - BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ - UniqueID: HashStringToMD5(syscallEvent.SyscallName), - AlertName: rule.Name(), - Arguments: map[string]interface{}{ - "syscall": syscallEvent.SyscallName, - }, - InfectedPID: syscallEvent.Pid, - Severity: R0003UnexpectedSystemCallRuleDescriptor.Priority, - }, - RuntimeProcessDetails: apitypes.ProcessTree{ - ProcessTree: apitypes.Process{ - PID: syscallEvent.Pid, - }, - ContainerID: syscallEvent.Runtime.ContainerID, - }, - TriggerEvent: syscallEvent.Event, - RuleAlert: apitypes.RuleAlert{ - RuleDescription: fmt.Sprintf("Unexpected system call: %s", syscallEvent.SyscallName), - }, - RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ - PodName: syscallEvent.GetPod(), - }, - RuleID: rule.ID(), - } -} - -func (rule *R0003UnexpectedSystemCall) Requirements() ruleengine.RuleSpec { - return &RuleRequirements{ - EventTypes: R0003UnexpectedSystemCallRuleDescriptor.Requirements.RequiredEventTypes(), - ProfileRequirements: ruleengine.ProfileRequirement{ - ProfileDependency: apitypes.Required, - ProfileType: apitypes.ApplicationProfile, - }, - } -} diff --git a/pkg/ruleengine/v1/r0003_unexpected_system_call_test.go b/pkg/ruleengine/v1/r0003_unexpected_system_call_test.go deleted file mode 100644 index 9e94f29d0..000000000 --- a/pkg/ruleengine/v1/r0003_unexpected_system_call_test.go +++ /dev/null @@ -1,87 +0,0 @@ -package ruleengine - -import ( - "fmt" - "testing" - - "github.com/kubescape/node-agent/pkg/rulemanager/v1/ruleprocess" - "github.com/kubescape/node-agent/pkg/utils" - - ruleenginetypes "github.com/kubescape/node-agent/pkg/ruleengine/types" - - eventtypes "github.com/inspektor-gadget/inspektor-gadget/pkg/types" - "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" -) - -func TestR0003UnexpectedSystemCall(t *testing.T) { - // Create a new rule - r := CreateRuleR0003UnexpectedSystemCall() - // Assert r is not nil - if r == nil { - t.Errorf("Expected r to not be nil") - } - - // Create a syscall event - e := &ruleenginetypes.SyscallEvent{ - Event: eventtypes.Event{ - CommonData: eventtypes.CommonData{ - K8s: eventtypes.K8sMetadata{ - BasicK8sMetadata: eventtypes.BasicK8sMetadata{ - ContainerName: "test", - }, - }, - }, - }, - Comm: "test", - SyscallName: "test", - } - - // Test with nil application profile - ruleResult := ruleprocess.ProcessRule(r, utils.SyscallEventType, e, &RuleObjectCacheMock{}) - if ruleResult != nil { - t.Errorf("Expected ruleResult to be nil since no syscall event") - } - - objCache := RuleObjectCacheMock{} - profile := objCache.ApplicationProfileCache().GetApplicationProfile("test") - if profile == nil { - profile = &v1beta1.ApplicationProfile{ - Spec: v1beta1.ApplicationProfileSpec{ - Containers: []v1beta1.ApplicationProfileContainer{ - { - Name: "test", - Syscalls: []string{ - "test", - }, - }, - }, - }, - } - objCache.SetApplicationProfile(profile) - } - // Test with mock application activity and syscall - ruleResult = ruleprocess.ProcessRule(r, utils.SyscallEventType, e, &objCache) - if ruleResult != nil { - fmt.Printf("ruleResult: %v\n", ruleResult) - t.Errorf("Expected ruleResult to be nil since syscall is whitelisted") - } - - 
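R0003 remembers the syscalls it has already alerted on (a mapset), as R0004 does for capabilities and R0005 for domains, so each distinct finding fires at most once per rule instance. A stdlib-only sketch of that once-per-key suppression, using a mutex-guarded map instead of the concurrent set, SafeMap and LRU types the deleted rules rely on:

```go
package main

import (
	"fmt"
	"sync"
)

// onceAlerter suppresses repeat alerts for the same key; a hypothetical
// stand-in for the alerted-syscall / alerted-capability bookkeeping above.
type onceAlerter struct {
	mu   sync.Mutex
	seen map[string]struct{}
}

func newOnceAlerter() *onceAlerter {
	return &onceAlerter{seen: make(map[string]struct{})}
}

// shouldAlert reports whether key has not been alerted on yet and records it.
func (o *onceAlerter) shouldAlert(key string) bool {
	o.mu.Lock()
	defer o.mu.Unlock()
	if _, ok := o.seen[key]; ok {
		return false
	}
	o.seen[key] = struct{}{}
	return true
}

func main() {
	a := newOnceAlerter()
	for _, syscall := range []string{"ptrace", "ptrace", "unshare"} {
		fmt.Println(syscall, a.shouldAlert(syscall)) // ptrace true, ptrace false, unshare true
	}
}
```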
objCache.SetApplicationProfile(&v1beta1.ApplicationProfile{ - Spec: v1beta1.ApplicationProfileSpec{ - Containers: []v1beta1.ApplicationProfileContainer{ - { - Name: "test", - Syscalls: []string{ - "test1", - }, - }, - }, - }, - }) - - // Test with mock application activity and syscall - ruleResult = ruleprocess.ProcessRule(r, utils.SyscallEventType, e, &objCache) - if ruleResult == nil { - t.Errorf("Expected ruleResult to not be nil since syscall is not whitelisted") - } -} diff --git a/pkg/ruleengine/v1/r0004_unexpected_capability_used.go b/pkg/ruleengine/v1/r0004_unexpected_capability_used.go deleted file mode 100644 index f2e5e387d..000000000 --- a/pkg/ruleengine/v1/r0004_unexpected_capability_used.go +++ /dev/null @@ -1,146 +0,0 @@ -package ruleengine - -import ( - "fmt" - - apitypes "github.com/armosec/armoapi-go/armotypes" - "github.com/armosec/armoapi-go/armotypes/common" - "github.com/goradd/maps" - tracercapabilitiestype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/capabilities/types" - "github.com/kubescape/node-agent/pkg/objectcache" - "github.com/kubescape/node-agent/pkg/ruleengine" - "github.com/kubescape/node-agent/pkg/utils" -) - -const ( - R0004ID = "R0004" - R0004Name = "Unexpected capability used" -) - -var R0004UnexpectedCapabilityUsedRuleDescriptor = ruleengine.RuleDescriptor{ - ID: R0004ID, - Name: R0004Name, - Description: "Detecting unexpected capabilities that are not whitelisted by application profile. Every unexpected capability is identified in context of a syscall and will be alerted only once per container.", - Tags: []string{"capabilities", "whitelisted"}, - Priority: RulePriorityMed, - Requirements: &RuleRequirements{ - EventTypes: []utils.EventType{utils.CapabilitiesEventType}, - }, - RuleCreationFunc: func() ruleengine.RuleEvaluator { - return CreateRuleR0004UnexpectedCapabilityUsed() - }, -} -var _ ruleengine.RuleEvaluator = (*R0004UnexpectedCapabilityUsed)(nil) - -type R0004UnexpectedCapabilityUsed struct { - BaseRule - alertedCapabilities maps.SafeMap[string, bool] -} - -func CreateRuleR0004UnexpectedCapabilityUsed() *R0004UnexpectedCapabilityUsed { - return &R0004UnexpectedCapabilityUsed{} -} -func (rule *R0004UnexpectedCapabilityUsed) Name() string { - return R0004Name -} - -func (rule *R0004UnexpectedCapabilityUsed) ID() string { - return R0004ID -} - -func (rule *R0004UnexpectedCapabilityUsed) DeleteRule() { -} - -func (rule *R0004UnexpectedCapabilityUsed) EvaluateRule(eventType utils.EventType, event utils.K8sEvent, k8sObjCache objectcache.K8sObjectCache) ruleengine.DetectionResult { - if eventType != utils.CapabilitiesEventType { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - capEvent, ok := event.(*tracercapabilitiestype.Event) - if !ok { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - if rule.alertedCapabilities.Has(capEvent.CapName) { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - return ruleengine.DetectionResult{IsFailure: true, Payload: capEvent.CapName} -} - -func (rule *R0004UnexpectedCapabilityUsed) EvaluateRuleWithProfile(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache) (ruleengine.DetectionResult, error) { - // First do basic evaluation - detectionResult := rule.EvaluateRule(eventType, event, objCache.K8sObjectCache()) - if !detectionResult.IsFailure { - return detectionResult, nil - } - - capEventTyped, _ := event.(*tracercapabilitiestype.Event) - ap, err := 
GetApplicationProfile(capEventTyped.Runtime.ContainerID, objCache) - if err != nil { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, err - } - - appProfileCapabilitiesList, err := GetContainerFromApplicationProfile(ap, capEventTyped.GetContainer()) - if err != nil { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, err - } - - for _, capability := range appProfileCapabilitiesList.Capabilities { - if capEventTyped.CapName == capability { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, nil - } - } - - return detectionResult, nil -} - -func (rule *R0004UnexpectedCapabilityUsed) CreateRuleFailure(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache, payload ruleengine.DetectionResult) ruleengine.RuleFailure { - capEvent, _ := event.(*tracercapabilitiestype.Event) - rule.alertedCapabilities.Set(capEvent.CapName, true) - - return &GenericRuleFailure{ - BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ - UniqueID: HashStringToMD5(fmt.Sprintf("%s%s", capEvent.Comm, capEvent.CapName)), - AlertName: rule.Name(), - Arguments: map[string]interface{}{ - "syscall": capEvent.Syscall, - "capability": capEvent.CapName, - }, - InfectedPID: capEvent.Pid, - Severity: R0004UnexpectedCapabilityUsedRuleDescriptor.Priority, - Identifiers: &common.Identifiers{ - Process: &common.ProcessEntity{ - Name: capEvent.Comm, - }, - }, - }, - RuntimeProcessDetails: apitypes.ProcessTree{ - ProcessTree: apitypes.Process{ - Comm: capEvent.Comm, - Gid: &capEvent.Gid, - PID: capEvent.Pid, - Uid: &capEvent.Uid, - }, - ContainerID: capEvent.Runtime.ContainerID, - }, - TriggerEvent: capEvent.Event, - RuleAlert: apitypes.RuleAlert{ - RuleDescription: fmt.Sprintf("Unexpected capability used (capability %s used in syscall %s)", capEvent.CapName, capEvent.Syscall), - }, - RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ - PodName: capEvent.GetPod(), - }, - RuleID: rule.ID(), - } -} - -func (rule *R0004UnexpectedCapabilityUsed) Requirements() ruleengine.RuleSpec { - return &RuleRequirements{ - EventTypes: R0004UnexpectedCapabilityUsedRuleDescriptor.Requirements.RequiredEventTypes(), - ProfileRequirements: ruleengine.ProfileRequirement{ - ProfileDependency: apitypes.Required, - ProfileType: apitypes.ApplicationProfile, - }, - } -} diff --git a/pkg/ruleengine/v1/r0004_unexpected_capability_used_test.go b/pkg/ruleengine/v1/r0004_unexpected_capability_used_test.go deleted file mode 100644 index 952ada1f2..000000000 --- a/pkg/ruleengine/v1/r0004_unexpected_capability_used_test.go +++ /dev/null @@ -1,59 +0,0 @@ -package ruleengine - -import ( - "testing" - - "github.com/kubescape/node-agent/pkg/rulemanager/v1/ruleprocess" - "github.com/kubescape/node-agent/pkg/utils" - - tracercapabilitiestype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/capabilities/types" - eventtypes "github.com/inspektor-gadget/inspektor-gadget/pkg/types" - "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" -) - -func TestR0004UnexpectedCapabilityUsed(t *testing.T) { - // Create a new rule - r := CreateRuleR0004UnexpectedCapabilityUsed() - // Assert r is not nil - if r == nil { - t.Errorf("Expected r to not be nil") - } - - e := &tracercapabilitiestype.Event{ - Event: eventtypes.Event{ - CommonData: eventtypes.CommonData{ - K8s: eventtypes.K8sMetadata{ - BasicK8sMetadata: eventtypes.BasicK8sMetadata{ - ContainerName: "test", - }, - }, - }, - }, - CapName: "test_cap", - Syscall: "test_call", - } - - // Test with nil appProfileAccess - ruleResult 
:= ruleprocess.ProcessRule(r, utils.CapabilitiesEventType, e, &RuleObjectCacheMock{}) - if ruleResult != nil { - t.Errorf("Expected ruleResult to be nil since no appProfile is present") - } - - objCache := RuleObjectCacheMock{} - profile := objCache.ApplicationProfileCache().GetApplicationProfile("test") - if profile == nil { - profile = &v1beta1.ApplicationProfile{} - profile.Spec.Containers = append(profile.Spec.Containers, v1beta1.ApplicationProfileContainer{ - Name: "test", - Capabilities: []string{"test_cap"}, - }) - - objCache.SetApplicationProfile(profile) - } - - // Test with mock appProfile - ruleResult = ruleprocess.ProcessRule(r, utils.CapabilitiesEventType, e, &objCache) - if ruleResult != nil { - t.Errorf("Expected ruleResult to be nil since capability is in the profile") - } -} diff --git a/pkg/ruleengine/v1/r0005_unexpected_domain_request.go b/pkg/ruleengine/v1/r0005_unexpected_domain_request.go deleted file mode 100644 index d1e98bb91..000000000 --- a/pkg/ruleengine/v1/r0005_unexpected_domain_request.go +++ /dev/null @@ -1,181 +0,0 @@ -package ruleengine - -import ( - "fmt" - "slices" - "strings" - - apitypes "github.com/armosec/armoapi-go/armotypes" - "github.com/armosec/armoapi-go/armotypes/common" - lru "github.com/hashicorp/golang-lru/v2" - tracerdnstype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/dns/types" - "github.com/kubescape/go-logger" - "github.com/kubescape/go-logger/helpers" - "github.com/kubescape/node-agent/pkg/objectcache" - "github.com/kubescape/node-agent/pkg/ruleengine" - "github.com/kubescape/node-agent/pkg/utils" -) - -const ( - R0005ID = "R0005" - R0005Name = "Unexpected domain request" -) - -var R0005UnexpectedDomainRequestRuleDescriptor = ruleengine.RuleDescriptor{ - ID: R0005ID, - Name: R0005Name, - Description: "Detecting unexpected domain requests that are not whitelisted by application profile.", - Tags: []string{"dns", "whitelisted"}, - Priority: RulePriorityMed, - Requirements: &RuleRequirements{ - EventTypes: []utils.EventType{utils.DnsEventType}, - }, - RuleCreationFunc: func() ruleengine.RuleEvaluator { - return CreateRuleR0005UnexpectedDomainRequest() - }, -} -var _ ruleengine.RuleEvaluator = (*R0005UnexpectedDomainRequest)(nil) - -type R0005UnexpectedDomainRequest struct { - BaseRule - alertedDomains *lru.Cache[string, bool] -} - -func CreateRuleR0005UnexpectedDomainRequest() *R0005UnexpectedDomainRequest { - alertedDomains, err := lru.New[string, bool](500) - if err != nil { - logger.L().Fatal("creating lru cache", helpers.Error(err)) - return nil - } - return &R0005UnexpectedDomainRequest{alertedDomains: alertedDomains} -} - -func (rule *R0005UnexpectedDomainRequest) Name() string { - return R0005Name -} - -func (rule *R0005UnexpectedDomainRequest) ID() string { - return R0005ID -} - -func (rule *R0005UnexpectedDomainRequest) DeleteRule() { -} - -func (rule *R0005UnexpectedDomainRequest) EvaluateRule(eventType utils.EventType, event utils.K8sEvent, k8sObjCache objectcache.K8sObjectCache) ruleengine.DetectionResult { - if eventType != utils.DnsEventType { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - domainEvent, ok := event.(*tracerdnstype.Event) - if !ok { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - if rule.alertedDomains.Contains(domainEvent.DNSName) { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - // TODO: fix this, currently we are ignoring in-cluster communication - if strings.HasSuffix(domainEvent.DNSName, 
"svc.cluster.local.") { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - return ruleengine.DetectionResult{IsFailure: true, Payload: domainEvent} -} - -func (rule *R0005UnexpectedDomainRequest) EvaluateRuleWithProfile(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache) (ruleengine.DetectionResult, error) { - // First do basic evaluation - detectionResult := rule.EvaluateRule(eventType, event, objCache.K8sObjectCache()) - if !detectionResult.IsFailure { - return detectionResult, nil - } - - domainEventTyped, _ := event.(*tracerdnstype.Event) - nn, err := GetNetworkNeighborhood(domainEventTyped.Runtime.ContainerID, objCache) - if err != nil { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, err - } - - nnContainer, err := GetContainerFromNetworkNeighborhood(nn, domainEventTyped.GetContainer()) - if err != nil { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, err - } - - // Check that the domain is in the network neighbors - for _, dns := range nnContainer.Egress { - if dns.DNS == domainEventTyped.DNSName || slices.Contains(dns.DNSNames, domainEventTyped.DNSName) { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, nil - } - } - - return detectionResult, nil -} - -func (rule *R0005UnexpectedDomainRequest) CreateRuleFailure(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache, payload ruleengine.DetectionResult) ruleengine.RuleFailure { - domainEvent, _ := event.(*tracerdnstype.Event) - rule.alertedDomains.Add(domainEvent.DNSName, true) - - dstIP := "" - if len(domainEvent.Addresses) > 0 { - dstIP = domainEvent.Addresses[0] - } - - return &GenericRuleFailure{ - BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ - UniqueID: HashStringToMD5(fmt.Sprintf("%s%s", domainEvent.Comm, domainEvent.DNSName)), - AlertName: rule.Name(), - InfectedPID: domainEvent.Pid, - Arguments: map[string]interface{}{ - "domain": domainEvent.DNSName, - "addresses": domainEvent.Addresses, - "protocol": domainEvent.Protocol, - "port": domainEvent.DstPort, - }, - Severity: R0005UnexpectedDomainRequestRuleDescriptor.Priority, - Identifiers: &common.Identifiers{ - Process: &common.ProcessEntity{ - Name: domainEvent.Comm, - }, - Dns: &common.DnsEntity{ - Domain: domainEvent.DNSName, - }, - Network: &common.NetworkEntity{ - DstIP: dstIP, - Protocol: domainEvent.Protocol, - }, - }, - }, - RuntimeProcessDetails: apitypes.ProcessTree{ - ProcessTree: apitypes.Process{ - Comm: domainEvent.Comm, - Gid: &domainEvent.Gid, - PID: domainEvent.Pid, - Uid: &domainEvent.Uid, - Pcomm: domainEvent.Pcomm, - Path: domainEvent.Exepath, - Cwd: domainEvent.Cwd, - PPID: domainEvent.Ppid, - }, - ContainerID: domainEvent.Runtime.ContainerID, - }, - TriggerEvent: domainEvent.Event, - RuleAlert: apitypes.RuleAlert{ - RuleDescription: fmt.Sprintf("Unexpected domain communication: %s from: %s", domainEvent.DNSName, domainEvent.GetContainer()), - }, - RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ - PodName: domainEvent.GetPod(), - PodLabels: domainEvent.K8s.PodLabels, - }, - RuleID: rule.ID(), - } -} - -func (rule *R0005UnexpectedDomainRequest) Requirements() ruleengine.RuleSpec { - return &RuleRequirements{ - EventTypes: R0005UnexpectedDomainRequestRuleDescriptor.Requirements.RequiredEventTypes(), - ProfileRequirements: ruleengine.ProfileRequirement{ - ProfileDependency: apitypes.Required, - ProfileType: apitypes.NetworkProfile, - }, - } -} diff --git 
a/pkg/ruleengine/v1/r0005_unexpected_domain_request_test.go b/pkg/ruleengine/v1/r0005_unexpected_domain_request_test.go deleted file mode 100644 index ecc73dbb5..000000000 --- a/pkg/ruleengine/v1/r0005_unexpected_domain_request_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package ruleengine - -import ( - "testing" - - "github.com/kubescape/node-agent/pkg/rulemanager/v1/ruleprocess" - "github.com/kubescape/node-agent/pkg/utils" - - tracerdnstype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/dns/types" - eventtypes "github.com/inspektor-gadget/inspektor-gadget/pkg/types" - "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" -) - -func TestR0005UnexpectedDomainRequest(t *testing.T) { - // Create a new rule - r := CreateRuleR0005UnexpectedDomainRequest() - // Assert r is not nil - if r == nil { - t.Errorf("Expected r to not be nil") - } - - // Create a domain request event - e := &tracerdnstype.Event{ - Event: eventtypes.Event{ - CommonData: eventtypes.CommonData{ - K8s: eventtypes.K8sMetadata{ - BasicK8sMetadata: eventtypes.BasicK8sMetadata{ - ContainerName: "test", - }, - }, - }, - }, - DNSName: "test.com", - Qr: tracerdnstype.DNSPktTypeQuery, - } - - // Test with nil appProfileAccess - ruleResult := ruleprocess.ProcessRule(r, utils.DnsEventType, e, &RuleObjectCacheMock{}) - if ruleResult != nil { - t.Errorf("Expected ruleResult to not be nil since no appProfile") - } - - // Test with whitelisted domain - objCache := RuleObjectCacheMock{} - nn := objCache.NetworkNeighborhoodCache().GetNetworkNeighborhood("test") - if nn == nil { - nn = &v1beta1.NetworkNeighborhood{} - nn.Spec.Containers = append(nn.Spec.Containers, v1beta1.NetworkNeighborhoodContainer{ - Name: "test", - - Egress: []v1beta1.NetworkNeighbor{ - { - DNS: "test.com", - }, - }, - }) - - objCache.SetNetworkNeighborhood(nn) - } - - ruleResult = ruleprocess.ProcessRule(r, utils.DnsEventType, e, &objCache) - if ruleResult != nil { - t.Errorf("Expected ruleResult to be nil since domain is whitelisted") - } -} diff --git a/pkg/ruleengine/v1/r0006_unexpected_service_account_token_access.go b/pkg/ruleengine/v1/r0006_unexpected_service_account_token_access.go deleted file mode 100644 index 18ffad3f4..000000000 --- a/pkg/ruleengine/v1/r0006_unexpected_service_account_token_access.go +++ /dev/null @@ -1,207 +0,0 @@ -package ruleengine - -import ( - "fmt" - "path/filepath" - "strings" - - apitypes "github.com/armosec/armoapi-go/armotypes" - "github.com/armosec/armoapi-go/armotypes/common" - "github.com/kubescape/node-agent/pkg/ebpf/events" - "github.com/kubescape/node-agent/pkg/objectcache" - "github.com/kubescape/node-agent/pkg/ruleengine" - "github.com/kubescape/node-agent/pkg/utils" - "github.com/kubescape/storage/pkg/registry/file/dynamicpathdetector" -) - -const ( - R0006ID = "R0006" - R0006Name = "Unexpected Service Account Token Access" -) - -var serviceAccountTokenPathsPrefixes = []string{ - "/run/secrets/kubernetes.io/serviceaccount", - "/var/run/secrets/kubernetes.io/serviceaccount", - "/run/secrets/eks.amazonaws.com/serviceaccount", - "/var/run/secrets/eks.amazonaws.com/serviceaccount", -} - -var R0006UnexpectedServiceAccountTokenAccessRuleDescriptor = ruleengine.RuleDescriptor{ - ID: R0006ID, - Name: R0006Name, - Description: "Detecting unexpected access to service account token.", - Tags: []string{"token", "malicious", "security", "kubernetes"}, - Priority: RulePriorityHigh, - Requirements: &RuleRequirements{ - EventTypes: []utils.EventType{ - utils.OpenEventType, - }, - }, - RuleCreationFunc: func() 
ruleengine.RuleEvaluator { - return CreateRuleR0006UnexpectedServiceAccountTokenAccess() - }, -} - -type R0006UnexpectedServiceAccountTokenAccess struct { - BaseRule -} - -// getTokenBasePath returns the base service account token path if the path is a token path, -// otherwise returns an empty string. Using a single iteration through prefixes. -func getTokenBasePath(path string) string { - for _, prefix := range serviceAccountTokenPathsPrefixes { - if strings.HasPrefix(path, prefix) { - return prefix - } - } - return "" -} - -// normalizeTokenPath removes timestamp directories from the path while maintaining -// the essential structure. Handles both timestamp directories and dynamic identifiers. -func normalizeTimestampPath(path string) string { - parts := strings.Split(filepath.Clean(path), string(filepath.Separator)) - var normalized []string - - for _, part := range parts { - if part == "" { - continue - } - - // Replace timestamp directories with their base form - if strings.HasPrefix(part, "..") && strings.Contains(part, "_") { - normalized = append(normalized, "..timestamp") - continue - } - - normalized = append(normalized, part) - } - - return "/" + strings.Join(normalized, "/") -} - -func CreateRuleR0006UnexpectedServiceAccountTokenAccess() *R0006UnexpectedServiceAccountTokenAccess { - return &R0006UnexpectedServiceAccountTokenAccess{} -} - -func (rule *R0006UnexpectedServiceAccountTokenAccess) Name() string { - return R0006Name -} - -func (rule *R0006UnexpectedServiceAccountTokenAccess) ID() string { - return R0006ID -} - -func (rule *R0006UnexpectedServiceAccountTokenAccess) DeleteRule() {} - -func (rule *R0006UnexpectedServiceAccountTokenAccess) EvaluateRule(eventType utils.EventType, event utils.K8sEvent, k8sObjCache objectcache.K8sObjectCache) ruleengine.DetectionResult { - if eventType != utils.OpenEventType { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - convertedEvent, ok := event.(*events.OpenEvent) - if !ok { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - openEvent := convertedEvent.Event - - // Check if this is a token path - using optimized check - if getTokenBasePath(openEvent.FullPath) == "" { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - return ruleengine.DetectionResult{IsFailure: true, Payload: openEvent.FullPath} -} - -func (rule *R0006UnexpectedServiceAccountTokenAccess) EvaluateRuleWithProfile(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache) (ruleengine.DetectionResult, error) { - // First do basic evaluation - detectionResult := rule.EvaluateRule(eventType, event, objCache.K8sObjectCache()) - if !detectionResult.IsFailure { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, nil - } - - openEventTyped, _ := event.(*events.OpenEvent) - ap, err := GetApplicationProfile(openEventTyped.Runtime.ContainerID, objCache) - if err != nil { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, err - } - - appProfileOpenList, err := GetContainerFromApplicationProfile(ap, openEventTyped.GetContainer()) - if err != nil { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, err - } - - // Normalize the accessed path once - normalizedAccessedPath := normalizeTimestampPath(openEventTyped.FullPath) - - // Check against whitelisted paths - for _, open := range appProfileOpenList.Opens { - normalizedWhitelistedPath := normalizeTimestampPath(open.Path) - if 
dynamicpathdetector.CompareDynamic(filepath.Dir(normalizedWhitelistedPath), filepath.Dir(normalizedAccessedPath)) { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, nil - } - } - - return detectionResult, nil -} - -func (rule *R0006UnexpectedServiceAccountTokenAccess) CreateRuleFailure(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache, payload ruleengine.DetectionResult) ruleengine.RuleFailure { - convertedEvent, _ := event.(*events.OpenEvent) - openEvent := convertedEvent.Event - - return &GenericRuleFailure{ - BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ - UniqueID: HashStringToMD5(openEvent.Comm), // We don't want to use the full path as it can be dynamic (https://kubernetes.io/docs/concepts/security/service-accounts/#assign-to-pod) - AlertName: rule.Name(), - Arguments: map[string]interface{}{ - "path": openEvent.FullPath, - "flags": openEvent.Flags, - }, - InfectedPID: openEvent.Pid, - Severity: R0006UnexpectedServiceAccountTokenAccessRuleDescriptor.Priority, - Identifiers: &common.Identifiers{ - Process: &common.ProcessEntity{ - Name: openEvent.Comm, - }, - File: &common.FileEntity{ - Name: filepath.Base(openEvent.FullPath), - Directory: filepath.Dir(openEvent.FullPath), - }, - }, - }, - RuntimeProcessDetails: apitypes.ProcessTree{ - ProcessTree: apitypes.Process{ - Comm: openEvent.Comm, - Gid: &openEvent.Gid, - PID: openEvent.Pid, - Uid: &openEvent.Uid, - }, - ContainerID: openEvent.Runtime.ContainerID, - }, - TriggerEvent: openEvent.Event, - RuleAlert: apitypes.RuleAlert{ - RuleDescription: fmt.Sprintf( - "Unexpected access to service account token: %s with flags: %s", - openEvent.FullPath, - strings.Join(openEvent.Flags, ","), - ), - }, - RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ - PodName: openEvent.GetPod(), - PodLabels: openEvent.K8s.PodLabels, - }, - RuleID: rule.ID(), - Extra: convertedEvent.GetExtra(), - } -} - -func (rule *R0006UnexpectedServiceAccountTokenAccess) Requirements() ruleengine.RuleSpec { - return &RuleRequirements{ - EventTypes: R0006UnexpectedServiceAccountTokenAccessRuleDescriptor.Requirements.RequiredEventTypes(), - ProfileRequirements: ruleengine.ProfileRequirement{ - ProfileDependency: apitypes.Required, - ProfileType: apitypes.ApplicationProfile, - }, - } -} diff --git a/pkg/ruleengine/v1/r0006_unexpected_service_account_token_access_test.go b/pkg/ruleengine/v1/r0006_unexpected_service_account_token_access_test.go deleted file mode 100644 index 9331f415b..000000000 --- a/pkg/ruleengine/v1/r0006_unexpected_service_account_token_access_test.go +++ /dev/null @@ -1,219 +0,0 @@ -package ruleengine - -import ( - "fmt" - "testing" - - traceropentype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/open/types" - eventtypes "github.com/inspektor-gadget/inspektor-gadget/pkg/types" - "github.com/kubescape/node-agent/pkg/ebpf/events" - "github.com/kubescape/node-agent/pkg/rulemanager/v1/ruleprocess" - "github.com/kubescape/node-agent/pkg/utils" - "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" - "github.com/kubescape/storage/pkg/registry/file/dynamicpathdetector" -) - -func createTestEvent0006(containerName, path string, flags []string) *events.OpenEvent { - return &events.OpenEvent{ - Event: traceropentype.Event{ - Event: eventtypes.Event{ - CommonData: eventtypes.CommonData{ - K8s: eventtypes.K8sMetadata{ - BasicK8sMetadata: eventtypes.BasicK8sMetadata{ - ContainerName: containerName, - }, - }, - }, - }, - Path: path, - FullPath: path, - Flags: flags, - }, - } -} - 
-func createTestProfile0006(containerName string, openCalls []v1beta1.OpenCalls) *v1beta1.ApplicationProfile { - return &v1beta1.ApplicationProfile{ - Spec: v1beta1.ApplicationProfileSpec{ - Containers: []v1beta1.ApplicationProfileContainer{ - { - Name: containerName, - Opens: openCalls, - }, - }, - }, - } -} - -func TestR0006UnexpectedServiceAccountTokenMount(t *testing.T) { - tests := []struct { - name string - event *events.OpenEvent - profile *v1beta1.ApplicationProfile - expectFailure bool - }{ - // Non-token path tests - { - name: "non-token path access", - event: createTestEvent0006("test", "/test", []string{"O_RDONLY"}), - expectFailure: false, - }, - { - name: "path with similar prefix but not token path", - event: createTestEvent0006("test", "/run/secrets/kubernetes.io/other", []string{"O_RDONLY"}), - expectFailure: false, - }, - - // Directory level whitelist tests - { - name: "access allowed when directory is whitelisted - token", - event: createTestEvent0006("test", "/run/secrets/kubernetes.io/serviceaccount/token", []string{"O_RDONLY"}), - profile: createTestProfile0006("test", []v1beta1.OpenCalls{{ - Path: "/run/secrets/kubernetes.io/serviceaccount/namespace", - Flags: []string{"O_RDONLY"}, - }}), - expectFailure: false, // Should pass because directory is whitelisted - }, - { - name: "access allowed when directory is whitelisted - ca.crt", - event: createTestEvent0006("test", "/run/secrets/kubernetes.io/serviceaccount/ca.crt", []string{"O_RDONLY"}), - profile: createTestProfile0006("test", []v1beta1.OpenCalls{{ - Path: "/run/secrets/kubernetes.io/serviceaccount/token", - Flags: []string{"O_RDONLY"}, - }}), - expectFailure: false, // Should pass because directory is whitelisted - }, - { - name: "access is not allowed when directory is not whitelisted - namespace", - event: createTestEvent0006("test", "/run/secrets/kubernetes.io/serviceaccount/namespace", []string{"O_RDONLY"}), - profile: createTestProfile0006("test", []v1beta1.OpenCalls{{ - Path: "home", - Flags: []string{"O_RDONLY"}, - }}), - expectFailure: true, // Should fail because directory is not whitelisted - }, - - // Tests with EKS paths and timestamps - { - name: "whitelisted eks token access with timestamps", - event: createTestEvent0006("test", - "/run/secrets/eks.amazonaws.com/serviceaccount/..2024_11_1111_24_34_58.850095521/token", - []string{"O_RDONLY"}), - profile: createTestProfile0006("test", []v1beta1.OpenCalls{{ - Path: "/run/secrets/eks.amazonaws.com/serviceaccount/..2024_11_21_04_30_58.850095521/namespace", - Flags: []string{"O_RDONLY"}, - }}), - expectFailure: false, // Should pass because normalized directory matches - }, - // Tests with EKS paths and timestamps - { - name: "whitelisted eks token access with timestamps with compress", - event: createTestEvent0006("test", - "/run/secrets/eks.amazonaws.com/serviceaccount/..2024_11_1111_24_34_58.850095521/token", - []string{"O_RDONLY"}), - profile: createTestProfile0006("test", []v1beta1.OpenCalls{{ - Path: fmt.Sprintf("/run/secrets/eks.amazonaws.com/serviceaccount/%s/token", dynamicpathdetector.DynamicIdentifier), - Flags: []string{"O_RDONLY"}, - }}), - expectFailure: false, // Should pass because normalized directory matches - }, - // Tests with EKS paths and timestamps - { - name: "whitelisted eks token access with timestamps with compress hello world", - event: createTestEvent0006("test", - "/var/run/secrets/eks.amazonaws.com/serviceaccount/..2024_11_1111_24_34_58.850095521/token", - []string{"O_RDONLY"}), - profile: createTestProfile0006("test", 
[]v1beta1.OpenCalls{{ - Path: fmt.Sprintf("/%s/%s/%s/%s/%s/%s/token", dynamicpathdetector.DynamicIdentifier, dynamicpathdetector.DynamicIdentifier, dynamicpathdetector.DynamicIdentifier, dynamicpathdetector.DynamicIdentifier, dynamicpathdetector.DynamicIdentifier, dynamicpathdetector.DynamicIdentifier), - Flags: []string{"O_RDONLY"}, - }}), - expectFailure: false, // Should pass because normalized directory matches - }, - // Tests with k8s paths and timestamps - { - name: "non whitelisted k8s token access with timestamps", - event: createTestEvent0006("test", - "/run/secrets/kubernetes.io/serviceaccount/..2024_11_24_09_06_53.3676909075/token", - []string{"O_RDONLY"}), - profile: createTestProfile0006("test", []v1beta1.OpenCalls{{ - Path: "/run/secrets", - Flags: []string{"O_RDONLY"}, - }}), - expectFailure: true, // Should fail because normalized directory does not match - }, - // Tests with k8s paths and timestamps - { - name: "non whitelisted k8s token access with timestamps", - event: createTestEvent0006("test", - "/run/secrets/kubernetes.io/serviceaccount/..2024_11_24_09_06_53.3676909075/token", - []string{"O_RDONLY"}), - profile: createTestProfile0006("test", []v1beta1.OpenCalls{{ - Path: "/", - Flags: []string{"O_RDONLY"}, - }}), - expectFailure: true, // Should fail because normalized directory does not match - }, - - // Different service account path variants - { - name: "var/run path variant matches run path whitelist", - event: createTestEvent0006("test", "/var/run/secrets/kubernetes.io/serviceaccount/token", []string{"O_RDONLY"}), - profile: createTestProfile0006("test", []v1beta1.OpenCalls{{ - Path: "/run/secrets/kubernetes.io/serviceaccount/namespace", - Flags: []string{"O_RDONLY"}, - }}), - expectFailure: true, // Should fail because different base path - }, - - // No whitelisting tests - { - name: "unauthorized token access", - event: createTestEvent0006("test", "/run/secrets/kubernetes.io/serviceaccount/token", []string{"O_RDONLY"}), - profile: createTestProfile0006("test", []v1beta1.OpenCalls{{ - Path: "/some/other/path", - Flags: []string{"O_RDONLY"}, - }}), - expectFailure: true, - }, - - // Container mismatch tests - { - name: "different container name", - event: createTestEvent0006("test2", "/run/secrets/kubernetes.io/serviceaccount/token", []string{"O_RDONLY"}), - profile: createTestProfile0006("test", []v1beta1.OpenCalls{{ - Path: "/run/secrets/kubernetes.io/serviceaccount/token", - Flags: []string{"O_RDONLY"}, - }}), - expectFailure: false, // No profile for the container - }, - - // Edge cases - { - name: "no application profile", - event: createTestEvent0006("test", "/run/secrets/kubernetes.io/serviceaccount/token", []string{"O_RDONLY"}), - profile: nil, - expectFailure: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - r := CreateRuleR0006UnexpectedServiceAccountTokenAccess() - mockCache := &RuleObjectCacheMock{} - - if tt.profile != nil { - mockCache.SetApplicationProfile(tt.profile) - } - - result := ruleprocess.ProcessRule(r, utils.OpenEventType, tt.event, mockCache) - - if tt.expectFailure && result == nil { - t.Error("Expected rule failure but got nil") - } - - if !tt.expectFailure && result != nil { - t.Errorf("Expected no failure but got: %v", result) - } - }) - } -} diff --git a/pkg/ruleengine/v1/r0007_kubernetes_client_executed.go b/pkg/ruleengine/v1/r0007_kubernetes_client_executed.go deleted file mode 100644 index 5d4cc7d58..000000000 --- a/pkg/ruleengine/v1/r0007_kubernetes_client_executed.go +++ /dev/null @@ 
-1,255 +0,0 @@ -package ruleengine - -import ( - "fmt" - "path/filepath" - "slices" - "strings" - - apitypes "github.com/armosec/armoapi-go/armotypes" - "github.com/armosec/armoapi-go/armotypes/common" - tracernetworktype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/network/types" - events "github.com/kubescape/node-agent/pkg/ebpf/events" - "github.com/kubescape/node-agent/pkg/objectcache" - "github.com/kubescape/node-agent/pkg/ruleengine" - "github.com/kubescape/node-agent/pkg/utils" -) - -const ( - R0007ID = "R0007" - R0007Name = "Kubernetes Client Executed" -) - -var kubernetesClients = []string{ - "kubectl", -} - -var R0007KubernetesClientExecutedDescriptor = ruleengine.RuleDescriptor{ - ID: R0007ID, - Name: R0007Name, - Description: "Detecting exececution of kubernetes client", - Priority: RulePriorityHigh, - Tags: []string{"exec", "malicious", "whitelisted"}, - Requirements: &RuleRequirements{ - EventTypes: []utils.EventType{utils.ExecveEventType, utils.NetworkEventType}, - }, - RuleCreationFunc: func() ruleengine.RuleEvaluator { - return CreateRuleR0007KubernetesClientExecuted() - }, -} -var _ ruleengine.RuleEvaluator = (*R0007KubernetesClientExecuted)(nil) - -type R0007KubernetesClientExecuted struct { - BaseRule -} - -func CreateRuleR0007KubernetesClientExecuted() *R0007KubernetesClientExecuted { - return &R0007KubernetesClientExecuted{} -} - -func (rule *R0007KubernetesClientExecuted) Name() string { - return R0007Name -} - -func (rule *R0007KubernetesClientExecuted) ID() string { - return R0007ID -} - -func (rule *R0007KubernetesClientExecuted) DeleteRule() { -} - -func (rule *R0007KubernetesClientExecuted) EvaluateRule(eventType utils.EventType, event utils.K8sEvent, k8sObjCache objectcache.K8sObjectCache) ruleengine.DetectionResult { - if eventType != utils.ExecveEventType && eventType != utils.NetworkEventType { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - if eventType == utils.ExecveEventType { - execEvent, ok := event.(*events.ExecEvent) - if !ok { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - execPath := GetExecPathFromEvent(execEvent) - if slices.Contains(kubernetesClients, filepath.Base(execPath)) || slices.Contains(kubernetesClients, execEvent.ExePath) { - return ruleengine.DetectionResult{IsFailure: true, Payload: execEvent} - } - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - networkEvent, ok := event.(*tracernetworktype.Event) - if !ok { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - if networkEvent.PktType != "OUTGOING" { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - if k8sObjCache == nil { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - apiServerIP := k8sObjCache.GetApiServerIpAddress() - if apiServerIP == "" || networkEvent.DstEndpoint.Addr != apiServerIP { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - return ruleengine.DetectionResult{IsFailure: true, Payload: networkEvent} -} - -func (rule *R0007KubernetesClientExecuted) EvaluateRuleWithProfile(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache) (ruleengine.DetectionResult, error) { - // First do basic evaluation - detectionResult := rule.EvaluateRule(eventType, event, objCache.K8sObjectCache()) - if !detectionResult.IsFailure { - return detectionResult, nil - } - - if eventType == utils.ExecveEventType { - execEvent, _ := event.(*events.ExecEvent) - ap, err := 
GetApplicationProfile(execEvent.Runtime.ContainerID, objCache) - if err != nil { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, err - } - - whitelistedExecs, err := GetContainerFromApplicationProfile(ap, execEvent.GetContainer()) - if err != nil { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, err - } - - execPath := GetExecPathFromEvent(execEvent) - for _, whitelistedExec := range whitelistedExecs.Execs { - if whitelistedExec.Path == execPath { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, nil - } - } - } else { - networkEvent, _ := event.(*tracernetworktype.Event) - nn, err := GetNetworkNeighborhood(networkEvent.Runtime.ContainerID, objCache) - if err != nil { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, err - } - - nnContainer, err := GetContainerFromNetworkNeighborhood(nn, networkEvent.GetContainer()) - if err != nil { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, err - } - - for _, egress := range nnContainer.Egress { - if egress.IPAddress == networkEvent.DstEndpoint.Addr { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, nil - } - } - } - - return ruleengine.DetectionResult{IsFailure: true, Payload: nil}, nil -} - -func (rule *R0007KubernetesClientExecuted) CreateRuleFailure(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache, payload ruleengine.DetectionResult) ruleengine.RuleFailure { - if eventType == utils.ExecveEventType { - execEvent, _ := event.(*events.ExecEvent) - execPath := GetExecPathFromEvent(execEvent) - upperLayer := execEvent.UpperLayer || execEvent.PupperLayer - - return &GenericRuleFailure{ - BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ - UniqueID: HashStringToMD5(fmt.Sprintf("%s%s%s", execEvent.Comm, execPath, execEvent.Pcomm)), - AlertName: rule.Name(), - InfectedPID: execEvent.Pid, - Arguments: map[string]interface{}{ - "exec": execPath, - "args": execEvent.Args, - }, - Severity: R0007KubernetesClientExecutedDescriptor.Priority, - Identifiers: &common.Identifiers{ - Process: &common.ProcessEntity{ - Name: execEvent.Comm, - CommandLine: fmt.Sprintf("%s %s", execPath, strings.Join(utils.GetExecArgsFromEvent(&execEvent.Event), " ")), - }, - File: &common.FileEntity{ - Name: filepath.Base(execPath), - Directory: filepath.Dir(execPath), - }, - }, - }, - RuntimeProcessDetails: apitypes.ProcessTree{ - ProcessTree: apitypes.Process{ - Comm: execEvent.Comm, - Gid: &execEvent.Gid, - PID: execEvent.Pid, - Uid: &execEvent.Uid, - UpperLayer: &upperLayer, - PPID: execEvent.Ppid, - Pcomm: execEvent.Pcomm, - Cwd: execEvent.Cwd, - Hardlink: execEvent.ExePath, - Path: execPath, - Cmdline: fmt.Sprintf("%s %s", execPath, strings.Join(utils.GetExecArgsFromEvent(&execEvent.Event), " ")), - }, - ContainerID: execEvent.Runtime.ContainerID, - }, - TriggerEvent: execEvent.Event.Event, - RuleAlert: apitypes.RuleAlert{ - RuleDescription: fmt.Sprintf("Kubernetes client %s was executed", execPath), - }, - RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ - PodName: execEvent.GetPod(), - PodLabels: execEvent.K8s.PodLabels, - }, - RuleID: rule.ID(), - Extra: execEvent.GetExtra(), - } - } - - networkEvent, _ := event.(*tracernetworktype.Event) - return &GenericRuleFailure{ - BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ - UniqueID: HashStringToMD5(fmt.Sprintf("%s%s%d", networkEvent.Comm, networkEvent.DstEndpoint.Addr, networkEvent.Port)), - AlertName: rule.Name(), - Arguments: map[string]interface{}{ - "dstIP": 
networkEvent.DstEndpoint.Addr, - "port": networkEvent.Port, - "proto": networkEvent.Proto, - }, - InfectedPID: networkEvent.Pid, - Severity: R0007KubernetesClientExecutedDescriptor.Priority, - Identifiers: &common.Identifiers{ - Process: &common.ProcessEntity{ - Name: networkEvent.Comm, - }, - Network: &common.NetworkEntity{ - DstIP: networkEvent.DstEndpoint.Addr, - DstPort: int(networkEvent.Port), - Protocol: networkEvent.Proto, - }, - }, - }, - RuntimeProcessDetails: apitypes.ProcessTree{ - ProcessTree: apitypes.Process{ - Comm: networkEvent.Comm, - Gid: &networkEvent.Gid, - PID: networkEvent.Pid, - Uid: &networkEvent.Uid, - }, - ContainerID: networkEvent.Runtime.ContainerID, - }, - TriggerEvent: networkEvent.Event, - RuleAlert: apitypes.RuleAlert{ - RuleDescription: fmt.Sprintf("Kubernetes client executed: %s", networkEvent.Comm), - }, - RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ - PodName: networkEvent.GetPod(), - PodLabels: networkEvent.K8s.PodLabels, - }, - RuleID: rule.ID(), - } -} - -func (rule *R0007KubernetesClientExecuted) Requirements() ruleengine.RuleSpec { - return &RuleRequirements{ - EventTypes: R0007KubernetesClientExecutedDescriptor.Requirements.RequiredEventTypes(), - ProfileRequirements: ruleengine.ProfileRequirement{ - ProfileDependency: apitypes.Required, - ProfileType: apitypes.ApplicationProfile, - }, - } -} diff --git a/pkg/ruleengine/v1/r0007_kubernetes_client_executed_test.go b/pkg/ruleengine/v1/r0007_kubernetes_client_executed_test.go deleted file mode 100644 index faddc3d7f..000000000 --- a/pkg/ruleengine/v1/r0007_kubernetes_client_executed_test.go +++ /dev/null @@ -1,71 +0,0 @@ -package ruleengine - -import ( - "testing" - - "github.com/kubescape/node-agent/pkg/ebpf/events" - "github.com/kubescape/node-agent/pkg/rulemanager/v1/ruleprocess" - "github.com/kubescape/node-agent/pkg/utils" - - "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" - - tracerexectype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/exec/types" - eventtypes "github.com/inspektor-gadget/inspektor-gadget/pkg/types" -) - -func TestR0007KubernetesClientExecuted(t *testing.T) { - // Create a new rule - r := CreateRuleR0007KubernetesClientExecuted() - // Assert r is not nil - if r == nil { - t.Errorf("Expected r to not be nil") - } - - // Create an exec event - e := &events.ExecEvent{ - Event: tracerexectype.Event{ - Event: eventtypes.Event{ - CommonData: eventtypes.CommonData{ - K8s: eventtypes.K8sMetadata{ - BasicK8sMetadata: eventtypes.BasicK8sMetadata{ - ContainerName: "test", - }, - }, - }, - }, - Comm: "/test", - Args: []string{}, - }, - } - - objCache := RuleObjectCacheMock{} - profile := objCache.ApplicationProfileCache().GetApplicationProfile("test") - if profile == nil { - profile = &v1beta1.ApplicationProfile{} - profile.Spec.Containers = append(profile.Spec.Containers, v1beta1.ApplicationProfileContainer{ - Name: "test", - Execs: []v1beta1.ExecCalls{ - { - Path: "asdf", - Args: []string{"test"}, - }, - }, - }) - - objCache.SetApplicationProfile(profile) - } - - ruleResult := ruleprocess.ProcessRule(r, utils.ExecveEventType, e, &objCache) - if ruleResult != nil { - t.Errorf("Expected ruleResult to be nil since test is not a k8s client") - return - } - - e.Comm = "kubectl" - - ruleResult = ruleprocess.ProcessRule(r, utils.ExecveEventType, e, &objCache) - if ruleResult == nil { - t.Errorf("Expected ruleResult since exec is a k8s client") - return - } -} diff --git a/pkg/ruleengine/v1/r0008_read_env_variables_procfs.go 
b/pkg/ruleengine/v1/r0008_read_env_variables_procfs.go deleted file mode 100644 index 7fad84f53..000000000 --- a/pkg/ruleengine/v1/r0008_read_env_variables_procfs.go +++ /dev/null @@ -1,166 +0,0 @@ -package ruleengine - -import ( - "fmt" - "path/filepath" - "strings" - - apitypes "github.com/armosec/armoapi-go/armotypes" - "github.com/armosec/armoapi-go/armotypes/common" - events "github.com/kubescape/node-agent/pkg/ebpf/events" - "github.com/kubescape/node-agent/pkg/objectcache" - "github.com/kubescape/node-agent/pkg/ruleengine" - "github.com/kubescape/node-agent/pkg/utils" -) - -const ( - R0008ID = "R0008" - R0008Name = "Read Environment Variables from procfs" -) - -var R0008ReadEnvironmentVariablesProcFSRuleDescriptor = ruleengine.RuleDescriptor{ - ID: R0008ID, - Name: R0008Name, - Description: "Detecting reading environment variables from procfs.", - Tags: []string{"env", "malicious", "whitelisted"}, - Priority: RulePriorityMed, - Requirements: &RuleRequirements{ - EventTypes: []utils.EventType{ - utils.OpenEventType, - }, - }, - RuleCreationFunc: func() ruleengine.RuleEvaluator { - return CreateRuleR0008ReadEnvironmentVariablesProcFS() - }, -} -var _ ruleengine.RuleEvaluator = (*R0008ReadEnvironmentVariablesProcFS)(nil) - -type R0008ReadEnvironmentVariablesProcFS struct { - BaseRule - alertedPaths map[string]bool -} - -func CreateRuleR0008ReadEnvironmentVariablesProcFS() *R0008ReadEnvironmentVariablesProcFS { - return &R0008ReadEnvironmentVariablesProcFS{ - alertedPaths: make(map[string]bool), - } -} -func (rule *R0008ReadEnvironmentVariablesProcFS) Name() string { - return R0008Name -} - -func (rule *R0008ReadEnvironmentVariablesProcFS) ID() string { - return R0008ID -} - -func (rule *R0008ReadEnvironmentVariablesProcFS) DeleteRule() { -} - -func (rule *R0008ReadEnvironmentVariablesProcFS) EvaluateRule(eventType utils.EventType, event utils.K8sEvent, k8sObjCache objectcache.K8sObjectCache) ruleengine.DetectionResult { - if eventType != utils.OpenEventType { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - fullEvent, ok := event.(*events.OpenEvent) - if !ok { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - openEvent := fullEvent.Event - - if !strings.HasPrefix(openEvent.FullPath, "/proc/") || !strings.HasSuffix(openEvent.FullPath, "/environ") { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - if rule.alertedPaths[openEvent.FullPath] { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - return ruleengine.DetectionResult{IsFailure: true, Payload: openEvent.FullPath} -} - -func (rule *R0008ReadEnvironmentVariablesProcFS) EvaluateRuleWithProfile(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache) (ruleengine.DetectionResult, error) { - // First do basic evaluation - detectionResult := rule.EvaluateRule(eventType, event, objCache.K8sObjectCache()) - if !detectionResult.IsFailure { - return detectionResult, nil - } - - openEventTyped, _ := event.(*events.OpenEvent) - ap, err := GetApplicationProfile(openEventTyped.Runtime.ContainerID, objCache) - if err != nil { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, err - } - - appProfileOpenList, err := GetContainerFromApplicationProfile(ap, openEventTyped.GetContainer()) - if err != nil { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, err - } - - for _, open := range appProfileOpenList.Opens { - // Check if there is an open call to /proc//environ - if 
strings.HasPrefix(open.Path, "/proc/") && strings.HasSuffix(open.Path, "/environ") { - return ruleengine.DetectionResult{IsFailure: false, Payload: open.Path}, nil - } - } - - return detectionResult, nil -} - -func (rule *R0008ReadEnvironmentVariablesProcFS) CreateRuleFailure(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache, payload ruleengine.DetectionResult) ruleengine.RuleFailure { - fullEvent, _ := event.(*events.OpenEvent) - openEvent := fullEvent.Event - - rule.alertedPaths[openEvent.FullPath] = true - - return &GenericRuleFailure{ - BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ - UniqueID: HashStringToMD5(fmt.Sprintf("%s%s", openEvent.Comm, openEvent.FullPath)), - AlertName: rule.Name(), - Arguments: map[string]interface{}{ - "path": openEvent.FullPath, - "flags": openEvent.Flags, - }, - InfectedPID: openEvent.Pid, - Severity: R0008ReadEnvironmentVariablesProcFSRuleDescriptor.Priority, - Identifiers: &common.Identifiers{ - Process: &common.ProcessEntity{ - Name: openEvent.Comm, - }, - File: &common.FileEntity{ - Name: filepath.Base(openEvent.FullPath), - Directory: filepath.Dir(openEvent.FullPath), - }, - }, - }, - RuntimeProcessDetails: apitypes.ProcessTree{ - ProcessTree: apitypes.Process{ - Comm: openEvent.Comm, - Gid: &openEvent.Gid, - PID: openEvent.Pid, - Uid: &openEvent.Uid, - }, - ContainerID: openEvent.Runtime.ContainerID, - }, - TriggerEvent: openEvent.Event, - RuleAlert: apitypes.RuleAlert{ - RuleDescription: fmt.Sprintf("Reading environment variables from procfs: %s", openEvent.GetContainer()), - }, - RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ - PodName: openEvent.GetPod(), - PodLabels: openEvent.K8s.PodLabels, - }, - RuleID: rule.ID(), - Extra: fullEvent.GetExtra(), - } -} - -func (rule *R0008ReadEnvironmentVariablesProcFS) Requirements() ruleengine.RuleSpec { - return &RuleRequirements{ - EventTypes: R0008ReadEnvironmentVariablesProcFSRuleDescriptor.Requirements.RequiredEventTypes(), - ProfileRequirements: ruleengine.ProfileRequirement{ - ProfileDependency: apitypes.Required, - ProfileType: apitypes.ApplicationProfile, - }, - } -} diff --git a/pkg/ruleengine/v1/r0008_read_env_variables_procfs_test.go b/pkg/ruleengine/v1/r0008_read_env_variables_procfs_test.go deleted file mode 100644 index 30067c989..000000000 --- a/pkg/ruleengine/v1/r0008_read_env_variables_procfs_test.go +++ /dev/null @@ -1,84 +0,0 @@ -package ruleengine - -import ( - "testing" - - "github.com/kubescape/node-agent/pkg/rulemanager/v1/ruleprocess" - "github.com/kubescape/node-agent/pkg/utils" - - traceropentype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/open/types" - "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" - "github.com/kubescape/storage/pkg/registry/file/dynamicpathdetector" - - eventtypes "github.com/inspektor-gadget/inspektor-gadget/pkg/types" -) - -func TestR0008ReadingEnvVariablesFromProcFS(t *testing.T) { - // Create a new rule - r := CreateRuleR0008ReadEnvironmentVariablesProcFS() - // Assert r is not nil - if r == nil { - t.Errorf("Expected r to not be nil") - } - - // Create a file access event - e := &traceropentype.Event{ - Event: eventtypes.Event{ - CommonData: eventtypes.CommonData{ - K8s: eventtypes.K8sMetadata{ - BasicK8sMetadata: eventtypes.BasicK8sMetadata{ - ContainerName: "test", - }, - }, - }, - }, - Path: "/test", - FullPath: "/test", - Flags: []string{"O_RDONLY"}, - } - - // Test with nil appProfileAccess - ruleResult := ruleprocess.ProcessRule(r, utils.OpenEventType, e, 
&RuleObjectCacheMock{}) - if ruleResult != nil { - t.Errorf("Expected ruleResult to not be nil since no appProfile") - return - } - - // Test with whitelisted file - e.FullPath = "/proc/1/environ" - objCache := RuleObjectCacheMock{} - profile := objCache.ApplicationProfileCache().GetApplicationProfile("test") - if profile == nil { - profile = &v1beta1.ApplicationProfile{} - profile.Spec.Containers = append(profile.Spec.Containers, v1beta1.ApplicationProfileContainer{ - Name: "test", - Opens: []v1beta1.OpenCalls{ - { - Path: "/proc/" + dynamicpathdetector.DynamicIdentifier + "/environ", - Flags: []string{"O_RDONLY"}, - }, - }, - }) - - objCache.SetApplicationProfile(profile) - } - - ruleResult = ruleprocess.ProcessRule(r, utils.OpenEventType, e, &objCache) - if ruleResult != nil { - t.Errorf("Expected ruleResult to be nil since file is whitelisted") - } - - // Test with non-whitelisted file - e.FullPath = "/proc/2/environ" - ruleResult = ruleprocess.ProcessRule(r, utils.OpenEventType, e, &objCache) - if ruleResult != nil { - t.Errorf("Expected ruleResult to not be nil since there is a read from /environ") - } - - // Test with non /proc file - e.FullPath = "/test" - ruleResult = ruleprocess.ProcessRule(r, utils.OpenEventType, e, &objCache) - if ruleResult != nil { - t.Errorf("Expected ruleResult to be nil since file is not /proc file") - } -} diff --git a/pkg/ruleengine/v1/r0009_ebpf_program_load.go b/pkg/ruleengine/v1/r0009_ebpf_program_load.go deleted file mode 100644 index 23408616b..000000000 --- a/pkg/ruleengine/v1/r0009_ebpf_program_load.go +++ /dev/null @@ -1,145 +0,0 @@ -package ruleengine - -import ( - "fmt" - "slices" - - "github.com/kubescape/node-agent/pkg/objectcache" - "github.com/kubescape/node-agent/pkg/ruleengine" - "github.com/kubescape/node-agent/pkg/utils" - - apitypes "github.com/armosec/armoapi-go/armotypes" - ruleenginetypes "github.com/kubescape/node-agent/pkg/ruleengine/types" -) - -const ( - R0009ID = "R0009" - R0009Name = "eBPF Program Load" -) - -var R0009EbpfProgramLoadRuleDescriptor = ruleengine.RuleDescriptor{ - ID: R0009ID, - Name: R0009Name, - Description: "Detecting eBPF program load.", - Tags: []string{"syscall", "ebpf"}, - Priority: RulePriorityMed, - Requirements: &RuleRequirements{ - EventTypes: []utils.EventType{ - utils.SyscallEventType, - }, - }, - RuleCreationFunc: func() ruleengine.RuleEvaluator { - return CreateRuleR0009EbpfProgramLoad() - }, -} - -var _ ruleengine.RuleEvaluator = (*R0009EbpfProgramLoad)(nil) - -type R0009EbpfProgramLoad struct { - BaseRule - alreadyNotified bool -} - -func CreateRuleR0009EbpfProgramLoad() *R0009EbpfProgramLoad { - return &R0009EbpfProgramLoad{} -} - -func (rule *R0009EbpfProgramLoad) Name() string { - return R0009Name -} - -func (rule *R0009EbpfProgramLoad) ID() string { - return R0009ID -} -func (rule *R0009EbpfProgramLoad) DeleteRule() { -} - -func (rule *R0009EbpfProgramLoad) EvaluateRule(eventType utils.EventType, event utils.K8sEvent, k8sObjCache objectcache.K8sObjectCache) ruleengine.DetectionResult { - if rule.alreadyNotified { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - if eventType != utils.SyscallEventType { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - syscallEvent, ok := event.(*ruleenginetypes.SyscallEvent) - if !ok { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - if syscallEvent.SyscallName == "bpf" { - return ruleengine.DetectionResult{IsFailure: true, Payload: syscallEvent} - } - - return 
ruleengine.DetectionResult{IsFailure: false, Payload: nil} -} - -func (rule *R0009EbpfProgramLoad) EvaluateRuleWithProfile(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache) (ruleengine.DetectionResult, error) { - // First do basic evaluation - detectionResult := rule.EvaluateRule(eventType, event, objCache.K8sObjectCache()) - if !detectionResult.IsFailure { - return detectionResult, nil - } - - syscallEventTyped, _ := event.(*ruleenginetypes.SyscallEvent) - ap, err := GetApplicationProfile(syscallEventTyped.Runtime.ContainerID, objCache) - if err != nil { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, err - } - - appProfileSyscallList, err := GetContainerFromApplicationProfile(ap, syscallEventTyped.GetContainer()) - if err != nil { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, err - } - - // Check if the syscall is in the list of allowed syscalls - if slices.Contains(appProfileSyscallList.Syscalls, syscallEventTyped.SyscallName) { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, nil - } - - return detectionResult, nil -} - -func (rule *R0009EbpfProgramLoad) CreateRuleFailure(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache, payload ruleengine.DetectionResult) ruleengine.RuleFailure { - syscallEvent, _ := event.(*ruleenginetypes.SyscallEvent) - rule.alreadyNotified = true - - return &GenericRuleFailure{ - BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ - UniqueID: HashStringToMD5(fmt.Sprintf("%s%s", syscallEvent.Comm, syscallEvent.SyscallName)), - AlertName: rule.Name(), - Arguments: map[string]interface{}{ - "syscall": syscallEvent.SyscallName, - }, - InfectedPID: syscallEvent.Pid, - Severity: R0009EbpfProgramLoadRuleDescriptor.Priority, - }, - RuntimeProcessDetails: apitypes.ProcessTree{ - ProcessTree: apitypes.Process{ - Comm: syscallEvent.Comm, - PID: syscallEvent.Pid, - }, - ContainerID: syscallEvent.Runtime.ContainerID, - }, - TriggerEvent: syscallEvent.Event, - RuleAlert: apitypes.RuleAlert{ - RuleDescription: fmt.Sprintf("bpf system call executed in %s", syscallEvent.GetContainer()), - }, - RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ - PodName: syscallEvent.GetPod(), - PodLabels: syscallEvent.K8s.PodLabels, - }, - RuleID: rule.ID(), - } -} - -func (rule *R0009EbpfProgramLoad) Requirements() ruleengine.RuleSpec { - return &RuleRequirements{ - EventTypes: R0009EbpfProgramLoadRuleDescriptor.Requirements.RequiredEventTypes(), - ProfileRequirements: ruleengine.ProfileRequirement{ - ProfileDependency: apitypes.Optional, - ProfileType: apitypes.ApplicationProfile, - }, - } -} diff --git a/pkg/ruleengine/v1/r0009_ebpf_program_load_test.go b/pkg/ruleengine/v1/r0009_ebpf_program_load_test.go deleted file mode 100644 index fb4c69c7e..000000000 --- a/pkg/ruleengine/v1/r0009_ebpf_program_load_test.go +++ /dev/null @@ -1,76 +0,0 @@ -package ruleengine - -import ( - "fmt" - "testing" - - "github.com/kubescape/node-agent/pkg/rulemanager/v1/ruleprocess" - "github.com/kubescape/node-agent/pkg/utils" - - ruleenginetypes "github.com/kubescape/node-agent/pkg/ruleengine/types" - - eventtypes "github.com/inspektor-gadget/inspektor-gadget/pkg/types" - "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" -) - -func TestR0009EbpfProgramLoad(t *testing.T) { - // Create a new rule - r := CreateRuleR0009EbpfProgramLoad() - // Assert r is not nil - if r == nil { - t.Errorf("Expected r to not be nil") - } - - objCache := RuleObjectCacheMock{} - profile := 
objCache.ApplicationProfileCache().GetApplicationProfile("test") - if profile == nil { - profile = &v1beta1.ApplicationProfile{ - Spec: v1beta1.ApplicationProfileSpec{ - Containers: []v1beta1.ApplicationProfileContainer{ - { - Name: "test", - Opens: []v1beta1.OpenCalls{ - { - Path: "/test", - Flags: []string{"O_RDONLY"}, - }, - }, - }, - }, - }, - } - objCache.SetApplicationProfile(profile) - } - - // Create a syscall event - e := &ruleenginetypes.SyscallEvent{ - Event: eventtypes.Event{ - CommonData: eventtypes.CommonData{ - K8s: eventtypes.K8sMetadata{ - BasicK8sMetadata: eventtypes.BasicK8sMetadata{ - ContainerName: "test", - }, - }, - }, - }, - Comm: "test", - SyscallName: "test", - } - - ruleResult := ruleprocess.ProcessRule(r, utils.SyscallEventType, e, &objCache) - if ruleResult != nil { - fmt.Printf("ruleResult: %v\n", ruleResult) - t.Errorf("Expected ruleResult to be nil since syscall is not bpf") - return - } - - // Create a syscall event with bpf syscall - e.SyscallName = "bpf" - - ruleResult = ruleprocess.ProcessRule(r, utils.SyscallEventType, e, &objCache) - if ruleResult == nil { - fmt.Printf("ruleResult: %v\n", ruleResult) - t.Errorf("Expected ruleResult to be Failure because of bpf is used") - return - } -} diff --git a/pkg/ruleengine/v1/r0010_unexpected_sensitive_file_access.go b/pkg/ruleengine/v1/r0010_unexpected_sensitive_file_access.go deleted file mode 100644 index 956474374..000000000 --- a/pkg/ruleengine/v1/r0010_unexpected_sensitive_file_access.go +++ /dev/null @@ -1,219 +0,0 @@ -package ruleengine - -import ( - "fmt" - "path/filepath" - - events "github.com/kubescape/node-agent/pkg/ebpf/events" - "github.com/kubescape/node-agent/pkg/objectcache" - "github.com/kubescape/node-agent/pkg/ruleengine" - "github.com/kubescape/node-agent/pkg/utils" - "github.com/kubescape/storage/pkg/registry/file/dynamicpathdetector" - - apitypes "github.com/armosec/armoapi-go/armotypes" - "github.com/armosec/armoapi-go/armotypes/common" - "github.com/kubescape/go-logger" - "github.com/kubescape/go-logger/helpers" -) - -const ( - R0010ID = "R0010" - R0010Name = "Unexpected Sensitive File Access" -) - -var R0010UnexpectedSensitiveFileAccessRuleDescriptor = ruleengine.RuleDescriptor{ - ID: R0010ID, - Name: R0010Name, - Description: "Detecting access to sensitive files.", - Tags: []string{"files", "malicious", "whitelisted"}, - Priority: RulePriorityMed, - Requirements: &RuleRequirements{ - EventTypes: []utils.EventType{ - utils.OpenEventType, - }, - }, - RuleCreationFunc: func() ruleengine.RuleEvaluator { - return CreateRuleR0010UnexpectedSensitiveFileAccess() - }, -} -var _ ruleengine.RuleEvaluator = (*R0010UnexpectedSensitiveFileAccess)(nil) - -type R0010UnexpectedSensitiveFileAccess struct { - BaseRule - additionalPaths []string -} - -func CreateRuleR0010UnexpectedSensitiveFileAccess() *R0010UnexpectedSensitiveFileAccess { - return &R0010UnexpectedSensitiveFileAccess{ - additionalPaths: SensitiveFiles, - } -} - -var legitimateProcessNames = []string{ - "systemd", - "sudo", - "passwd", - "chpasswd", - "useradd", - "usermod", - "chage", - "sshd", - "login", - "su", - "groupadd", - "groupmod", - "dpkg", - "rpm", - "ansible", - "puppet-agent", - "chef-client", - "vipw", - "pwck", - "grpck", - "nscd", - "cron", - "crond", - "pam", - "snap", - "apk", - "yum", - "dnf", -} - -func (rule *R0010UnexpectedSensitiveFileAccess) SetParameters(parameters map[string]interface{}) { - rule.BaseRule.SetParameters(parameters) - - additionalPathsInterface := rule.GetParameters()["additionalPaths"] - if 
additionalPathsInterface == nil { - return - } - - additionalPaths, ok := InterfaceToStringSlice(additionalPathsInterface) - if ok { - for _, path := range additionalPaths { - rule.additionalPaths = append(rule.additionalPaths, fmt.Sprintf("%v", path)) - } - } else { - logger.L().Warning("failed to convert additionalPaths to []string", helpers.String("ruleID", rule.ID())) - } -} - -func (rule *R0010UnexpectedSensitiveFileAccess) Name() string { - return R0010Name -} - -func (rule *R0010UnexpectedSensitiveFileAccess) ID() string { - return R0010ID -} - -func (rule *R0010UnexpectedSensitiveFileAccess) DeleteRule() { -} - -func (rule *R0010UnexpectedSensitiveFileAccess) EvaluateRule(eventType utils.EventType, event utils.K8sEvent, k8sObjCache objectcache.K8sObjectCache) ruleengine.DetectionResult { - if eventType != utils.OpenEventType { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - fullEvent, ok := event.(*events.OpenEvent) - if !ok { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - openEvent := fullEvent.Event - - if !utils.IsSensitivePath(openEvent.FullPath, rule.additionalPaths) { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - // Running without application profile, to avoid false positives check if the process name is legitimate - for _, processName := range legitimateProcessNames { - if processName == openEvent.Comm { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - } - - return ruleengine.DetectionResult{IsFailure: true, Payload: openEvent.Comm} -} - -func (rule *R0010UnexpectedSensitiveFileAccess) EvaluateRuleWithProfile(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache) (ruleengine.DetectionResult, error) { - // First do basic evaluation - detectionResult := rule.EvaluateRule(eventType, event, objCache.K8sObjectCache()) - if !detectionResult.IsFailure { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, nil - } - - openEventTyped, _ := event.(*events.OpenEvent) - ap, err := GetApplicationProfile(openEventTyped.Runtime.ContainerID, objCache) - if err != nil { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, err - } - - appProfileOpenList, err := GetContainerFromApplicationProfile(ap, openEventTyped.GetContainer()) - if err != nil { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, err - } - - for _, open := range appProfileOpenList.Opens { - if dynamicpathdetector.CompareDynamic(open.Path, openEventTyped.FullPath) { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, nil - } - } - - return detectionResult, nil -} - -func (rule *R0010UnexpectedSensitiveFileAccess) CreateRuleFailure(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache, payload ruleengine.DetectionResult) ruleengine.RuleFailure { - fullEvent, _ := event.(*events.OpenEvent) - openEvent := fullEvent.Event - - return &GenericRuleFailure{ - BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ - UniqueID: HashStringToMD5(fmt.Sprintf("%s%s", openEvent.Comm, openEvent.FullPath)), - AlertName: rule.Name(), - Arguments: map[string]interface{}{ - "path": openEvent.FullPath, - "flags": openEvent.Flags, - }, - InfectedPID: openEvent.Pid, - Severity: R0010UnexpectedSensitiveFileAccessRuleDescriptor.Priority, - Identifiers: &common.Identifiers{ - Process: &common.ProcessEntity{ - Name: openEvent.Comm, - }, - File: &common.FileEntity{ - Name: filepath.Base(openEvent.FullPath), - Directory: 
filepath.Dir(openEvent.FullPath), - }, - }, - }, - RuntimeProcessDetails: apitypes.ProcessTree{ - ProcessTree: apitypes.Process{ - Comm: openEvent.Comm, - Gid: &openEvent.Gid, - PID: openEvent.Pid, - Uid: &openEvent.Uid, - }, - ContainerID: openEvent.Runtime.ContainerID, - }, - TriggerEvent: openEvent.Event, - RuleAlert: apitypes.RuleAlert{ - RuleDescription: fmt.Sprintf("Unexpected sensitive file access: %s", openEvent.FullPath), - }, - RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ - PodName: openEvent.GetPod(), - PodLabels: openEvent.K8s.PodLabels, - }, - RuleID: rule.ID(), - Extra: fullEvent.GetExtra(), - } -} - -func (rule *R0010UnexpectedSensitiveFileAccess) Requirements() ruleengine.RuleSpec { - return &RuleRequirements{ - EventTypes: R0010UnexpectedSensitiveFileAccessRuleDescriptor.Requirements.RequiredEventTypes(), - ProfileRequirements: ruleengine.ProfileRequirement{ - ProfileDependency: apitypes.Optional, - ProfileType: apitypes.ApplicationProfile, - }, - } -} diff --git a/pkg/ruleengine/v1/r0010_unexpected_sensitive_file_access_test.go b/pkg/ruleengine/v1/r0010_unexpected_sensitive_file_access_test.go deleted file mode 100644 index b726727c4..000000000 --- a/pkg/ruleengine/v1/r0010_unexpected_sensitive_file_access_test.go +++ /dev/null @@ -1,274 +0,0 @@ -package ruleengine - -import ( - "testing" - - traceropentype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/open/types" - eventtypes "github.com/inspektor-gadget/inspektor-gadget/pkg/types" - events "github.com/kubescape/node-agent/pkg/ebpf/events" - "github.com/kubescape/node-agent/pkg/rulemanager/v1/ruleprocess" - "github.com/kubescape/node-agent/pkg/utils" - "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" - "github.com/kubescape/storage/pkg/registry/file/dynamicpathdetector" -) - -func createTestEvent(path string, flags []string) *events.OpenEvent { - return &events.OpenEvent{ - Event: traceropentype.Event{ - Event: eventtypes.Event{ - CommonData: eventtypes.CommonData{ - K8s: eventtypes.K8sMetadata{ - BasicK8sMetadata: eventtypes.BasicK8sMetadata{ - ContainerName: "test", - }, - }, - }, - }, - Path: path, - FullPath: path, - Flags: flags, - }, - } -} - -func createTestProfile(containerName string, paths []string, flags []string) *v1beta1.ApplicationProfile { - opens := make([]v1beta1.OpenCalls, len(paths)) - for i, path := range paths { - opens[i] = v1beta1.OpenCalls{ - Path: path, - Flags: flags, - } - } - - return &v1beta1.ApplicationProfile{ - Spec: v1beta1.ApplicationProfileSpec{ - Containers: []v1beta1.ApplicationProfileContainer{ - { - Name: containerName, - Opens: opens, - }, - }, - }, - } -} - -func TestR0010UnexpectedSensitiveFileAccess(t *testing.T) { - tests := []struct { - name string - event *events.OpenEvent - profile *v1beta1.ApplicationProfile - additionalPaths []interface{} - expectAlert bool - description string - }{ - { - name: "Relative path with dots", - event: createTestEvent("./etc/shadow", []string{"O_RDONLY"}), - profile: createTestProfile("test", []string{"/etc/shadow"}, []string{"O_RDONLY"}), - expectAlert: true, - description: "Should handle relative paths correctly", - }, - { - name: "No application profile", - event: createTestEvent("/test", []string{"O_RDONLY"}), - profile: nil, - expectAlert: false, - description: "Should not alert when no application profile is present", - }, - { - name: "Whitelisted non-sensitive file", - event: createTestEvent("/test", []string{"O_RDONLY"}), - profile: createTestProfile("test", []string{"/test"}, 
[]string{"O_RDONLY"}), - expectAlert: false, - description: "Should not alert for whitelisted non-sensitive file", - }, - { - name: "Non-whitelisted non-sensitive file", - event: createTestEvent("/var/test1", []string{"O_RDONLY"}), - profile: createTestProfile("test", []string{"/test"}, []string{"O_RDONLY"}), - expectAlert: false, - description: "Should not alert for non-whitelisted non-sensitive file", - }, - { - name: "Whitelisted sensitive file", - event: createTestEvent("/etc/shadow", []string{"O_RDONLY"}), - profile: createTestProfile("test", []string{"/etc/shadow"}, []string{"O_RDONLY"}), - expectAlert: false, - description: "Should not alert for whitelisted sensitive file", - }, - { - name: "Non-whitelisted sensitive file", - event: createTestEvent("/etc/shadow", []string{"O_RDONLY"}), - profile: createTestProfile("test", []string{"/test"}, []string{"O_RDONLY"}), - expectAlert: true, - description: "Should alert for non-whitelisted sensitive file", - }, - { - name: "Additional sensitive path", - event: createTestEvent("/etc/custom-sensitive", []string{"O_RDONLY"}), - profile: createTestProfile("test", []string{"/test"}, []string{"O_RDONLY"}), - additionalPaths: []interface{}{"/etc/custom-sensitive"}, - expectAlert: true, - description: "Should alert for non-whitelisted file in additional sensitive paths", - }, - { - name: "Wildcard path match", - event: createTestEvent("/etc/blabla", []string{"O_RDONLY"}), - profile: createTestProfile("test", []string{"/etc/\u22ef"}, []string{"O_RDONLY"}), - expectAlert: false, - description: "Should not alert when path matches wildcard pattern", - }, - { - name: "Path traversal attempt", - event: createTestEvent("/etc/shadow/../shadow", []string{"O_RDONLY"}), - profile: createTestProfile("test", []string{"/test"}, []string{"O_RDONLY"}), - expectAlert: true, - description: "Should alert for path traversal attempts", - }, - // Dynamic path matching tests - { - name: "Dynamic directory match", - event: createTestEvent("/var/log/2024_01_01/app.log", []string{"O_RDONLY"}), - profile: createTestProfile("test", []string{"/var/log/" + dynamicpathdetector.DynamicIdentifier + "/app.log"}, []string{"O_RDONLY"}), - expectAlert: false, - description: "Should not alert when path matches dynamic pattern", - }, - { - name: "Dynamic multi-segment match", - event: createTestEvent("/var/log/2024/01/01/app.log", []string{"O_RDONLY"}), - profile: createTestProfile("test", []string{"/var/log/" + dynamicpathdetector.DynamicIdentifier + "/" + dynamicpathdetector.DynamicIdentifier + "/" + dynamicpathdetector.DynamicIdentifier + "/app.log"}, []string{"O_RDONLY"}), - expectAlert: false, - description: "Should not alert when path matches multiple dynamic segments", - }, - { - name: "Dynamic prefix match", - event: createTestEvent("/data/customer1/config.json", []string{"O_RDONLY"}), - profile: createTestProfile("test", []string{"/" + dynamicpathdetector.DynamicIdentifier + "/customer1/config.json"}, []string{"O_RDONLY"}), - expectAlert: false, - description: "Should not alert when path matches dynamic prefix", - }, - { - name: "Dynamic suffix match", - event: createTestEvent("/etc/config/v1.2.3/settings.yaml", []string{"O_RDONLY"}), - profile: createTestProfile("test", []string{"/etc/config/" + dynamicpathdetector.DynamicIdentifier + "/settings.yaml"}, []string{"O_RDONLY"}), - expectAlert: false, - description: "Should not alert when path matches dynamic suffix", - }, - { - name: "Dynamic timestamp directory match", - event: 
createTestEvent("/var/log/pods/2024_01_01_12_00_00/container.log", []string{"O_RDONLY"}), - profile: createTestProfile("test", []string{"/var/log/pods/" + dynamicpathdetector.DynamicIdentifier + "/container.log"}, []string{"O_RDONLY"}), - expectAlert: false, - description: "Should not alert when timestamp directory matches dynamic pattern", - }, - { - name: "Dynamic service account token path", - event: createTestEvent("/run/secrets/kubernetes.io/serviceaccount/..2024_01_01_12_00_00.123456789/token", []string{"O_RDONLY"}), - profile: createTestProfile("test", []string{"/run/secrets/kubernetes.io/serviceaccount/" + dynamicpathdetector.DynamicIdentifier + "/token"}, []string{"O_RDONLY"}), - expectAlert: false, - description: "Should not alert when service account token path matches dynamic pattern", - }, - { - name: "Sensitive file with dynamic path not whitelisted", - event: createTestEvent("/etc/kubernetes/..2024_01_01/secret.yaml", []string{"O_RDONLY"}), - profile: createTestProfile("test", []string{"/var/log/" + dynamicpathdetector.DynamicIdentifier + "/app.log"}, []string{"O_RDONLY"}), - additionalPaths: []interface{}{"/etc/kubernetes"}, - expectAlert: true, - description: "Should alert when sensitive file with timestamp is not whitelisted", - }, - { - name: "Multiple whitelisted dynamic paths", - event: createTestEvent("/var/log/2024_01_01/app.log", []string{"O_RDONLY"}), - profile: createTestProfile("test", - []string{ - "/tmp/" + dynamicpathdetector.DynamicIdentifier + "/test.log", - "/var/log/" + dynamicpathdetector.DynamicIdentifier + "/app.log", - }, - []string{"O_RDONLY"}), - expectAlert: false, - description: "Should not alert when path matches one of multiple dynamic patterns", - }, - { - name: "Mixed static and dynamic segments", - event: createTestEvent("/data/users/john/2024_01_01/profile.json", []string{"O_RDONLY"}), - profile: createTestProfile("test", []string{"/data/users/john/" + dynamicpathdetector.DynamicIdentifier + "/profile.json"}, []string{"O_RDONLY"}), - expectAlert: false, - description: "Should not alert when static segments match and dynamic segment matches timestamp", - }, - // { - // name: "Double slashes in path", - // event: createTestEvent("/etc//shadow", []string{"O_RDONLY"}), - // profile: createTestProfile("test", []string{"/etc/shadow"}, []string{"O_RDONLY"}), - // expectAlert: false, - // description: "Should normalize paths with double slashes", - // }, - { - name: "Trailing slash differences", - event: createTestEvent("/etc/kubernetes/", []string{"O_RDONLY"}), - profile: createTestProfile("test", []string{"/etc/kubernetes"}, []string{"O_RDONLY"}), - expectAlert: false, - description: "Should handle trailing slash differences", - }, - { - name: "Partial path segment match", - event: createTestEvent("/etc/kubernetes-staging/secret", []string{"O_RDONLY"}), - profile: createTestProfile("test", []string{"/etc/kubernetes"}, []string{"O_RDONLY"}), - expectAlert: false, - description: "Should not alert when path merely starts with a sensitive path string", - }, - { - name: "Complex dynamic pattern combination", - event: createTestEvent("/var/log/2024/01/pod-123/container-456/app.log", []string{"O_RDONLY"}), - profile: createTestProfile("test", []string{ - "/var/log/" + dynamicpathdetector.DynamicIdentifier + "/" + - dynamicpathdetector.DynamicIdentifier + "/pod-" + - dynamicpathdetector.DynamicIdentifier + "/container-" + - dynamicpathdetector.DynamicIdentifier + "/app.log"}, []string{"O_RDONLY"}), - expectAlert: false, - description: "Should handle 
complex combinations of dynamic patterns", - }, - { - name: "Empty path handling", - event: createTestEvent("", []string{"O_RDONLY"}), - profile: createTestProfile("test", []string{"/test"}, []string{"O_RDONLY"}), - expectAlert: false, - description: "Should handle empty paths gracefully", - }, - { - name: "Special characters in path", - event: createTestEvent("/etc/conf!g#file", []string{"O_RDONLY"}), - profile: createTestProfile("test", []string{"/etc/conf!g#file"}, []string{"O_RDONLY"}), - expectAlert: false, - description: "Should handle special characters in paths", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - rule := CreateRuleR0010UnexpectedSensitiveFileAccess() - if rule == nil { - t.Fatal("Expected rule to not be nil") - } - - objCache := &RuleObjectCacheMock{} - if tt.profile != nil { - objCache.SetApplicationProfile(tt.profile) - } - - if tt.additionalPaths != nil { - rule.SetParameters(map[string]interface{}{ - "additionalPaths": tt.additionalPaths, - }) - } - - result := ruleprocess.ProcessRule(rule, utils.OpenEventType, tt.event, objCache) - - if tt.expectAlert && result == nil { - t.Errorf("%s: expected alert but got none", tt.description) - } - if !tt.expectAlert && result != nil { - t.Errorf("%s: expected no alert but got one", tt.description) - } - }) - } -} diff --git a/pkg/ruleengine/v1/r0011_unexpected_egress_network_traffic.go b/pkg/ruleengine/v1/r0011_unexpected_egress_network_traffic.go deleted file mode 100644 index e494a8177..000000000 --- a/pkg/ruleengine/v1/r0011_unexpected_egress_network_traffic.go +++ /dev/null @@ -1,219 +0,0 @@ -package ruleengine - -import ( - "bytes" - "fmt" - "net" - "time" - - apitypes "github.com/armosec/armoapi-go/armotypes" - "github.com/armosec/armoapi-go/armotypes/common" - "github.com/goradd/maps" - tracernetworktype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/network/types" - "github.com/kubescape/node-agent/pkg/objectcache" - "github.com/kubescape/node-agent/pkg/ruleengine" - "github.com/kubescape/node-agent/pkg/utils" -) - -const ( - R0011ID = "R0011" - R0011Name = "Unexpected Egress Network Traffic" -) - -var R0011UnexpectedEgressNetworkTrafficRuleDescriptor = ruleengine.RuleDescriptor{ - ID: R0011ID, - Name: R0011Name, - Description: "Detecting unexpected egress network traffic that is not whitelisted by application profile.", - Tags: []string{"dns", "whitelisted", "network"}, - Priority: RulePriorityMed, - Requirements: &RuleRequirements{ - EventTypes: []utils.EventType{utils.NetworkEventType}, - }, - RuleCreationFunc: func() ruleengine.RuleEvaluator { - return CreateRuleR0011UnexpectedEgressNetworkTraffic() - }, -} -var _ ruleengine.RuleEvaluator = (*R0011UnexpectedEgressNetworkTraffic)(nil) - -type R0011UnexpectedEgressNetworkTraffic struct { - BaseRule - alertedAdresses maps.SafeMap[string, bool] - startTime time.Time -} - -func CreateRuleR0011UnexpectedEgressNetworkTraffic() *R0011UnexpectedEgressNetworkTraffic { - return &R0011UnexpectedEgressNetworkTraffic{startTime: time.Now()} -} - -func (rule *R0011UnexpectedEgressNetworkTraffic) Name() string { - return R0011Name -} -func (rule *R0011UnexpectedEgressNetworkTraffic) ID() string { - return R0011ID -} - -func (rule *R0011UnexpectedEgressNetworkTraffic) DeleteRule() { -} - -func (rule *R0011UnexpectedEgressNetworkTraffic) EvaluateRule(eventType utils.EventType, event utils.K8sEvent, k8sObjCache objectcache.K8sObjectCache) ruleengine.DetectionResult { - if eventType != utils.NetworkEventType { - return 
ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - networkEvent, ok := event.(*tracernetworktype.Event) - if !ok { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - // Check if the container was pre-running. - if time.Unix(int64(networkEvent.Runtime.ContainerStartedAt), 0).Before(rule.startTime) { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - // Check if we already alerted on this endpoint. - endpoint := fmt.Sprintf("%s:%d:%s", networkEvent.DstEndpoint.Addr, networkEvent.Port, networkEvent.Proto) - if ok := rule.alertedAdresses.Has(endpoint); ok { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - // Check if the network event is outgoing and the destination is not a private IP. - if networkEvent.PktType == "OUTGOING" && !isPrivateIP(networkEvent.DstEndpoint.Addr) { - return ruleengine.DetectionResult{IsFailure: true, Payload: networkEvent.DstEndpoint.Addr} - } - - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} -} - -func (rule *R0011UnexpectedEgressNetworkTraffic) EvaluateRuleWithProfile(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache) (ruleengine.DetectionResult, error) { - // First do basic evaluation - detectionResult := rule.EvaluateRule(eventType, event, objCache.K8sObjectCache()) - if !detectionResult.IsFailure { - return detectionResult, nil - } - - networkEventTyped, _ := event.(*tracernetworktype.Event) - nn, err := GetNetworkNeighborhood(networkEventTyped.Runtime.ContainerID, objCache) - if err != nil { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, err - } - - // Skip partially watched containers. - if annotations := nn.GetAnnotations(); annotations != nil { - if annotations["kubescape.io/completion"] == string(objectcache.WatchedContainerCompletionStatusPartial) { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, nil - } - } - - nnContainer, err := GetContainerFromNetworkNeighborhood(nn, networkEventTyped.GetContainer()) - if err != nil { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, err - } - - domain := objCache.DnsCache().ResolveIpToDomain(networkEventTyped.DstEndpoint.Addr) - if domain != "" { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, nil - } - - // Check if the address is in the egress list and isn't in cluster. 
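// --- editorial note (not part of this patch) ---------------------------------
// The deleted isPrivateIP helper below compares the destination address against
// hard-coded range boundaries with bytes.Compare. A minimal standalone sketch,
// assuming the same intent ("skip non-routable egress destinations"), could lean
// on net/netip instead; only class E (240.0.0.0/4) still needs an explicit
// prefix check, since netip has no helper for it.
package main

import (
	"fmt"
	"net/netip"
)

// isNonRoutable reports whether ip should be ignored by an egress rule:
// loopback, RFC 1918 private ranges, link-local (169.254/16), multicast,
// or the experimental class E block.
func isNonRoutable(ip string) bool {
	addr, err := netip.ParseAddr(ip)
	if err != nil {
		// Unparseable input is treated as routable, matching the original helper.
		return false
	}
	classE := netip.MustParsePrefix("240.0.0.0/4")
	return addr.IsLoopback() ||
		addr.IsPrivate() ||
		addr.IsLinkLocalUnicast() ||
		addr.IsMulticast() ||
		classE.Contains(addr)
}

func main() {
	for _, ip := range []string{"10.0.0.1", "1.1.1.1", "169.254.1.1"} {
		fmt.Println(ip, isNonRoutable(ip))
	}
}
// ------------------------------------------------------------------------------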
- for _, egress := range nnContainer.Egress { - if egress.IPAddress == networkEventTyped.DstEndpoint.Addr { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, nil - } - } - - return detectionResult, nil -} - -func (rule *R0011UnexpectedEgressNetworkTraffic) CreateRuleFailure(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache, payload ruleengine.DetectionResult) ruleengine.RuleFailure { - networkEvent, _ := event.(*tracernetworktype.Event) - endpoint := fmt.Sprintf("%s:%d:%s", networkEvent.DstEndpoint.Addr, networkEvent.Port, networkEvent.Proto) - rule.alertedAdresses.Set(endpoint, true) - - return &GenericRuleFailure{ - BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ - UniqueID: HashStringToMD5(fmt.Sprintf("%s%s%d", networkEvent.Comm, networkEvent.DstEndpoint.Addr, networkEvent.Port)), - AlertName: rule.Name(), - InfectedPID: networkEvent.Pid, - Arguments: map[string]interface{}{ - "ip": networkEvent.DstEndpoint.Addr, - "port": networkEvent.Port, - "proto": networkEvent.Proto, - }, - Severity: R0011UnexpectedEgressNetworkTrafficRuleDescriptor.Priority, - Identifiers: &common.Identifiers{ - Process: &common.ProcessEntity{ - Name: networkEvent.Comm, - }, - Network: &common.NetworkEntity{ - DstIP: networkEvent.DstEndpoint.Addr, - DstPort: int(networkEvent.Port), - Protocol: networkEvent.Proto, - }, - }, - }, - RuntimeProcessDetails: apitypes.ProcessTree{ - ProcessTree: apitypes.Process{ - Comm: networkEvent.Comm, - Gid: &networkEvent.Gid, - PID: networkEvent.Pid, - Uid: &networkEvent.Uid, - }, - ContainerID: networkEvent.Runtime.ContainerID, - }, - TriggerEvent: networkEvent.Event, - RuleAlert: apitypes.RuleAlert{ - RuleDescription: fmt.Sprintf("Unexpected egress network communication to: %s:%d using %s from: %s", networkEvent.DstEndpoint.Addr, networkEvent.Port, networkEvent.Proto, networkEvent.GetContainer()), - }, - RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ - PodName: networkEvent.GetPod(), - PodLabels: networkEvent.K8s.PodLabels, - }, - RuleID: rule.ID(), - } -} - -func (rule *R0011UnexpectedEgressNetworkTraffic) Requirements() ruleengine.RuleSpec { - return &RuleRequirements{ - EventTypes: R0011UnexpectedEgressNetworkTrafficRuleDescriptor.Requirements.RequiredEventTypes(), - ProfileRequirements: ruleengine.ProfileRequirement{ - ProfileDependency: apitypes.Required, - ProfileType: apitypes.NetworkProfile, - }, - } -} - -func isPrivateIP(ip string) bool { - parsedIP := net.ParseIP(ip) - if parsedIP == nil { - return false - } - - // Check if IP is localhost - if parsedIP.IsLoopback() { - return true - } - - // Check if IP is in private IP ranges - privateIPRanges := []struct { - start net.IP - end net.IP - }{ - {net.ParseIP("10.0.0.0"), net.ParseIP("10.255.255.255")}, - {net.ParseIP("172.16.0.0"), net.ParseIP("172.31.255.255")}, - {net.ParseIP("192.168.0.0"), net.ParseIP("192.168.255.255")}, - // Class D (Multicast) - {net.ParseIP("224.0.0.0"), net.ParseIP("239.255.255.255")}, - // Class E (Experimental) - {net.ParseIP("240.0.0.0"), net.ParseIP("255.255.255.255")}, - // APIPA (sometimes used for local dns) - {net.ParseIP("169.254.0.0"), net.ParseIP("169.254.255.255")}, - } - - for _, r := range privateIPRanges { - if bytes.Compare(parsedIP, r.start) >= 0 && bytes.Compare(parsedIP, r.end) <= 0 { - return true - } - } - - return false -} diff --git a/pkg/ruleengine/v1/r0011_unexpected_egress_network_traffic_test.go b/pkg/ruleengine/v1/r0011_unexpected_egress_network_traffic_test.go deleted file mode 100644 index 
05bd4b624..000000000 --- a/pkg/ruleengine/v1/r0011_unexpected_egress_network_traffic_test.go +++ /dev/null @@ -1,158 +0,0 @@ -package ruleengine - -import ( - "testing" - "time" - - "github.com/kubescape/node-agent/pkg/objectcache" - "github.com/kubescape/node-agent/pkg/rulemanager/v1/ruleprocess" - "github.com/kubescape/node-agent/pkg/utils" - - tracernetworktype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/network/types" - eventtypes "github.com/inspektor-gadget/inspektor-gadget/pkg/types" - "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" -) - -func TestR0011UnexpectedNetworkTraffic(t *testing.T) { - // Create a new rule - r := CreateRuleR0011UnexpectedEgressNetworkTraffic() - // Assert r is not nil - if r == nil { - t.Errorf("Expected r to not be nil") - } - - // Create a network request event - e := &tracernetworktype.Event{ - Event: eventtypes.Event{ - CommonData: eventtypes.CommonData{ - K8s: eventtypes.K8sMetadata{ - BasicK8sMetadata: eventtypes.BasicK8sMetadata{ - ContainerName: "test", - }, - }, - Runtime: eventtypes.BasicRuntimeMetadata{ - ContainerStartedAt: eventtypes.Time(time.Now().UnixNano()), - }, - }, - }, - PktType: "OUTGOING", - DstEndpoint: eventtypes.L3Endpoint{ - Addr: "1.1.1.1", - }, - Port: 80, - } - - // Test with nil network neighborhood. - ruleResult := ruleprocess.ProcessRule(r, utils.NetworkEventType, e, &RuleObjectCacheMock{}) - if ruleResult != nil { - t.Errorf("Expected ruleResult to not be nil since no appProfile") - } - - // Test with whitelisted address without dns cache. - objCache := RuleObjectCacheMock{} - nn := objCache.NetworkNeighborhoodCache().GetNetworkNeighborhood("test") - if nn == nil { - nn = &v1beta1.NetworkNeighborhood{} - nn.Spec.Containers = append(nn.Spec.Containers, v1beta1.NetworkNeighborhoodContainer{ - Name: "test", - - Egress: []v1beta1.NetworkNeighbor{ - { - DNS: "test.com", - DNSNames: []string{"test.com"}, - IPAddress: "1.1.1.1", - }, - }, - }) - - objCache.SetNetworkNeighborhood(nn) - } - - ruleResult = ruleprocess.ProcessRule(r, utils.NetworkEventType, e, &objCache) - if ruleResult != nil { - t.Errorf("Expected ruleResult to be nil since domain/adress is whitelisted") - } - - // Test with non-whitelisted address without dns cache. - e.DstEndpoint.Addr = "2.2.2.2" - ruleResult = ruleprocess.ProcessRule(r, utils.NetworkEventType, e, &objCache) - if ruleResult == nil { - t.Errorf("Expected ruleResult to not be nil since domain/adress is not whitelisted") - } - - // Test with whitelisted address with dns cache. - objCache.SetDnsCache(map[string]string{"2.2.2.2": "test.com"}) - ruleResult = ruleprocess.ProcessRule(r, utils.NetworkEventType, e, &objCache) - if ruleResult != nil { - t.Errorf("Expected ruleResult to be nil since we are able to resolve the address") - } - - // Test with incoming packet. - e.PktType = "INCOMING" - ruleResult = ruleprocess.ProcessRule(r, utils.NetworkEventType, e, &objCache) - if ruleResult != nil { - t.Errorf("Expected ruleResult to be nil since packet is incoming") - } - - // Test with private address. - e.PktType = "OUTGOING" - e.DstEndpoint.Addr = "10.0.0.1" - ruleResult = ruleprocess.ProcessRule(r, utils.NetworkEventType, e, &objCache) - if ruleResult != nil { - t.Errorf("Expected ruleResult to be nil since address is private") - } - - // Test with non-whitelisted address with dns cache empty. 
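// --- editorial note (not part of this patch) ---------------------------------
// The assertions in this deleted test mutate a single event in sequence. The
// deleted R0010 test earlier in this patch uses a table-driven layout instead;
// a minimal, self-contained sketch of that pattern is shown here (all names and
// the placeholder check are hypothetical, not taken from the node-agent code):
// file: egress_rule_sketch_test.go
package ruletest

import "testing"

// expectAlertFor stands in for ruleprocess.ProcessRule plus the rule under test.
func expectAlertFor(dstIP string, port uint16) bool {
	return dstIP != "1.1.1.1" && port != 443 // placeholder logic for the sketch
}

func TestEgressRuleTableDriven(t *testing.T) {
	tests := []struct {
		name        string
		dstIP       string
		port        uint16
		expectAlert bool
	}{
		{name: "whitelisted address", dstIP: "1.1.1.1", port: 80, expectAlert: false},
		{name: "non-whitelisted address", dstIP: "2.2.2.2", port: 80, expectAlert: true},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := expectAlertFor(tt.dstIP, tt.port); got != tt.expectAlert {
				t.Errorf("%s: expected alert=%v, got %v", tt.name, tt.expectAlert, got)
			}
		})
	}
}
// ------------------------------------------------------------------------------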
- e.DstEndpoint.Addr = "4.4.4.4" - objCache.SetDnsCache(map[string]string{}) - ruleResult = ruleprocess.ProcessRule(r, utils.NetworkEventType, e, &objCache) - if ruleResult == nil { - t.Errorf("Expected ruleResult to not be nil since we are not able to resolve the address") - } - - // Test with non-whitelisted address with nil dns cache with different port. - e.DstEndpoint.Addr = "5.5.5.5" - e.Port = 443 - ruleResult = ruleprocess.ProcessRule(r, utils.NetworkEventType, e, &objCache) - if ruleResult == nil { - t.Errorf("Expected ruleResult to not be nil since it's not whitelisted") - } - - // Test with non-whitelisted address with nil dns cache with different port. - e.DstEndpoint.Addr = "5.5.5.5" - e.Port = 80 - ruleResult = ruleprocess.ProcessRule(r, utils.NetworkEventType, e, &objCache) - if ruleResult == nil { - t.Errorf("Expected ruleResult to not be nil since it's not whitelisted and it's different port") - } - - // Test with non-whitelisted address with nil dns cache with different port. - e.DstEndpoint.Addr = "5.5.5.5" - e.Port = 80 - ruleResult = ruleprocess.ProcessRule(r, utils.NetworkEventType, e, &objCache) - if ruleResult != nil { - t.Errorf("Expected ruleResult to be nil since we already alerted on this port") - } - - // Test with non-whitelisted address with nil dns cache with different port. with partial watched container. - e.DstEndpoint.Addr = "5.5.5.5" - e.Port = 81 - originalAnnotations := nn.GetAnnotations() - nn.Annotations = map[string]string{"kubescape.io/completion": string(objectcache.WatchedContainerCompletionStatusPartial)} - objCache.SetNetworkNeighborhood(nn) - ruleResult = ruleprocess.ProcessRule(r, utils.NetworkEventType, e, &objCache) - if ruleResult != nil { - t.Errorf("Expected ruleResult to be nil since it's a partially watched container") - } - - // Test with non-whitelisted address with nil dns cache with different port. 
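// --- editorial note (not part of this patch) ---------------------------------
// The "already alerted on this port" case above relies on the rule remembering
// every endpoint it has fired on, keyed as "addr:port:proto" (see the
// alertedAdresses map in the deleted rule). A minimal stdlib-only sketch of that
// once-per-endpoint suppression, using a mutex-guarded map rather than goradd's
// SafeMap:
package main

import (
	"fmt"
	"sync"
)

type endpointDeduper struct {
	mu   sync.Mutex
	seen map[string]struct{}
}

func newEndpointDeduper() *endpointDeduper {
	return &endpointDeduper{seen: make(map[string]struct{})}
}

// shouldAlert returns true the first time a given addr/port/proto combination is
// observed and false on every later call, mirroring the rule's suppression.
func (d *endpointDeduper) shouldAlert(addr string, port uint16, proto string) bool {
	key := fmt.Sprintf("%s:%d:%s", addr, port, proto)
	d.mu.Lock()
	defer d.mu.Unlock()
	if _, ok := d.seen[key]; ok {
		return false
	}
	d.seen[key] = struct{}{}
	return true
}

func main() {
	d := newEndpointDeduper()
	fmt.Println(d.shouldAlert("5.5.5.5", 80, "TCP")) // true: first sighting
	fmt.Println(d.shouldAlert("5.5.5.5", 80, "TCP")) // false: suppressed
	fmt.Println(d.shouldAlert("5.5.5.5", 80, "UDP")) // true: different protocol
}
// ------------------------------------------------------------------------------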
- nn.Annotations = originalAnnotations - objCache.SetNetworkNeighborhood(nn) - e.DstEndpoint.Addr = "5.5.5.5" - e.Port = 80 - e.Proto = "UDP" - ruleResult = ruleprocess.ProcessRule(r, utils.NetworkEventType, e, &objCache) - if ruleResult == nil { - t.Errorf("Expected ruleResult to not be nil since it's a different protocol") - } -} diff --git a/pkg/ruleengine/v1/r1000_exec_from_malicious_source.go b/pkg/ruleengine/v1/r1000_exec_from_malicious_source.go deleted file mode 100644 index c4ec5af2a..000000000 --- a/pkg/ruleengine/v1/r1000_exec_from_malicious_source.go +++ /dev/null @@ -1,174 +0,0 @@ -package ruleengine - -import ( - "fmt" - "path/filepath" - "strings" - - "github.com/kubescape/node-agent/pkg/ebpf/events" - "github.com/kubescape/node-agent/pkg/objectcache" - "github.com/kubescape/node-agent/pkg/ruleengine" - "github.com/kubescape/node-agent/pkg/utils" - - apitypes "github.com/armosec/armoapi-go/armotypes" - "github.com/armosec/armoapi-go/armotypes/common" -) - -const ( - R1000ID = "R1000" - R1000Name = "Exec from malicious source" -) - -var R1000ExecFromMaliciousSourceDescriptor = ruleengine.RuleDescriptor{ - ID: R1000ID, - Name: R1000Name, - Description: "Detecting exec calls that are from malicious source like: /dev/shm, /proc/self", - Priority: RulePriorityMed, - Tags: []string{"exec", "signature"}, - Requirements: &RuleRequirements{ - EventTypes: []utils.EventType{utils.ExecveEventType}, - }, - RuleCreationFunc: func() ruleengine.RuleEvaluator { - return CreateRuleR1000ExecFromMaliciousSource() - }, -} -var _ ruleengine.RuleEvaluator = (*R1000ExecFromMaliciousSource)(nil) - -type R1000ExecFromMaliciousSource struct { - BaseRule -} - -func CreateRuleR1000ExecFromMaliciousSource() *R1000ExecFromMaliciousSource { - return &R1000ExecFromMaliciousSource{} -} - -func (rule *R1000ExecFromMaliciousSource) Name() string { - return R1000Name -} - -func (rule *R1000ExecFromMaliciousSource) ID() string { - return R1000ID -} - -var whitelistedProcessesForMaliciousSource = []string{ - "systemd", - "docker", - "containerd", - "snap-confine", - "nginx", - "apache2", - "bash", - "dash", - "sh", - "supervisord", -} - -func (rule *R1000ExecFromMaliciousSource) EvaluateRule(eventType utils.EventType, event utils.K8sEvent, k8sObjCache objectcache.K8sObjectCache) ruleengine.DetectionResult { - if eventType != utils.ExecveEventType { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - execEvent, ok := event.(*events.ExecEvent) - if !ok { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - var maliciousExecPathPrefixes = []string{ - "/dev/shm", - } - - // Running without object cache, to avoid false positives check if the process name is legitimate - if k8sObjCache == nil { - for _, processName := range whitelistedProcessesForMaliciousSource { - if processName == execEvent.Comm { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - } - } - - execPath := GetExecFullPathFromEvent(execEvent) - execPathDir := filepath.Dir(execPath) - for _, maliciousExecPathPrefix := range maliciousExecPathPrefixes { - if strings.HasPrefix(execPathDir, maliciousExecPathPrefix) || - strings.HasPrefix(execEvent.Cwd, maliciousExecPathPrefix) || - strings.HasPrefix(execEvent.ExePath, maliciousExecPathPrefix) { - return ruleengine.DetectionResult{IsFailure: true, Payload: execEvent} - } - } - - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} -} - -func (rule *R1000ExecFromMaliciousSource) EvaluateRuleWithProfile(eventType utils.EventType, 
event utils.K8sEvent, objCache objectcache.ObjectCache) (ruleengine.DetectionResult, error) { - detectionResult := rule.EvaluateRule(eventType, event, objCache.K8sObjectCache()) - if !detectionResult.IsFailure { - return detectionResult, nil - } - // This rule doesn't need profile evaluation since it's based on direct detection - return ruleengine.DetectionResult{IsFailure: true, Payload: nil}, nil -} - -func (rule *R1000ExecFromMaliciousSource) CreateRuleFailure(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache, payload ruleengine.DetectionResult) ruleengine.RuleFailure { - execEvent, _ := event.(*events.ExecEvent) - execPath := GetExecFullPathFromEvent(execEvent) - execPathDir := filepath.Dir(execPath) - upperLayer := execEvent.UpperLayer || execEvent.PupperLayer - - return &GenericRuleFailure{ - BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ - UniqueID: HashStringToMD5(fmt.Sprintf("%s%s%s", execEvent.Comm, execPath, execEvent.Pcomm)), - AlertName: rule.Name(), - InfectedPID: execEvent.Pid, - Arguments: map[string]interface{}{ - "hardlink": execEvent.ExePath, - }, - Severity: R1000ExecFromMaliciousSourceDescriptor.Priority, - Identifiers: &common.Identifiers{ - Process: &common.ProcessEntity{ - Name: execEvent.Comm, - CommandLine: fmt.Sprintf("%s %s", execPath, strings.Join(utils.GetExecArgsFromEvent(&execEvent.Event), " ")), - }, - File: &common.FileEntity{ - Name: filepath.Base(execPath), - Directory: filepath.Dir(execPath), - }, - }, - }, - RuntimeProcessDetails: apitypes.ProcessTree{ - ProcessTree: apitypes.Process{ - Comm: execEvent.Comm, - Gid: &execEvent.Gid, - PID: execEvent.Pid, - Uid: &execEvent.Uid, - UpperLayer: &upperLayer, - PPID: execEvent.Ppid, - Pcomm: execEvent.Pcomm, - Cwd: execEvent.Cwd, - Hardlink: execEvent.ExePath, - Path: execPath, - Cmdline: fmt.Sprintf("%s %s", GetExecPathFromEvent(execEvent), strings.Join(utils.GetExecArgsFromEvent(&execEvent.Event), " ")), - }, - ContainerID: execEvent.Runtime.ContainerID, - }, - TriggerEvent: execEvent.Event.Event, - RuleAlert: apitypes.RuleAlert{ - RuleDescription: fmt.Sprintf("Execution from malicious source: %s", execPathDir), - }, - RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ - PodName: execEvent.GetPod(), - PodLabels: execEvent.K8s.PodLabels, - }, - RuleID: rule.ID(), - Extra: execEvent.GetExtra(), - } -} - -func (rule *R1000ExecFromMaliciousSource) Requirements() ruleengine.RuleSpec { - return &RuleRequirements{ - EventTypes: R1000ExecFromMaliciousSourceDescriptor.Requirements.RequiredEventTypes(), - ProfileRequirements: ruleengine.ProfileRequirement{ - ProfileDependency: apitypes.NotRequired, - ProfileType: apitypes.ApplicationProfile, - }, - } -} diff --git a/pkg/ruleengine/v1/r1000_exec_from_malicious_source_test.go b/pkg/ruleengine/v1/r1000_exec_from_malicious_source_test.go deleted file mode 100644 index 951c8879e..000000000 --- a/pkg/ruleengine/v1/r1000_exec_from_malicious_source_test.go +++ /dev/null @@ -1,120 +0,0 @@ -package ruleengine - -import ( - "testing" - - events "github.com/kubescape/node-agent/pkg/ebpf/events" - "github.com/kubescape/node-agent/pkg/rulemanager/v1/ruleprocess" - "github.com/kubescape/node-agent/pkg/utils" - - tracerexectype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/exec/types" - eventtypes "github.com/inspektor-gadget/inspektor-gadget/pkg/types" -) - -func TestR1000ExecFromMaliciousSource(t *testing.T) { - // Create a new rule - r := CreateRuleR1000ExecFromMaliciousSource() - // Assert r is not nil - if r == nil { - 
t.Errorf("Expected r to not be nil") - } - // Create an exec event - e := &events.ExecEvent{ - Event: tracerexectype.Event{ - Event: eventtypes.Event{ - CommonData: eventtypes.CommonData{ - K8s: eventtypes.K8sMetadata{ - BasicK8sMetadata: eventtypes.BasicK8sMetadata{ - ContainerName: "test", - }, - }, - }, - }, - Comm: "/test", - Args: []string{}, - }, - } - - ruleResult := ruleprocess.ProcessRule(r, utils.ExecveEventType, e, &RuleObjectCacheMock{}) - if ruleResult != nil { - t.Errorf("Expected ruleResult to be nil since test is not a malicious exec") - } - - e.Cwd = "/" - - e.Comm = "/run.sh" - - ruleResult = ruleprocess.ProcessRule(r, utils.ExecveEventType, e, &RuleObjectCacheMock{}) - if ruleResult != nil { - t.Errorf("Expected ruleResult to be nil since exec is not malicious") - } - - e.Comm = "./run.sh" - - ruleResult = ruleprocess.ProcessRule(r, utils.ExecveEventType, e, &RuleObjectCacheMock{}) - if ruleResult != nil { - t.Errorf("Expected ruleResult to be nil since exec is not malicious") - } - - e.Comm = "/dev/shm/run.sh" - - ruleResult = ruleprocess.ProcessRule(r, utils.ExecveEventType, e, &RuleObjectCacheMock{}) - if ruleResult == nil { - t.Errorf("Expected ruleResult since exec is malicious") - } - - e.Comm = "./dev/shm/run.sh" - - ruleResult = ruleprocess.ProcessRule(r, utils.ExecveEventType, e, &RuleObjectCacheMock{}) - if ruleResult == nil { - t.Errorf("Expected ruleResult since exec is malicious") - } - - e.Cwd = "/dev/shm" - e.Comm = "./run.sh" - - ruleResult = ruleprocess.ProcessRule(r, utils.ExecveEventType, e, &RuleObjectCacheMock{}) - if ruleResult == nil { - t.Errorf("Expected ruleResult since exec is malicious") - } - - e.Comm = "./run.sh -al" - - ruleResult = ruleprocess.ProcessRule(r, utils.ExecveEventType, e, &RuleObjectCacheMock{}) - if ruleResult == nil { - t.Errorf("Expected ruleResult since exec is malicious") - } - - // Create an exec event simulating the motd scenario - e = &events.ExecEvent{ - Event: tracerexectype.Event{ - Event: eventtypes.Event{ - CommonData: eventtypes.CommonData{ - K8s: eventtypes.K8sMetadata{ - BasicK8sMetadata: eventtypes.BasicK8sMetadata{ - ContainerName: "test", - }, - }, - }, - }, - Comm: "50-motd-news", - Args: []string{"/bin/sh", "/etc/update-motd.d/50-motd-news", "--force"}, - ExePath: "/bin/sh", // The actual executable - Cwd: "/", - }, - } - - // This should not trigger a rule failure - ruleResult = ruleprocess.ProcessRule(r, utils.ExecveEventType, e, &RuleObjectCacheMock{}) - if ruleResult != nil { - t.Errorf("Got false positive alert for legitimate motd execution:\nCwd: %s\nExePath: %s\nArgs: %v", - e.Cwd, e.ExePath, e.Args) - } - - // For comparison, test a real malicious case - e.ExePath = "/dev/shm/malicious" - ruleResult = ruleprocess.ProcessRule(r, utils.ExecveEventType, e, &RuleObjectCacheMock{}) - if ruleResult == nil { - t.Errorf("Failed to detect actually malicious execution from /dev/shm") - } -} diff --git a/pkg/ruleengine/v1/r1001_exec_binary_not_in_base_image.go b/pkg/ruleengine/v1/r1001_exec_binary_not_in_base_image.go deleted file mode 100644 index a145fe8a4..000000000 --- a/pkg/ruleengine/v1/r1001_exec_binary_not_in_base_image.go +++ /dev/null @@ -1,154 +0,0 @@ -package ruleengine - -import ( - "errors" - "fmt" - "path/filepath" - "strings" - - "github.com/kubescape/node-agent/pkg/ebpf/events" - "github.com/kubescape/node-agent/pkg/objectcache" - "github.com/kubescape/node-agent/pkg/ruleengine" - "github.com/kubescape/node-agent/pkg/utils" - - apitypes "github.com/armosec/armoapi-go/armotypes" - 
"github.com/armosec/armoapi-go/armotypes/common" -) - -const ( - R1001ID = "R1001" - R1001Name = "Exec Binary Not In Base Image" -) - -var R1001ExecBinaryNotInBaseImageRuleDescriptor = ruleengine.RuleDescriptor{ - ID: R1001ID, - Name: R1001Name, - Description: "Detecting exec calls of binaries that are not included in the base image", - Tags: []string{"exec", "malicious", "binary", "base image"}, - Priority: RulePriorityHigh, - Requirements: &RuleRequirements{ - EventTypes: []utils.EventType{utils.ExecveEventType}, - }, - RuleCreationFunc: func() ruleengine.RuleEvaluator { - return CreateRuleR1001ExecBinaryNotInBaseImage() - }, -} -var _ ruleengine.RuleEvaluator = (*R1001ExecBinaryNotInBaseImage)(nil) - -type R1001ExecBinaryNotInBaseImage struct { - BaseRule -} - -func CreateRuleR1001ExecBinaryNotInBaseImage() *R1001ExecBinaryNotInBaseImage { - return &R1001ExecBinaryNotInBaseImage{} -} - -func (rule *R1001ExecBinaryNotInBaseImage) Name() string { - return R1001Name -} - -func (rule *R1001ExecBinaryNotInBaseImage) ID() string { - return R1001ID -} - -func (rule *R1001ExecBinaryNotInBaseImage) DeleteRule() { -} - -func (rule *R1001ExecBinaryNotInBaseImage) EvaluateRule(eventType utils.EventType, event utils.K8sEvent, k8sObjCache objectcache.K8sObjectCache) ruleengine.DetectionResult { - if eventType != utils.ExecveEventType { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - execEvent, ok := event.(*events.ExecEvent) - if !ok { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - if execEvent.UpperLayer || execEvent.PupperLayer { - return ruleengine.DetectionResult{IsFailure: true, Payload: execEvent} - } - - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} -} - -func (rule *R1001ExecBinaryNotInBaseImage) EvaluateRuleWithProfile(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache) (ruleengine.DetectionResult, error) { - detectionResult := rule.EvaluateRule(eventType, event, objCache.K8sObjectCache()) - if !detectionResult.IsFailure { - return detectionResult, nil - } - - execEvent, ok := event.(*events.ExecEvent) - if !ok { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, nil - } - - // Check if the event is expected, if so return nil - whiteListed, err := IsExecEventInProfile(execEvent, objCache, false) - if whiteListed { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, nil - } else if err != nil && !errors.Is(err, ProfileNotFound) { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, err - } - - return ruleengine.DetectionResult{IsFailure: true, Payload: execEvent}, nil -} - -func (rule *R1001ExecBinaryNotInBaseImage) CreateRuleFailure(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache, payload ruleengine.DetectionResult) ruleengine.RuleFailure { - execEvent, _ := event.(*events.ExecEvent) - upperLayer := true - - return &GenericRuleFailure{ - BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ - UniqueID: HashStringToMD5(fmt.Sprintf("%s%s%s", execEvent.Comm, execEvent.ExePath, execEvent.Pcomm)), - AlertName: rule.Name(), - InfectedPID: execEvent.Pid, - Severity: R1001ExecBinaryNotInBaseImageRuleDescriptor.Priority, - Identifiers: &common.Identifiers{ - Process: &common.ProcessEntity{ - Name: execEvent.Comm, - CommandLine: fmt.Sprintf("%s %s", execEvent.ExePath, strings.Join(utils.GetExecArgsFromEvent(&execEvent.Event), " ")), - }, - File: &common.FileEntity{ - Name: 
filepath.Base(GetExecFullPathFromEvent(execEvent)), - Directory: filepath.Dir(GetExecFullPathFromEvent(execEvent)), - }, - }, - }, - RuntimeProcessDetails: apitypes.ProcessTree{ - ProcessTree: apitypes.Process{ - Comm: execEvent.Comm, - Gid: &execEvent.Gid, - PID: execEvent.Pid, - Uid: &execEvent.Uid, - UpperLayer: &upperLayer, - PPID: execEvent.Ppid, - Pcomm: execEvent.Pcomm, - Cwd: execEvent.Cwd, - Hardlink: execEvent.ExePath, - Path: GetExecFullPathFromEvent(execEvent), - Cmdline: fmt.Sprintf("%s %s", GetExecPathFromEvent(execEvent), strings.Join(utils.GetExecArgsFromEvent(&execEvent.Event), " ")), - }, - ContainerID: execEvent.Runtime.ContainerID, - }, - TriggerEvent: execEvent.Event.Event, - RuleAlert: apitypes.RuleAlert{ - RuleDescription: fmt.Sprintf("Process (%s) was executed and is not part of the image", execEvent.Comm), - }, - RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ - PodName: execEvent.GetPod(), - PodLabels: execEvent.K8s.PodLabels, - }, - RuleID: rule.ID(), - Extra: execEvent.GetExtra(), - } -} - -func (rule *R1001ExecBinaryNotInBaseImage) Requirements() ruleengine.RuleSpec { - return &RuleRequirements{ - EventTypes: R1001ExecBinaryNotInBaseImageRuleDescriptor.Requirements.RequiredEventTypes(), - ProfileRequirements: ruleengine.ProfileRequirement{ - ProfileDependency: apitypes.Optional, - ProfileType: apitypes.ApplicationProfile, - }, - } -} diff --git a/pkg/ruleengine/v1/r1001_exec_binary_not_in_base_image_test.go b/pkg/ruleengine/v1/r1001_exec_binary_not_in_base_image_test.go deleted file mode 100644 index 61dffd962..000000000 --- a/pkg/ruleengine/v1/r1001_exec_binary_not_in_base_image_test.go +++ /dev/null @@ -1,41 +0,0 @@ -package ruleengine - -import ( - "testing" - - "github.com/kubescape/node-agent/pkg/rulemanager/v1/ruleprocess" - "github.com/kubescape/node-agent/pkg/utils" - - tracerexectype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/exec/types" - eventtypes "github.com/inspektor-gadget/inspektor-gadget/pkg/types" -) - -func TestR1001ExecBinaryNotInBaseImage(t *testing.T) { - // Create a new rule - r := CreateRuleR1001ExecBinaryNotInBaseImage() - // Assert r is not nil - if r == nil { - t.Errorf("Expected r to not be nil") - } - // Create an exec event - e := &tracerexectype.Event{ - Event: eventtypes.Event{ - CommonData: eventtypes.CommonData{ - K8s: eventtypes.K8sMetadata{ - BasicK8sMetadata: eventtypes.BasicK8sMetadata{ - ContainerName: "test", - }, - }, - }, - }, - Comm: "/test", - Args: []string{}, - UpperLayer: false, - } - - // Test with non-existing binary - ruleResult := ruleprocess.ProcessRule(r, utils.ExecveEventType, e, &RuleObjectCacheMock{}) - if ruleResult != nil { - t.Errorf("Expected ruleResult to be nil since exec is not in the upper layer") - } -} diff --git a/pkg/ruleengine/v1/r1002_load_kernel_module.go b/pkg/ruleengine/v1/r1002_load_kernel_module.go deleted file mode 100644 index 358d798c4..000000000 --- a/pkg/ruleengine/v1/r1002_load_kernel_module.go +++ /dev/null @@ -1,127 +0,0 @@ -package ruleengine - -import ( - "fmt" - - "github.com/kubescape/node-agent/pkg/objectcache" - "github.com/kubescape/node-agent/pkg/ruleengine" - "github.com/kubescape/node-agent/pkg/utils" - - ruleenginetypes "github.com/kubescape/node-agent/pkg/ruleengine/types" - - apitypes "github.com/armosec/armoapi-go/armotypes" -) - -const ( - R1002ID = "R1002" - R1002Name = "Kernel Module Load" -) - -var R1002LoadKernelModuleRuleDescriptor = ruleengine.RuleDescriptor{ - ID: R1002ID, - Name: R1002Name, - Description: "Detecting Kernel 
Module Load.", - Tags: []string{"syscall", "kernel", "module", "load"}, - Priority: RulePriorityCritical, - Requirements: &RuleRequirements{ - EventTypes: []utils.EventType{ - utils.SyscallEventType, - }, - }, - RuleCreationFunc: func() ruleengine.RuleEvaluator { - return CreateRuleR1002LoadKernelModule() - }, -} -var _ ruleengine.RuleEvaluator = (*R1002LoadKernelModule)(nil) - -type R1002LoadKernelModule struct { - BaseRule - alerted bool -} - -func CreateRuleR1002LoadKernelModule() *R1002LoadKernelModule { - return &R1002LoadKernelModule{} -} - -func (rule *R1002LoadKernelModule) Name() string { - return R1002Name -} -func (rule *R1002LoadKernelModule) ID() string { - return R1002ID -} -func (rule *R1002LoadKernelModule) DeleteRule() { -} - -func (rule *R1002LoadKernelModule) EvaluateRule(eventType utils.EventType, event utils.K8sEvent, k8sObjCache objectcache.K8sObjectCache) ruleengine.DetectionResult { - if rule.alerted { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - if eventType != utils.SyscallEventType { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - syscallEvent, ok := event.(*ruleenginetypes.SyscallEvent) - if !ok { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - if syscallEvent.SyscallName == "init_module" || syscallEvent.SyscallName == "finit_module" { - return ruleengine.DetectionResult{IsFailure: true, Payload: syscallEvent} - } - - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} -} - -func (rule *R1002LoadKernelModule) EvaluateRuleWithProfile(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache) (ruleengine.DetectionResult, error) { - // First do basic evaluation - detectionResult := rule.EvaluateRule(eventType, event, objCache.K8sObjectCache()) - if !detectionResult.IsFailure { - return detectionResult, nil - } - - // This rule doesn't need profile evaluation since it's based on direct detection - return detectionResult, nil -} - -func (rule *R1002LoadKernelModule) CreateRuleFailure(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache, payload ruleengine.DetectionResult) ruleengine.RuleFailure { - syscallEvent, _ := event.(*ruleenginetypes.SyscallEvent) - rule.alerted = true - - return &GenericRuleFailure{ - BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ - UniqueID: HashStringToMD5(syscallEvent.SyscallName), - AlertName: rule.Name(), - InfectedPID: syscallEvent.Pid, - Severity: R1002LoadKernelModuleRuleDescriptor.Priority, - }, - RuntimeProcessDetails: apitypes.ProcessTree{ - ProcessTree: apitypes.Process{ - Comm: syscallEvent.Comm, - Gid: &syscallEvent.Gid, - PID: syscallEvent.Pid, - Uid: &syscallEvent.Uid, - }, - ContainerID: syscallEvent.Runtime.ContainerID, - }, - TriggerEvent: syscallEvent.Event, - RuleAlert: apitypes.RuleAlert{ - RuleDescription: fmt.Sprintf("Kernel module load syscall (%s) was called", syscallEvent.SyscallName), - }, - RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ - PodName: syscallEvent.GetPod(), - PodLabels: syscallEvent.K8s.PodLabels, - }, - RuleID: rule.ID(), - } -} - -func (rule *R1002LoadKernelModule) Requirements() ruleengine.RuleSpec { - return &RuleRequirements{ - EventTypes: R1002LoadKernelModuleRuleDescriptor.Requirements.RequiredEventTypes(), - ProfileRequirements: ruleengine.ProfileRequirement{ - ProfileDependency: apitypes.NotRequired, - ProfileType: apitypes.ApplicationProfile, - }, - } -} diff --git a/pkg/ruleengine/v1/r1002_load_kernel_module_test.go 
b/pkg/ruleengine/v1/r1002_load_kernel_module_test.go deleted file mode 100644 index 20c4e6293..000000000 --- a/pkg/ruleengine/v1/r1002_load_kernel_module_test.go +++ /dev/null @@ -1,51 +0,0 @@ -package ruleengine - -import ( - "fmt" - "testing" - - "github.com/kubescape/node-agent/pkg/rulemanager/v1/ruleprocess" - "github.com/kubescape/node-agent/pkg/utils" - - ruleenginetypes "github.com/kubescape/node-agent/pkg/ruleengine/types" -) - -func TestR1002LoadKernelModule(t *testing.T) { - // Create a new rule - r := CreateRuleR1002LoadKernelModule() - // Assert r is not nil - if r == nil { - t.Errorf("Expected r to not be nil") - } - - // Create a syscall event - e := &ruleenginetypes.SyscallEvent{ - Comm: "test", - SyscallName: "test", - } - - ruleResult := ruleprocess.ProcessRule(r, utils.SyscallEventType, e, &RuleObjectCacheMock{}) - if ruleResult != nil { - fmt.Printf("ruleResult: %v\n", ruleResult) - t.Errorf("Expected ruleResult to be nil since syscall is not init_module") - } - - // Create a syscall event with init_module syscall - e.SyscallName = "init_module" - - ruleResult = ruleprocess.ProcessRule(r, utils.SyscallEventType, e, &RuleObjectCacheMock{}) - if ruleResult == nil { - fmt.Printf("ruleResult: %v\n", ruleResult) - t.Errorf("Expected ruleResult to be Failure because of init_module is not allowed") - } - - // Create a syscall event with finit_module syscall - r2 := CreateRuleR1002LoadKernelModule() - e.SyscallName = "finit_module" - - ruleResult = ruleprocess.ProcessRule(r2, utils.SyscallEventType, e, &RuleObjectCacheMock{}) - if ruleResult == nil { - fmt.Printf("ruleResult: %v\n", ruleResult) - t.Errorf("Expected ruleResult to be Failure because of finit_module is not allowed") - } -} diff --git a/pkg/ruleengine/v1/r1003_malicious_ssh_connection.go b/pkg/ruleengine/v1/r1003_malicious_ssh_connection.go deleted file mode 100644 index f1a58270f..000000000 --- a/pkg/ruleengine/v1/r1003_malicious_ssh_connection.go +++ /dev/null @@ -1,242 +0,0 @@ -package ruleengine - -import ( - "fmt" - "os" - "slices" - "strconv" - "strings" - - "github.com/goradd/maps" - "github.com/kubescape/go-logger/helpers" - "github.com/kubescape/node-agent/pkg/objectcache" - "github.com/kubescape/node-agent/pkg/ruleengine" - "github.com/kubescape/node-agent/pkg/utils" - - apitypes "github.com/armosec/armoapi-go/armotypes" - "github.com/armosec/armoapi-go/armotypes/common" - - "github.com/kubescape/go-logger" - tracersshtype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/ssh/types" -) - -const ( - R1003ID = "R1003" - R1003Name = "Malicious SSH Connection" -) - -var R1003MaliciousSSHConnectionRuleDescriptor = ruleengine.RuleDescriptor{ - ID: R1003ID, - Name: R1003Name, - Description: "Detecting ssh connection to disallowed port", - Tags: []string{"ssh", "connection", "port", "malicious"}, - Priority: RulePriorityMed, - Requirements: &RuleRequirements{ - EventTypes: []utils.EventType{utils.SSHEventType}, - }, - RuleCreationFunc: func() ruleengine.RuleEvaluator { - return CreateRuleR1003MaliciousSSHConnection() - }, -} - -var _ ruleengine.RuleEvaluator = (*R1003MaliciousSSHConnection)(nil) - -type R1003MaliciousSSHConnection struct { - BaseRule - allowedPorts []uint16 - ephemeralPortRange [2]uint16 - requests maps.SafeMap[string, string] // Mapping of src IP to dst IP -} - -// ReadPortRange reads the two port numbers from /proc/sys/net/ipv4/ip_local_port_range -func ReadPortRange() ([2]uint16, error) { - // Default port range - var startPort, endPort uint16 = 32768, 60999 - - // Read the contents of the file 
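// --- editorial note (not part of this patch) ---------------------------------
// ReadPortRange below parses /proc/sys/net/ipv4/ip_local_port_range, which holds
// two whitespace-separated port numbers (e.g. "32768\t60999"). A minimal
// standalone sketch of the same parsing, using strconv.ParseUint with a 16-bit
// limit so the 0..65535 bounds check is implicit:
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parsePortRange turns the raw file contents into a [start, end] pair of ports.
func parsePortRange(contents string) ([2]uint16, error) {
	fields := strings.Fields(contents)
	if len(fields) != 2 {
		return [2]uint16{}, fmt.Errorf("expected 2 fields, got %d", len(fields))
	}
	start, err := strconv.ParseUint(fields[0], 10, 16)
	if err != nil {
		return [2]uint16{}, fmt.Errorf("bad start port %q: %w", fields[0], err)
	}
	end, err := strconv.ParseUint(fields[1], 10, 16)
	if err != nil {
		return [2]uint16{}, fmt.Errorf("bad end port %q: %w", fields[1], err)
	}
	if start > end {
		return [2]uint16{}, fmt.Errorf("start port %d greater than end port %d", start, end)
	}
	return [2]uint16{uint16(start), uint16(end)}, nil
}

func main() {
	r, err := parsePortRange("32768\t60999\n")
	fmt.Println(r, err) // [32768 60999] <nil>
}
// ------------------------------------------------------------------------------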
- data, err := os.ReadFile("/proc/sys/net/ipv4/ip_local_port_range") - if err != nil { - return [2]uint16{startPort, endPort}, fmt.Errorf("failed to read port range file: %v", err) - } - - // Convert the data to a string and split by spaces - ports := strings.Fields(string(data)) - if len(ports) != 2 { - return [2]uint16{startPort, endPort}, fmt.Errorf("unexpected format in port range file") - } - - // Convert the port strings to integers - startPortInt, err := strconv.Atoi(ports[0]) - if err != nil { - return [2]uint16{startPort, endPort}, fmt.Errorf("failed to convert start port: %v", err) - } - - endPortInt, err := strconv.Atoi(ports[1]) - if err != nil { - return [2]uint16{startPort, endPort}, fmt.Errorf("failed to convert end port: %v", err) - } - - if startPortInt < 0 || startPortInt > 65535 || endPortInt < 0 || endPortInt > 65535 { - return [2]uint16{startPort, endPort}, fmt.Errorf("invalid port range") - } - - return [2]uint16{uint16(startPortInt), uint16(endPortInt)}, nil -} - -func CreateRuleR1003MaliciousSSHConnection() *R1003MaliciousSSHConnection { - ephemeralPorts, err := ReadPortRange() - if err != nil { - logger.L().Warning("Failed to read port range, setting to default range:", helpers.Error(err)) - } - return &R1003MaliciousSSHConnection{ - allowedPorts: []uint16{22, 2022}, - ephemeralPortRange: ephemeralPorts, - } -} -func (rule *R1003MaliciousSSHConnection) Name() string { - return R1003Name -} - -func (rule *R1003MaliciousSSHConnection) ID() string { - return R1003ID -} - -func (rule *R1003MaliciousSSHConnection) SetParameters(params map[string]interface{}) { - if allowedPortsInterface, ok := params["allowedPorts"].([]interface{}); ok { - if len(allowedPortsInterface) == 0 { - logger.L().Fatal("Allowed ports cannot be empty") - return - } - - var allowedPorts []uint16 - for _, port := range allowedPortsInterface { - if convertedPort, ok := port.(float64); ok { - allowedPorts = append(allowedPorts, uint16(convertedPort)) - } else { - logger.L().Fatal("Failed to convert allowed port to uint16") - return - } - } - rule.allowedPorts = allowedPorts - } else { - logger.L().Fatal("Failed to convert allowed ports to []interface{}") - return - } -} - -func (rule *R1003MaliciousSSHConnection) DeleteRule() { -} - -func (rule *R1003MaliciousSSHConnection) EvaluateRule(eventType utils.EventType, event utils.K8sEvent, k8sObjCache objectcache.K8sObjectCache) ruleengine.DetectionResult { - if eventType != utils.SSHEventType { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - sshEvent, ok := event.(*tracersshtype.Event) - if !ok { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - // Check only outgoing packets (source port is ephemeral) - if sshEvent.SrcPort < rule.ephemeralPortRange[0] || sshEvent.SrcPort > rule.ephemeralPortRange[1] { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - if !slices.Contains(rule.allowedPorts, sshEvent.DstPort) { - // Check if the event is a response to a request we have already seen - if rule.requests.Has(sshEvent.DstIP) { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - return ruleengine.DetectionResult{IsFailure: true, Payload: sshEvent} - } - - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} -} - -func (rule *R1003MaliciousSSHConnection) EvaluateRuleWithProfile(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache) (ruleengine.DetectionResult, error) { - // First do basic evaluation - detectionResult := 
rule.EvaluateRule(eventType, event, objCache.K8sObjectCache()) - if !detectionResult.IsFailure { - return detectionResult, nil - } - - sshEventTyped, _ := event.(*tracersshtype.Event) - nn, err := GetNetworkNeighborhood(sshEventTyped.Runtime.ContainerID, objCache) - if err != nil { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, err - } - - nnContainer, err := GetContainerFromNetworkNeighborhood(nn, sshEventTyped.GetContainer()) - if err != nil { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, err - } - - for _, egress := range nnContainer.Egress { - if egress.IPAddress == sshEventTyped.DstIP { - for _, port := range egress.Ports { - if port.Port != nil { - if uint16(*port.Port) == sshEventTyped.DstPort { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, nil - } - } - } - } - } - - return ruleengine.DetectionResult{IsFailure: true, Payload: nil}, nil -} - -func (rule *R1003MaliciousSSHConnection) CreateRuleFailure(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache, payload ruleengine.DetectionResult) ruleengine.RuleFailure { - sshEvent, _ := event.(*tracersshtype.Event) - - return &GenericRuleFailure{ - BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ - UniqueID: HashStringToMD5(fmt.Sprintf("%s%s%d", sshEvent.Comm, sshEvent.DstIP, sshEvent.DstPort)), - AlertName: rule.Name(), - InfectedPID: sshEvent.Pid, - Arguments: map[string]interface{}{ - "dstIP": sshEvent.DstIP, - "dstPort": sshEvent.DstPort, - }, - Severity: R1003MaliciousSSHConnectionRuleDescriptor.Priority, - Identifiers: &common.Identifiers{ - Process: &common.ProcessEntity{ - Name: sshEvent.Comm, - }, - Network: &common.NetworkEntity{ - DstIP: sshEvent.DstIP, - DstPort: int(sshEvent.DstPort), - Protocol: "TCP", - }, - }, - }, - RuntimeProcessDetails: apitypes.ProcessTree{ - ProcessTree: apitypes.Process{ - Comm: sshEvent.Comm, - Gid: &sshEvent.Gid, - PID: sshEvent.Pid, - Uid: &sshEvent.Uid, - }, - ContainerID: sshEvent.Runtime.ContainerID, - }, - TriggerEvent: sshEvent.Event, - RuleAlert: apitypes.RuleAlert{ - RuleDescription: fmt.Sprintf("Malicious SSH connection attempt to %s:%d", sshEvent.DstIP, sshEvent.DstPort), - }, - RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ - PodName: sshEvent.GetPod(), - PodLabels: sshEvent.K8s.PodLabels, - }, - RuleID: rule.ID(), - } -} - -func (rule *R1003MaliciousSSHConnection) Requirements() ruleengine.RuleSpec { - return &RuleRequirements{ - EventTypes: R1003MaliciousSSHConnectionRuleDescriptor.Requirements.RequiredEventTypes(), - ProfileRequirements: ruleengine.ProfileRequirement{ - ProfileDependency: apitypes.Optional, - ProfileType: apitypes.NetworkProfile, - }, - } -} diff --git a/pkg/ruleengine/v1/r1003_malicious_ssh_connection_test.go b/pkg/ruleengine/v1/r1003_malicious_ssh_connection_test.go deleted file mode 100644 index 7913c326a..000000000 --- a/pkg/ruleengine/v1/r1003_malicious_ssh_connection_test.go +++ /dev/null @@ -1,92 +0,0 @@ -package ruleengine - -import ( - "testing" - - tracersshtype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/ssh/types" - "github.com/kubescape/node-agent/pkg/rulemanager/v1/ruleprocess" - "github.com/kubescape/node-agent/pkg/utils" - "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" - "k8s.io/utils/ptr" - - eventtypes "github.com/inspektor-gadget/inspektor-gadget/pkg/types" -) - -func TestR1003DisallowedSSHConnectionPort_ProcessEvent(t *testing.T) { - rule := CreateRuleR1003MaliciousSSHConnection() - - sshEvent := &tracersshtype.Event{ - 
Event: eventtypes.Event{ - Timestamp: 2, - CommonData: eventtypes.CommonData{ - K8s: eventtypes.K8sMetadata{ - BasicK8sMetadata: eventtypes.BasicK8sMetadata{ - ContainerName: "test", - PodName: "test", - Namespace: "test", - }, - }, - Runtime: eventtypes.BasicRuntimeMetadata{ - ContainerID: "test", - ContainerName: "test", - }, - }, - }, - SrcIP: "1.1.1.1", - DstIP: "2.2.2.2", - DstPort: 22, - SrcPort: 33333, - } - - // Test with whitelisted address without dns cache. - objCache := RuleObjectCacheMock{} - nn := objCache.NetworkNeighborhoodCache().GetNetworkNeighborhood("test") - if nn == nil { - nn = &v1beta1.NetworkNeighborhood{} - nn.Spec.Containers = append(nn.Spec.Containers, v1beta1.NetworkNeighborhoodContainer{ - Name: "test", - - Egress: []v1beta1.NetworkNeighbor{ - { - DNS: "test.com", - DNSNames: []string{"test.com"}, - IPAddress: "1.1.1.1", - Ports: []v1beta1.NetworkPort{ - { - Port: ptr.To(int32(2023)), - }, - }, - }, - }, - }) - - objCache.SetNetworkNeighborhood(nn) - } - - failure := ruleprocess.ProcessRule(rule, utils.SSHEventType, sshEvent, &objCache) - if failure != nil { - t.Errorf("Expected nil since the SSH connection is to an allowed port, got %v", failure) - } - - // Test disallowed port - sshEvent.DstPort = 1234 - failure = ruleprocess.ProcessRule(rule, utils.SSHEventType, sshEvent, &objCache) - if failure == nil { - t.Errorf("Expected failure since the SSH connection is to a disallowed port, got nil") - } - - // Test disallowed port that is in the egress list - sshEvent.DstPort = 2023 - failure = ruleprocess.ProcessRule(rule, utils.SSHEventType, sshEvent, &objCache) - if failure == nil { - t.Errorf("Expected failure since the SSH connection is to a disallowed port, got nil") - } - - // Test allowed port - sshEvent.DstPort = 2022 - sshEvent.DstIP = "3.3.3.3" - failure = ruleprocess.ProcessRule(rule, utils.SSHEventType, sshEvent, &objCache) - if failure != nil { - t.Errorf("Expected nil since the SSH connection is to an allowed port, got %v", failure) - } -} diff --git a/pkg/ruleengine/v1/r1004_exec_from_mount.go b/pkg/ruleengine/v1/r1004_exec_from_mount.go deleted file mode 100644 index 0b64418bc..000000000 --- a/pkg/ruleengine/v1/r1004_exec_from_mount.go +++ /dev/null @@ -1,164 +0,0 @@ -package ruleengine - -import ( - "errors" - "fmt" - "path/filepath" - "strings" - - "github.com/kubescape/node-agent/pkg/ebpf/events" - "github.com/kubescape/node-agent/pkg/objectcache" - "github.com/kubescape/node-agent/pkg/ruleengine" - "github.com/kubescape/node-agent/pkg/utils" - - apitypes "github.com/armosec/armoapi-go/armotypes" - "github.com/armosec/armoapi-go/armotypes/common" -) - -const ( - R1004ID = "R1004" - R1004Name = "Exec from mount" -) - -var R1004ExecFromMountRuleDescriptor = ruleengine.RuleDescriptor{ - ID: R1004ID, - Name: R1004Name, - Description: "Detecting exec calls from mounted paths.", - Tags: []string{"exec", "mount"}, - Priority: RulePriorityMed, - Requirements: &RuleRequirements{ - EventTypes: []utils.EventType{utils.ExecveEventType}, - }, - RuleCreationFunc: func() ruleengine.RuleEvaluator { - return CreateRuleR1004ExecFromMount() - }, -} - -type R1004ExecFromMount struct { - BaseRule -} - -func CreateRuleR1004ExecFromMount() *R1004ExecFromMount { - return &R1004ExecFromMount{} -} -func (rule *R1004ExecFromMount) Name() string { - return R1004Name -} - -func (rule *R1004ExecFromMount) ID() string { - return R1004ID -} - -func (rule *R1004ExecFromMount) DeleteRule() { -} - -func (rule *R1004ExecFromMount) EvaluateRule(eventType utils.EventType, event 
utils.K8sEvent, k8sObjCache objectcache.K8sObjectCache) ruleengine.DetectionResult { - if eventType != utils.ExecveEventType { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - execEvent, ok := event.(*events.ExecEvent) - if !ok { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - mounts, err := GetContainerMountPaths(execEvent.GetNamespace(), execEvent.GetPod(), execEvent.GetContainer(), k8sObjCache) - if err != nil { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - for _, mount := range mounts { - fullPath := GetExecFullPathFromEvent(execEvent) - if rule.isPathContained(fullPath, mount) || rule.isPathContained(execEvent.ExePath, mount) { - return ruleengine.DetectionResult{IsFailure: true, Payload: execEvent} - } - } - - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} -} - -func (rule *R1004ExecFromMount) EvaluateRuleWithProfile(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache) (ruleengine.DetectionResult, error) { - // First do basic evaluation - detectionResult := rule.EvaluateRule(eventType, event, objCache.K8sObjectCache()) - if !detectionResult.IsFailure { - return detectionResult, nil - } - - execEventTyped, _ := event.(*events.ExecEvent) - whiteListed, err := IsExecEventInProfile(execEventTyped, objCache, false) - if whiteListed { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, nil - } else if err != nil && !errors.Is(err, ProfileNotFound) { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, err - } - - return detectionResult, nil -} - -func (rule *R1004ExecFromMount) CreateRuleFailure(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache, payload ruleengine.DetectionResult) ruleengine.RuleFailure { - execEvent, _ := event.(*events.ExecEvent) - upperLayer := execEvent.UpperLayer || execEvent.PupperLayer - - return &GenericRuleFailure{ - BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ - UniqueID: HashStringToMD5(fmt.Sprintf("%s%s%s", execEvent.Comm, execEvent.ExePath, execEvent.Pcomm)), - AlertName: rule.Name(), - InfectedPID: execEvent.Pid, - Arguments: map[string]interface{}{ - "exec": execEvent.ExePath, - "args": execEvent.Args, - }, - Severity: R1004ExecFromMountRuleDescriptor.Priority, - Identifiers: &common.Identifiers{ - Process: &common.ProcessEntity{ - Name: execEvent.Comm, - CommandLine: fmt.Sprintf("%s %s", GetExecFullPathFromEvent(execEvent), strings.Join(utils.GetExecArgsFromEvent(&execEvent.Event), " ")), - }, - File: &common.FileEntity{ - Name: filepath.Base(GetExecFullPathFromEvent(execEvent)), - Directory: filepath.Dir(GetExecFullPathFromEvent(execEvent)), - }, - }, - }, - RuntimeProcessDetails: apitypes.ProcessTree{ - ProcessTree: apitypes.Process{ - Comm: execEvent.Comm, - Gid: &execEvent.Gid, - PID: execEvent.Pid, - Uid: &execEvent.Uid, - UpperLayer: &upperLayer, - PPID: execEvent.Ppid, - Pcomm: execEvent.Pcomm, - Cwd: execEvent.Cwd, - Hardlink: execEvent.ExePath, - Path: GetExecFullPathFromEvent(execEvent), - Cmdline: fmt.Sprintf("%s %s", GetExecPathFromEvent(execEvent), strings.Join(utils.GetExecArgsFromEvent(&execEvent.Event), " ")), - }, - ContainerID: execEvent.Runtime.ContainerID, - }, - TriggerEvent: execEvent.Event.Event, - RuleAlert: apitypes.RuleAlert{ - RuleDescription: fmt.Sprintf("Process (%s) was executed from a mounted path", GetExecFullPathFromEvent(execEvent)), - }, - RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ - PodName: execEvent.GetPod(), - 
PodLabels: execEvent.K8s.PodLabels, - }, - RuleID: rule.ID(), - Extra: execEvent.GetExtra(), - } -} - -func (rule *R1004ExecFromMount) isPathContained(targetpath, basepath string) bool { - return strings.HasPrefix(targetpath, basepath) -} - -func (rule *R1004ExecFromMount) Requirements() ruleengine.RuleSpec { - return &RuleRequirements{ - EventTypes: R1004ExecFromMountRuleDescriptor.Requirements.RequiredEventTypes(), - ProfileRequirements: ruleengine.ProfileRequirement{ - ProfileDependency: apitypes.Optional, - ProfileType: apitypes.ApplicationProfile, - }, - } -} diff --git a/pkg/ruleengine/v1/r1004_exec_from_mount_test.go b/pkg/ruleengine/v1/r1004_exec_from_mount_test.go deleted file mode 100644 index 73f3853b9..000000000 --- a/pkg/ruleengine/v1/r1004_exec_from_mount_test.go +++ /dev/null @@ -1,94 +0,0 @@ -package ruleengine - -import ( - "testing" - - "github.com/kubescape/node-agent/pkg/rulemanager/v1/ruleprocess" - "github.com/kubescape/node-agent/pkg/utils" - - tracerexectype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/exec/types" - eventtypes "github.com/inspektor-gadget/inspektor-gadget/pkg/types" - events "github.com/kubescape/node-agent/pkg/ebpf/events" - "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" - corev1 "k8s.io/api/core/v1" -) - -func TestR1004ExecFromMount(t *testing.T) { - // Create a new rule - r := CreateRuleR1004ExecFromMount() - // Assert r is not nil - if r == nil { - t.Errorf("Expected r to not be nil") - } - e := &events.ExecEvent{ - Event: tracerexectype.Event{ - Event: eventtypes.Event{ - CommonData: eventtypes.CommonData{ - K8s: eventtypes.K8sMetadata{ - BasicK8sMetadata: eventtypes.BasicK8sMetadata{ - ContainerName: "test", - }, - }, - Runtime: eventtypes.BasicRuntimeMetadata{ContainerID: "test"}, - }, - }, - Comm: "/test", - Args: []string{}, - }, - } - - // Test case where path is not mounted - ruleResult := ruleprocess.ProcessRule(r, utils.ExecveEventType, e, &RuleObjectCacheMock{}) - if ruleResult != nil { - t.Errorf("Expected ruleResult to be nil since test is not from a mounted path") - } - - // Test case where path is mounted, but not application profile is found - e.Comm = "/var/test1/test" - objCache := RuleObjectCacheMock{} - objCache.SetPodSpec( - &corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "test", - VolumeMounts: []corev1.VolumeMount{ - { - Name: "test", - MountPath: "/var/test1", - }, - }, - }, - }, - Volumes: []corev1.Volume{ - { - Name: "test", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/var/test1", - }, - }, - }, - }, - }, - ) - ruleResult = ruleprocess.ProcessRule(r, utils.ExecveEventType, e, &RuleObjectCacheMock{}) - if ruleResult != nil { - t.Errorf("Expected ruleResult to be nil since no application profile is found") - } - - // Test case where path is mounted, with application profile - objCache.SetApplicationProfile(&v1beta1.ApplicationProfile{ - Spec: v1beta1.ApplicationProfileSpec{ - Containers: []v1beta1.ApplicationProfileContainer{ - { - Name: "test", - Execs: []v1beta1.ExecCalls{{Path: "/var/other/test"}}, - }, - }, - }, - }) - ruleResult = ruleprocess.ProcessRule(r, utils.ExecveEventType, e, &objCache) - if ruleResult == nil { - t.Errorf("Expected ruleResult since exec is from a mounted path") - } -} diff --git a/pkg/ruleengine/v1/r1005_fileless_execution.go b/pkg/ruleengine/v1/r1005_fileless_execution.go deleted file mode 100644 index e187a139a..000000000 --- a/pkg/ruleengine/v1/r1005_fileless_execution.go +++ /dev/null @@ 
-1,171 +0,0 @@ -package ruleengine - -import ( - "fmt" - "path/filepath" - "strings" - - events "github.com/kubescape/node-agent/pkg/ebpf/events" - "github.com/kubescape/node-agent/pkg/objectcache" - "github.com/kubescape/node-agent/pkg/ruleengine" - "github.com/kubescape/node-agent/pkg/utils" - - apitypes "github.com/armosec/armoapi-go/armotypes" - "github.com/armosec/armoapi-go/armotypes/common" -) - -const ( - R1005ID = "R1005" - R1005Name = "Fileless Execution" -) - -var R1005FilelessExecutionRuleDescriptor = ruleengine.RuleDescriptor{ - ID: R1005ID, - Name: R1005Name, - Description: "Detecting Fileless Execution", - Tags: []string{"fileless", "execution"}, - Priority: RulePriorityHigh, - Requirements: &RuleRequirements{ - EventTypes: []utils.EventType{ - utils.ExecveEventType, - }, - }, - RuleCreationFunc: func() ruleengine.RuleEvaluator { - return CreateRuleR1005FilelessExecution() - }, -} - -var _ ruleengine.RuleEvaluator = (*R1005FilelessExecution)(nil) - -type R1005FilelessExecution struct { - BaseRule -} - -func CreateRuleR1005FilelessExecution() *R1005FilelessExecution { - return &R1005FilelessExecution{} -} - -func (rule *R1005FilelessExecution) Name() string { - return R1005Name -} - -func (rule *R1005FilelessExecution) ID() string { - return R1005ID -} -func (rule *R1005FilelessExecution) DeleteRule() { -} - -func (rule *R1005FilelessExecution) EvaluateRule(eventType utils.EventType, event utils.K8sEvent, k8sObjCache objectcache.K8sObjectCache) ruleengine.DetectionResult { - if eventType != utils.ExecveEventType { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - execEvent, ok := event.(*events.ExecEvent) - if !ok { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - if !strings.Contains(execEvent.ExePath, "memfd") { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - execFullPath := GetExecFullPathFromEvent(execEvent) - execPathDir := filepath.Dir(execFullPath) - - // Check for any /proc/*/fd/* or /proc/self/fd/* patterns - isProcFd := func(path string) bool { - if strings.HasPrefix(path, "/proc/self/fd") { - return true - } - // Match pattern like /proc/1/fd/7 - parts := strings.Split(path, "/") - if len(parts) >= 4 && - parts[1] == "proc" && - parts[3] == "fd" { - return true - } - return false - } - - if isProcFd(execPathDir) || isProcFd(execEvent.Cwd) || isProcFd(execEvent.ExePath) { - return ruleengine.DetectionResult{IsFailure: true, Payload: execEvent} - } - - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} -} - -func (rule *R1005FilelessExecution) EvaluateRuleWithProfile(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache) (ruleengine.DetectionResult, error) { - // First do basic evaluation - detectionResult := rule.EvaluateRule(eventType, event, objCache.K8sObjectCache()) - if !detectionResult.IsFailure { - return detectionResult, nil - } - - // This rule doesn't need profile evaluation since it's based on direct detection - return detectionResult, nil -} - -func (rule *R1005FilelessExecution) CreateRuleFailure(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache, payload ruleengine.DetectionResult) ruleengine.RuleFailure { - execEvent, _ := payload.Payload.(*events.ExecEvent) - execFullPath := GetExecFullPathFromEvent(execEvent) - execPathDir := filepath.Dir(execFullPath) - upperLayer := execEvent.UpperLayer || execEvent.PupperLayer - - return &GenericRuleFailure{ - BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ - 
UniqueID: HashStringToMD5(fmt.Sprintf("%s%s%s", execEvent.Comm, execEvent.ExePath, execEvent.Pcomm)), - AlertName: rule.Name(), - InfectedPID: execEvent.Pid, - Arguments: map[string]interface{}{ - "hardlink": execEvent.ExePath, - }, - Severity: R1005FilelessExecutionRuleDescriptor.Priority, - Identifiers: &common.Identifiers{ - Process: &common.ProcessEntity{ - Name: execEvent.Comm, - CommandLine: fmt.Sprintf("%s %s", execFullPath, strings.Join(utils.GetExecArgsFromEvent(&execEvent.Event), " ")), - }, - File: &common.FileEntity{ - Name: filepath.Base(execFullPath), - Directory: execPathDir, - }, - }, - }, - RuntimeProcessDetails: apitypes.ProcessTree{ - ProcessTree: apitypes.Process{ - Comm: execEvent.Comm, - Gid: &execEvent.Gid, - PID: execEvent.Pid, - Uid: &execEvent.Uid, - UpperLayer: &upperLayer, - PPID: execEvent.Ppid, - Pcomm: execEvent.Pcomm, - Cwd: execEvent.Cwd, - Hardlink: execEvent.ExePath, - Path: execFullPath, - Cmdline: fmt.Sprintf("%s %s", GetExecPathFromEvent(execEvent), strings.Join(utils.GetExecArgsFromEvent(&execEvent.Event), " ")), - }, - ContainerID: execEvent.Runtime.ContainerID, - }, - TriggerEvent: execEvent.Event.Event, - RuleAlert: apitypes.RuleAlert{ - RuleDescription: fmt.Sprintf("Fileless execution detected: exec call \"%s\" is from a malicious source %s", execPathDir, execEvent.ExePath), - }, - RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ - PodName: execEvent.GetPod(), - PodLabels: execEvent.K8s.PodLabels, - }, - RuleID: rule.ID(), - Extra: execEvent.GetExtra(), - } -} - -func (rule *R1005FilelessExecution) Requirements() ruleengine.RuleSpec { - return &RuleRequirements{ - EventTypes: R1005FilelessExecutionRuleDescriptor.Requirements.RequiredEventTypes(), - ProfileRequirements: ruleengine.ProfileRequirement{ - ProfileType: apitypes.ApplicationProfile, - ProfileDependency: apitypes.NotRequired, - }, - } -} diff --git a/pkg/ruleengine/v1/r1006_unshare_system_call.go b/pkg/ruleengine/v1/r1006_unshare_system_call.go deleted file mode 100644 index 577f0db6f..000000000 --- a/pkg/ruleengine/v1/r1006_unshare_system_call.go +++ /dev/null @@ -1,129 +0,0 @@ -package ruleengine - -import ( - "fmt" - - "github.com/kubescape/node-agent/pkg/objectcache" - "github.com/kubescape/node-agent/pkg/ruleengine" - "github.com/kubescape/node-agent/pkg/utils" - - ruleenginetypes "github.com/kubescape/node-agent/pkg/ruleengine/types" - - apitypes "github.com/armosec/armoapi-go/armotypes" -) - -const ( - R1006ID = "R1006" - R1006Name = "Unshare System Call usage" -) - -var R1006UnshareSyscallRuleDescriptor = ruleengine.RuleDescriptor{ - ID: R1006ID, - Name: R1006Name, - Description: "Detecting Unshare System Call usage, which can be used to escape container.", - Tags: []string{"syscall", "escape", "unshare"}, - Priority: RulePriorityMed, - Requirements: &RuleRequirements{ - EventTypes: []utils.EventType{ - utils.SyscallEventType, - }, - }, - RuleCreationFunc: func() ruleengine.RuleEvaluator { - return CreateRuleR1006UnshareSyscall() - }, -} - -var _ ruleengine.RuleEvaluator = (*R1006UnshareSyscall)(nil) - -type R1006UnshareSyscall struct { - BaseRule - alreadyNotified bool -} - -func CreateRuleR1006UnshareSyscall() *R1006UnshareSyscall { - return &R1006UnshareSyscall{alreadyNotified: false} -} - -func (rule *R1006UnshareSyscall) Name() string { - return R1006Name -} - -func (rule *R1006UnshareSyscall) ID() string { - return R1006ID -} -func (rule *R1006UnshareSyscall) DeleteRule() { -} - -func (rule *R1006UnshareSyscall) EvaluateRule(eventType utils.EventType, event 
utils.K8sEvent, k8sObjCache objectcache.K8sObjectCache) ruleengine.DetectionResult { - if rule.alreadyNotified { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - if eventType != utils.SyscallEventType { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - syscallEvent, ok := event.(*ruleenginetypes.SyscallEvent) - if !ok { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - if syscallEvent.SyscallName == "unshare" { - return ruleengine.DetectionResult{IsFailure: true, Payload: syscallEvent} - } - - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} -} - -func (rule *R1006UnshareSyscall) EvaluateRuleWithProfile(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache) (ruleengine.DetectionResult, error) { - // First do basic evaluation - detectionResult := rule.EvaluateRule(eventType, event, objCache.K8sObjectCache()) - if !detectionResult.IsFailure { - return detectionResult, nil - } - - // This rule doesn't need profile evaluation since it's based on direct detection - return detectionResult, nil -} - -func (rule *R1006UnshareSyscall) CreateRuleFailure(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache, payload ruleengine.DetectionResult) ruleengine.RuleFailure { - syscallEvent, _ := event.(*ruleenginetypes.SyscallEvent) - rule.alreadyNotified = true - - return &GenericRuleFailure{ - BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ - UniqueID: HashStringToMD5(syscallEvent.SyscallName), - AlertName: rule.Name(), - InfectedPID: syscallEvent.Pid, - Severity: R1006UnshareSyscallRuleDescriptor.Priority, - }, - RuntimeProcessDetails: apitypes.ProcessTree{ - ProcessTree: apitypes.Process{ - Comm: syscallEvent.Comm, - Gid: &syscallEvent.Gid, - PID: syscallEvent.Pid, - Uid: &syscallEvent.Uid, - }, - ContainerID: syscallEvent.Runtime.ContainerID, - }, - TriggerEvent: syscallEvent.Event, - RuleAlert: apitypes.RuleAlert{ - RuleDescription: fmt.Sprintf("unshare system call executed in %s", syscallEvent.GetContainer()), - }, - RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ - PodName: syscallEvent.GetPod(), - PodLabels: syscallEvent.K8s.PodLabels, - }, - RuleID: rule.ID(), - } -} - -func (rule *R1006UnshareSyscall) Requirements() ruleengine.RuleSpec { - return &RuleRequirements{ - EventTypes: R1006UnshareSyscallRuleDescriptor.Requirements.RequiredEventTypes(), - ProfileRequirements: ruleengine.ProfileRequirement{ - ProfileType: apitypes.ApplicationProfile, - ProfileDependency: apitypes.NotRequired, - }, - } -} diff --git a/pkg/ruleengine/v1/r1006_unshare_system_call_test.go b/pkg/ruleengine/v1/r1006_unshare_system_call_test.go deleted file mode 100644 index 47d12757f..000000000 --- a/pkg/ruleengine/v1/r1006_unshare_system_call_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package ruleengine - -import ( - "fmt" - "testing" - - "github.com/kubescape/node-agent/pkg/rulemanager/v1/ruleprocess" - "github.com/kubescape/node-agent/pkg/utils" - - ruleenginetypes "github.com/kubescape/node-agent/pkg/ruleengine/types" -) - -func TestR1006UnshareSyscall(t *testing.T) { - // Create a new rule - r := CreateRuleR1006UnshareSyscall() - // Assert r is not nil - if r == nil { - t.Errorf("Expected r to not be nil") - } - - // Create a syscall event - e := &ruleenginetypes.SyscallEvent{ - Comm: "test", - SyscallName: "test", - } - - ruleResult := ruleprocess.ProcessRule(r, utils.SyscallEventType, e, &RuleObjectCacheMock{}) - if ruleResult != nil { - fmt.Printf("ruleResult: %v\n", 
ruleResult) - t.Errorf("Expected ruleResult to be nil since syscall is not unshare") - return - } - - // Create a syscall event with unshare syscall - e.SyscallName = "unshare" - - ruleResult = ruleprocess.ProcessRule(r, utils.SyscallEventType, e, &RuleObjectCacheMock{}) - if ruleResult == nil { - fmt.Printf("ruleResult: %v\n", ruleResult) - t.Errorf("Expected ruleResult to be Failure because of unshare is used") - return - } -} diff --git a/pkg/ruleengine/v1/r1007_xmr_crypto_mining.go b/pkg/ruleengine/v1/r1007_xmr_crypto_mining.go deleted file mode 100644 index 5d89c7ac3..000000000 --- a/pkg/ruleengine/v1/r1007_xmr_crypto_mining.go +++ /dev/null @@ -1,135 +0,0 @@ -package ruleengine - -import ( - "fmt" - "path/filepath" - - "github.com/kubescape/node-agent/pkg/objectcache" - "github.com/kubescape/node-agent/pkg/ruleengine" - "github.com/kubescape/node-agent/pkg/utils" - - tracerrandomxtype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/randomx/types" - - apitypes "github.com/armosec/armoapi-go/armotypes" - "github.com/armosec/armoapi-go/armotypes/common" -) - -const ( - R1007ID = "R1007" - R1007Name = "XMR Crypto Mining Detection" -) - -var R1007XMRCryptoMiningRuleDescriptor = ruleengine.RuleDescriptor{ - ID: R1007ID, - Name: R1007Name, - Description: "Detecting XMR Crypto Miners by randomx algorithm usage.", - Tags: []string{"crypto", "miners", "malicious"}, - Priority: RulePriorityCritical, - Requirements: &RuleRequirements{ - EventTypes: []utils.EventType{ - utils.RandomXEventType, - }, - }, - RuleCreationFunc: func() ruleengine.RuleEvaluator { - return CreateRuleR1007XMRCryptoMining() - }, -} - -var _ ruleengine.RuleEvaluator = (*R1007XMRCryptoMining)(nil) - -type R1007XMRCryptoMining struct { - BaseRule -} - -func CreateRuleR1007XMRCryptoMining() *R1007XMRCryptoMining { - return &R1007XMRCryptoMining{} -} - -func (rule *R1007XMRCryptoMining) Name() string { - return R1007Name -} - -func (rule *R1007XMRCryptoMining) ID() string { - return R1007ID -} - -func (rule *R1007XMRCryptoMining) DeleteRule() { -} - -func (rule *R1007XMRCryptoMining) EvaluateRule(eventType utils.EventType, event utils.K8sEvent, k8sObjCache objectcache.K8sObjectCache) ruleengine.DetectionResult { - if eventType != utils.RandomXEventType { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - randomXEvent, ok := event.(*tracerrandomxtype.Event) - if !ok { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - return ruleengine.DetectionResult{IsFailure: true, Payload: randomXEvent} -} - -func (rule *R1007XMRCryptoMining) EvaluateRuleWithProfile(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache) (ruleengine.DetectionResult, error) { - // First do basic evaluation - detectionResult := rule.EvaluateRule(eventType, event, objCache.K8sObjectCache()) - if !detectionResult.IsFailure { - return detectionResult, nil - } - - // This rule doesn't need profile evaluation since it's based on direct detection - return detectionResult, nil -} - -func (rule *R1007XMRCryptoMining) CreateRuleFailure(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache, payload ruleengine.DetectionResult) ruleengine.RuleFailure { - randomXEvent, _ := event.(*tracerrandomxtype.Event) - - return &GenericRuleFailure{ - BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ - UniqueID: HashStringToMD5(fmt.Sprintf("%s%s", randomXEvent.ExePath, randomXEvent.Comm)), - AlertName: rule.Name(), - InfectedPID: randomXEvent.Pid, - Severity: 
R1007XMRCryptoMiningRuleDescriptor.Priority, - Identifiers: &common.Identifiers{ - Process: &common.ProcessEntity{ - Name: randomXEvent.Comm, - }, - File: &common.FileEntity{ - Name: filepath.Base(randomXEvent.ExePath), - Directory: filepath.Dir(randomXEvent.ExePath), - }, - }, - }, - RuntimeProcessDetails: apitypes.ProcessTree{ - ProcessTree: apitypes.Process{ - Comm: randomXEvent.Comm, - Gid: &randomXEvent.Gid, - PID: randomXEvent.Pid, - Uid: &randomXEvent.Uid, - UpperLayer: &randomXEvent.UpperLayer, - PPID: randomXEvent.PPid, - Hardlink: randomXEvent.ExePath, - Path: randomXEvent.ExePath, - }, - ContainerID: randomXEvent.Runtime.ContainerID, - }, - TriggerEvent: randomXEvent.Event, - RuleAlert: apitypes.RuleAlert{ - RuleDescription: fmt.Sprintf("XMR Crypto Miner process: (%s) executed", randomXEvent.ExePath), - }, - RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ - PodName: randomXEvent.GetPod(), - PodLabels: randomXEvent.K8s.PodLabels, - }, - RuleID: rule.ID(), - } -} - -func (rule *R1007XMRCryptoMining) Requirements() ruleengine.RuleSpec { - return &RuleRequirements{ - EventTypes: R1007XMRCryptoMiningRuleDescriptor.Requirements.RequiredEventTypes(), - ProfileRequirements: ruleengine.ProfileRequirement{ - ProfileType: apitypes.ApplicationProfile, - ProfileDependency: apitypes.NotRequired, - }, - } -} diff --git a/pkg/ruleengine/v1/r1007_xmr_crypto_mining_test.go b/pkg/ruleengine/v1/r1007_xmr_crypto_mining_test.go deleted file mode 100644 index 339ec0415..000000000 --- a/pkg/ruleengine/v1/r1007_xmr_crypto_mining_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package ruleengine - -import ( - "fmt" - "testing" - - "github.com/kubescape/node-agent/pkg/rulemanager/v1/ruleprocess" - "github.com/kubescape/node-agent/pkg/utils" - - tracerrandomxtype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/randomx/types" -) - -func TestR1007XMRCryptoMining(t *testing.T) { - // Create a new rule - r := CreateRuleR1007XMRCryptoMining() - // Assert r is not nil - if r == nil { - t.Errorf("Expected r to not be nil") - } - - // Test RandomX event - e3 := &tracerrandomxtype.Event{ - Comm: "test", - } - - ruleResult := ruleprocess.ProcessRule(r, utils.RandomXEventType, e3, &RuleObjectCacheMock{}) - if ruleResult == nil { - fmt.Printf("ruleResult: %v\n", ruleResult) - t.Errorf("Expected ruleResult to be Failure because of RandomX event") - return - } - -} diff --git a/pkg/ruleengine/v1/r1008_crypto_mining_domain.go b/pkg/ruleengine/v1/r1008_crypto_mining_domain.go deleted file mode 100644 index 61c2447ca..000000000 --- a/pkg/ruleengine/v1/r1008_crypto_mining_domain.go +++ /dev/null @@ -1,258 +0,0 @@ -package ruleengine - -import ( - "fmt" - "path/filepath" - "slices" - - "github.com/goradd/maps" - "github.com/kubescape/node-agent/pkg/objectcache" - "github.com/kubescape/node-agent/pkg/ruleengine" - "github.com/kubescape/node-agent/pkg/utils" - - apitypes "github.com/armosec/armoapi-go/armotypes" - "github.com/armosec/armoapi-go/armotypes/common" - tracerdnstype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/dns/types" -) - -const ( - R1008ID = "R1008" - R1008Name = "Crypto Mining Domain Communication" -) - -var commonlyUsedCryptoMinersDomains = []string{ - "2cryptocalc.com.", - "2miners.com.", - "antpool.com.", - "asia1.ethpool.org.", - "bohemianpool.com.", - "botbox.dev.", - "btm.antpool.com.", - "c3pool.com.", - "c4pool.org.", - "ca.minexmr.com.", - "cn.stratum.slushpool.com.", - "dash.antpool.com.", - "data.miningpoolstats.stream.", - "de.minexmr.com.", - "eth-ar.dwarfpool.com.", - 
"eth-asia.dwarfpool.com.", - "eth-asia1.nanopool.org.", - "eth-au.dwarfpool.com.", - "eth-au1.nanopool.org.", - "eth-br.dwarfpool.com.", - "eth-cn.dwarfpool.com.", - "eth-cn2.dwarfpool.com.", - "eth-eu.dwarfpool.com.", - "eth-eu1.nanopool.org.", - "eth-eu2.nanopool.org.", - "eth-hk.dwarfpool.com.", - "eth-jp1.nanopool.org.", - "eth-ru.dwarfpool.com.", - "eth-ru2.dwarfpool.com.", - "eth-sg.dwarfpool.com.", - "eth-us-east1.nanopool.org.", - "eth-us-west1.nanopool.org.", - "eth-us.dwarfpool.com.", - "eth-us2.dwarfpool.com.", - "eth.antpool.com.", - "eu.stratum.slushpool.com.", - "eu1.ethermine.org.", - "eu1.ethpool.org.", - "fastpool.xyz.", - "fr.minexmr.com.", - "kriptokyng.com.", - "mine.moneropool.com.", - "mine.xmrpool.net.", - "miningmadness.com.", - "monero.cedric-crispin.com.", - "monero.crypto-pool.fr.", - "monero.fairhash.org.", - "monero.hashvault.pro.", - "monero.herominers.com.", - "monerod.org.", - "monerohash.com.", - "moneroocean.stream.", - "monerop.com.", - "multi-pools.com.", - "p2pool.io.", - "pool.kryptex.com.", - "pool.minexmr.com.", - "pool.monero.hashvault.pro.", - "pool.rplant.xyz.", - "pool.supportxmr.com.", - "pool.xmr.pt.", - "prohashing.com.", - "rx.unmineable.com.", - "sg.minexmr.com.", - "sg.stratum.slushpool.com.", - "skypool.org.", - "solo-xmr.2miners.com.", - "ss.antpool.com.", - "stratum-btm.antpool.com.", - "stratum-dash.antpool.com.", - "stratum-eth.antpool.com.", - "stratum-ltc.antpool.com.", - "stratum-xmc.antpool.com.", - "stratum-zec.antpool.com.", - "stratum.antpool.com.", - "supportxmr.com.", - "trustpool.cc.", - "us-east.stratum.slushpool.com.", - "us1.ethermine.org.", - "us1.ethpool.org.", - "us2.ethermine.org.", - "us2.ethpool.org.", - "web.xmrpool.eu.", - "www.domajorpool.com.", - "www.dxpool.com.", - "www.mining-dutch.nl.", - "xmc.antpool.com.", - "xmr-asia1.nanopool.org.", - "xmr-au1.nanopool.org.", - "xmr-eu1.nanopool.org.", - "xmr-eu2.nanopool.org.", - "xmr-jp1.nanopool.org.", - "xmr-us-east1.nanopool.org.", - "xmr-us-west1.nanopool.org.", - "xmr.2miners.com.", - "xmr.crypto-pool.fr.", - "xmr.gntl.uk.", - "xmr.nanopool.org.", - "xmr.pool-pay.com.", - "xmr.pool.minergate.com.", - "xmr.solopool.org.", - "xmr.volt-mine.com.", - "xmr.zeropool.io.", - "zec.antpool.com.", - "zergpool.com.", - "auto.c3pool.org.", - "us.monero.herominers.com.", -} - -var R1008CryptoMiningDomainCommunicationRuleDescriptor = ruleengine.RuleDescriptor{ - ID: R1008ID, - Name: R1008Name, - Description: "Detecting Crypto miners communication by domain", - Tags: []string{"network", "crypto", "miners", "malicious", "dns"}, - Priority: RulePriorityCritical, - Requirements: &RuleRequirements{ - EventTypes: []utils.EventType{ - utils.DnsEventType, - }, - }, - RuleCreationFunc: func() ruleengine.RuleEvaluator { - return CreateRuleR1008CryptoMiningDomainCommunication() - }, -} - -var _ ruleengine.RuleEvaluator = (*R1008CryptoMiningDomainCommunication)(nil) - -type R1008CryptoMiningDomainCommunication struct { - BaseRule - alertedDomains maps.SafeMap[string, bool] -} - -func CreateRuleR1008CryptoMiningDomainCommunication() *R1008CryptoMiningDomainCommunication { - return &R1008CryptoMiningDomainCommunication{} -} - -func (rule *R1008CryptoMiningDomainCommunication) Name() string { - return R1008Name -} - -func (rule *R1008CryptoMiningDomainCommunication) ID() string { - return R1008ID -} - -func (rule *R1008CryptoMiningDomainCommunication) DeleteRule() { -} - -func (rule *R1008CryptoMiningDomainCommunication) EvaluateRule(eventType utils.EventType, event utils.K8sEvent, k8sObjCache 
objectcache.K8sObjectCache) ruleengine.DetectionResult { - if eventType != utils.DnsEventType { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - dnsEvent, ok := event.(*tracerdnstype.Event) - if !ok { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - if rule.alertedDomains.Has(dnsEvent.DNSName) { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - if slices.Contains(commonlyUsedCryptoMinersDomains, dnsEvent.DNSName) { - return ruleengine.DetectionResult{IsFailure: true, Payload: dnsEvent} - } - - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} -} - -func (rule *R1008CryptoMiningDomainCommunication) EvaluateRuleWithProfile(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache) (ruleengine.DetectionResult, error) { - // First do basic evaluation - detectionResult := rule.EvaluateRule(eventType, event, objCache.K8sObjectCache()) - if !detectionResult.IsFailure { - return detectionResult, nil - } - - // This rule doesn't need profile evaluation since it's based on direct detection - return detectionResult, nil -} - -func (rule *R1008CryptoMiningDomainCommunication) CreateRuleFailure(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache, payload ruleengine.DetectionResult) ruleengine.RuleFailure { - dnsEvent, _ := event.(*tracerdnstype.Event) - rule.alertedDomains.Set(dnsEvent.DNSName, true) - - return &GenericRuleFailure{ - BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ - UniqueID: HashStringToMD5(fmt.Sprintf("%s%s", dnsEvent.DNSName, dnsEvent.Comm)), - AlertName: rule.Name(), - InfectedPID: dnsEvent.Pid, - Severity: R1008CryptoMiningDomainCommunicationRuleDescriptor.Priority, - Identifiers: &common.Identifiers{ - Process: &common.ProcessEntity{ - Name: dnsEvent.Comm, - }, - File: &common.FileEntity{ - Name: filepath.Base(dnsEvent.Exepath), - Directory: filepath.Dir(dnsEvent.Exepath), - }, - Dns: &common.DnsEntity{ - Domain: dnsEvent.DNSName, - }, - }, - }, - RuntimeProcessDetails: apitypes.ProcessTree{ - ProcessTree: apitypes.Process{ - Comm: dnsEvent.Comm, - Gid: &dnsEvent.Gid, - PID: dnsEvent.Pid, - Uid: &dnsEvent.Uid, - Pcomm: dnsEvent.Pcomm, - Path: dnsEvent.Exepath, - Cwd: dnsEvent.Cwd, - PPID: dnsEvent.Ppid, - }, - ContainerID: dnsEvent.Runtime.ContainerID, - }, - TriggerEvent: dnsEvent.Event, - RuleAlert: apitypes.RuleAlert{ - RuleDescription: fmt.Sprintf("Communication with a known crypto mining domain: %s", dnsEvent.DNSName), - }, - RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ - PodName: dnsEvent.GetPod(), - PodLabels: dnsEvent.K8s.PodLabels, - }, - RuleID: rule.ID(), - } -} - -func (rule *R1008CryptoMiningDomainCommunication) Requirements() ruleengine.RuleSpec { - return &RuleRequirements{ - EventTypes: R1008CryptoMiningDomainCommunicationRuleDescriptor.Requirements.RequiredEventTypes(), - ProfileRequirements: ruleengine.ProfileRequirement{ - ProfileDependency: apitypes.NotRequired, - }, - } -} diff --git a/pkg/ruleengine/v1/r1008_crypto_mining_domain_test.go b/pkg/ruleengine/v1/r1008_crypto_mining_domain_test.go deleted file mode 100644 index ff7ee5ac8..000000000 --- a/pkg/ruleengine/v1/r1008_crypto_mining_domain_test.go +++ /dev/null @@ -1,41 +0,0 @@ -package ruleengine - -import ( - "fmt" - "testing" - - "github.com/kubescape/node-agent/pkg/rulemanager/v1/ruleprocess" - "github.com/kubescape/node-agent/pkg/utils" - - tracerdnstype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/dns/types" -) - -func 
TestR1008CryptoMiningDomainCommunication(t *testing.T) { - // Create a new rule - r := CreateRuleR1008CryptoMiningDomainCommunication() - // Assert r is not nil - if r == nil { - t.Errorf("Expected r to not be nil") - } - - // Create dns event - e2 := &tracerdnstype.Event{ - DNSName: "xmr.gntl.uk.", - } - - ruleResult := ruleprocess.ProcessRule(r, utils.DnsEventType, e2, &RuleObjectCacheMock{}) - if ruleResult == nil { - fmt.Printf("ruleResult: %v\n", ruleResult) - t.Errorf("Expected ruleResult to be Failure because of dns name is in the commonly used crypto miners domains") - return - } - - e2.DNSName = "amit.com" - - ruleResult = ruleprocess.ProcessRule(r, utils.DnsEventType, e2, &RuleObjectCacheMock{}) - if ruleResult != nil { - fmt.Printf("ruleResult: %v\n", ruleResult) - t.Errorf("Expected ruleResult to be nil since dns name is not in the commonly used crypto miners domains") - return - } -} diff --git a/pkg/ruleengine/v1/r1009_crypto_mining_port.go b/pkg/ruleengine/v1/r1009_crypto_mining_port.go deleted file mode 100644 index f4b3fab80..000000000 --- a/pkg/ruleengine/v1/r1009_crypto_mining_port.go +++ /dev/null @@ -1,172 +0,0 @@ -package ruleengine - -import ( - "fmt" - "slices" - - apitypes "github.com/armosec/armoapi-go/armotypes" - "github.com/armosec/armoapi-go/armotypes/common" - tracernetworktype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/network/types" - "github.com/kubescape/node-agent/pkg/objectcache" - "github.com/kubescape/node-agent/pkg/ruleengine" - "github.com/kubescape/node-agent/pkg/utils" -) - -const ( - R1009ID = "R1009" - R1009Name = "Crypto Mining Related Port Communication" -) - -var CommonlyUsedCryptoMinersPorts = []uint16{ - 3333, // Monero (XMR) - Stratum mining protocol (TCP). - 45700, // Monero (XMR) - Stratum mining protocol (TCP). 
(stratum+tcp://xmr.pool.minergate.com) -} - -var R1009CryptoMiningRelatedPortRuleDescriptor = ruleengine.RuleDescriptor{ - ID: R1009ID, - Name: R1009Name, - Description: "Detecting Crypto Miners by suspicious port usage.", - Tags: []string{"network", "crypto", "miners", "malicious"}, - Priority: RulePriorityLow, - Requirements: &RuleRequirements{ - EventTypes: []utils.EventType{ - utils.NetworkEventType, - }, - }, - RuleCreationFunc: func() ruleengine.RuleEvaluator { - return CreateRuleR1009CryptoMiningRelatedPort() - }, -} - -var _ ruleengine.RuleEvaluator = (*R1009CryptoMiningRelatedPort)(nil) - -type R1009CryptoMiningRelatedPort struct { - BaseRule - alreadyNotified bool -} - -func CreateRuleR1009CryptoMiningRelatedPort() *R1009CryptoMiningRelatedPort { - return &R1009CryptoMiningRelatedPort{} -} - -func (rule *R1009CryptoMiningRelatedPort) Name() string { - return R1009Name -} - -func (rule *R1009CryptoMiningRelatedPort) ID() string { - return R1009ID -} - -func (rule *R1009CryptoMiningRelatedPort) DeleteRule() { -} - -func (rule *R1009CryptoMiningRelatedPort) EvaluateRule(eventType utils.EventType, event utils.K8sEvent, k8sObjCache objectcache.K8sObjectCache) ruleengine.DetectionResult { - if eventType != utils.NetworkEventType { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - networkEvent, ok := event.(*tracernetworktype.Event) - if !ok { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - if rule.alreadyNotified { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - if networkEvent.Proto == "TCP" && networkEvent.PktType == "OUTGOING" && slices.Contains(CommonlyUsedCryptoMinersPorts, networkEvent.Port) { - return ruleengine.DetectionResult{IsFailure: true, Payload: networkEvent} - } - - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} -} - -func (rule *R1009CryptoMiningRelatedPort) EvaluateRuleWithProfile(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache) (ruleengine.DetectionResult, error) { - // First do basic evaluation - detectionResult := rule.EvaluateRule(eventType, event, objCache.K8sObjectCache()) - if !detectionResult.IsFailure { - return detectionResult, nil - } - - networkEventTyped, _ := event.(*tracernetworktype.Event) - nn, err := GetNetworkNeighborhood(networkEventTyped.Runtime.ContainerID, objCache) - if err != nil { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, err - } - - nnContainer, err := GetContainerFromNetworkNeighborhood(nn, networkEventTyped.GetContainer()) - if err != nil { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, err - } - - // Check if the port is in the egress list - for _, nn := range nnContainer.Egress { - for _, port := range nn.Ports { - if port.Port == nil { - continue - } - if networkEventTyped.Port == uint16(*port.Port) { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, nil - } - } - } - - return ruleengine.DetectionResult{IsFailure: true, Payload: nil}, nil -} - -func (rule *R1009CryptoMiningRelatedPort) CreateRuleFailure(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache, payload ruleengine.DetectionResult) ruleengine.RuleFailure { - networkEvent, _ := event.(*tracernetworktype.Event) - rule.alreadyNotified = true - - return &GenericRuleFailure{ - BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ - UniqueID: HashStringToMD5(fmt.Sprintf("%s%d", networkEvent.Comm, networkEvent.Port)), - AlertName: rule.Name(), - Arguments: 
map[string]interface{}{ - "port": networkEvent.Port, - "proto": networkEvent.Proto, - "ip": networkEvent.DstEndpoint.Addr, - }, - InfectedPID: networkEvent.Pid, - Severity: R1009CryptoMiningRelatedPortRuleDescriptor.Priority, - Identifiers: &common.Identifiers{ - Process: &common.ProcessEntity{ - Name: networkEvent.Comm, - }, - Network: &common.NetworkEntity{ - DstIP: networkEvent.DstEndpoint.Addr, - DstPort: int(networkEvent.Port), - Protocol: networkEvent.Proto, - }, - }, - }, - RuntimeProcessDetails: apitypes.ProcessTree{ - ProcessTree: apitypes.Process{ - Comm: networkEvent.Comm, - Gid: &networkEvent.Gid, - PID: networkEvent.Pid, - Uid: &networkEvent.Uid, - }, - ContainerID: networkEvent.Runtime.ContainerID, - }, - TriggerEvent: networkEvent.Event, - RuleAlert: apitypes.RuleAlert{ - RuleDescription: fmt.Sprintf("Detected crypto mining related port communication on port %d to %s with protocol %s", networkEvent.Port, networkEvent.DstEndpoint.Addr, networkEvent.Proto), - }, - RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ - PodName: networkEvent.GetPod(), - PodLabels: networkEvent.K8s.PodLabels, - }, - RuleID: rule.ID(), - } -} - -func (rule *R1009CryptoMiningRelatedPort) Requirements() ruleengine.RuleSpec { - return &RuleRequirements{ - EventTypes: R1009CryptoMiningRelatedPortRuleDescriptor.Requirements.RequiredEventTypes(), - ProfileRequirements: ruleengine.ProfileRequirement{ - ProfileDependency: apitypes.Optional, - ProfileType: apitypes.NetworkProfile, - }, - } -} diff --git a/pkg/ruleengine/v1/r1009_crypto_mining_port_test.go b/pkg/ruleengine/v1/r1009_crypto_mining_port_test.go deleted file mode 100644 index 21bfbc366..000000000 --- a/pkg/ruleengine/v1/r1009_crypto_mining_port_test.go +++ /dev/null @@ -1,91 +0,0 @@ -package ruleengine - -import ( - "testing" - - "github.com/kubescape/node-agent/pkg/rulemanager/v1/ruleprocess" - "github.com/kubescape/node-agent/pkg/utils" - "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" - - tracerexectype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/exec/types" - tracernetworktype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/network/types" - eventtypes "github.com/inspektor-gadget/inspektor-gadget/pkg/types" -) - -func TestR1009CryptoMiningRelatedPort(t *testing.T) { - rule := &R1009CryptoMiningRelatedPort{} - - // Test when eventType is not NetworkEventType - eventType := utils.RandomXEventType - event := &tracernetworktype.Event{} - result := ruleprocess.ProcessRule(rule, eventType, event, &RuleObjectCacheMock{}) - if result != nil { - t.Errorf("Expected nil, got %v", result) - } - - // Test when event is not of type *tracernetworktype.Event - eventType = utils.NetworkEventType - event2 := &tracerexectype.Event{} - result = ruleprocess.ProcessRule(rule, eventType, event2, &RuleObjectCacheMock{}) - if result != nil { - t.Errorf("Expected nil, got %v", result) - } - - var port int32 = 3334 - - // Test with whitelisted port - objCache := RuleObjectCacheMock{} - nn := objCache.NetworkNeighborhoodCache().GetNetworkNeighborhood("test") - if nn == nil { - nn = &v1beta1.NetworkNeighborhood{} - nn.Spec.Containers = append(nn.Spec.Containers, v1beta1.NetworkNeighborhoodContainer{ - Name: "test", - - Egress: []v1beta1.NetworkNeighbor{ - { - DNS: "test.com", - Ports: []v1beta1.NetworkPort{ - { - Port: &port, - }, - }, - }, - }, - }) - - objCache.SetNetworkNeighborhood(nn) - } - - // Test when event meets all conditions to return a ruleFailure - eventType = utils.NetworkEventType - event = 
&tracernetworktype.Event{ - Event: eventtypes.Event{ - CommonData: eventtypes.CommonData{ - K8s: eventtypes.K8sMetadata{ - BasicK8sMetadata: eventtypes.BasicK8sMetadata{ - ContainerName: "test", - }, - }, - }, - }, - Proto: "TCP", - PktType: "OUTGOING", - Port: CommonlyUsedCryptoMinersPorts[0], - Comm: "testComm", - Gid: 1, - Pid: 1, - Uid: 1, - } - result = ruleprocess.ProcessRule(rule, eventType, event, &objCache) - if result == nil { - t.Errorf("Expected ruleFailure, got nil") - } - - // Test when event does not meet conditions to return a ruleFailure - port = 3333 - objCache.nn.Spec.Containers[0].Egress[0].Ports[0].Port = &port - result = ruleprocess.ProcessRule(rule, eventType, event, &objCache) - if result != nil { - t.Errorf("Expected nil, got %v", result) - } -} diff --git a/pkg/ruleengine/v1/r1010_symlink_created_over_sensitive_file.go b/pkg/ruleengine/v1/r1010_symlink_created_over_sensitive_file.go deleted file mode 100644 index e7a00a064..000000000 --- a/pkg/ruleengine/v1/r1010_symlink_created_over_sensitive_file.go +++ /dev/null @@ -1,177 +0,0 @@ -package ruleengine - -import ( - "fmt" - "path/filepath" - "strings" - - "github.com/kubescape/node-agent/pkg/objectcache" - "github.com/kubescape/node-agent/pkg/ruleengine" - "github.com/kubescape/node-agent/pkg/utils" - - apitypes "github.com/armosec/armoapi-go/armotypes" - "github.com/armosec/armoapi-go/armotypes/common" - "github.com/kubescape/go-logger" - "github.com/kubescape/go-logger/helpers" - - tracersymlinktype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/symlink/types" -) - -const ( - R1010ID = "R1010" - R1010Name = "Symlink Created Over Sensitive File" -) - -var R1010SymlinkCreatedOverSensitiveFileRuleDescriptor = ruleengine.RuleDescriptor{ - ID: R1010ID, - Name: R1010Name, - Description: "Detecting symlink creation over sensitive files.", - Tags: []string{"files", "malicious"}, - Priority: RulePriorityHigh, - Requirements: &RuleRequirements{ - EventTypes: []utils.EventType{ - utils.SymlinkEventType, - }, - }, - RuleCreationFunc: func() ruleengine.RuleEvaluator { - return CreateRuleR1010SymlinkCreatedOverSensitiveFile() - }, - RulePolicySupport: true, -} - -var _ ruleengine.RuleEvaluator = (*R1010SymlinkCreatedOverSensitiveFile)(nil) - -type R1010SymlinkCreatedOverSensitiveFile struct { - BaseRule - additionalPaths []string -} - -func CreateRuleR1010SymlinkCreatedOverSensitiveFile() *R1010SymlinkCreatedOverSensitiveFile { - return &R1010SymlinkCreatedOverSensitiveFile{ - additionalPaths: SensitiveFiles, - } -} - -func (rule *R1010SymlinkCreatedOverSensitiveFile) SetParameters(parameters map[string]interface{}) { - rule.BaseRule.SetParameters(parameters) - - additionalPathsInterface := rule.GetParameters()["additionalPaths"] - if additionalPathsInterface == nil { - return - } - - additionalPaths, ok := InterfaceToStringSlice(additionalPathsInterface) - if ok { - for _, path := range additionalPaths { - rule.additionalPaths = append(rule.additionalPaths, fmt.Sprintf("%v", path)) - } - } else { - logger.L().Warning("failed to convert additionalPaths to []string", helpers.String("ruleID", rule.ID())) - } -} - -func (rule *R1010SymlinkCreatedOverSensitiveFile) Name() string { - return R1010Name -} - -func (rule *R1010SymlinkCreatedOverSensitiveFile) ID() string { - return R1010ID -} - -func (rule *R1010SymlinkCreatedOverSensitiveFile) DeleteRule() { -} - -func (rule *R1010SymlinkCreatedOverSensitiveFile) EvaluateRule(eventType utils.EventType, event utils.K8sEvent, k8sObjCache objectcache.K8sObjectCache) 
ruleengine.DetectionResult { - if eventType != utils.SymlinkEventType { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - symlinkEvent, ok := event.(*tracersymlinktype.Event) - if !ok { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - for _, path := range rule.additionalPaths { - if strings.HasPrefix(symlinkEvent.OldPath, path) { - return ruleengine.DetectionResult{IsFailure: true, Payload: symlinkEvent.OldPath} - } - } - - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} -} - -func (rule *R1010SymlinkCreatedOverSensitiveFile) EvaluateRuleWithProfile(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache) (ruleengine.DetectionResult, error) { - // First do basic evaluation - detectionResult := rule.EvaluateRule(eventType, event, objCache.K8sObjectCache()) - if !detectionResult.IsFailure { - return detectionResult, nil - } - - symlinkEventTyped, _ := event.(*tracersymlinktype.Event) - if allowed, err := IsAllowed(&symlinkEventTyped.Event, objCache, symlinkEventTyped.Comm, R1010ID); err != nil { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, err - } else if allowed { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, nil - } - - return detectionResult, nil -} - -func (rule *R1010SymlinkCreatedOverSensitiveFile) CreateRuleFailure(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache, payload ruleengine.DetectionResult) ruleengine.RuleFailure { - symlinkEvent, _ := event.(*tracersymlinktype.Event) - - return &GenericRuleFailure{ - BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ - UniqueID: HashStringToMD5(fmt.Sprintf("%s%s", symlinkEvent.Comm, symlinkEvent.OldPath)), - AlertName: rule.Name(), - Arguments: map[string]interface{}{ - "oldPath": symlinkEvent.OldPath, - "newPath": symlinkEvent.NewPath, - }, - InfectedPID: symlinkEvent.Pid, - Severity: R1010SymlinkCreatedOverSensitiveFileRuleDescriptor.Priority, - Identifiers: &common.Identifiers{ - Process: &common.ProcessEntity{ - Name: symlinkEvent.Comm, - }, - File: &common.FileEntity{ - Name: filepath.Base(symlinkEvent.OldPath), - Directory: filepath.Dir(symlinkEvent.OldPath), - }, - }, - }, - RuntimeProcessDetails: apitypes.ProcessTree{ - ProcessTree: apitypes.Process{ - Comm: symlinkEvent.Comm, - PPID: symlinkEvent.PPid, - PID: symlinkEvent.Pid, - UpperLayer: &symlinkEvent.UpperLayer, - Uid: &symlinkEvent.Uid, - Gid: &symlinkEvent.Gid, - Hardlink: symlinkEvent.ExePath, - Path: symlinkEvent.ExePath, - }, - ContainerID: symlinkEvent.Runtime.ContainerID, - }, - TriggerEvent: symlinkEvent.Event, - RuleAlert: apitypes.RuleAlert{ - RuleDescription: fmt.Sprintf("Symlink created over sensitive file: %s - %s", symlinkEvent.OldPath, symlinkEvent.NewPath), - }, - RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ - PodName: symlinkEvent.GetPod(), - PodLabels: symlinkEvent.K8s.PodLabels, - }, - RuleID: rule.ID(), - Extra: symlinkEvent.GetExtra(), - } -} - -func (rule *R1010SymlinkCreatedOverSensitiveFile) Requirements() ruleengine.RuleSpec { - return &RuleRequirements{ - EventTypes: R1010SymlinkCreatedOverSensitiveFileRuleDescriptor.Requirements.RequiredEventTypes(), - ProfileRequirements: ruleengine.ProfileRequirement{ - ProfileDependency: apitypes.Optional, - ProfileType: apitypes.ApplicationProfile, - }, - } -} diff --git a/pkg/ruleengine/v1/r1010_symlink_created_over_sensitive_file_test.go b/pkg/ruleengine/v1/r1010_symlink_created_over_sensitive_file_test.go deleted file mode 100644 
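
For reference while reading these deletions: every one of the v1 rules removed in this change follows the same RuleEvaluator contract — EvaluateRule for the cheap, profile-free check, EvaluateRuleWithProfile for the profile-aware check, CreateRuleFailure to build the alert, and Requirements to declare event types and profile needs. Below is a minimal illustrative sketch of that shape, assembled only from the types and signatures visible in the deleted files; the rule ID ("R9999"), name, and detection condition ("/tmp/suspicious") are hypothetical placeholders and are not part of this change or of the replacement CEL-based rules.

    package ruleengine

    import (
    	apitypes "github.com/armosec/armoapi-go/armotypes"

    	events "github.com/kubescape/node-agent/pkg/ebpf/events"
    	"github.com/kubescape/node-agent/pkg/objectcache"
    	"github.com/kubescape/node-agent/pkg/ruleengine"
    	"github.com/kubescape/node-agent/pkg/utils"
    )

    // Hypothetical identifiers, used only for this sketch.
    const (
    	R9999ID   = "R9999"
    	R9999Name = "Example Exec Detection"
    )

    var _ ruleengine.RuleEvaluator = (*R9999Example)(nil)

    type R9999Example struct {
    	BaseRule
    }

    func (rule *R9999Example) Name() string { return R9999Name }
    func (rule *R9999Example) ID() string   { return R9999ID }
    func (rule *R9999Example) DeleteRule()  {}

    // EvaluateRule performs the cheap check without touching profiles.
    func (rule *R9999Example) EvaluateRule(eventType utils.EventType, event utils.K8sEvent, _ objectcache.K8sObjectCache) ruleengine.DetectionResult {
    	if eventType != utils.ExecveEventType {
    		return ruleengine.DetectionResult{IsFailure: false, Payload: nil}
    	}
    	execEvent, ok := event.(*events.ExecEvent)
    	if !ok {
    		return ruleengine.DetectionResult{IsFailure: false, Payload: nil}
    	}
    	if execEvent.ExePath == "/tmp/suspicious" { // hypothetical condition
    		return ruleengine.DetectionResult{IsFailure: true, Payload: execEvent}
    	}
    	return ruleengine.DetectionResult{IsFailure: false, Payload: nil}
    }

    // EvaluateRuleWithProfile would normally consult the application profile;
    // this sketch relies on direct detection only, like several rules deleted here.
    func (rule *R9999Example) EvaluateRuleWithProfile(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache) (ruleengine.DetectionResult, error) {
    	return rule.EvaluateRule(eventType, event, objCache.K8sObjectCache()), nil
    }

    // CreateRuleFailure turns a positive detection into an alert object.
    func (rule *R9999Example) CreateRuleFailure(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache, payload ruleengine.DetectionResult) ruleengine.RuleFailure {
    	execEvent, _ := payload.Payload.(*events.ExecEvent)
    	return &GenericRuleFailure{
    		BaseRuntimeAlert: apitypes.BaseRuntimeAlert{
    			AlertName:   rule.Name(),
    			InfectedPID: execEvent.Pid,
    			Severity:    RulePriorityMed,
    		},
    		RuleAlert: apitypes.RuleAlert{RuleDescription: "example detection"},
    		RuleID:    rule.ID(),
    	}
    }

    // Requirements declares which event types the rule consumes and whether a profile is needed.
    func (rule *R9999Example) Requirements() ruleengine.RuleSpec {
    	return &RuleRequirements{
    		EventTypes: []utils.EventType{utils.ExecveEventType},
    		ProfileRequirements: ruleengine.ProfileRequirement{
    			ProfileDependency: apitypes.NotRequired,
    			ProfileType:       apitypes.ApplicationProfile,
    		},
    	}
    }
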
index 9f44b1b1f..000000000 --- a/pkg/ruleengine/v1/r1010_symlink_created_over_sensitive_file_test.go +++ /dev/null @@ -1,101 +0,0 @@ -package ruleengine - -import ( - "fmt" - "testing" - - eventtypes "github.com/inspektor-gadget/inspektor-gadget/pkg/types" - "github.com/kubescape/node-agent/pkg/rulemanager/v1/ruleprocess" - "github.com/kubescape/node-agent/pkg/utils" - "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" - - tracersymlinktype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/symlink/types" -) - -func TestR1010SymlinkCreatedOverSensitiveFile(t *testing.T) { - // Create a new rule - r := CreateRuleR1010SymlinkCreatedOverSensitiveFile() // Assert r is not nil - if r == nil { - t.Errorf("Expected r to not be nil") - } - - objCache := RuleObjectCacheMock{} - profile := objCache.ApplicationProfileCache().GetApplicationProfile("test") - if profile == nil { - profile = &v1beta1.ApplicationProfile{ - Spec: v1beta1.ApplicationProfileSpec{ - Containers: []v1beta1.ApplicationProfileContainer{ - { - Name: "test", - PolicyByRuleId: map[string]v1beta1.RulePolicy{ - R1010ID: { - AllowedProcesses: []string{"/usr/sbin/groupadd"}, - }, - }, - Opens: []v1beta1.OpenCalls{ - { - Path: "/test", - Flags: []string{"O_RDONLY"}, - }, - }, - }, - }, - }, - } - objCache.SetApplicationProfile(profile) - } - - // Create a symlink event - e := &tracersymlinktype.Event{ - Event: eventtypes.Event{ - CommonData: eventtypes.CommonData{ - K8s: eventtypes.K8sMetadata{ - BasicK8sMetadata: eventtypes.BasicK8sMetadata{ - ContainerName: "test", - }, - }, - }, - }, - Comm: "test", - OldPath: "test", - NewPath: "test", - } - - ruleResult := ruleprocess.ProcessRule(r, utils.SymlinkEventType, e, &objCache) - if ruleResult != nil { - fmt.Printf("ruleResult: %v\n", ruleResult) - t.Errorf("Expected ruleResult to be nil since symlink path is not sensitive") - return - } - - // Create a symlink event with sensitive file path - e.OldPath = "/etc/shadow" - e.NewPath = "/etc/abc" - - ruleResult = ruleprocess.ProcessRule(r, utils.SymlinkEventType, e, &objCache) - if ruleResult == nil { - fmt.Printf("ruleResult: %v\n", ruleResult) - t.Errorf("Expected ruleResult to be Failure because of symlink is used over sensitive file") - return - } - - e.OldPath = "/etc/abc" - ruleResult = ruleprocess.ProcessRule(r, utils.SymlinkEventType, e, &objCache) - if ruleResult != nil { - fmt.Printf("ruleResult: %v\n", ruleResult) - t.Errorf("Expected ruleResult to be nil since symlink is not used over sensitive file") - return - } - - // Test with whitelisted process - e.Comm = "/usr/sbin/groupadd" - e.OldPath = "/etc/shadow" - e.NewPath = "/etc/abc" - - ruleResult = ruleprocess.ProcessRule(r, utils.SymlinkEventType, e, &objCache) - if ruleResult != nil { - fmt.Printf("ruleResult: %v\n", ruleResult) - t.Errorf("Expected ruleResult to be nil since file is whitelisted and not sensitive") - return - } -} diff --git a/pkg/ruleengine/v1/r1011_ld_preload_hook.go b/pkg/ruleengine/v1/r1011_ld_preload_hook.go deleted file mode 100644 index 0a1e94766..000000000 --- a/pkg/ruleengine/v1/r1011_ld_preload_hook.go +++ /dev/null @@ -1,301 +0,0 @@ -package ruleengine - -import ( - "fmt" - "os" - "path/filepath" - "strings" - - events "github.com/kubescape/node-agent/pkg/ebpf/events" - "github.com/kubescape/node-agent/pkg/objectcache" - "github.com/kubescape/node-agent/pkg/ruleengine" - "github.com/kubescape/node-agent/pkg/utils" - - apitypes "github.com/armosec/armoapi-go/armotypes" - "github.com/armosec/armoapi-go/armotypes/common" - traceropentype 
"github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/open/types" -) - -const ( - R1011ID = "R1011" - R1011Name = "LD_PRELOAD Hook" - LD_PRELOAD_FILE = "/etc/ld.so.preload" - JAVA_COMM = "java" -) - -var LD_PRELOAD_ENV_VARS = []string{"LD_PRELOAD", "LD_AUDIT", "LD_LIBRARY_PATH"} - -var R1011LdPreloadHookRuleDescriptor = ruleengine.RuleDescriptor{ - ID: R1011ID, - Name: R1011Name, - Description: "Detecting ld_preload hook techniques.", - Tags: []string{"exec", "malicious"}, - Priority: RulePriorityMed, - Requirements: &RuleRequirements{ - EventTypes: []utils.EventType{ - utils.ExecveEventType, - utils.OpenEventType, - }, - }, - RuleCreationFunc: func() ruleengine.RuleEvaluator { - return CreateRuleR1011LdPreloadHook() - }, - RulePolicySupport: true, -} -var _ ruleengine.RuleEvaluator = (*R1011LdPreloadHook)(nil) - -type R1011LdPreloadHook struct { - BaseRule -} - -func CreateRuleR1011LdPreloadHook() *R1011LdPreloadHook { - return &R1011LdPreloadHook{} -} - -func (rule *R1011LdPreloadHook) Name() string { - return R1011Name -} - -func (rule *R1011LdPreloadHook) ID() string { - return R1011ID -} - -func (rule *R1011LdPreloadHook) DeleteRule() { -} - -func (rule *R1011LdPreloadHook) EvaluateRule(eventType utils.EventType, event utils.K8sEvent, k8sObjCache objectcache.K8sObjectCache) ruleengine.DetectionResult { - switch eventType { - case utils.ExecveEventType: - execEvent, ok := event.(*events.ExecEvent) - if !ok { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - return rule.shouldAlertExec(execEvent, k8sObjCache) - - case utils.OpenEventType: - openEvent, ok := event.(*events.OpenEvent) - if !ok { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - return rule.shouldAlertOpen(openEvent) - - default: - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } -} - -func (rule *R1011LdPreloadHook) EvaluateRuleWithProfile(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache) (ruleengine.DetectionResult, error) { - // First do basic evaluation - detectionResult := rule.EvaluateRule(eventType, event, objCache.K8sObjectCache()) - if !detectionResult.IsFailure { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, nil - } - - switch eventType { - case utils.ExecveEventType: - execEvent, _ := event.(*events.ExecEvent) - if allowed, err := IsAllowed(&execEvent.Event.Event, objCache, execEvent.Comm, R1011ID); err != nil { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, err - } else if allowed { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, nil - } - return ruleengine.DetectionResult{IsFailure: true, Payload: nil}, nil - - case utils.OpenEventType: - openEvent, _ := event.(*events.OpenEvent) - if allowed, err := IsAllowed(&openEvent.Event.Event, objCache, openEvent.Comm, R1011ID); err != nil { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, err - } else if allowed { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, nil - } - return ruleengine.DetectionResult{IsFailure: true, Payload: nil}, nil - - default: - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, nil - } -} - -func (rule *R1011LdPreloadHook) CreateRuleFailure(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache, payload ruleengine.DetectionResult) ruleengine.RuleFailure { - switch eventType { - case utils.ExecveEventType: - execEvent, _ := event.(*events.ExecEvent) - return 
rule.ruleFailureExecEvent(execEvent) - - case utils.OpenEventType: - openEvent, _ := event.(*events.OpenEvent) - return rule.ruleFailureOpenEvent(&openEvent.Event, openEvent.GetExtra()) - - default: - return nil - } -} - -func (rule *R1011LdPreloadHook) ruleFailureExecEvent(execEvent *events.ExecEvent) ruleengine.RuleFailure { - envVars, err := utils.GetProcessEnv(int(execEvent.Pid)) - if err != nil { - return nil - } - - ldHookVar, _ := GetLdHookVar(envVars) - - upperLayer := execEvent.UpperLayer || execEvent.PupperLayer - - ruleFailure := GenericRuleFailure{ - BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ - UniqueID: HashStringToMD5(fmt.Sprintf("%s%s%s", execEvent.Comm, execEvent.ExePath, execEvent.Pcomm)), - AlertName: rule.Name(), - Arguments: map[string]interface{}{"envVar": ldHookVar}, - InfectedPID: execEvent.Pid, - Severity: R1011LdPreloadHookRuleDescriptor.Priority, - Identifiers: &common.Identifiers{ - Process: &common.ProcessEntity{ - Name: execEvent.Comm, - CommandLine: fmt.Sprintf("%s %s", execEvent.ExePath, strings.Join(utils.GetExecArgsFromEvent(&execEvent.Event), " ")), - }, - File: &common.FileEntity{ - Name: execEvent.ExePath, - Directory: filepath.Dir(execEvent.ExePath), - }, - }, - }, - RuntimeProcessDetails: apitypes.ProcessTree{ - ProcessTree: apitypes.Process{ - Comm: execEvent.Comm, - Gid: &execEvent.Gid, - PID: execEvent.Pid, - Uid: &execEvent.Uid, - UpperLayer: &upperLayer, - PPID: execEvent.Ppid, - Pcomm: execEvent.Pcomm, - Cwd: execEvent.Cwd, - Hardlink: execEvent.ExePath, - Path: GetExecFullPathFromEvent(execEvent), - Cmdline: fmt.Sprintf("%s %s", GetExecPathFromEvent(execEvent), strings.Join(utils.GetExecArgsFromEvent(&execEvent.Event), " ")), - }, - ContainerID: execEvent.Runtime.ContainerID, - }, - TriggerEvent: execEvent.Event.Event, - RuleAlert: apitypes.RuleAlert{ - RuleDescription: fmt.Sprintf("Process (%s) was executed and is using the environment variable %s", execEvent.Comm, fmt.Sprintf("%s=%s", ldHookVar, envVars[ldHookVar])), - }, - RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ - PodName: execEvent.GetPod(), - PodLabels: execEvent.K8s.PodLabels, - }, - RuleID: rule.ID(), - Extra: execEvent.GetExtra(), - } - - return &ruleFailure -} - -func (rule *R1011LdPreloadHook) ruleFailureOpenEvent(openEvent *traceropentype.Event, extra interface{}) ruleengine.RuleFailure { - ruleFailure := GenericRuleFailure{ - BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ - UniqueID: HashStringToMD5(fmt.Sprintf("%s%s", openEvent.Comm, openEvent.FullPath)), - AlertName: rule.Name(), - Arguments: map[string]interface{}{ - "path": openEvent.FullPath, - "flags": openEvent.Flags, - }, - InfectedPID: openEvent.Pid, - Severity: R1011LdPreloadHookRuleDescriptor.Priority, - Identifiers: &common.Identifiers{ - Process: &common.ProcessEntity{ - Name: openEvent.Comm, - }, - File: &common.FileEntity{ - Name: filepath.Base(openEvent.FullPath), - Directory: filepath.Dir(openEvent.FullPath), - }, - }, - }, - RuntimeProcessDetails: apitypes.ProcessTree{ - ProcessTree: apitypes.Process{ - Comm: openEvent.Comm, - Gid: &openEvent.Gid, - PID: openEvent.Pid, - Uid: &openEvent.Uid, - }, - ContainerID: openEvent.Runtime.ContainerID, - }, - TriggerEvent: openEvent.Event, - RuleAlert: apitypes.RuleAlert{ - RuleDescription: fmt.Sprintf("Process (%s) was executed and is opening the file %s", openEvent.Comm, openEvent.Path), - }, - RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ - PodName: openEvent.GetPod(), - PodLabels: openEvent.K8s.PodLabels, - }, - RuleID: rule.ID(), - Extra: 
extra, - } - - return &ruleFailure -} - -func (rule *R1011LdPreloadHook) shouldAlertExec(execEvent *events.ExecEvent, k8sObjCache objectcache.K8sObjectCache) ruleengine.DetectionResult { - // Java is a special case, we don't want to alert on it because it uses LD_LIBRARY_PATH. - if execEvent.Comm == JAVA_COMM { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - // Check if the process is a MATLAB process and ignore it. - if execEvent.GetContainer() == "matlab" { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - envVars, err := utils.GetProcessEnv(int(execEvent.Pid)) - if err != nil { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - ldHookVar, shouldCheck := GetLdHookVar(envVars) - if shouldCheck { - if k8sObjCache == nil { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - podSpec := k8sObjCache.GetPodSpec(execEvent.GetNamespace(), execEvent.GetPod()) - if podSpec != nil { - for _, container := range podSpec.Containers { - if container.Name == execEvent.GetContainer() { - for _, envVar := range container.Env { - if envVar.Name == ldHookVar { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - } - } - } - } - return ruleengine.DetectionResult{IsFailure: true, Payload: nil} - } - - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} -} - -func (rule *R1011LdPreloadHook) shouldAlertOpen(openEvent *events.OpenEvent) ruleengine.DetectionResult { - if openEvent.FullPath == LD_PRELOAD_FILE && (openEvent.FlagsRaw&(int32(os.O_WRONLY)|int32(os.O_RDWR))) != 0 { - return ruleengine.DetectionResult{IsFailure: true, Payload: nil} - } - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} -} - -func GetLdHookVar(envVars map[string]string) (string, bool) { - for _, envVar := range LD_PRELOAD_ENV_VARS { - if _, ok := envVars[envVar]; ok { - return envVar, true - } - } - return "", false -} - -func (rule *R1011LdPreloadHook) Requirements() ruleengine.RuleSpec { - return &RuleRequirements{ - EventTypes: R1011LdPreloadHookRuleDescriptor.Requirements.RequiredEventTypes(), - ProfileRequirements: ruleengine.ProfileRequirement{ - ProfileDependency: apitypes.Optional, - ProfileType: apitypes.ApplicationProfile, - }, - } -} diff --git a/pkg/ruleengine/v1/r1011_ld_preload_hook_test.go b/pkg/ruleengine/v1/r1011_ld_preload_hook_test.go deleted file mode 100644 index adb607dbc..000000000 --- a/pkg/ruleengine/v1/r1011_ld_preload_hook_test.go +++ /dev/null @@ -1,173 +0,0 @@ -package ruleengine - -import ( - "testing" - - "github.com/kubescape/node-agent/pkg/ebpf/events" - "github.com/kubescape/node-agent/pkg/rulemanager/v1/ruleprocess" - "github.com/kubescape/node-agent/pkg/utils" - "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" - - traceropentype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/open/types" - eventtypes "github.com/inspektor-gadget/inspektor-gadget/pkg/types" - corev1 "k8s.io/api/core/v1" -) - -func TestR1011LdPreloadHook(t *testing.T) { - // Create a new rule - r := CreateRuleR1011LdPreloadHook() // Assert r is not nil - if r == nil { - t.Errorf("Expected r to not be nil") - } - - objCache := RuleObjectCacheMock{} - profile := objCache.ApplicationProfileCache().GetApplicationProfile("test") - if profile == nil { - profile = &v1beta1.ApplicationProfile{ - Spec: v1beta1.ApplicationProfileSpec{ - Containers: []v1beta1.ApplicationProfileContainer{ - { - Name: "test", - PolicyByRuleId: map[string]v1beta1.RulePolicy{ - 
R1011ID: { - AllowedProcesses: []string{"x"}, - }, - }, - }, - }, - }, - } - objCache.SetApplicationProfile(profile) - } - - // Create open event - e := &events.OpenEvent{ - Event: traceropentype.Event{ - Event: eventtypes.Event{ - CommonData: eventtypes.CommonData{ - K8s: eventtypes.K8sMetadata{ - BasicK8sMetadata: eventtypes.BasicK8sMetadata{ - ContainerName: "test", - }, - }, - }, - }, - Comm: "test", - FullPath: "/etc/ld.so.preload", - FlagsRaw: 1, - }, - } - - // Test with existing ld_preload file - ruleResult := ruleprocess.ProcessRule(r, utils.OpenEventType, e, &objCache) - if ruleResult == nil { - t.Errorf("Expected ruleResult to not be nil since ld_preload file is opened with write flag") - } - - // Test with ld.so.preload file opened with read flag - e.FlagsRaw = 0 - ruleResult = ruleprocess.ProcessRule(r, utils.OpenEventType, e, &objCache) - if ruleResult != nil { - t.Errorf("Expected ruleResult to be nil since ld_preload file is opened with read flag") - } - - // Test with pod spec - objCache.SetPodSpec(&corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "test", - VolumeMounts: []corev1.VolumeMount{ - { - Name: "test", - MountPath: "/var", - }, - }, - Env: []corev1.EnvVar{ - { - Name: "LD_PRELOAD", - Value: "/var/test.so", - }, - }, - }, - }, - Volumes: []corev1.Volume{ - { - Name: "test", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/var", - }, - }, - }, - }, - }) - e.FullPath = "/var/test.so" - ruleResult = ruleprocess.ProcessRule(r, utils.OpenEventType, e, &objCache) - if ruleResult != nil { - t.Errorf("Expected ruleResult to be nil since LD_PRELOAD is set in pod spec") - } - - // Create open event - e2 := &events.OpenEvent{ - Event: traceropentype.Event{ - Event: eventtypes.Event{ - CommonData: eventtypes.CommonData{ - K8s: eventtypes.K8sMetadata{ - BasicK8sMetadata: eventtypes.BasicK8sMetadata{ - ContainerName: "test", - }, - }, - }, - }, - Comm: "java", - }, - } - // Test with exec event - ruleResult = ruleprocess.ProcessRule(r, utils.ExecveEventType, e2, &objCache) - if ruleResult != nil { - t.Errorf("Expected ruleResult to be nil since exec event is on java") - } - - e3 := &events.OpenEvent{ - Event: traceropentype.Event{ - Event: eventtypes.Event{ - CommonData: eventtypes.CommonData{ - K8s: eventtypes.K8sMetadata{ - BasicK8sMetadata: eventtypes.BasicK8sMetadata{ - ContainerName: "test", - }, - }, - }, - }, - Comm: "test", - FullPath: "/etc/ld.so.preload", - FlagsRaw: 1, - }, - } - - objCache = RuleObjectCacheMock{} - profile = objCache.ApplicationProfileCache().GetApplicationProfile("test") - if profile == nil { - profile = &v1beta1.ApplicationProfile{ - Spec: v1beta1.ApplicationProfileSpec{ - Containers: []v1beta1.ApplicationProfileContainer{ - { - Name: "test", - PolicyByRuleId: map[string]v1beta1.RulePolicy{ - R1011ID: { - AllowedProcesses: []string{"test"}, - }, - }, - }, - }, - }, - } - objCache.SetApplicationProfile(profile) - } - // Test with exec event - ruleResult = ruleprocess.ProcessRule(r, utils.OpenEventType, e3, &objCache) - if ruleResult != nil { - t.Errorf("Expected ruleResult to be nil since exec event is on java") - } - -} diff --git a/pkg/ruleengine/v1/r1012_hardlink_created_over_sensitive_file.go b/pkg/ruleengine/v1/r1012_hardlink_created_over_sensitive_file.go deleted file mode 100644 index 12f5c96da..000000000 --- a/pkg/ruleengine/v1/r1012_hardlink_created_over_sensitive_file.go +++ /dev/null @@ -1,175 +0,0 @@ -package ruleengine - -import ( - "fmt" - "path/filepath" - "strings" - - 
"github.com/kubescape/node-agent/pkg/objectcache" - "github.com/kubescape/node-agent/pkg/ruleengine" - "github.com/kubescape/node-agent/pkg/utils" - - apitypes "github.com/armosec/armoapi-go/armotypes" - "github.com/armosec/armoapi-go/armotypes/common" - "github.com/kubescape/go-logger" - "github.com/kubescape/go-logger/helpers" - - tracerhardlinktype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/hardlink/types" -) - -const ( - R1012ID = "R1012" - R1012Name = "Hardlink Created Over Sensitive File" -) - -var R1012HardlinkCreatedOverSensitiveFileRuleDescriptor = ruleengine.RuleDescriptor{ - ID: R1012ID, - Name: R1012Name, - Description: "Detecting hardlink creation over sensitive files.", - Tags: []string{"files", "malicious"}, - Priority: RulePriorityHigh, - Requirements: &RuleRequirements{ - EventTypes: []utils.EventType{ - utils.HardlinkEventType, - }, - }, - RuleCreationFunc: func() ruleengine.RuleEvaluator { - return CreateRuleR1012HardlinkCreatedOverSensitiveFile() - }, - RulePolicySupport: true, -} -var _ ruleengine.RuleEvaluator = (*R1012HardlinkCreatedOverSensitiveFile)(nil) - -type R1012HardlinkCreatedOverSensitiveFile struct { - BaseRule - additionalPaths []string -} - -func CreateRuleR1012HardlinkCreatedOverSensitiveFile() *R1012HardlinkCreatedOverSensitiveFile { - return &R1012HardlinkCreatedOverSensitiveFile{ - additionalPaths: SensitiveFiles, - } -} - -func (rule *R1012HardlinkCreatedOverSensitiveFile) SetParameters(parameters map[string]interface{}) { - rule.BaseRule.SetParameters(parameters) - - additionalPathsInterface := rule.GetParameters()["additionalPaths"] - if additionalPathsInterface == nil { - return - } - - additionalPaths, ok := InterfaceToStringSlice(additionalPathsInterface) - if ok { - for _, path := range additionalPaths { - rule.additionalPaths = append(rule.additionalPaths, fmt.Sprintf("%v", path)) - } - } else { - logger.L().Warning("failed to convert additionalPaths to []string", helpers.String("ruleID", rule.ID())) - } -} - -func (rule *R1012HardlinkCreatedOverSensitiveFile) Name() string { - return R1012Name -} - -func (rule *R1012HardlinkCreatedOverSensitiveFile) ID() string { - return R1012ID -} - -func (rule *R1012HardlinkCreatedOverSensitiveFile) DeleteRule() { -} - -func (rule *R1012HardlinkCreatedOverSensitiveFile) EvaluateRule(eventType utils.EventType, event utils.K8sEvent, _ objectcache.K8sObjectCache) ruleengine.DetectionResult { - if eventType != utils.HardlinkEventType { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - hardlinkEvent, ok := event.(*tracerhardlinktype.Event) - if !ok { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - for _, path := range rule.additionalPaths { - if strings.HasPrefix(hardlinkEvent.OldPath, path) { - return ruleengine.DetectionResult{IsFailure: true, Payload: hardlinkEvent.OldPath} - } - } - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} -} - -func (rule *R1012HardlinkCreatedOverSensitiveFile) EvaluateRuleWithProfile(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache) (ruleengine.DetectionResult, error) { - // First do basic evaluation - detectionResult := rule.EvaluateRule(eventType, event, objCache.K8sObjectCache()) - if !detectionResult.IsFailure { - return detectionResult, nil - } - - hardlinkEvent, _ := event.(*tracerhardlinktype.Event) - if allowed, err := IsAllowed(&hardlinkEvent.Event, objCache, hardlinkEvent.Comm, R1012ID); err != nil { - return ruleengine.DetectionResult{IsFailure: false, Payload: 
nil}, err // If we can't check profile, we still want to alert - } else if allowed { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, nil - } - - return detectionResult, nil -} - -func (rule *R1012HardlinkCreatedOverSensitiveFile) CreateRuleFailure(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache, payload ruleengine.DetectionResult) ruleengine.RuleFailure { - hardlinkEvent, _ := event.(*tracerhardlinktype.Event) - - return &GenericRuleFailure{ - BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ - UniqueID: HashStringToMD5(fmt.Sprintf("%s%s", hardlinkEvent.Comm, hardlinkEvent.OldPath)), - AlertName: rule.Name(), - Arguments: map[string]interface{}{ - "oldPath": hardlinkEvent.OldPath, - "newPath": hardlinkEvent.NewPath, - }, - InfectedPID: hardlinkEvent.Pid, - Severity: R1012HardlinkCreatedOverSensitiveFileRuleDescriptor.Priority, - Identifiers: &common.Identifiers{ - Process: &common.ProcessEntity{ - Name: hardlinkEvent.Comm, - }, - File: &common.FileEntity{ - Name: filepath.Base(hardlinkEvent.OldPath), - Directory: filepath.Dir(hardlinkEvent.OldPath), - }, - }, - }, - RuntimeProcessDetails: apitypes.ProcessTree{ - ProcessTree: apitypes.Process{ - Comm: hardlinkEvent.Comm, - PPID: hardlinkEvent.PPid, - PID: hardlinkEvent.Pid, - UpperLayer: &hardlinkEvent.UpperLayer, - Uid: &hardlinkEvent.Uid, - Gid: &hardlinkEvent.Gid, - Path: hardlinkEvent.ExePath, - Hardlink: hardlinkEvent.ExePath, - }, - ContainerID: hardlinkEvent.Runtime.ContainerID, - }, - TriggerEvent: hardlinkEvent.Event, - RuleAlert: apitypes.RuleAlert{ - RuleDescription: fmt.Sprintf("Hardlink created over sensitive file: %s - %s", hardlinkEvent.OldPath, hardlinkEvent.NewPath), - }, - RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ - PodName: hardlinkEvent.GetPod(), - PodLabels: hardlinkEvent.K8s.PodLabels, - }, - RuleID: rule.ID(), - Extra: hardlinkEvent.GetExtra(), - } -} - -func (rule *R1012HardlinkCreatedOverSensitiveFile) Requirements() ruleengine.RuleSpec { - return &RuleRequirements{ - EventTypes: R1012HardlinkCreatedOverSensitiveFileRuleDescriptor.Requirements.RequiredEventTypes(), - ProfileRequirements: ruleengine.ProfileRequirement{ - ProfileDependency: apitypes.Optional, - ProfileType: apitypes.ApplicationProfile, - }, - } -} diff --git a/pkg/ruleengine/v1/r1012_hardlink_created_over_sensitive_file_test.go b/pkg/ruleengine/v1/r1012_hardlink_created_over_sensitive_file_test.go deleted file mode 100644 index 4a575f32f..000000000 --- a/pkg/ruleengine/v1/r1012_hardlink_created_over_sensitive_file_test.go +++ /dev/null @@ -1,99 +0,0 @@ -package ruleengine - -import ( - "fmt" - "testing" - - eventtypes "github.com/inspektor-gadget/inspektor-gadget/pkg/types" - "github.com/kubescape/node-agent/pkg/rulemanager/v1/ruleprocess" - "github.com/kubescape/node-agent/pkg/utils" - "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" - - tracerhardlinktype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/hardlink/types" -) - -func TestR1012HardlinkCreatedOverSensitiveFile(t *testing.T) { - // Create a new rule - r := CreateRuleR1012HardlinkCreatedOverSensitiveFile() // Assert r is not nil - if r == nil { - t.Errorf("Expected r to not be nil") - } - - objCache := RuleObjectCacheMock{} - profile := objCache.ApplicationProfileCache().GetApplicationProfile("test") - if profile == nil { - profile = &v1beta1.ApplicationProfile{ - Spec: v1beta1.ApplicationProfileSpec{ - Containers: []v1beta1.ApplicationProfileContainer{ - { - Name: "test", - PolicyByRuleId: 
map[string]v1beta1.RulePolicy{ - R1012ID: { - AllowedProcesses: []string{"/usr/sbin/groupadd"}, - }, - }, - Opens: []v1beta1.OpenCalls{ - { - Path: "/test", - Flags: []string{"O_RDONLY"}, - }, - }, - }, - }, - }, - } - objCache.SetApplicationProfile(profile) - } - - // Create a hardlink event - e := &tracerhardlinktype.Event{ - Event: eventtypes.Event{ - CommonData: eventtypes.CommonData{ - K8s: eventtypes.K8sMetadata{ - BasicK8sMetadata: eventtypes.BasicK8sMetadata{ - ContainerName: "test", - }, - }, - }, - }, - Comm: "test", - OldPath: "test", - NewPath: "test", - } - - ruleResult := ruleprocess.ProcessRule(r, utils.HardlinkEventType, e, &objCache) - if ruleResult != nil { - fmt.Printf("ruleResult: %v\n", ruleResult) - t.Errorf("Expected ruleResult to be nil since hardlink path is not sensitive") - return - } - - // Create a hardlink event with sensitive file path - e.OldPath = "/etc/shadow" - e.NewPath = "/etc/abc" - ruleResult = ruleprocess.ProcessRule(r, utils.HardlinkEventType, e, &objCache) - if ruleResult == nil { - fmt.Printf("ruleResult: %v\n", ruleResult) - t.Errorf("Expected ruleResult to be Failure because of hardlink is used over sensitive file") - return - } - - e.OldPath = "/etc/abc" - ruleResult = ruleprocess.ProcessRule(r, utils.HardlinkEventType, e, &objCache) - if ruleResult != nil { - fmt.Printf("ruleResult: %v\n", ruleResult) - t.Errorf("Expected ruleResult to be nil since hardlink is not used over sensitive file") - return - } - - // Test with whitelisted process - e.Comm = "/usr/sbin/groupadd" - e.OldPath = "/etc/shadow" - e.NewPath = "/etc/abc" - ruleResult = ruleprocess.ProcessRule(r, utils.HardlinkEventType, e, &objCache) - if ruleResult != nil { - fmt.Printf("ruleResult: %v\n", ruleResult) - t.Errorf("Expected ruleResult to be nil since file is whitelisted and not sensitive") - return - } -} diff --git a/pkg/ruleengine/v1/r1015_malicious_ptrace_usage.go b/pkg/ruleengine/v1/r1015_malicious_ptrace_usage.go deleted file mode 100644 index 5f415a1ef..000000000 --- a/pkg/ruleengine/v1/r1015_malicious_ptrace_usage.go +++ /dev/null @@ -1,130 +0,0 @@ -package ruleengine - -import ( - "fmt" - "path/filepath" - - "github.com/kubescape/node-agent/pkg/objectcache" - "github.com/kubescape/node-agent/pkg/ruleengine" - "github.com/kubescape/node-agent/pkg/utils" - - apitypes "github.com/armosec/armoapi-go/armotypes" - "github.com/armosec/armoapi-go/armotypes/common" - - tracerptracetype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/ptrace/tracer/types" -) - -const ( - R1015ID = "R1015" - R1015Name = "Malicious Ptrace Usage" -) - -var R1015MaliciousPtraceUsageRuleDescriptor = ruleengine.RuleDescriptor{ - ID: R1015ID, - Name: R1015Name, - Description: "Detecting potentially malicious ptrace usage.", - Tags: []string{"process", "malicious"}, - Priority: RulePriorityHigh, - Requirements: &RuleRequirements{ - EventTypes: []utils.EventType{ - utils.PtraceEventType, - }, - }, - RuleCreationFunc: func() ruleengine.RuleEvaluator { - return CreateRuleR1015MaliciousPtraceUsage() - }, -} -var _ ruleengine.RuleEvaluator = (*R1015MaliciousPtraceUsage)(nil) - -type R1015MaliciousPtraceUsage struct { - BaseRule -} - -func CreateRuleR1015MaliciousPtraceUsage() *R1015MaliciousPtraceUsage { - return &R1015MaliciousPtraceUsage{} -} - -func (rule *R1015MaliciousPtraceUsage) SetParameters(parameters map[string]interface{}) { - -} - -func (rule *R1015MaliciousPtraceUsage) Name() string { - return R1015Name -} - -func (rule *R1015MaliciousPtraceUsage) ID() string { - return R1015ID -} - -func 
(rule *R1015MaliciousPtraceUsage) DeleteRule() { -} - -func (rule *R1015MaliciousPtraceUsage) EvaluateRule(eventType utils.EventType, event utils.K8sEvent, _ objectcache.K8sObjectCache) ruleengine.DetectionResult { - if eventType != utils.PtraceEventType { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - _, ok := event.(*tracerptracetype.Event) - if !ok { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - return ruleengine.DetectionResult{IsFailure: true, Payload: nil} -} - -// Won't be used, because the rule is not profile dependent -func (rule *R1015MaliciousPtraceUsage) EvaluateRuleWithProfile(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache) (ruleengine.DetectionResult, error) { - detectionResult := rule.EvaluateRule(eventType, event, objCache.K8sObjectCache()) - return detectionResult, nil -} - -func (rule *R1015MaliciousPtraceUsage) CreateRuleFailure(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache, payload ruleengine.DetectionResult) ruleengine.RuleFailure { - ptraceEvent, _ := event.(*tracerptracetype.Event) - - return &GenericRuleFailure{ - BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ - UniqueID: HashStringToMD5(fmt.Sprintf("%s%s", ptraceEvent.ExePath, ptraceEvent.Comm)), - AlertName: rule.Name(), - InfectedPID: ptraceEvent.Pid, - Severity: R1015MaliciousPtraceUsageRuleDescriptor.Priority, - Identifiers: &common.Identifiers{ - Process: &common.ProcessEntity{ - Name: ptraceEvent.Comm, - }, - File: &common.FileEntity{ - Name: filepath.Base(ptraceEvent.ExePath), - Directory: filepath.Dir(ptraceEvent.ExePath), - }, - }, - }, - RuntimeProcessDetails: apitypes.ProcessTree{ - ProcessTree: apitypes.Process{ - Comm: ptraceEvent.Comm, - PPID: ptraceEvent.PPid, - PID: ptraceEvent.Pid, - Uid: &ptraceEvent.Uid, - Gid: &ptraceEvent.Gid, - Path: ptraceEvent.ExePath, - }, - ContainerID: ptraceEvent.Runtime.ContainerID, - }, - TriggerEvent: ptraceEvent.Event, - RuleAlert: apitypes.RuleAlert{ - RuleDescription: fmt.Sprintf("Malicious ptrace usage detected from: %s on PID: %d", ptraceEvent.Comm, ptraceEvent.Pid), - }, - RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ - PodName: ptraceEvent.GetPod(), - PodLabels: ptraceEvent.K8s.PodLabels, - }, - RuleID: rule.ID(), - } -} - -func (rule *R1015MaliciousPtraceUsage) Requirements() ruleengine.RuleSpec { - return &RuleRequirements{ - EventTypes: R1015MaliciousPtraceUsageRuleDescriptor.Requirements.RequiredEventTypes(), - ProfileRequirements: ruleengine.ProfileRequirement{ - ProfileDependency: apitypes.NotRequired, - }, - } -} diff --git a/pkg/ruleengine/v1/r1015_malicious_ptrace_usage_test.go b/pkg/ruleengine/v1/r1015_malicious_ptrace_usage_test.go deleted file mode 100644 index 3d3a803c6..000000000 --- a/pkg/ruleengine/v1/r1015_malicious_ptrace_usage_test.go +++ /dev/null @@ -1,111 +0,0 @@ -package ruleengine - -import ( - "testing" - - eventtypes "github.com/inspektor-gadget/inspektor-gadget/pkg/types" - "github.com/kubescape/node-agent/pkg/rulemanager/v1/ruleprocess" - "github.com/kubescape/node-agent/pkg/utils" - "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" - - tracerptracetype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/ptrace/tracer/types" -) - -const ( - // Define the ptrace constants - PTRACE_SETREGS = 13 - PTRACE_POKETEXT = 4 - PTRACE_POKEDATA = 5 -) - -func TestR1015MaliciousPtraceUsage(t *testing.T) { - // Create a new rule - r := CreateRuleR1015MaliciousPtraceUsage() // Assert r is not nil 
- if r == nil { - t.Errorf("Expected r to not be nil") - } - - objCache := RuleObjectCacheMock{} - profile := objCache.ApplicationProfileCache().GetApplicationProfile("test") - if profile == nil { - profile = &v1beta1.ApplicationProfile{ - Spec: v1beta1.ApplicationProfileSpec{ - Containers: []v1beta1.ApplicationProfileContainer{ - { - Name: "test", - Opens: []v1beta1.OpenCalls{ - { - Path: "/test", - Flags: []string{"O_RDONLY"}, - }, - }, - Execs: []v1beta1.ExecCalls{ - { - Path: "/usr/sbin/groupadd", - Args: []string{"test"}, - }, - }, - }, - }, - }, - } - objCache.SetApplicationProfile(profile) - } - - // Create a ptrace event for a disallowed request (malicious request) - e := &tracerptracetype.Event{ - Event: eventtypes.Event{ - CommonData: eventtypes.CommonData{ - K8s: eventtypes.K8sMetadata{ - BasicK8sMetadata: eventtypes.BasicK8sMetadata{ - ContainerName: "test", - }, - }, - }, - }, - Comm: "malicious_process", - Pid: 1234, - PPid: 5678, - Uid: 1000, - Gid: 1000, - ExePath: "/path/to/malicious_process", - Request: PTRACE_SETREGS, // Malicious ptrace request - } - - ruleResult := ruleprocess.ProcessRule(r, utils.PtraceEventType, e, &objCache) - if ruleResult == nil { - t.Errorf("Expected ruleResult to be Failure because of malicious ptrace request: %d", e.Request) - return - } - - // Check that the ruleResult contains the expected details - genericRuleFailure, ok := ruleResult.(*GenericRuleFailure) - if !ok { - t.Errorf("Expected ruleResult to be of type GenericRuleFailure") - return - } - - if genericRuleFailure.BaseRuntimeAlert.AlertName != r.Name() { - t.Errorf("Expected AlertName to be %s, got %s", r.Name(), genericRuleFailure.BaseRuntimeAlert.AlertName) - } - if genericRuleFailure.BaseRuntimeAlert.InfectedPID != e.Pid { - t.Errorf("Expected InfectedPID to be %d, got %d", e.Pid, genericRuleFailure.BaseRuntimeAlert.InfectedPID) - } - - // Test with a disallowed request but recognized process - e.Comm = "processA" // Allowed process - e.Request = PTRACE_POKETEXT // Malicious ptrace request - ruleResult = ruleprocess.ProcessRule(r, utils.PtraceEventType, e, &objCache) - if ruleResult == nil { - t.Errorf("Expected ruleResult to be Failure because of malicious ptrace request: %d, even though process is allowed", e.Request) - return - } - - // Test with an unrecognized process and malicious request - e.Comm = "unknown_process" - e.Request = PTRACE_POKEDATA // Malicious ptrace request - ruleResult = ruleprocess.ProcessRule(r, utils.PtraceEventType, e, &objCache) - if ruleResult == nil { - t.Errorf("Expected ruleResult to be Failure because of unknown process with malicious ptrace request: %d", e.Request) - } -} diff --git a/pkg/ruleengine/v1/r1030_unexpected_iouring_syscall.go b/pkg/ruleengine/v1/r1030_unexpected_iouring_syscall.go deleted file mode 100644 index cd04dd7ee..000000000 --- a/pkg/ruleengine/v1/r1030_unexpected_iouring_syscall.go +++ /dev/null @@ -1,154 +0,0 @@ -package ruleengine - -import ( - "errors" - "fmt" - - "github.com/kubescape/go-logger" - "github.com/kubescape/go-logger/helpers" - traceriouringtype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/iouring/tracer/types" - "github.com/kubescape/node-agent/pkg/objectcache" - "github.com/kubescape/node-agent/pkg/ruleengine" - "github.com/kubescape/node-agent/pkg/ruleengine/v1/helpers/iouring" - "github.com/kubescape/node-agent/pkg/rulemanager/v1/ruleprocess" - "github.com/kubescape/node-agent/pkg/utils" - - apitypes "github.com/armosec/armoapi-go/armotypes" - "github.com/armosec/armoapi-go/armotypes/common" -) - 
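// Context for the removals in this area (illustrative sketch, not part of the patch):
// the Go rules being deleted here (R1011, R1012, R1015 above; R1030 starting below) all
// implement ruleengine.RuleEvaluator imperatively. The CEL evaluator added later in this
// diff (pkg/rulemanager/cel/cel.go) instead evaluates declarative typesv1.RuleExpression
// values against the event. Whether each removed rule has a one-to-one CEL replacement is
// not shown in this diff; the expression text and the "ptrace" variable name below are
// assumptions, only the EventType/Expression fields match the evaluator's usage.
//
//	var ptraceAsCEL = typesv1.RuleExpression{
//		EventType:  utils.PtraceEventType,
//		Expression: `ptrace.Comm != ""`, // hypothetical CEL expression over the ptrace event
//	}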
-const ( - R1030ID = "R1030" - R1030Name = "Unexpected io_uring Operation Detected" -) - -var R1030UnexpectedIouringOperationRuleDescriptor = ruleengine.RuleDescriptor{ - ID: R1030ID, - Name: R1030Name, - Description: "Detects io_uring operations that were not recorded during the initial observation period, indicating potential unauthorized activity.", - Tags: []string{"syscalls", "io_uring"}, - Priority: RulePriorityHigh, - Requirements: &RuleRequirements{ - EventTypes: []utils.EventType{ - utils.IoUringEventType, - }, - }, - RuleCreationFunc: func() ruleengine.RuleEvaluator { - return CreateRuleR1030UnexpectedIouringOperation() - }, - RulePolicySupport: true, -} - -var _ ruleengine.RuleEvaluator = (*R1030UnexpectedIouringOperation)(nil) - -type R1030UnexpectedIouringOperation struct { - BaseRule -} - -func CreateRuleR1030UnexpectedIouringOperation() *R1030UnexpectedIouringOperation { - return &R1030UnexpectedIouringOperation{} -} - -func (rule *R1030UnexpectedIouringOperation) SetParameters(parameters map[string]interface{}) { -} - -func (rule *R1030UnexpectedIouringOperation) Name() string { - return R1030Name -} - -func (rule *R1030UnexpectedIouringOperation) ID() string { - return R1030ID -} - -func (rule *R1030UnexpectedIouringOperation) DeleteRule() { -} - -func (rule *R1030UnexpectedIouringOperation) EvaluateRule(eventType utils.EventType, event utils.K8sEvent, _ objectcache.K8sObjectCache) ruleengine.DetectionResult { - if eventType != utils.IoUringEventType { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - iouringEvent, ok := event.(*traceriouringtype.Event) - if !ok { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil} - } - - ok, _ = iouring.GetOpcodeName(uint8(iouringEvent.Opcode)) - return ruleengine.DetectionResult{IsFailure: ok, Payload: nil} -} - -func (rule *R1030UnexpectedIouringOperation) EvaluateRuleWithProfile(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache) (ruleengine.DetectionResult, error) { - detectionResult := rule.EvaluateRule(eventType, event, objCache.K8sObjectCache()) - if !detectionResult.IsFailure { - return detectionResult, nil - } - - iouringEvent, _ := event.(*traceriouringtype.Event) - if allowed, err := IsAllowed(&iouringEvent.Event, objCache, iouringEvent.Comm, R1030ID); err != nil { - if !errors.Is(err, ruleprocess.NoProfileAvailable) { - logger.L().Debug("RuleManager - failed to check if iouring event is allowed", helpers.Error(err)) - } - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, err - } else if allowed { - return ruleengine.DetectionResult{IsFailure: false, Payload: nil}, nil - } - - return detectionResult, nil -} - -func (rule *R1030UnexpectedIouringOperation) CreateRuleFailure(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache, payload ruleengine.DetectionResult) ruleengine.RuleFailure { - iouringEvent, _ := event.(*traceriouringtype.Event) - ok, name := iouring.GetOpcodeName(uint8(iouringEvent.Opcode)) - if !ok { - return nil - } - - return &GenericRuleFailure{ - BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ - UniqueID: HashStringToMD5(fmt.Sprintf("%s%s", name, iouringEvent.Comm)), - AlertName: rule.Name(), - Arguments: map[string]interface{}{ - "opcode": iouringEvent.Opcode, - "flags": iouringEvent.Flags, - "operation": name, - }, - InfectedPID: iouringEvent.Pid, - Severity: R1030UnexpectedIouringOperationRuleDescriptor.Priority, - ProfileMetadata: nil, - Identifiers: &common.Identifiers{ - Process: 
&common.ProcessEntity{ - Name: iouringEvent.Comm, - }, - }, - }, - RuntimeProcessDetails: apitypes.ProcessTree{ - ProcessTree: apitypes.Process{ - Comm: iouringEvent.Comm, - PID: iouringEvent.Pid, - Uid: &iouringEvent.Uid, - Gid: &iouringEvent.Gid, - }, - ContainerID: iouringEvent.Runtime.ContainerID, - }, - TriggerEvent: iouringEvent.Event, - RuleAlert: apitypes.RuleAlert{ - RuleDescription: fmt.Sprintf("Unexpected io_uring operation detected: %s (opcode=%d) flags=0x%x in %s.", - name, iouringEvent.Opcode, iouringEvent.Flags, iouringEvent.Comm), - }, - RuntimeAlertK8sDetails: apitypes.RuntimeAlertK8sDetails{ - PodName: iouringEvent.GetPod(), - PodLabels: iouringEvent.K8s.PodLabels, - }, - RuleID: rule.ID(), - } -} - -func (rule *R1030UnexpectedIouringOperation) Requirements() ruleengine.RuleSpec { - return &RuleRequirements{ - EventTypes: R1030UnexpectedIouringOperationRuleDescriptor.Requirements.RequiredEventTypes(), - ProfileRequirements: ruleengine.ProfileRequirement{ - ProfileDependency: apitypes.Required, - ProfileType: apitypes.ApplicationProfile, - }, - } -} diff --git a/pkg/ruleengine/v1/r1030_unexpected_iouring_syscall_test.go b/pkg/ruleengine/v1/r1030_unexpected_iouring_syscall_test.go deleted file mode 100644 index 2bfb055ca..000000000 --- a/pkg/ruleengine/v1/r1030_unexpected_iouring_syscall_test.go +++ /dev/null @@ -1,140 +0,0 @@ -package ruleengine - -import ( - "testing" - - eventtypes "github.com/inspektor-gadget/inspektor-gadget/pkg/types" - traceriouringtype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/iouring/tracer/types" - "github.com/kubescape/node-agent/pkg/rulemanager/v1/ruleprocess" - "github.com/kubescape/node-agent/pkg/utils" - "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" -) - -func TestR1030UnexpectedIouringOperation(t *testing.T) { - // Create a new rule - r := CreateRuleR1030UnexpectedIouringOperation() - if r == nil { - t.Errorf("Expected r to not be nil") - } - - // Setup mock object cache - objCache := RuleObjectCacheMock{} - profile := objCache.ApplicationProfileCache().GetApplicationProfile("test") - if profile == nil { - profile = &v1beta1.ApplicationProfile{ - Spec: v1beta1.ApplicationProfileSpec{ - Containers: []v1beta1.ApplicationProfileContainer{ - { - Name: "test", - PolicyByRuleId: map[string]v1beta1.RulePolicy{ - R1030ID: { - AllowedProcesses: []string{"/usr/bin/allowed-process"}, - }, - }, - }, - }, - }, - } - objCache.SetApplicationProfile(profile) - } - - // Test cases - testCases := []struct { - name string - event *traceriouringtype.Event - expectedAlert bool - }{ - { - name: "Valid io_uring operation with known opcode", - event: &traceriouringtype.Event{ - Event: eventtypes.Event{ - CommonData: eventtypes.CommonData{ - K8s: eventtypes.K8sMetadata{ - BasicK8sMetadata: eventtypes.BasicK8sMetadata{ - ContainerName: "test", - }, - }, - }, - }, - Identifier: "test-process", - Opcode: 1, // IORING_OP_NOP - Flags: 0x0, - UserData: 123, - Comm: "test-process", - }, - expectedAlert: true, - }, - { - name: "Whitelisted process", - event: &traceriouringtype.Event{ - Event: eventtypes.Event{ - CommonData: eventtypes.CommonData{ - K8s: eventtypes.K8sMetadata{ - BasicK8sMetadata: eventtypes.BasicK8sMetadata{ - ContainerName: "test", - }, - }, - }, - }, - Identifier: "/usr/bin/allowed-process", - Opcode: 1, - Flags: 0x0, - UserData: 123, - Comm: "/usr/bin/allowed-process", - }, - expectedAlert: false, - }, - { - name: "Unknown opcode", - event: &traceriouringtype.Event{ - Event: eventtypes.Event{ - CommonData: 
eventtypes.CommonData{ - K8s: eventtypes.K8sMetadata{ - BasicK8sMetadata: eventtypes.BasicK8sMetadata{ - ContainerName: "test", - }, - }, - }, - }, - Identifier: "test-process", - Opcode: 999, // Invalid opcode - Flags: 0x0, - UserData: 123, - Comm: "test-process", - }, - expectedAlert: false, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - ruleResult := ruleprocess.ProcessRule(r, utils.IoUringEventType, tc.event, &objCache) - - if tc.expectedAlert && ruleResult == nil { - t.Errorf("Expected alert for io_uring operation but got nil") - } - if !tc.expectedAlert && ruleResult != nil { - t.Errorf("Expected no alert for io_uring operation but got: %v", ruleResult) - } - }) - } - - // Test wrong event type - wrongEvent := &traceriouringtype.Event{} - ruleResult := ruleprocess.ProcessRule(r, utils.HardlinkEventType, wrongEvent, &objCache) - if ruleResult != nil { - t.Errorf("Expected no alert for wrong event type but got: %v", ruleResult) - } - - // Test evaluation with invalid event type - detectionResult := r.EvaluateRule(utils.HardlinkEventType, wrongEvent, objCache.K8sObjectCache()) - if detectionResult.IsFailure { - t.Error("Expected EvaluateRule to return false for wrong event type") - } - - // Test requirements - reqs := r.Requirements() - if len(reqs.RequiredEventTypes()) != 1 || reqs.RequiredEventTypes()[0] != utils.IoUringEventType { - t.Error("Expected Requirements to return IoUringEventType") - } -} diff --git a/pkg/ruleengine/v1/rule.go b/pkg/ruleengine/v1/rule.go deleted file mode 100644 index 94747f905..000000000 --- a/pkg/ruleengine/v1/rule.go +++ /dev/null @@ -1,77 +0,0 @@ -package ruleengine - -import ( - "github.com/kubescape/node-agent/pkg/objectcache" - "github.com/kubescape/node-agent/pkg/ruleengine" - "github.com/kubescape/node-agent/pkg/utils" - - "github.com/goradd/maps" -) - -const ( - RulePriorityNone = 0 - RulePriorityLow = 1 - RulePriorityMed = 5 - RulePriorityHigh = 8 - RulePriorityCritical = 10 - RulePrioritySystemIssue = 1000 -) - -var _ ruleengine.RuleSpec = (*RuleRequirements)(nil) - -type RuleRequirements struct { - // Needed events for the rule. - EventTypes []utils.EventType - // Profile requirements - ProfileRequirements ruleengine.ProfileRequirement -} - -// Event types required for the rule -func (r *RuleRequirements) RequiredEventTypes() []utils.EventType { - return r.EventTypes -} - -// Profile requirements -func (r *RuleRequirements) GetProfileRequirements() ruleengine.ProfileRequirement { - return r.ProfileRequirements -} - -type BaseRule struct { - // Mutex for protecting rule parameters. 
- parameters maps.SafeMap[string, interface{}] -} - -func (br *BaseRule) SetParameters(parameters map[string]interface{}) { - for k, v := range parameters { - br.parameters.Set(k, v) - } -} - -func (br *BaseRule) GetParameters() map[string]interface{} { - - // Create a copy to avoid returning a reference to the internal map - parametersCopy := make(map[string]interface{}, br.parameters.Len()) - - br.parameters.Range( - func(key string, value interface{}) bool { - parametersCopy[key] = value - return true - }, - ) - return parametersCopy -} - -// Basic evaluation without profile -func (br *BaseRule) EvaluateRule(eventType utils.EventType, event utils.K8sEvent, _ objectcache.K8sObjectCache) (bool, interface{}) { - return false, nil -} - -// Evaluation with profile if available -func (br *BaseRule) EvaluateRuleWithProfile(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache) (bool, interface{}) { - return false, nil -} - -// Create rule failure with available context -func (br *BaseRule) CreateRuleFailure(eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache, payload interface{}) ruleengine.RuleFailure { - return nil -} diff --git a/pkg/rulemanager/cel/cel.go b/pkg/rulemanager/cel/cel.go new file mode 100644 index 000000000..fcfebfdcc --- /dev/null +++ b/pkg/rulemanager/cel/cel.go @@ -0,0 +1,306 @@ +package cel + +import ( + "fmt" + "sync" + + "github.com/google/cel-go/cel" + "github.com/google/cel-go/ext" + tracercapabilitiestype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/capabilities/types" + tracerdnstype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/dns/types" + tracerexectype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/exec/types" + tracernetworktype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/network/types" + "github.com/kubescape/node-agent/pkg/config" + "github.com/kubescape/node-agent/pkg/ebpf/events" + tracerforktype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/fork/types" + tracerhardlinktype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/hardlink/types" + traceriouringtype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/iouring/tracer/types" + tracerptracetype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/ptrace/tracer/types" + tracerrandomxtype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/randomx/types" + tracersshtype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/ssh/types" + tracersymlinktype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/symlink/types" + "github.com/kubescape/node-agent/pkg/objectcache" + "github.com/kubescape/node-agent/pkg/rulemanager/cel/libraries/applicationprofile" + "github.com/kubescape/node-agent/pkg/rulemanager/cel/libraries/k8s" + "github.com/kubescape/node-agent/pkg/rulemanager/cel/libraries/net" + "github.com/kubescape/node-agent/pkg/rulemanager/cel/libraries/networkneighborhood" + "github.com/kubescape/node-agent/pkg/rulemanager/cel/libraries/parse" + "github.com/kubescape/node-agent/pkg/rulemanager/cel/libraries/process" + "github.com/kubescape/node-agent/pkg/rulemanager/types" + typesv1 "github.com/kubescape/node-agent/pkg/rulemanager/types/v1" + "github.com/kubescape/node-agent/pkg/utils" + "github.com/picatz/xcel" +) + +var _ CELRuleEvaluator = (*CEL)(nil) + +type CEL struct { + env *cel.Env + objectCache objectcache.ObjectCache + programCache map[string]cel.Program + cacheMutex sync.RWMutex + typeMutex sync.RWMutex + evalContextPool sync.Pool + ta xcel.TypeAdapter + tp 
*xcel.TypeProvider +} + +func NewCEL(objectCache objectcache.ObjectCache, cfg config.Config) (*CEL, error) { + ta, tp := xcel.NewTypeAdapter(), xcel.NewTypeProvider() + capaObj, capaTyp := xcel.NewObject(&tracercapabilitiestype.Event{}) + xcel.RegisterObject(ta, tp, capaObj, capaTyp, xcel.NewFields(capaObj)) + dnsObj, dnsTyp := xcel.NewObject(&tracerdnstype.Event{}) + xcel.RegisterObject(ta, tp, dnsObj, dnsTyp, xcel.NewFields(dnsObj)) + execObj, execTyp := xcel.NewObject(&events.ExecEvent{}) + xcel.RegisterObject(ta, tp, execObj, execTyp, xcel.NewFields(execObj)) + exitObj, exitTyp := xcel.NewObject(&tracerexectype.Event{}) + xcel.RegisterObject(ta, tp, exitObj, exitTyp, xcel.NewFields(exitObj)) + forkObj, forkTyp := xcel.NewObject(&tracerforktype.Event{}) + xcel.RegisterObject(ta, tp, forkObj, forkTyp, xcel.NewFields(forkObj)) + hardlinkObj, hardlinkTyp := xcel.NewObject(&tracerhardlinktype.Event{}) + xcel.RegisterObject(ta, tp, hardlinkObj, hardlinkTyp, xcel.NewFields(hardlinkObj)) + iouringObj, iouringTyp := xcel.NewObject(&traceriouringtype.Event{}) + xcel.RegisterObject(ta, tp, iouringObj, iouringTyp, xcel.NewFields(iouringObj)) + netObj, netTyp := xcel.NewObject(&tracernetworktype.Event{}) + xcel.RegisterObject(ta, tp, netObj, netTyp, xcel.NewFields(netObj)) + openObj, openTyp := xcel.NewObject(&events.OpenEvent{}) + xcel.RegisterObject(ta, tp, openObj, openTyp, xcel.NewFields(openObj)) + procObj, procTyp := xcel.NewObject(&events.ProcfsEvent{}) + xcel.RegisterObject(ta, tp, procObj, procTyp, xcel.NewFields(procObj)) + ptraceObj, ptraceTyp := xcel.NewObject(&tracerptracetype.Event{}) + xcel.RegisterObject(ta, tp, ptraceObj, ptraceTyp, xcel.NewFields(ptraceObj)) + randObj, randTyp := xcel.NewObject(&tracerrandomxtype.Event{}) + xcel.RegisterObject(ta, tp, randObj, randTyp, xcel.NewFields(randObj)) + sshObj, sshTyp := xcel.NewObject(&tracersshtype.Event{}) + xcel.RegisterObject(ta, tp, sshObj, sshTyp, xcel.NewFields(sshObj)) + symlinkObj, symlinkTyp := xcel.NewObject(&tracersymlinktype.Event{}) + xcel.RegisterObject(ta, tp, symlinkObj, symlinkTyp, xcel.NewFields(symlinkObj)) + syscallObj, syscallTyp := xcel.NewObject(&types.SyscallEvent{}) + xcel.RegisterObject(ta, tp, syscallObj, syscallTyp, xcel.NewFields(syscallObj)) + envOptions := []cel.EnvOption{ + cel.Variable("event_type", cel.StringType), + cel.Variable(string(utils.CapabilitiesEventType), capaTyp), + cel.Variable(string(utils.DnsEventType), dnsTyp), + cel.Variable(string(utils.ExecveEventType), execTyp), + cel.Variable(string(utils.ExitEventType), exitTyp), + cel.Variable(string(utils.ForkEventType), forkTyp), + cel.Variable(string(utils.HardlinkEventType), hardlinkTyp), + cel.Variable(string(utils.IoUringEventType), iouringTyp), + cel.Variable(string(utils.NetworkEventType), netTyp), + cel.Variable(string(utils.OpenEventType), openTyp), + cel.Variable(string(utils.ProcfsEventType), procTyp), + cel.Variable(string(utils.PtraceEventType), ptraceTyp), + cel.Variable(string(utils.RandomXEventType), randTyp), + cel.Variable(string(utils.SSHEventType), sshTyp), + cel.Variable(string(utils.SymlinkEventType), symlinkTyp), + cel.Variable(string(utils.SyscallEventType), syscallTyp), + cel.Variable(string(utils.HTTPEventType), cel.AnyType), + cel.CustomTypeAdapter(ta), + cel.CustomTypeProvider(tp), + ext.Strings(), + k8s.K8s(objectCache.K8sObjectCache(), cfg), + applicationprofile.AP(objectCache, cfg), + networkneighborhood.NN(objectCache, cfg), + parse.Parse(cfg), + net.Net(cfg), + process.Process(cfg), + } + + env, err := 
cel.NewEnv(envOptions...) + if err != nil { + return nil, err + } + cel := &CEL{ + env: env, + objectCache: objectCache, + programCache: make(map[string]cel.Program), + ta: ta, + tp: tp, + } + + cel.evalContextPool.New = func() interface{} { + return make(map[string]any, 1) + } + + return cel, nil +} + +func (c *CEL) registerExpression(expression string) error { + c.cacheMutex.Lock() + defer c.cacheMutex.Unlock() + + // Check if already compiled + if _, exists := c.programCache[expression]; exists { + return nil + } + + ast, issues := c.env.Compile(expression) + if issues != nil { + return fmt.Errorf("failed to compile expression: %s", issues.Err()) + } + + program, err := c.env.Program(ast, cel.EvalOptions(cel.OptOptimize)) + if err != nil { + return fmt.Errorf("failed to create program: %s", err) + } + + c.programCache[expression] = program + return nil +} + +func (c *CEL) getOrCreateProgram(expression string) (cel.Program, error) { + c.cacheMutex.RLock() + if program, exists := c.programCache[expression]; exists { + c.cacheMutex.RUnlock() + return program, nil + } + c.cacheMutex.RUnlock() + + // If not in cache, compile and cache it + if err := c.registerExpression(expression); err != nil { + return nil, err + } + + c.cacheMutex.RLock() + program := c.programCache[expression] + c.cacheMutex.RUnlock() + return program, nil +} + +func (c *CEL) EvaluateRule(event *events.EnrichedEvent, expressions []typesv1.RuleExpression) (bool, error) { + for _, expression := range expressions { + if expression.EventType != event.EventType { + continue + } + + program, err := c.getOrCreateProgram(expression.Expression) + if err != nil { + return false, err + } + + obj, _ := xcel.NewObject(event.Event) + out, _, err := program.Eval(map[string]any{string(event.EventType): obj, "event_type": string(event.EventType)}) + if err != nil { + return false, err + } + + if !out.Value().(bool) { + return false, nil + } + } + + return true, nil +} + +func (c *CEL) EvaluateRuleByMap(event map[string]any, eventType utils.EventType, expressions []typesv1.RuleExpression) (bool, error) { + // Get evaluation context map from pool to reduce allocations + evalContext := c.evalContextPool.Get().(map[string]any) + defer func() { + // Clear and return to pool + clear(evalContext) + c.evalContextPool.Put(evalContext) + }() + + evalContext[string(eventType)] = event + evalContext["event_type"] = string(eventType) + + for _, expression := range expressions { + if expression.EventType != eventType { + continue + } + + program, err := c.getOrCreateProgram(expression.Expression) + if err != nil { + return false, err + } + + out, _, err := program.Eval(evalContext) + if err != nil { + return false, err + } + + if !out.Value().(bool) { + return false, nil + } + } + + return true, nil +} + +func (c *CEL) EvaluateExpressionByMap(event map[string]any, expression string, eventType utils.EventType) (string, error) { + program, err := c.getOrCreateProgram(expression) + if err != nil { + return "", err + } + + // Get evaluation context map from pool to reduce allocations + evalContext := c.evalContextPool.Get().(map[string]any) + defer func() { + // Clear and return to pool + clear(evalContext) + c.evalContextPool.Put(evalContext) + }() + + evalContext[string(eventType)] = event + evalContext["event_type"] = string(eventType) + + out, _, err := program.Eval(evalContext) + if err != nil { + return "", fmt.Errorf("failed to evaluate expression: %s", err) + } + + return out.Value().(string), nil +} + +func (c *CEL) EvaluateExpression(event 
*events.EnrichedEvent, expression string) (string, error) { + program, err := c.getOrCreateProgram(expression) + if err != nil { + return "", err + } + + obj, _ := xcel.NewObject(event.Event) + out, _, err := program.Eval(map[string]any{string(event.EventType): obj, "event_type": string(event.EventType)}) + if err != nil { + return "", err + } + + return out.Value().(string), nil +} + +func (c *CEL) RegisterHelper(function cel.EnvOption) error { + extendedEnv, err := c.env.Extend(function) + if err != nil { + return err + } + c.env = extendedEnv + return nil +} + +func (c *CEL) RegisterCustomType(eventType utils.EventType, obj interface{}) error { + c.typeMutex.Lock() + defer c.typeMutex.Unlock() + + // Create new object and type using xcel + xcelObj, xcelTyp := xcel.NewObject(obj) + + // Register the new object with the existing type adapter/provider + xcel.RegisterObject(c.ta, c.tp, xcelObj, xcelTyp, xcel.NewFields(xcelObj)) + + // Extend the environment with the new variable + // This preserves all existing types while adding the new one + extendedEnv, err := c.env.Extend( + cel.Variable(string(eventType), xcelTyp), + ) + if err != nil { + return fmt.Errorf("failed to extend environment with custom type: %w", err) + } + + c.env = extendedEnv + + // Clear program cache since environment has changed + c.cacheMutex.Lock() + c.programCache = make(map[string]cel.Program) + c.cacheMutex.Unlock() + + return nil +} diff --git a/pkg/rulemanager/cel/cel_interface.go b/pkg/rulemanager/cel/cel_interface.go new file mode 100644 index 000000000..94674823e --- /dev/null +++ b/pkg/rulemanager/cel/cel_interface.go @@ -0,0 +1,17 @@ +package cel + +import ( + "github.com/google/cel-go/cel" + "github.com/kubescape/node-agent/pkg/ebpf/events" + typesv1 "github.com/kubescape/node-agent/pkg/rulemanager/types/v1" + "github.com/kubescape/node-agent/pkg/utils" +) + +type CELRuleEvaluator interface { + EvaluateRule(event *events.EnrichedEvent, expressions []typesv1.RuleExpression) (bool, error) + EvaluateExpressionByMap(event map[string]any, expression string, eventType utils.EventType) (string, error) + EvaluateRuleByMap(event map[string]any, eventType utils.EventType, expressions []typesv1.RuleExpression) (bool, error) + EvaluateExpression(event *events.EnrichedEvent, expression string) (string, error) + RegisterHelper(function cel.EnvOption) error + RegisterCustomType(eventType utils.EventType, obj interface{}) error +} diff --git a/pkg/rulemanager/cel/cost.go b/pkg/rulemanager/cel/cost.go new file mode 100644 index 000000000..6313d12e4 --- /dev/null +++ b/pkg/rulemanager/cel/cost.go @@ -0,0 +1,34 @@ +package cel + +import ( + "github.com/google/cel-go/checker" +) + +// CompositeCostEstimator holds multiple estimators and queries them in order. +type CompositeCostEstimator struct { + estimators []checker.CostEstimator +} + +func NewCompositeCostEstimator(estimators ...checker.CostEstimator) checker.CostEstimator { + return &CompositeCostEstimator{estimators: estimators} +} + +// EstimateCallCost iterates through its estimators and returns the first non-nil estimate. +func (c *CompositeCostEstimator) EstimateCallCost(function, overloadID string, target *checker.AstNode, args []checker.AstNode) *checker.CallEstimate { + for _, e := range c.estimators { + if estimate := e.EstimateCallCost(function, overloadID, target, args); estimate != nil { + return estimate + } + } + return nil +} + +// EstimateSize iterates through its estimators for a size estimate. 
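// Usage sketch for the evaluator defined in cel.go above (illustrative, not taken from this
// diff): the evaluator is constructed once with the shared object cache and config, and
// EvaluateRuleByMap returns true only if every expression whose EventType matches the
// incoming event evaluates to true; compiled programs are cached per expression string.
// The event map keys and the expression text below are placeholders, and other
// RuleExpression fields (if any) are omitted.
//
//	evaluator, err := cel.NewCEL(objCache, cfg)
//	if err != nil {
//		return err
//	}
//	matched, err := evaluator.EvaluateRuleByMap(
//		map[string]any{"comm": "sh"}, // flattened event fields (hypothetical)
//		utils.ExecveEventType,
//		[]typesv1.RuleExpression{{EventType: utils.ExecveEventType, Expression: `event_type != ""`}},
//	)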
+func (c *CompositeCostEstimator) EstimateSize(element checker.AstNode) *checker.SizeEstimate { + for _, e := range c.estimators { + if estimate := e.EstimateSize(element); estimate != nil { + return estimate + } + } + return nil +} diff --git a/pkg/rulemanager/cel/libraries/applicationprofile/ap.go b/pkg/rulemanager/cel/libraries/applicationprofile/ap.go new file mode 100644 index 000000000..805d78ec4 --- /dev/null +++ b/pkg/rulemanager/cel/libraries/applicationprofile/ap.go @@ -0,0 +1,348 @@ +package applicationprofile + +import ( + "github.com/google/cel-go/cel" + "github.com/google/cel-go/checker" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + "github.com/kubescape/node-agent/pkg/config" + "github.com/kubescape/node-agent/pkg/objectcache" + "github.com/kubescape/node-agent/pkg/rulemanager/cel/libraries" + "github.com/kubescape/node-agent/pkg/rulemanager/cel/libraries/cache" +) + +func New(objectCache objectcache.ObjectCache, config config.Config) libraries.Library { + return &apLibrary{ + objectCache: objectCache, + functionCache: cache.NewFunctionCache(cache.FunctionCacheConfig{ + MaxSize: config.CelConfigCache.MaxSize, + TTL: config.CelConfigCache.TTL, + }), + } +} + +func AP(objectCache objectcache.ObjectCache, config config.Config) cel.EnvOption { + return cel.Lib(New(objectCache, config)) +} + +type apLibrary struct { + objectCache objectcache.ObjectCache + functionCache *cache.FunctionCache +} + +func (l *apLibrary) LibraryName() string { + return "ap" +} + +func (l *apLibrary) Types() []*cel.Type { + return []*cel.Type{} +} + +func (l *apLibrary) Declarations() map[string][]cel.FunctionOpt { + return map[string][]cel.FunctionOpt{ + "ap.was_executed": { + cel.Overload( + "ap_was_executed", []*cel.Type{cel.StringType, cel.StringType}, cel.BoolType, + cel.FunctionBinding(func(values ...ref.Val) ref.Val { + if len(values) != 2 { + return types.NewErr("expected 2 arguments, got %d", len(values)) + } + wrapperFunc := func(args ...ref.Val) ref.Val { + return l.wasExecuted(args[0], args[1]) + } + cachedFunc := l.functionCache.WithCache(wrapperFunc, "ap.was_executed") + return cachedFunc(values[0], values[1]) + }), + ), + }, + "ap.was_executed_with_args": { + cel.Overload( + "ap_was_executed_with_args", []*cel.Type{cel.StringType, cel.StringType, cel.ListType(cel.StringType)}, cel.BoolType, + cel.FunctionBinding(func(values ...ref.Val) ref.Val { + if len(values) != 3 { + return types.NewErr("expected 3 arguments, got %d", len(values)) + } + wrapperFunc := func(args ...ref.Val) ref.Val { + return l.wasExecutedWithArgs(args[0], args[1], args[2]) + } + cachedFunc := l.functionCache.WithCache(wrapperFunc, "ap.was_executed_with_args") + return cachedFunc(values[0], values[1], values[2]) + }), + ), + }, + "ap.was_path_opened": { + cel.Overload( + "ap_was_path_opened", []*cel.Type{cel.StringType, cel.StringType}, cel.BoolType, + cel.FunctionBinding(func(values ...ref.Val) ref.Val { + if len(values) != 2 { + return types.NewErr("expected 2 arguments, got %d", len(values)) + } + wrapperFunc := func(args ...ref.Val) ref.Val { + return l.wasPathOpened(args[0], args[1]) + } + cachedFunc := l.functionCache.WithCache(wrapperFunc, "ap.was_path_opened") + return cachedFunc(values[0], values[1]) + }), + ), + }, + "ap.was_path_opened_with_flags": { + cel.Overload( + "ap_was_path_opened_with_flags", []*cel.Type{cel.StringType, cel.StringType, cel.ListType(cel.StringType)}, cel.BoolType, + cel.FunctionBinding(func(values ...ref.Val) ref.Val { + if len(values) != 3 
{ + return types.NewErr("expected 3 arguments, got %d", len(values)) + } + wrapperFunc := func(args ...ref.Val) ref.Val { + return l.wasPathOpenedWithFlags(args[0], args[1], args[2]) + } + cachedFunc := l.functionCache.WithCache(wrapperFunc, "ap.was_path_opened_with_flags") + return cachedFunc(values[0], values[1], values[2]) + }), + ), + }, + "ap.was_path_opened_with_suffix": { + cel.Overload( + "ap_was_path_opened_with_suffix", []*cel.Type{cel.StringType, cel.StringType}, cel.BoolType, + cel.FunctionBinding(func(values ...ref.Val) ref.Val { + if len(values) != 2 { + return types.NewErr("expected 2 arguments, got %d", len(values)) + } + wrapperFunc := func(args ...ref.Val) ref.Val { + return l.wasPathOpenedWithSuffix(args[0], args[1]) + } + cachedFunc := l.functionCache.WithCache(wrapperFunc, "ap.was_path_opened_with_suffix") + return cachedFunc(values[0], values[1]) + }), + ), + }, + "ap.was_path_opened_with_prefix": { + cel.Overload( + "ap_was_path_opened_with_prefix", []*cel.Type{cel.StringType, cel.StringType}, cel.BoolType, + cel.FunctionBinding(func(values ...ref.Val) ref.Val { + if len(values) != 2 { + return types.NewErr("expected 2 arguments, got %d", len(values)) + } + wrapperFunc := func(args ...ref.Val) ref.Val { + return l.wasPathOpenedWithPrefix(args[0], args[1]) + } + cachedFunc := l.functionCache.WithCache(wrapperFunc, "ap.was_path_opened_with_prefix") + return cachedFunc(values[0], values[1]) + }), + ), + }, + "ap.was_syscall_used": { + cel.Overload( + "ap_was_syscall_used", []*cel.Type{cel.StringType, cel.StringType}, cel.BoolType, + cel.FunctionBinding(func(values ...ref.Val) ref.Val { + if len(values) != 2 { + return types.NewErr("expected 2 arguments, got %d", len(values)) + } + wrapperFunc := func(args ...ref.Val) ref.Val { + return l.wasSyscallUsed(args[0], args[1]) + } + cachedFunc := l.functionCache.WithCache(wrapperFunc, "ap.was_syscall_used") + return cachedFunc(values[0], values[1]) + }), + ), + }, + "ap.was_capability_used": { + cel.Overload( + "ap_was_capability_used", []*cel.Type{cel.StringType, cel.StringType}, cel.BoolType, + cel.FunctionBinding(func(values ...ref.Val) ref.Val { + if len(values) != 2 { + return types.NewErr("expected 2 arguments, got %d", len(values)) + } + wrapperFunc := func(args ...ref.Val) ref.Val { + return l.wasCapabilityUsed(args[0], args[1]) + } + cachedFunc := l.functionCache.WithCache(wrapperFunc, "ap.was_capability_used") + return cachedFunc(values[0], values[1]) + }), + ), + }, + "ap.was_endpoint_accessed": { + cel.Overload( + "ap_was_endpoint_accessed", []*cel.Type{cel.StringType, cel.StringType}, cel.BoolType, + cel.FunctionBinding(func(values ...ref.Val) ref.Val { + if len(values) != 2 { + return types.NewErr("expected 2 arguments, got %d", len(values)) + } + wrapperFunc := func(args ...ref.Val) ref.Val { + return l.wasEndpointAccessed(args[0], args[1]) + } + cachedFunc := l.functionCache.WithCache(wrapperFunc, "ap.was_endpoint_accessed") + return cachedFunc(values[0], values[1]) + }), + ), + }, + "ap.was_endpoint_accessed_with_method": { + cel.Overload( + "ap_was_endpoint_accessed_with_method", []*cel.Type{cel.StringType, cel.StringType, cel.StringType}, cel.BoolType, + cel.FunctionBinding(func(values ...ref.Val) ref.Val { + if len(values) != 3 { + return types.NewErr("expected 3 arguments, got %d", len(values)) + } + wrapperFunc := func(args ...ref.Val) ref.Val { + return l.wasEndpointAccessedWithMethod(args[0], args[1], args[2]) + } + cachedFunc := l.functionCache.WithCache(wrapperFunc, 
"ap.was_endpoint_accessed_with_method") + return cachedFunc(values[0], values[1], values[2]) + }), + ), + }, + "ap.was_endpoint_accessed_with_methods": { + cel.Overload( + "ap_was_endpoint_accessed_with_methods", []*cel.Type{cel.StringType, cel.StringType, cel.ListType(cel.StringType)}, cel.BoolType, + cel.FunctionBinding(func(values ...ref.Val) ref.Val { + if len(values) != 3 { + return types.NewErr("expected 3 arguments, got %d", len(values)) + } + wrapperFunc := func(args ...ref.Val) ref.Val { + return l.wasEndpointAccessedWithMethods(args[0], args[1], args[2]) + } + cachedFunc := l.functionCache.WithCache(wrapperFunc, "ap.was_endpoint_accessed_with_methods") + return cachedFunc(values[0], values[1], values[2]) + }), + ), + }, + "ap.was_endpoint_accessed_with_prefix": { + cel.Overload( + "ap_was_endpoint_accessed_with_prefix", []*cel.Type{cel.StringType, cel.StringType}, cel.BoolType, + cel.FunctionBinding(func(values ...ref.Val) ref.Val { + if len(values) != 2 { + return types.NewErr("expected 2 arguments, got %d", len(values)) + } + wrapperFunc := func(args ...ref.Val) ref.Val { + return l.wasEndpointAccessedWithPrefix(args[0], args[1]) + } + cachedFunc := l.functionCache.WithCache(wrapperFunc, "ap.was_endpoint_accessed_with_prefix") + return cachedFunc(values[0], values[1]) + }), + ), + }, + "ap.was_endpoint_accessed_with_suffix": { + cel.Overload( + "ap_was_endpoint_accessed_with_suffix", []*cel.Type{cel.StringType, cel.StringType}, cel.BoolType, + cel.FunctionBinding(func(values ...ref.Val) ref.Val { + if len(values) != 2 { + return types.NewErr("expected 2 arguments, got %d", len(values)) + } + wrapperFunc := func(args ...ref.Val) ref.Val { + return l.wasEndpointAccessedWithSuffix(args[0], args[1]) + } + cachedFunc := l.functionCache.WithCache(wrapperFunc, "ap.was_endpoint_accessed_with_suffix") + return cachedFunc(values[0], values[1]) + }), + ), + }, + "ap.was_host_accessed": { + cel.Overload( + "ap_was_host_accessed", []*cel.Type{cel.StringType, cel.StringType}, cel.BoolType, + cel.FunctionBinding(func(values ...ref.Val) ref.Val { + if len(values) != 2 { + return types.NewErr("expected 2 arguments, got %d", len(values)) + } + wrapperFunc := func(args ...ref.Val) ref.Val { + return l.wasHostAccessed(args[0], args[1]) + } + cachedFunc := l.functionCache.WithCache(wrapperFunc, "ap.was_host_accessed") + return cachedFunc(values[0], values[1]) + }), + ), + }, + } +} + +func (l *apLibrary) CompileOptions() []cel.EnvOption { + options := []cel.EnvOption{} + for name, overloads := range l.Declarations() { + options = append(options, cel.Function(name, overloads...)) + } + return options +} + +func (l *apLibrary) ProgramOptions() []cel.ProgramOption { + return []cel.ProgramOption{} +} + +func (l *apLibrary) CostEstimator() checker.CostEstimator { + return &apCostEstimator{} +} + +// apCostEstimator implements the checker.CostEstimator for the 'ap' library. 
+type apCostEstimator struct{} + +func (e *apCostEstimator) EstimateCallCost(function, overloadID string, target *checker.AstNode, args []checker.AstNode) *checker.CallEstimate { + cost := int64(0) + switch function { + case "ap.was_executed": + // Cache lookup + O(n) linear search through execs list + cost = 15 + case "ap.was_executed_with_args": + // Cache lookup + O(n) linear search + O(m) slice comparison for args + cost = 30 + case "ap.was_path_opened": + // Cache lookup + O(n) linear search + dynamic path comparison + cost = 25 + case "ap.was_path_opened_with_flags": + // Cache lookup + O(n) search + dynamic path comparison + O(f*p) flag comparison + cost = 40 + case "ap.was_path_opened_with_suffix": + // Cache lookup + O(n) linear search + O(n*len(suffix)) string suffix checks + cost = 20 + case "ap.was_path_opened_with_prefix": + // Cache lookup + O(n) linear search + O(n*len(prefix)) string prefix checks + cost = 20 + case "ap.was_syscall_used": + // Cache lookup + O(n) slice.Contains search through syscalls + cost = 12 + case "ap.was_capability_used": + // Cache lookup + O(n) slice.Contains search through capabilities + cost = 12 + case "ap.was_endpoint_accessed": + // Cache lookup + O(n) linear search through endpoints + dynamic path comparison + cost = 25 + case "ap.was_endpoint_accessed_with_method": + // Cache lookup + O(n) search + dynamic path comparison + O(m) method check + cost = 30 + case "ap.was_endpoint_accessed_with_methods": + // Cache lookup + O(n) search + dynamic path comparison + O(m*k) method comparison + cost = 35 + case "ap.was_endpoint_accessed_with_prefix": + // Cache lookup + O(n) linear search + O(n*len(prefix)) string prefix checks + cost = 20 + case "ap.was_endpoint_accessed_with_suffix": + // Cache lookup + O(n) linear search + O(n*len(suffix)) string suffix checks + cost = 20 + case "ap.was_host_accessed": + // Cache lookup + O(n) endpoint search + URL parsing + O(m) network neighbor search + cost = 35 + case "ap.was_internal_endpoint_accessed": + // Cache lookup + O(n) linear search through endpoints checking internal flag + cost = 15 + case "ap.was_external_endpoint_accessed": + // Cache lookup + O(n) linear search through endpoints checking internal flag + cost = 15 + case "ap.was_endpoint_accessed_with_direction": + // Cache lookup + O(n) linear search through endpoints + string comparison + cost = 18 + case "ap.was_endpoint_accessed_with_header": + // Cache lookup + O(n) search + JSON unmarshal + header map lookup + cost = 40 + case "ap.was_endpoint_accessed_with_header_value": + // Cache lookup + O(n) search + JSON unmarshal + header map lookup + slice.Contains + cost = 45 + default: + // This estimator doesn't know about other functions. + return nil + } + return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: uint64(cost), Max: uint64(cost)}} +} + +func (e *apCostEstimator) EstimateSize(element checker.AstNode) *checker.SizeEstimate { + return nil // Not providing size estimates for now. 
+} + +// Ensure the implementation satisfies the interface +var _ checker.CostEstimator = (*apCostEstimator)(nil) +var _ libraries.Library = (*apLibrary)(nil) diff --git a/pkg/rulemanager/cel/libraries/applicationprofile/cache_test.go b/pkg/rulemanager/cel/libraries/applicationprofile/cache_test.go new file mode 100644 index 000000000..12edc6912 --- /dev/null +++ b/pkg/rulemanager/cel/libraries/applicationprofile/cache_test.go @@ -0,0 +1,467 @@ +package applicationprofile + +import ( + "testing" + "time" + + "github.com/google/cel-go/cel" + "github.com/goradd/maps" + "github.com/kubescape/node-agent/pkg/objectcache" + objectcachev1 "github.com/kubescape/node-agent/pkg/objectcache/v1" + "github.com/kubescape/node-agent/pkg/rulemanager/cel/libraries/cache" + "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" + "github.com/stretchr/testify/assert" +) + +func TestApplicationProfileCaching(t *testing.T) { + objCache := objectcachev1.RuleObjectCacheMock{ + ContainerIDToSharedData: maps.NewSafeMap[string, *objectcache.WatchedContainerData](), + } + + objCache.SetSharedContainerData("test-container-id", &objectcache.WatchedContainerData{ + ContainerType: objectcache.Container, + ContainerInfos: map[objectcache.ContainerType][]objectcache.ContainerInfo{ + objectcache.Container: { + { + Name: "test-container", + }, + }, + }, + }) + + profile := &v1beta1.ApplicationProfile{} + profile.Spec.Containers = append(profile.Spec.Containers, v1beta1.ApplicationProfileContainer{ + Name: "test-container", + Opens: []v1beta1.OpenCalls{ + { + Path: "/etc/passwd", + Flags: []string{"O_RDONLY"}, + }, + }, + Execs: []v1beta1.ExecCalls{ + { + Path: "/bin/ls", + Args: []string{"-la"}, + }, + }, + Syscalls: []string{"open", "read", "write"}, + Capabilities: []string{"CAP_NET_ADMIN"}, + }) + objCache.SetApplicationProfile(profile) + + // Create library with cache + lib := &apLibrary{ + objectCache: &objCache, + functionCache: cache.NewFunctionCache(cache.DefaultFunctionCacheConfig()), + } + + env, err := cel.NewEnv( + cel.Variable("containerID", cel.StringType), + cel.Variable("path", cel.StringType), + cel.Variable("args", cel.ListType(cel.StringType)), + cel.Variable("syscall", cel.StringType), + cel.Variable("capability", cel.StringType), + cel.Lib(lib), + ) + assert.NoError(t, err) + + testCases := []struct { + name string + expression string + vars map[string]interface{} + expected bool + }{ + { + name: "was_path_opened caching", + expression: `ap.was_path_opened(containerID, path)`, + vars: map[string]interface{}{ + "containerID": "test-container-id", + "path": "/etc/passwd", + }, + expected: true, + }, + { + name: "was_executed caching", + expression: `ap.was_executed(containerID, path)`, + vars: map[string]interface{}{ + "containerID": "test-container-id", + "path": "/bin/ls", + }, + expected: true, + }, + { + name: "was_executed_with_args caching", + expression: `ap.was_executed_with_args(containerID, path, args)`, + vars: map[string]interface{}{ + "containerID": "test-container-id", + "path": "/bin/ls", + "args": []string{"-la"}, + }, + expected: true, + }, + { + name: "was_syscall_used caching", + expression: `ap.was_syscall_used(containerID, syscall)`, + vars: map[string]interface{}{ + "containerID": "test-container-id", + "syscall": "open", + }, + expected: true, + }, + { + name: "was_capability_used caching", + expression: `ap.was_capability_used(containerID, capability)`, + vars: map[string]interface{}{ + "containerID": "test-container-id", + "capability": "CAP_NET_ADMIN", + }, + 
expected: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ast, issues := env.Compile(tc.expression) + assert.NoError(t, issues.Err()) + + program, err := env.Program(ast) + assert.NoError(t, err) + + // Initial cache should be empty + initialCacheSize := lib.functionCache.GetCacheStats() + + // First call - should cache the result + result1, _, err := program.Eval(tc.vars) + assert.NoError(t, err) + assert.Equal(t, tc.expected, result1.Value()) + + // Cache should have one more entry + cacheSize1 := lib.functionCache.GetCacheStats() + assert.Equal(t, initialCacheSize+1, cacheSize1, "Cache should have one new entry after first call") + + // Second call with same parameters - should use cache + result2, _, err := program.Eval(tc.vars) + assert.NoError(t, err) + assert.Equal(t, tc.expected, result2.Value()) + + // Cache size should remain the same (cache hit) + cacheSize2 := lib.functionCache.GetCacheStats() + assert.Equal(t, cacheSize1, cacheSize2, "Cache size should not increase on cache hit") + + // Verify results are identical + assert.Equal(t, result1.Value(), result2.Value(), "Cached and non-cached results should be identical") + }) + } +} + +func TestApplicationProfileCacheDifferentArguments(t *testing.T) { + objCache := objectcachev1.RuleObjectCacheMock{ + ContainerIDToSharedData: maps.NewSafeMap[string, *objectcache.WatchedContainerData](), + } + + objCache.SetSharedContainerData("test-container-id", &objectcache.WatchedContainerData{ + ContainerType: objectcache.Container, + ContainerInfos: map[objectcache.ContainerType][]objectcache.ContainerInfo{ + objectcache.Container: { + { + Name: "test-container", + }, + }, + }, + }) + + profile := &v1beta1.ApplicationProfile{} + profile.Spec.Containers = append(profile.Spec.Containers, v1beta1.ApplicationProfileContainer{ + Name: "test-container", + Opens: []v1beta1.OpenCalls{ + { + Path: "/etc/passwd", + Flags: []string{"O_RDONLY"}, + }, + { + Path: "/tmp/test.txt", + Flags: []string{"O_WRONLY"}, + }, + }, + }) + objCache.SetApplicationProfile(profile) + + lib := &apLibrary{ + objectCache: &objCache, + functionCache: cache.NewFunctionCache(cache.DefaultFunctionCacheConfig()), + } + + env, err := cel.NewEnv( + cel.Variable("containerID", cel.StringType), + cel.Variable("path", cel.StringType), + cel.Lib(lib), + ) + assert.NoError(t, err) + + ast, issues := env.Compile(`ap.was_path_opened(containerID, path)`) + assert.NoError(t, issues.Err()) + + program, err := env.Program(ast) + assert.NoError(t, err) + + // Call with first path + result1, _, err := program.Eval(map[string]interface{}{ + "containerID": "test-container-id", + "path": "/etc/passwd", + }) + assert.NoError(t, err) + assert.True(t, result1.Value().(bool)) + + cacheSize1 := lib.functionCache.GetCacheStats() + assert.Equal(t, 1, cacheSize1, "Cache should have 1 entry") + + // Call with second path - should create new cache entry + result2, _, err := program.Eval(map[string]interface{}{ + "containerID": "test-container-id", + "path": "/tmp/test.txt", + }) + assert.NoError(t, err) + assert.True(t, result2.Value().(bool)) + + cacheSize2 := lib.functionCache.GetCacheStats() + assert.Equal(t, 2, cacheSize2, "Cache should have 2 entries for different arguments") + + // Call with non-existent path - should create third cache entry + result3, _, err := program.Eval(map[string]interface{}{ + "containerID": "test-container-id", + "path": "/nonexistent", + }) + assert.NoError(t, err) + assert.False(t, result3.Value().(bool)) + + cacheSize3 := 
lib.functionCache.GetCacheStats() + assert.Equal(t, 3, cacheSize3, "Cache should have 3 entries for different arguments") +} + +func TestApplicationProfileCacheExpiration(t *testing.T) { + objCache := objectcachev1.RuleObjectCacheMock{ + ContainerIDToSharedData: maps.NewSafeMap[string, *objectcache.WatchedContainerData](), + } + + objCache.SetSharedContainerData("test-container-id", &objectcache.WatchedContainerData{ + ContainerType: objectcache.Container, + ContainerInfos: map[objectcache.ContainerType][]objectcache.ContainerInfo{ + objectcache.Container: { + { + Name: "test-container", + }, + }, + }, + }) + + profile := &v1beta1.ApplicationProfile{} + profile.Spec.Containers = append(profile.Spec.Containers, v1beta1.ApplicationProfileContainer{ + Name: "test-container", + Opens: []v1beta1.OpenCalls{ + { + Path: "/etc/passwd", + Flags: []string{"O_RDONLY"}, + }, + }, + }) + objCache.SetApplicationProfile(profile) + + // Create cache with short TTL for testing + config := cache.FunctionCacheConfig{ + MaxSize: 100, + TTL: 50 * time.Millisecond, + } + lib := &apLibrary{ + objectCache: &objCache, + functionCache: cache.NewFunctionCache(config), + } + + env, err := cel.NewEnv( + cel.Variable("containerID", cel.StringType), + cel.Variable("path", cel.StringType), + cel.Lib(lib), + ) + assert.NoError(t, err) + + ast, issues := env.Compile(`ap.was_path_opened(containerID, path)`) + assert.NoError(t, issues.Err()) + + program, err := env.Program(ast) + assert.NoError(t, err) + + vars := map[string]interface{}{ + "containerID": "test-container-id", + "path": "/etc/passwd", + } + + // First call + result1, _, err := program.Eval(vars) + assert.NoError(t, err) + assert.True(t, result1.Value().(bool)) + + cacheSize1 := lib.functionCache.GetCacheStats() + assert.Equal(t, 1, cacheSize1, "Cache should have 1 entry") + + // Wait for cache to expire + time.Sleep(100 * time.Millisecond) + + // Second call after expiration - cache should be empty + result2, _, err := program.Eval(vars) + assert.NoError(t, err) + assert.True(t, result2.Value().(bool)) + + // Cache should have been repopulated + cacheSize2 := lib.functionCache.GetCacheStats() + assert.Equal(t, 1, cacheSize2, "Cache should be repopulated after expiration") +} + +func TestApplicationProfileCachePerformance(t *testing.T) { + objCache := objectcachev1.RuleObjectCacheMock{ + ContainerIDToSharedData: maps.NewSafeMap[string, *objectcache.WatchedContainerData](), + } + + objCache.SetSharedContainerData("test-container-id", &objectcache.WatchedContainerData{ + ContainerType: objectcache.Container, + ContainerInfos: map[objectcache.ContainerType][]objectcache.ContainerInfo{ + objectcache.Container: { + { + Name: "test-container", + }, + }, + }, + }) + + profile := &v1beta1.ApplicationProfile{} + profile.Spec.Containers = append(profile.Spec.Containers, v1beta1.ApplicationProfileContainer{ + Name: "test-container", + Opens: []v1beta1.OpenCalls{ + { + Path: "/etc/passwd", + Flags: []string{"O_RDONLY"}, + }, + }, + }) + objCache.SetApplicationProfile(profile) + + lib := &apLibrary{ + objectCache: &objCache, + functionCache: cache.NewFunctionCache(cache.DefaultFunctionCacheConfig()), + } + + env, err := cel.NewEnv( + cel.Variable("containerID", cel.StringType), + cel.Variable("path", cel.StringType), + cel.Lib(lib), + ) + assert.NoError(t, err) + + ast, issues := env.Compile(`ap.was_path_opened(containerID, path)`) + assert.NoError(t, issues.Err()) + + program, err := env.Program(ast) + assert.NoError(t, err) + + vars := map[string]interface{}{ + 
"containerID": "test-container-id", + "path": "/etc/passwd", + } + + // Measure time for first call (cache miss) + start1 := time.Now() + result1, _, err := program.Eval(vars) + duration1 := time.Since(start1) + assert.NoError(t, err) + assert.True(t, result1.Value().(bool)) + + // Measure time for second call (cache hit) + start2 := time.Now() + result2, _, err := program.Eval(vars) + duration2 := time.Since(start2) + assert.NoError(t, err) + assert.True(t, result2.Value().(bool)) + + // Cache hit should be faster (though this is not guaranteed in all environments) + t.Logf("First call (cache miss): %v", duration1) + t.Logf("Second call (cache hit): %v", duration2) + + // Verify results are the same + assert.Equal(t, result1.Value(), result2.Value()) + + // Verify cache was used + cacheSize := lib.functionCache.GetCacheStats() + assert.Equal(t, 1, cacheSize, "Cache should contain 1 entry") +} + +func TestApplicationProfileCacheClearCache(t *testing.T) { + objCache := objectcachev1.RuleObjectCacheMock{ + ContainerIDToSharedData: maps.NewSafeMap[string, *objectcache.WatchedContainerData](), + } + + objCache.SetSharedContainerData("test-container-id", &objectcache.WatchedContainerData{ + ContainerType: objectcache.Container, + ContainerInfos: map[objectcache.ContainerType][]objectcache.ContainerInfo{ + objectcache.Container: { + { + Name: "test-container", + }, + }, + }, + }) + + profile := &v1beta1.ApplicationProfile{} + profile.Spec.Containers = append(profile.Spec.Containers, v1beta1.ApplicationProfileContainer{ + Name: "test-container", + Opens: []v1beta1.OpenCalls{ + { + Path: "/etc/passwd", + Flags: []string{"O_RDONLY"}, + }, + }, + }) + objCache.SetApplicationProfile(profile) + + lib := &apLibrary{ + objectCache: &objCache, + functionCache: cache.NewFunctionCache(cache.DefaultFunctionCacheConfig()), + } + + env, err := cel.NewEnv( + cel.Variable("containerID", cel.StringType), + cel.Variable("path", cel.StringType), + cel.Lib(lib), + ) + assert.NoError(t, err) + + ast, issues := env.Compile(`ap.was_path_opened(containerID, path)`) + assert.NoError(t, issues.Err()) + + program, err := env.Program(ast) + assert.NoError(t, err) + + vars := map[string]interface{}{ + "containerID": "test-container-id", + "path": "/etc/passwd", + } + + // First call - populate cache + result1, _, err := program.Eval(vars) + assert.NoError(t, err) + assert.True(t, result1.Value().(bool)) + + cacheSize1 := lib.functionCache.GetCacheStats() + assert.Equal(t, 1, cacheSize1, "Cache should have 1 entry") + + // Clear cache + lib.functionCache.ClearCache() + + cacheSize2 := lib.functionCache.GetCacheStats() + assert.Equal(t, 0, cacheSize2, "Cache should be empty after clear") + + // Call again - should repopulate cache + result2, _, err := program.Eval(vars) + assert.NoError(t, err) + assert.True(t, result2.Value().(bool)) + + cacheSize3 := lib.functionCache.GetCacheStats() + assert.Equal(t, 1, cacheSize3, "Cache should have 1 entry after repopulation") +} diff --git a/pkg/rulemanager/cel/libraries/applicationprofile/capability.go b/pkg/rulemanager/cel/libraries/applicationprofile/capability.go new file mode 100644 index 000000000..e9cea6579 --- /dev/null +++ b/pkg/rulemanager/cel/libraries/applicationprofile/capability.go @@ -0,0 +1,35 @@ +package applicationprofile + +import ( + "slices" + + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + "github.com/kubescape/node-agent/pkg/rulemanager/profilehelper" +) + +func (l *apLibrary) wasCapabilityUsed(containerID, capabilityName 
ref.Val) ref.Val { + if l.objectCache == nil { + return types.NewErr("objectCache is nil") + } + + containerIDStr, ok := containerID.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(containerID) + } + capabilityNameStr, ok := capabilityName.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(capabilityName) + } + + container, err := profilehelper.GetContainerApplicationProfile(l.objectCache, containerIDStr) + if err != nil { + return types.Bool(false) + } + + if slices.Contains(container.Capabilities, capabilityNameStr) { + return types.Bool(true) + } + + return types.Bool(false) +} diff --git a/pkg/rulemanager/cel/libraries/applicationprofile/capability_test.go b/pkg/rulemanager/cel/libraries/applicationprofile/capability_test.go new file mode 100644 index 000000000..6ca09da37 --- /dev/null +++ b/pkg/rulemanager/cel/libraries/applicationprofile/capability_test.go @@ -0,0 +1,161 @@ +package applicationprofile + +import ( + "testing" + + "github.com/google/cel-go/cel" + "github.com/goradd/maps" + "github.com/kubescape/node-agent/pkg/config" + "github.com/kubescape/node-agent/pkg/objectcache" + objectcachev1 "github.com/kubescape/node-agent/pkg/objectcache/v1" + "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" + "github.com/stretchr/testify/assert" +) + +func TestCapabilityInProfile(t *testing.T) { + objCache := objectcachev1.RuleObjectCacheMock{ + ContainerIDToSharedData: maps.NewSafeMap[string, *objectcache.WatchedContainerData](), + } + + objCache.SetSharedContainerData("test-container-id", &objectcache.WatchedContainerData{ + ContainerType: objectcache.Container, + ContainerInfos: map[objectcache.ContainerType][]objectcache.ContainerInfo{ + objectcache.Container: { + { + Name: "test-container", + }, + }, + }, + }) + + profile := &v1beta1.ApplicationProfile{} + profile.Spec.Containers = append(profile.Spec.Containers, v1beta1.ApplicationProfileContainer{ + Name: "test-container", + Capabilities: []string{ + "NET_ADMIN", + "SYS_ADMIN", + "SETUID", + "SETGID", + }, + }) + objCache.SetApplicationProfile(profile) + + env, err := cel.NewEnv( + cel.Variable("containerID", cel.StringType), + cel.Variable("capabilityName", cel.StringType), + AP(&objCache, config.Config{}), + ) + if err != nil { + t.Fatalf("failed to create env: %v", err) + } + + testCases := []struct { + name string + containerID string + capabilityName string + expectedResult bool + }{ + { + name: "Capability exists in profile", + containerID: "test-container-id", + capabilityName: "NET_ADMIN", + expectedResult: true, + }, + { + name: "Capability does not exist in profile", + containerID: "test-container-id", + capabilityName: "DAC_OVERRIDE", + expectedResult: false, + }, + { + name: "Another capability exists in profile", + containerID: "test-container-id", + capabilityName: "SYS_ADMIN", + expectedResult: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ast, issues := env.Compile(`ap.was_capability_used(containerID, capabilityName)`) + if issues != nil { + t.Fatalf("failed to compile expression: %v", issues.Err()) + } + + program, err := env.Program(ast) + if err != nil { + t.Fatalf("failed to create program: %v", err) + } + + result, _, err := program.Eval(map[string]interface{}{ + "containerID": tc.containerID, + "capabilityName": tc.capabilityName, + }) + if err != nil { + t.Fatalf("failed to eval program: %v", err) + } + + actualResult := result.Value().(bool) + assert.Equal(t, tc.expectedResult, actualResult, "ap.was_capability_used 
result should match expected value") + }) + } +} + +func TestCapabilityNoProfile(t *testing.T) { + objCache := objectcachev1.RuleObjectCacheMock{} + + env, err := cel.NewEnv( + cel.Variable("containerID", cel.StringType), + cel.Variable("capabilityName", cel.StringType), + AP(&objCache, config.Config{}), + ) + if err != nil { + t.Fatalf("failed to create env: %v", err) + } + + ast, issues := env.Compile(`ap.was_capability_used(containerID, capabilityName)`) + if issues != nil { + t.Fatalf("failed to compile expression: %v", issues.Err()) + } + + program, err := env.Program(ast) + if err != nil { + t.Fatalf("failed to create program: %v", err) + } + + result, _, err := program.Eval(map[string]interface{}{ + "containerID": "test-container-id", + "capabilityName": "NET_ADMIN", + }) + if err != nil { + t.Fatalf("failed to eval program: %v", err) + } + + actualResult := result.Value().(bool) + assert.False(t, actualResult, "ap.was_capability_used should return false when no profile is available") +} + +func TestCapabilityCompilation(t *testing.T) { + objCache := objectcachev1.RuleObjectCacheMock{} + + env, err := cel.NewEnv( + cel.Variable("containerID", cel.StringType), + cel.Variable("capabilityName", cel.StringType), + AP(&objCache, config.Config{}), + ) + if err != nil { + t.Fatalf("failed to create env: %v", err) + } + + // Test that the function compiles correctly + ast, issues := env.Compile(`ap.was_capability_used(containerID, capabilityName)`) + if issues != nil { + t.Fatalf("failed to compile expression: %v", issues.Err()) + } + + // Test that we can create a program + _, err = env.Program(ast) + if err != nil { + t.Fatalf("failed to create program: %v", err) + } +} diff --git a/pkg/rulemanager/cel/libraries/applicationprofile/exec.go b/pkg/rulemanager/cel/libraries/applicationprofile/exec.go new file mode 100644 index 000000000..6603d672e --- /dev/null +++ b/pkg/rulemanager/cel/libraries/applicationprofile/exec.go @@ -0,0 +1,75 @@ +package applicationprofile + +import ( + "slices" + + "github.com/google/cel-go/common/types" + + "github.com/google/cel-go/common/types/ref" + "github.com/kubescape/node-agent/pkg/rulemanager/cel/libraries/celparse" + "github.com/kubescape/node-agent/pkg/rulemanager/profilehelper" +) + +func (l *apLibrary) wasExecuted(containerID, path ref.Val) ref.Val { + if l.objectCache == nil { + return types.NewErr("objectCache is nil") + } + + containerIDStr, ok := containerID.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(containerID) + } + pathStr, ok := path.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(path) + } + + container, err := profilehelper.GetContainerApplicationProfile(l.objectCache, containerIDStr) + if err != nil { + return types.Bool(false) + } + + for _, exec := range container.Execs { + if exec.Path == pathStr { + return types.Bool(true) + } + } + + return types.Bool(false) +} + +func (l *apLibrary) wasExecutedWithArgs(containerID, path, args ref.Val) ref.Val { + if l.objectCache == nil { + return types.NewErr("objectCache is nil") + } + + containerIDStr, ok := containerID.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(containerID) + } + + pathStr, ok := path.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(path) + } + + celArgs, err := celparse.ParseList[string](args) + if err != nil { + return types.NewErr("failed to parse args: %v", err) + } + + container, err := profilehelper.GetContainerApplicationProfile(l.objectCache, containerIDStr) + if err != nil { + return 
types.Bool(false) + } + + for _, exec := range container.Execs { + if exec.Path == pathStr { + if slices.Compare(exec.Args, celArgs) == 0 { + return types.Bool(true) + } + } + } + + return types.Bool(false) +} diff --git a/pkg/rulemanager/cel/libraries/applicationprofile/exec_test.go b/pkg/rulemanager/cel/libraries/applicationprofile/exec_test.go new file mode 100644 index 000000000..8821e7bdf --- /dev/null +++ b/pkg/rulemanager/cel/libraries/applicationprofile/exec_test.go @@ -0,0 +1,326 @@ +package applicationprofile + +import ( + "testing" + + "github.com/google/cel-go/cel" + "github.com/goradd/maps" + "github.com/kubescape/node-agent/pkg/config" + "github.com/kubescape/node-agent/pkg/objectcache" + objectcachev1 "github.com/kubescape/node-agent/pkg/objectcache/v1" + "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" + "github.com/stretchr/testify/assert" +) + +func TestExecInProfile(t *testing.T) { + objCache := objectcachev1.RuleObjectCacheMock{ + ContainerIDToSharedData: maps.NewSafeMap[string, *objectcache.WatchedContainerData](), + } + + objCache.SetSharedContainerData("test-container-id", &objectcache.WatchedContainerData{ + ContainerType: objectcache.Container, + ContainerInfos: map[objectcache.ContainerType][]objectcache.ContainerInfo{ + objectcache.Container: { + { + Name: "test-container", + }, + }, + }, + }) + + profile := &v1beta1.ApplicationProfile{} + profile.Spec.Containers = append(profile.Spec.Containers, v1beta1.ApplicationProfileContainer{ + Name: "test-container", + Execs: []v1beta1.ExecCalls{ + { + Path: "/bin/ls", + Args: []string{"-la"}, + }, + { + Path: "/usr/bin/curl", + Args: []string{"https://example.com"}, + }, + }, + }) + objCache.SetApplicationProfile(profile) + + env, err := cel.NewEnv( + cel.Variable("containerID", cel.StringType), + cel.Variable("path", cel.StringType), + AP(&objCache, config.Config{}), + ) + if err != nil { + t.Fatalf("failed to create env: %v", err) + } + + testCases := []struct { + name string + containerID string + path string + expectedResult bool + }{ + { + name: "Path exists in profile", + containerID: "test-container-id", + path: "/bin/ls", + expectedResult: true, + }, + { + name: "Path does not exist in profile", + containerID: "test-container-id", + path: "/bin/nonexistent", + expectedResult: false, + }, + { + name: "Another path exists in profile", + containerID: "test-container-id", + path: "/usr/bin/curl", + expectedResult: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ast, issues := env.Compile(`ap.was_executed(containerID, path)`) + if issues != nil { + t.Fatalf("failed to compile expression: %v", issues.Err()) + } + + program, err := env.Program(ast) + if err != nil { + t.Fatalf("failed to create program: %v", err) + } + + result, _, err := program.Eval(map[string]interface{}{ + "containerID": tc.containerID, + "path": tc.path, + }) + if err != nil { + t.Fatalf("failed to eval program: %v", err) + } + + actualResult := result.Value().(bool) + assert.Equal(t, tc.expectedResult, actualResult, "ap.was_executed result should match expected value") + }) + } +} + +func TestExecNoProfile(t *testing.T) { + objCache := objectcachev1.RuleObjectCacheMock{} + + env, err := cel.NewEnv( + cel.Variable("containerID", cel.StringType), + cel.Variable("path", cel.StringType), + AP(&objCache, config.Config{}), + ) + if err != nil { + t.Fatalf("failed to create env: %v", err) + } + + ast, issues := env.Compile(`ap.was_executed(containerID, path)`) + if issues != nil { + 
t.Fatalf("failed to compile expression: %v", issues.Err()) + } + + program, err := env.Program(ast) + if err != nil { + t.Fatalf("failed to create program: %v", err) + } + + result, _, err := program.Eval(map[string]interface{}{ + "containerID": "test-container-id", + "path": "/bin/ls", + }) + if err != nil { + t.Fatalf("failed to eval program: %v", err) + } + + actualResult := result.Value().(bool) + assert.False(t, actualResult, "ap.was_executed should return false when no profile is available") +} + +func TestExecWithArgsInProfile(t *testing.T) { + objCache := objectcachev1.RuleObjectCacheMock{ + ContainerIDToSharedData: maps.NewSafeMap[string, *objectcache.WatchedContainerData](), + } + + objCache.SetSharedContainerData("test-container-id", &objectcache.WatchedContainerData{ + ContainerType: objectcache.Container, + ContainerInfos: map[objectcache.ContainerType][]objectcache.ContainerInfo{ + objectcache.Container: { + { + Name: "test-container", + }, + }, + }, + }) + + profile := &v1beta1.ApplicationProfile{} + profile.Spec.Containers = append(profile.Spec.Containers, v1beta1.ApplicationProfileContainer{ + Name: "test-container", + Execs: []v1beta1.ExecCalls{ + { + Path: "/bin/ls", + Args: []string{"-la", "/tmp"}, + }, + { + Path: "/usr/bin/curl", + Args: []string{"-X", "POST", "https://example.com"}, + }, + { + Path: "/bin/echo", + Args: []string{"hello", "world"}, + }, + }, + }) + objCache.SetApplicationProfile(profile) + + env, err := cel.NewEnv( + cel.Variable("containerID", cel.StringType), + cel.Variable("path", cel.StringType), + cel.Variable("args", cel.ListType(cel.StringType)), + AP(&objCache, config.Config{}), + ) + if err != nil { + t.Fatalf("failed to create env: %v", err) + } + + testCases := []struct { + name string + containerID string + path string + args []string + expectedResult bool + }{ + { + name: "Path and args match exactly", + containerID: "test-container-id", + path: "/bin/ls", + args: []string{"-la", "/tmp"}, + expectedResult: true, + }, + { + name: "Path matches but args don't match", + containerID: "test-container-id", + path: "/bin/ls", + args: []string{"-la", "/home"}, + expectedResult: false, + }, + { + name: "Path doesn't exist", + containerID: "test-container-id", + path: "/bin/nonexistent", + args: []string{"arg1", "arg2"}, + expectedResult: false, + }, + { + name: "Complex args match", + containerID: "test-container-id", + path: "/usr/bin/curl", + args: []string{"-X", "POST", "https://example.com"}, + expectedResult: true, + }, + { + name: "Simple args match", + containerID: "test-container-id", + path: "/bin/echo", + args: []string{"hello", "world"}, + expectedResult: true, + }, + { + name: "Empty args list", + containerID: "test-container-id", + path: "/bin/ls", + args: []string{}, + expectedResult: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ast, issues := env.Compile(`ap.was_executed_with_args(containerID, path, args)`) + if issues != nil { + t.Fatalf("failed to compile expression: %v", issues.Err()) + } + + program, err := env.Program(ast) + if err != nil { + t.Fatalf("failed to create program: %v", err) + } + + result, _, err := program.Eval(map[string]interface{}{ + "containerID": tc.containerID, + "path": tc.path, + "args": tc.args, + }) + if err != nil { + t.Fatalf("failed to eval program: %v", err) + } + + actualResult := result.Value().(bool) + assert.Equal(t, tc.expectedResult, actualResult, "ap.was_executed_with_args result should match expected value") + }) + } +} + +func 
TestExecWithArgsNoProfile(t *testing.T) { + objCache := objectcachev1.RuleObjectCacheMock{} + + env, err := cel.NewEnv( + cel.Variable("containerID", cel.StringType), + cel.Variable("path", cel.StringType), + cel.Variable("args", cel.ListType(cel.StringType)), + AP(&objCache, config.Config{}), + ) + if err != nil { + t.Fatalf("failed to create env: %v", err) + } + + ast, issues := env.Compile(`ap.was_executed_with_args(containerID, path, args)`) + if issues != nil { + t.Fatalf("failed to compile expression: %v", issues.Err()) + } + + program, err := env.Program(ast) + if err != nil { + t.Fatalf("failed to create program: %v", err) + } + + result, _, err := program.Eval(map[string]interface{}{ + "containerID": "test-container-id", + "path": "/bin/ls", + "args": []string{"-la", "/tmp"}, + }) + if err != nil { + t.Fatalf("failed to eval program: %v", err) + } + + actualResult := result.Value().(bool) + assert.False(t, actualResult, "ap.was_executed_with_args should return false when no profile is available") +} + +func TestExecWithArgsCompilation(t *testing.T) { + objCache := objectcachev1.RuleObjectCacheMock{} + + env, err := cel.NewEnv( + cel.Variable("containerID", cel.StringType), + cel.Variable("path", cel.StringType), + cel.Variable("args", cel.ListType(cel.StringType)), + AP(&objCache, config.Config{}), + ) + if err != nil { + t.Fatalf("failed to create env: %v", err) + } + + // Test that the function compiles correctly + ast, issues := env.Compile(`ap.was_executed_with_args(containerID, path, args)`) + if issues != nil { + t.Fatalf("failed to compile expression: %v", issues.Err()) + } + + // Test that we can create a program + _, err = env.Program(ast) + if err != nil { + t.Fatalf("failed to create program: %v", err) + } +} diff --git a/pkg/rulemanager/cel/libraries/applicationprofile/http.go b/pkg/rulemanager/cel/libraries/applicationprofile/http.go new file mode 100644 index 000000000..647e4df2b --- /dev/null +++ b/pkg/rulemanager/cel/libraries/applicationprofile/http.go @@ -0,0 +1,207 @@ +package applicationprofile + +import ( + "net/url" + "slices" + "strings" + + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + "github.com/kubescape/node-agent/pkg/rulemanager/cel/libraries/celparse" + "github.com/kubescape/node-agent/pkg/rulemanager/profilehelper" + "github.com/kubescape/storage/pkg/registry/file/dynamicpathdetector" +) + +// wasEndpointAccessed checks if a specific HTTP endpoint was accessed +func (l *apLibrary) wasEndpointAccessed(containerID, endpoint ref.Val) ref.Val { + if l.objectCache == nil { + return types.NewErr("objectCache is nil") + } + + containerIDStr, ok := containerID.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(containerID) + } + endpointStr, ok := endpoint.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(endpoint) + } + + container, err := profilehelper.GetContainerApplicationProfile(l.objectCache, containerIDStr) + if err != nil { + return types.Bool(false) + } + + for _, ep := range container.Endpoints { + if dynamicpathdetector.CompareDynamic(ep.Endpoint, endpointStr) { + return types.Bool(true) + } + } + + return types.Bool(false) +} + +// wasEndpointAccessedWithMethod checks if a specific HTTP endpoint was accessed with a specific method +func (l *apLibrary) wasEndpointAccessedWithMethod(containerID, endpoint, method ref.Val) ref.Val { + if l.objectCache == nil { + return types.NewErr("objectCache is nil") + } + + containerIDStr, ok := containerID.Value().(string) + if !ok { + 
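+ // A non-string argument surfaces a CEL "no such overload" error instead of silently evaluating to false.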
return types.MaybeNoSuchOverloadErr(containerID) + } + endpointStr, ok := endpoint.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(endpoint) + } + methodStr, ok := method.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(method) + } + + container, err := profilehelper.GetContainerApplicationProfile(l.objectCache, containerIDStr) + if err != nil { + return types.Bool(false) + } + + for _, ep := range container.Endpoints { + if dynamicpathdetector.CompareDynamic(ep.Endpoint, endpointStr) { + if slices.Contains(ep.Methods, methodStr) { + return types.Bool(true) + } + } + } + + return types.Bool(false) +} + +// wasEndpointAccessedWithMethods checks if a specific HTTP endpoint was accessed with any of the specified methods +func (l *apLibrary) wasEndpointAccessedWithMethods(containerID, endpoint, methods ref.Val) ref.Val { + if l.objectCache == nil { + return types.NewErr("objectCache is nil") + } + + containerIDStr, ok := containerID.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(containerID) + } + endpointStr, ok := endpoint.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(endpoint) + } + + celMethods, err := celparse.ParseList[string](methods) + if err != nil { + return types.NewErr("failed to parse methods: %v", err) + } + + container, err := profilehelper.GetContainerApplicationProfile(l.objectCache, containerIDStr) + if err != nil { + return types.Bool(false) + } + + for _, ep := range container.Endpoints { + if dynamicpathdetector.CompareDynamic(ep.Endpoint, endpointStr) { + for _, method := range celMethods { + if slices.Contains(ep.Methods, method) { + return types.Bool(true) + } + } + } + } + + return types.Bool(false) +} + +// wasEndpointAccessedWithPrefix checks if any HTTP endpoint with the specified prefix was accessed +func (l *apLibrary) wasEndpointAccessedWithPrefix(containerID, prefix ref.Val) ref.Val { + if l.objectCache == nil { + return types.NewErr("objectCache is nil") + } + + containerIDStr, ok := containerID.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(containerID) + } + prefixStr, ok := prefix.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(prefix) + } + + container, err := profilehelper.GetContainerApplicationProfile(l.objectCache, containerIDStr) + if err != nil { + return types.Bool(false) + } + + for _, ep := range container.Endpoints { + if strings.HasPrefix(ep.Endpoint, prefixStr) { + return types.Bool(true) + } + } + + return types.Bool(false) +} + +// wasEndpointAccessedWithSuffix checks if any HTTP endpoint with the specified suffix was accessed +func (l *apLibrary) wasEndpointAccessedWithSuffix(containerID, suffix ref.Val) ref.Val { + if l.objectCache == nil { + return types.NewErr("objectCache is nil") + } + + containerIDStr, ok := containerID.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(containerID) + } + suffixStr, ok := suffix.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(suffix) + } + + container, err := profilehelper.GetContainerApplicationProfile(l.objectCache, containerIDStr) + if err != nil { + return types.Bool(false) + } + + for _, ep := range container.Endpoints { + if strings.HasSuffix(ep.Endpoint, suffixStr) { + return types.Bool(true) + } + } + + return types.Bool(false) +} + +// wasHostAccessed checks if a specific host was accessed via HTTP endpoints or network connections +func (l *apLibrary) wasHostAccessed(containerID, host ref.Val) ref.Val { + if l.objectCache == nil { + return 
types.NewErr("objectCache is nil") + } + + containerIDStr, ok := containerID.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(containerID) + } + hostStr, ok := host.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(host) + } + + // Check HTTP endpoints for host access + container, err := profilehelper.GetContainerApplicationProfile(l.objectCache, containerIDStr) + if err == nil { + for _, ep := range container.Endpoints { + // Parse the endpoint URL to extract host + if parsedURL, err := url.Parse(ep.Endpoint); err == nil && parsedURL.Host != "" { + if parsedURL.Host == hostStr || parsedURL.Hostname() == hostStr { + return types.Bool(true) + } + } + // Also check if the endpoint contains the host as a substring (for cases where it's not a full URL) + if strings.Contains(ep.Endpoint, hostStr) { + return types.Bool(true) + } + } + } + return types.Bool(false) +} diff --git a/pkg/rulemanager/cel/libraries/applicationprofile/integration_test.go b/pkg/rulemanager/cel/libraries/applicationprofile/integration_test.go new file mode 100644 index 000000000..885ace3f4 --- /dev/null +++ b/pkg/rulemanager/cel/libraries/applicationprofile/integration_test.go @@ -0,0 +1,142 @@ +package applicationprofile + +import ( + "testing" + + "github.com/google/cel-go/cel" + "github.com/goradd/maps" + "github.com/kubescape/node-agent/pkg/config" + "github.com/kubescape/node-agent/pkg/objectcache" + objectcachev1 "github.com/kubescape/node-agent/pkg/objectcache/v1" + "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" + "github.com/stretchr/testify/assert" +) + +func TestIntegrationWithAllFunctions(t *testing.T) { + objCache := objectcachev1.RuleObjectCacheMock{ + ContainerIDToSharedData: maps.NewSafeMap[string, *objectcache.WatchedContainerData](), + } + + objCache.SetSharedContainerData("test-container-id", &objectcache.WatchedContainerData{ + ContainerType: objectcache.Container, + ContainerInfos: map[objectcache.ContainerType][]objectcache.ContainerInfo{ + objectcache.Container: { + { + Name: "test-container", + }, + }, + }, + }) + + profile := &v1beta1.ApplicationProfile{} + profile.Spec.Containers = append(profile.Spec.Containers, v1beta1.ApplicationProfileContainer{ + Name: "test-container", + Execs: []v1beta1.ExecCalls{ + { + Path: "/bin/bash", + Args: []string{"/bin/bash", "-c", "curl http://example.com"}, + }, + { + Path: "/usr/bin/ls", + Args: []string{"/usr/bin/ls", "-la"}, + }, + }, + Opens: []v1beta1.OpenCalls{ + { + Path: "/etc/passwd", + Flags: []string{"O_RDONLY"}, + }, + { + Path: "/tmp/suspicious.txt", + Flags: []string{"O_WRONLY", "O_CREAT"}, + }, + }, + Syscalls: []string{ + "open", + "read", + "write", + "execve", + "fork", + }, + Capabilities: []string{ + "NET_ADMIN", + "SYS_ADMIN", + "SETUID", + }, + }) + objCache.SetApplicationProfile(profile) + + env, err := cel.NewEnv( + cel.Variable("containerID", cel.StringType), + AP(&objCache, config.Config{}), + ) + if err != nil { + t.Fatalf("failed to create env: %v", err) + } + + testCases := []struct { + name string + expression string + expectedResult bool + }{ + { + name: "Check suspicious execution pattern", + expression: `ap.was_executed_with_args(containerID, "/bin/bash", ["/bin/bash", "-c", "curl http://example.com"])`, + expectedResult: true, + }, + { + name: "Check file access pattern", + expression: `ap.was_path_opened_with_flags(containerID, "/etc/passwd", ["O_RDONLY"])`, + expectedResult: true, + }, + { + name: "Check dangerous syscall usage", + expression: 
`ap.was_syscall_used(containerID, "execve")`, + expectedResult: true, + }, + { + name: "Check dangerous capability usage", + expression: `ap.was_capability_used(containerID, "SYS_ADMIN")`, + expectedResult: true, + }, + { + name: "Complex security check - suspicious behavior", + expression: `ap.was_executed_with_args(containerID, "/bin/bash", ["/bin/bash", "-c", "curl http://example.com"]) && ap.was_path_opened(containerID, "/etc/passwd") && ap.was_syscall_used(containerID, "execve")`, + expectedResult: true, + }, + { + name: "Complex security check - dangerous capabilities", + expression: `ap.was_capability_used(containerID, "NET_ADMIN") || ap.was_capability_used(containerID, "SYS_ADMIN")`, + expectedResult: true, + }, + { + name: "Check non-existent operations", + expression: `ap.was_executed(containerID, "/bin/nonexistent") || ap.was_syscall_used(containerID, "nonexistent_syscall")`, + expectedResult: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ast, issues := env.Compile(tc.expression) + if issues != nil { + t.Fatalf("failed to compile expression: %v", issues.Err()) + } + + program, err := env.Program(ast) + if err != nil { + t.Fatalf("failed to create program: %v", err) + } + + result, _, err := program.Eval(map[string]interface{}{ + "containerID": "test-container-id", + }) + if err != nil { + t.Fatalf("failed to eval program: %v", err) + } + + actualResult := result.Value().(bool) + assert.Equal(t, tc.expectedResult, actualResult, "Expression result should match expected value for: %s", tc.expression) + }) + } +} diff --git a/pkg/rulemanager/cel/libraries/applicationprofile/open.go b/pkg/rulemanager/cel/libraries/applicationprofile/open.go new file mode 100644 index 000000000..5c4b95588 --- /dev/null +++ b/pkg/rulemanager/cel/libraries/applicationprofile/open.go @@ -0,0 +1,143 @@ +package applicationprofile + +import ( + "strings" + + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + "github.com/kubescape/node-agent/pkg/rulemanager/cel/libraries/celparse" + "github.com/kubescape/node-agent/pkg/rulemanager/profilehelper" + "github.com/kubescape/storage/pkg/registry/file/dynamicpathdetector" +) + +func (l *apLibrary) wasPathOpened(containerID, path ref.Val) ref.Val { + if l.objectCache == nil { + return types.NewErr("objectCache is nil") + } + + containerIDStr, ok := containerID.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(containerID) + } + pathStr, ok := path.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(path) + } + + container, err := profilehelper.GetContainerApplicationProfile(l.objectCache, containerIDStr) + if err != nil { + return types.Bool(false) + } + + for _, open := range container.Opens { + if dynamicpathdetector.CompareDynamic(open.Path, pathStr) { + return types.Bool(true) + } + } + + return types.Bool(false) +} + +func (l *apLibrary) wasPathOpenedWithFlags(containerID, path, flags ref.Val) ref.Val { + if l.objectCache == nil { + return types.NewErr("objectCache is nil") + } + + containerIDStr, ok := containerID.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(containerID) + } + + pathStr, ok := path.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(path) + } + + celFlags, err := celparse.ParseList[string](flags) + if err != nil { + return types.NewErr("failed to parse flags: %v", err) + } + + container, err := profilehelper.GetContainerApplicationProfile(l.objectCache, containerIDStr) + if err != nil { + return 
types.Bool(false) + } + + for _, open := range container.Opens { + if dynamicpathdetector.CompareDynamic(open.Path, pathStr) { + if compareOpenFlags(celFlags, open.Flags) { + return types.Bool(true) + } + } + } + + return types.Bool(false) +} + +func (l *apLibrary) wasPathOpenedWithSuffix(containerID, suffix ref.Val) ref.Val { + if l.objectCache == nil { + return types.NewErr("objectCache is nil") + } + + containerIDStr, ok := containerID.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(containerID) + } + suffixStr, ok := suffix.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(suffix) + } + + container, err := profilehelper.GetContainerApplicationProfile(l.objectCache, containerIDStr) + if err != nil { + return types.Bool(false) + } + + for _, open := range container.Opens { + if strings.HasSuffix(open.Path, suffixStr) { + return types.Bool(true) + } + } + + return types.Bool(false) +} + +func (l *apLibrary) wasPathOpenedWithPrefix(containerID, prefix ref.Val) ref.Val { + if l.objectCache == nil { + return types.NewErr("objectCache is nil") + } + + containerIDStr, ok := containerID.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(containerID) + } + prefixStr, ok := prefix.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(prefix) + } + + container, err := profilehelper.GetContainerApplicationProfile(l.objectCache, containerIDStr) + if err != nil { + return types.Bool(false) + } + + for _, open := range container.Opens { + if strings.HasPrefix(open.Path, prefixStr) { + return types.Bool(true) + } + } + + return types.Bool(false) +} + +func compareOpenFlags(eventOpenFlags []string, profileOpenFlags []string) bool { + found := 0 + for _, eventOpenFlag := range eventOpenFlags { + for _, profileOpenFlag := range profileOpenFlags { + if eventOpenFlag == profileOpenFlag { + found += 1 + } + } + } + return found == len(eventOpenFlags) +} diff --git a/pkg/rulemanager/cel/libraries/applicationprofile/open_test.go b/pkg/rulemanager/cel/libraries/applicationprofile/open_test.go new file mode 100644 index 000000000..86bad2b1a --- /dev/null +++ b/pkg/rulemanager/cel/libraries/applicationprofile/open_test.go @@ -0,0 +1,738 @@ +package applicationprofile + +import ( + "testing" + + "github.com/google/cel-go/cel" + "github.com/goradd/maps" + "github.com/kubescape/node-agent/pkg/config" + "github.com/kubescape/node-agent/pkg/objectcache" + objectcachev1 "github.com/kubescape/node-agent/pkg/objectcache/v1" + "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" + "github.com/stretchr/testify/assert" +) + +func TestOpenInProfile(t *testing.T) { + objCache := objectcachev1.RuleObjectCacheMock{ + ContainerIDToSharedData: maps.NewSafeMap[string, *objectcache.WatchedContainerData](), + } + + objCache.SetSharedContainerData("test-container-id", &objectcache.WatchedContainerData{ + ContainerType: objectcache.Container, + ContainerInfos: map[objectcache.ContainerType][]objectcache.ContainerInfo{ + objectcache.Container: { + { + Name: "test-container", + }, + }, + }, + }) + + profile := &v1beta1.ApplicationProfile{} + profile.Spec.Containers = append(profile.Spec.Containers, v1beta1.ApplicationProfileContainer{ + Name: "test-container", + Opens: []v1beta1.OpenCalls{ + { + Path: "/etc/passwd", + Flags: []string{"O_RDONLY"}, + }, + { + Path: "/tmp/test.txt", + Flags: []string{"O_WRONLY", "O_CREAT"}, + }, + }, + }) + objCache.SetApplicationProfile(profile) + + env, err := cel.NewEnv( + cel.Variable("containerID", cel.StringType), + 
cel.Variable("path", cel.StringType), + AP(&objCache, config.Config{}), + ) + if err != nil { + t.Fatalf("failed to create env: %v", err) + } + + testCases := []struct { + name string + containerID string + path string + expectedResult bool + }{ + { + name: "Path exists in profile", + containerID: "test-container-id", + path: "/etc/passwd", + expectedResult: true, + }, + { + name: "Path does not exist in profile", + containerID: "test-container-id", + path: "/etc/nonexistent", + expectedResult: false, + }, + { + name: "Another path exists in profile", + containerID: "test-container-id", + path: "/tmp/test.txt", + expectedResult: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ast, issues := env.Compile(`ap.was_path_opened(containerID, path)`) + if issues != nil { + t.Fatalf("failed to compile expression: %v", issues.Err()) + } + + program, err := env.Program(ast) + if err != nil { + t.Fatalf("failed to create program: %v", err) + } + + result, _, err := program.Eval(map[string]interface{}{ + "containerID": tc.containerID, + "path": tc.path, + }) + if err != nil { + t.Fatalf("failed to eval program: %v", err) + } + + actualResult := result.Value().(bool) + assert.Equal(t, tc.expectedResult, actualResult, "ap.was_path_opened result should match expected value") + }) + } +} + +func TestOpenNoProfile(t *testing.T) { + objCache := objectcachev1.RuleObjectCacheMock{} + + env, err := cel.NewEnv( + cel.Variable("containerID", cel.StringType), + cel.Variable("path", cel.StringType), + AP(&objCache, config.Config{}), + ) + if err != nil { + t.Fatalf("failed to create env: %v", err) + } + + ast, issues := env.Compile(`ap.was_path_opened(containerID, path)`) + if issues != nil { + t.Fatalf("failed to compile expression: %v", issues.Err()) + } + + program, err := env.Program(ast) + if err != nil { + t.Fatalf("failed to create program: %v", err) + } + + result, _, err := program.Eval(map[string]interface{}{ + "containerID": "test-container-id", + "path": "/etc/passwd", + }) + if err != nil { + t.Fatalf("failed to eval program: %v", err) + } + + actualResult := result.Value().(bool) + assert.False(t, actualResult, "ap.was_path_opened should return false when no profile is available") +} + +func TestOpenWithFlagsInProfile(t *testing.T) { + objCache := objectcachev1.RuleObjectCacheMock{ + ContainerIDToSharedData: maps.NewSafeMap[string, *objectcache.WatchedContainerData](), + } + + objCache.SetSharedContainerData("test-container-id", &objectcache.WatchedContainerData{ + ContainerType: objectcache.Container, + ContainerInfos: map[objectcache.ContainerType][]objectcache.ContainerInfo{ + objectcache.Container: { + { + Name: "test-container", + }, + }, + }, + }) + + profile := &v1beta1.ApplicationProfile{} + profile.Spec.Containers = append(profile.Spec.Containers, v1beta1.ApplicationProfileContainer{ + Name: "test-container", + Opens: []v1beta1.OpenCalls{ + { + Path: "/etc/passwd", + Flags: []string{"O_RDONLY"}, + }, + { + Path: "/tmp/test.txt", + Flags: []string{"O_WRONLY", "O_CREAT"}, + }, + { + Path: "/var/log/app.log", + Flags: []string{"O_RDWR", "O_APPEND"}, + }, + }, + }) + objCache.SetApplicationProfile(profile) + + env, err := cel.NewEnv( + cel.Variable("containerID", cel.StringType), + cel.Variable("path", cel.StringType), + cel.Variable("flags", cel.ListType(cel.StringType)), + AP(&objCache, config.Config{}), + ) + if err != nil { + t.Fatalf("failed to create env: %v", err) + } + + testCases := []struct { + name string + containerID string + path string + 
flags []string + expectedResult bool + }{ + { + name: "Path and flags match exactly", + containerID: "test-container-id", + path: "/etc/passwd", + flags: []string{"O_RDONLY"}, + expectedResult: true, + }, + { + name: "Path matches but flags don't match", + containerID: "test-container-id", + path: "/etc/passwd", + flags: []string{"O_WRONLY"}, + expectedResult: false, + }, + { + name: "Path doesn't exist", + containerID: "test-container-id", + path: "/etc/nonexistent", + flags: []string{"O_RDONLY"}, + expectedResult: false, + }, + { + name: "Multiple flags match", + containerID: "test-container-id", + path: "/tmp/test.txt", + flags: []string{"O_WRONLY", "O_CREAT"}, + expectedResult: true, + }, + { + name: "Multiple flags in different order", + containerID: "test-container-id", + path: "/tmp/test.txt", + flags: []string{"O_CREAT", "O_WRONLY"}, + expectedResult: true, + }, + { + name: "Partial flags match", + containerID: "test-container-id", + path: "/tmp/test.txt", + flags: []string{"O_WRONLY"}, + expectedResult: true, + }, + { + name: "Empty flags list", + containerID: "test-container-id", + path: "/etc/passwd", + flags: []string{}, + expectedResult: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ast, issues := env.Compile(`ap.was_path_opened_with_flags(containerID, path, flags)`) + if issues != nil { + t.Fatalf("failed to compile expression: %v", issues.Err()) + } + + program, err := env.Program(ast) + if err != nil { + t.Fatalf("failed to create program: %v", err) + } + + result, _, err := program.Eval(map[string]interface{}{ + "containerID": tc.containerID, + "path": tc.path, + "flags": tc.flags, + }) + if err != nil { + t.Fatalf("failed to eval program: %v", err) + } + + actualResult := result.Value().(bool) + assert.Equal(t, tc.expectedResult, actualResult, "ap.was_path_opened_with_flags result should match expected value") + }) + } +} + +func TestOpenWithFlagsNoProfile(t *testing.T) { + objCache := objectcachev1.RuleObjectCacheMock{} + + env, err := cel.NewEnv( + cel.Variable("containerID", cel.StringType), + cel.Variable("path", cel.StringType), + cel.Variable("flags", cel.ListType(cel.StringType)), + AP(&objCache, config.Config{}), + ) + if err != nil { + t.Fatalf("failed to create env: %v", err) + } + + ast, issues := env.Compile(`ap.was_path_opened_with_flags(containerID, path, flags)`) + if issues != nil { + t.Fatalf("failed to compile expression: %v", issues.Err()) + } + + program, err := env.Program(ast) + if err != nil { + t.Fatalf("failed to create program: %v", err) + } + + result, _, err := program.Eval(map[string]interface{}{ + "containerID": "test-container-id", + "path": "/etc/passwd", + "flags": []string{"O_RDONLY"}, + }) + if err != nil { + t.Fatalf("failed to eval program: %v", err) + } + + actualResult := result.Value().(bool) + assert.False(t, actualResult, "ap.was_path_opened_with_flags should return false when no profile is available") +} + +func TestOpenWithFlagsCompilation(t *testing.T) { + objCache := objectcachev1.RuleObjectCacheMock{} + + env, err := cel.NewEnv( + cel.Variable("containerID", cel.StringType), + cel.Variable("path", cel.StringType), + cel.Variable("flags", cel.ListType(cel.StringType)), + AP(&objCache, config.Config{}), + ) + if err != nil { + t.Fatalf("failed to create env: %v", err) + } + + // Test that the function compiles correctly + ast, issues := env.Compile(`ap.was_path_opened_with_flags(containerID, path, flags)`) + if issues != nil { + t.Fatalf("failed to compile expression: %v", 
issues.Err()) + } + + // Test that we can create a program + _, err = env.Program(ast) + if err != nil { + t.Fatalf("failed to create program: %v", err) + } +} + +func TestOpenCompilation(t *testing.T) { + objCache := objectcachev1.RuleObjectCacheMock{} + + env, err := cel.NewEnv( + cel.Variable("containerID", cel.StringType), + cel.Variable("path", cel.StringType), + AP(&objCache, config.Config{}), + ) + if err != nil { + t.Fatalf("failed to create env: %v", err) + } + + // Test that the function compiles correctly + ast, issues := env.Compile(`ap.was_path_opened(containerID, path)`) + if issues != nil { + t.Fatalf("failed to compile expression: %v", issues.Err()) + } + + // Test that we can create a program + _, err = env.Program(ast) + if err != nil { + t.Fatalf("failed to create program: %v", err) + } +} + +func TestOpenWithSuffixInProfile(t *testing.T) { + objCache := objectcachev1.RuleObjectCacheMock{ + ContainerIDToSharedData: maps.NewSafeMap[string, *objectcache.WatchedContainerData](), + } + + objCache.SetSharedContainerData("test-container-id", &objectcache.WatchedContainerData{ + ContainerType: objectcache.Container, + ContainerInfos: map[objectcache.ContainerType][]objectcache.ContainerInfo{ + objectcache.Container: { + { + Name: "test-container", + }, + }, + }, + }) + + profile := &v1beta1.ApplicationProfile{} + profile.Spec.Containers = append(profile.Spec.Containers, v1beta1.ApplicationProfileContainer{ + Name: "test-container", + Opens: []v1beta1.OpenCalls{ + { + Path: "/etc/passwd", + Flags: []string{"O_RDONLY"}, + }, + { + Path: "/tmp/test.txt", + Flags: []string{"O_WRONLY", "O_CREAT"}, + }, + { + Path: "/var/log/app.log", + Flags: []string{"O_RDWR", "O_APPEND"}, + }, + { + Path: "/home/user/config.json", + Flags: []string{"O_RDONLY"}, + }, + }, + }) + objCache.SetApplicationProfile(profile) + + env, err := cel.NewEnv( + cel.Variable("containerID", cel.StringType), + cel.Variable("suffix", cel.StringType), + AP(&objCache, config.Config{}), + ) + if err != nil { + t.Fatalf("failed to create env: %v", err) + } + + testCases := []struct { + name string + containerID string + suffix string + expectedResult bool + }{ + { + name: "Suffix matches .txt file", + containerID: "test-container-id", + suffix: ".txt", + expectedResult: true, + }, + { + name: "Suffix matches .log file", + containerID: "test-container-id", + suffix: ".log", + expectedResult: true, + }, + { + name: "Suffix matches .json file", + containerID: "test-container-id", + suffix: ".json", + expectedResult: true, + }, + { + name: "Suffix doesn't match any file", + containerID: "test-container-id", + suffix: ".xml", + expectedResult: false, + }, + { + name: "Empty suffix", + containerID: "test-container-id", + suffix: "", + expectedResult: true, // All paths end with empty string + }, + { + name: "Suffix matches exact path", + containerID: "test-container-id", + suffix: "/etc/passwd", + expectedResult: true, + }, + { + name: "Partial suffix doesn't match", + containerID: "test-container-id", + suffix: "xyz", + expectedResult: false, // None of the paths end with "xyz" + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ast, issues := env.Compile(`ap.was_path_opened_with_suffix(containerID, suffix)`) + if issues != nil { + t.Fatalf("failed to compile expression: %v", issues.Err()) + } + + program, err := env.Program(ast) + if err != nil { + t.Fatalf("failed to create program: %v", err) + } + + result, _, err := program.Eval(map[string]interface{}{ + "containerID": tc.containerID, + 
"suffix": tc.suffix, + }) + if err != nil { + t.Fatalf("failed to eval program: %v", err) + } + + actualResult := result.Value().(bool) + assert.Equal(t, tc.expectedResult, actualResult, "ap.was_path_opened_with_suffix result should match expected value") + }) + } +} + +func TestOpenWithSuffixNoProfile(t *testing.T) { + objCache := objectcachev1.RuleObjectCacheMock{} + + env, err := cel.NewEnv( + cel.Variable("containerID", cel.StringType), + cel.Variable("suffix", cel.StringType), + AP(&objCache, config.Config{}), + ) + if err != nil { + t.Fatalf("failed to create env: %v", err) + } + + ast, issues := env.Compile(`ap.was_path_opened_with_suffix(containerID, suffix)`) + if issues != nil { + t.Fatalf("failed to compile expression: %v", issues.Err()) + } + + program, err := env.Program(ast) + if err != nil { + t.Fatalf("failed to create program: %v", err) + } + + result, _, err := program.Eval(map[string]interface{}{ + "containerID": "test-container-id", + "suffix": ".txt", + }) + if err != nil { + t.Fatalf("failed to eval program: %v", err) + } + + actualResult := result.Value().(bool) + assert.False(t, actualResult, "ap.was_path_opened_with_suffix should return false when no profile is available") +} + +func TestOpenWithPrefixInProfile(t *testing.T) { + objCache := objectcachev1.RuleObjectCacheMock{ + ContainerIDToSharedData: maps.NewSafeMap[string, *objectcache.WatchedContainerData](), + } + + objCache.SetSharedContainerData("test-container-id", &objectcache.WatchedContainerData{ + ContainerType: objectcache.Container, + ContainerInfos: map[objectcache.ContainerType][]objectcache.ContainerInfo{ + objectcache.Container: { + { + Name: "test-container", + }, + }, + }, + }) + + profile := &v1beta1.ApplicationProfile{} + profile.Spec.Containers = append(profile.Spec.Containers, v1beta1.ApplicationProfileContainer{ + Name: "test-container", + Opens: []v1beta1.OpenCalls{ + { + Path: "/etc/passwd", + Flags: []string{"O_RDONLY"}, + }, + { + Path: "/tmp/test.txt", + Flags: []string{"O_WRONLY", "O_CREAT"}, + }, + { + Path: "/var/log/app.log", + Flags: []string{"O_RDWR", "O_APPEND"}, + }, + { + Path: "/home/user/config.json", + Flags: []string{"O_RDONLY"}, + }, + }, + }) + objCache.SetApplicationProfile(profile) + + env, err := cel.NewEnv( + cel.Variable("containerID", cel.StringType), + cel.Variable("prefix", cel.StringType), + AP(&objCache, config.Config{}), + ) + if err != nil { + t.Fatalf("failed to create env: %v", err) + } + + testCases := []struct { + name string + containerID string + prefix string + expectedResult bool + }{ + { + name: "Prefix matches /etc", + containerID: "test-container-id", + prefix: "/etc", + expectedResult: true, + }, + { + name: "Prefix matches /tmp", + containerID: "test-container-id", + prefix: "/tmp", + expectedResult: true, + }, + { + name: "Prefix matches /var", + containerID: "test-container-id", + prefix: "/var", + expectedResult: true, + }, + { + name: "Prefix matches /home", + containerID: "test-container-id", + prefix: "/home", + expectedResult: true, + }, + { + name: "Prefix doesn't match any path", + containerID: "test-container-id", + prefix: "/usr", + expectedResult: false, + }, + { + name: "Empty prefix", + containerID: "test-container-id", + prefix: "", + expectedResult: true, // All paths start with empty string + }, + { + name: "Prefix matches exact path", + containerID: "test-container-id", + prefix: "/etc/passwd", + expectedResult: true, + }, + { + name: "Partial prefix doesn't match", + containerID: "test-container-id", + prefix: "etc", + 
expectedResult: false, // /etc/passwd doesn't start with "etc" + }, + { + name: "Prefix with trailing slash", + containerID: "test-container-id", + prefix: "/etc/", + expectedResult: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ast, issues := env.Compile(`ap.was_path_opened_with_prefix(containerID, prefix)`) + if issues != nil { + t.Fatalf("failed to compile expression: %v", issues.Err()) + } + + program, err := env.Program(ast) + if err != nil { + t.Fatalf("failed to create program: %v", err) + } + + result, _, err := program.Eval(map[string]interface{}{ + "containerID": tc.containerID, + "prefix": tc.prefix, + }) + if err != nil { + t.Fatalf("failed to eval program: %v", err) + } + + actualResult := result.Value().(bool) + assert.Equal(t, tc.expectedResult, actualResult, "ap.was_path_opened_with_prefix result should match expected value") + }) + } +} + +func TestOpenWithPrefixNoProfile(t *testing.T) { + objCache := objectcachev1.RuleObjectCacheMock{} + + env, err := cel.NewEnv( + cel.Variable("containerID", cel.StringType), + cel.Variable("prefix", cel.StringType), + AP(&objCache, config.Config{}), + ) + if err != nil { + t.Fatalf("failed to create env: %v", err) + } + + ast, issues := env.Compile(`ap.was_path_opened_with_prefix(containerID, prefix)`) + if issues != nil { + t.Fatalf("failed to compile expression: %v", issues.Err()) + } + + program, err := env.Program(ast) + if err != nil { + t.Fatalf("failed to create program: %v", err) + } + + result, _, err := program.Eval(map[string]interface{}{ + "containerID": "test-container-id", + "prefix": "/etc", + }) + if err != nil { + t.Fatalf("failed to eval program: %v", err) + } + + actualResult := result.Value().(bool) + assert.False(t, actualResult, "ap.was_path_opened_with_prefix should return false when no profile is available") +} + +func TestOpenWithSuffixCompilation(t *testing.T) { + objCache := objectcachev1.RuleObjectCacheMock{} + + env, err := cel.NewEnv( + cel.Variable("containerID", cel.StringType), + cel.Variable("suffix", cel.StringType), + AP(&objCache, config.Config{}), + ) + if err != nil { + t.Fatalf("failed to create env: %v", err) + } + + // Test that the function compiles correctly + ast, issues := env.Compile(`ap.was_path_opened_with_suffix(containerID, suffix)`) + if issues != nil { + t.Fatalf("failed to compile expression: %v", issues.Err()) + } + + // Test that we can create a program + _, err = env.Program(ast) + if err != nil { + t.Fatalf("failed to create program: %v", err) + } +} + +func TestOpenWithPrefixCompilation(t *testing.T) { + objCache := objectcachev1.RuleObjectCacheMock{} + + env, err := cel.NewEnv( + cel.Variable("containerID", cel.StringType), + cel.Variable("prefix", cel.StringType), + AP(&objCache, config.Config{}), + ) + if err != nil { + t.Fatalf("failed to create env: %v", err) + } + + // Test that the function compiles correctly + ast, issues := env.Compile(`ap.was_path_opened_with_prefix(containerID, prefix)`) + if issues != nil { + t.Fatalf("failed to compile expression: %v", issues.Err()) + } + + // Test that we can create a program + _, err = env.Program(ast) + if err != nil { + t.Fatalf("failed to create program: %v", err) + } +} diff --git a/pkg/rulemanager/cel/libraries/applicationprofile/syscall.go b/pkg/rulemanager/cel/libraries/applicationprofile/syscall.go new file mode 100644 index 000000000..9f86ef032 --- /dev/null +++ b/pkg/rulemanager/cel/libraries/applicationprofile/syscall.go @@ -0,0 +1,35 @@ +package applicationprofile + +import ( 
+ "slices" + + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + "github.com/kubescape/node-agent/pkg/rulemanager/profilehelper" +) + +func (l *apLibrary) wasSyscallUsed(containerID, syscallName ref.Val) ref.Val { + if l.objectCache == nil { + return types.NewErr("objectCache is nil") + } + + containerIDStr, ok := containerID.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(containerID) + } + syscallNameStr, ok := syscallName.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(syscallName) + } + + container, err := profilehelper.GetContainerApplicationProfile(l.objectCache, containerIDStr) + if err != nil { + return types.Bool(false) + } + + if slices.Contains(container.Syscalls, syscallNameStr) { + return types.Bool(true) + } + + return types.Bool(false) +} diff --git a/pkg/rulemanager/cel/libraries/applicationprofile/syscall_test.go b/pkg/rulemanager/cel/libraries/applicationprofile/syscall_test.go new file mode 100644 index 000000000..9b9743a8d --- /dev/null +++ b/pkg/rulemanager/cel/libraries/applicationprofile/syscall_test.go @@ -0,0 +1,168 @@ +package applicationprofile + +import ( + "testing" + "time" + + "github.com/google/cel-go/cel" + "github.com/goradd/maps" + "github.com/kubescape/node-agent/pkg/config" + "github.com/kubescape/node-agent/pkg/objectcache" + objectcachev1 "github.com/kubescape/node-agent/pkg/objectcache/v1" + "github.com/kubescape/node-agent/pkg/rulemanager/cel/libraries/cache" + "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" + "github.com/stretchr/testify/assert" +) + +func TestSyscallInProfile(t *testing.T) { + objCache := objectcachev1.RuleObjectCacheMock{ + ContainerIDToSharedData: maps.NewSafeMap[string, *objectcache.WatchedContainerData](), + } + + objCache.SetSharedContainerData("test-container-id", &objectcache.WatchedContainerData{ + ContainerType: objectcache.Container, + ContainerInfos: map[objectcache.ContainerType][]objectcache.ContainerInfo{ + objectcache.Container: { + { + Name: "test-container", + }, + }, + }, + }) + + profile := &v1beta1.ApplicationProfile{} + profile.Spec.Containers = append(profile.Spec.Containers, v1beta1.ApplicationProfileContainer{ + Name: "test-container", + Syscalls: []string{ + "open", + "read", + "write", + "close", + }, + }) + objCache.SetApplicationProfile(profile) + + env, err := cel.NewEnv( + cel.Variable("containerID", cel.StringType), + cel.Variable("syscallName", cel.StringType), + AP(&objCache, config.Config{}), + ) + if err != nil { + t.Fatalf("failed to create env: %v", err) + } + + testCases := []struct { + name string + containerID string + syscallName string + expectedResult bool + }{ + { + name: "Syscall exists in profile", + containerID: "test-container-id", + syscallName: "open", + expectedResult: true, + }, + { + name: "Syscall does not exist in profile", + containerID: "test-container-id", + syscallName: "fork", + expectedResult: false, + }, + { + name: "Another syscall exists in profile", + containerID: "test-container-id", + syscallName: "read", + expectedResult: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ast, issues := env.Compile(`ap.was_syscall_used(containerID, syscallName)`) + if issues != nil { + t.Fatalf("failed to compile expression: %v", issues.Err()) + } + + program, err := env.Program(ast) + if err != nil { + t.Fatalf("failed to create program: %v", err) + } + + result, _, err := program.Eval(map[string]interface{}{ + "containerID": tc.containerID, + 
"syscallName": tc.syscallName, + }) + if err != nil { + t.Fatalf("failed to eval program: %v", err) + } + + actualResult := result.Value().(bool) + assert.Equal(t, tc.expectedResult, actualResult, "ap.was_syscall_used result should match expected value") + }) + } +} + +func TestSyscallNoProfile(t *testing.T) { + objCache := objectcachev1.RuleObjectCacheMock{} + + env, err := cel.NewEnv( + cel.Variable("containerID", cel.StringType), + cel.Variable("syscallName", cel.StringType), + AP(&objCache, config.Config{}), + ) + if err != nil { + t.Fatalf("failed to create env: %v", err) + } + + ast, issues := env.Compile(`ap.was_syscall_used(containerID, syscallName)`) + if issues != nil { + t.Fatalf("failed to compile expression: %v", issues.Err()) + } + + program, err := env.Program(ast) + if err != nil { + t.Fatalf("failed to create program: %v", err) + } + + result, _, err := program.Eval(map[string]interface{}{ + "containerID": "test-container-id", + "syscallName": "open", + }) + if err != nil { + t.Fatalf("failed to eval program: %v", err) + } + + actualResult := result.Value().(bool) + assert.False(t, actualResult, "ap.was_syscall_used should return false when no profile is available") +} + +func TestSyscallCompilation(t *testing.T) { + objCache := objectcachev1.RuleObjectCacheMock{} + + env, err := cel.NewEnv( + cel.Variable("containerID", cel.StringType), + cel.Variable("syscallName", cel.StringType), + AP(&objCache, config.Config{ + CelConfigCache: cache.FunctionCacheConfig{ + MaxSize: 1000, + TTL: 1 * time.Minute, + }, + }), + ) + if err != nil { + t.Fatalf("failed to create env: %v", err) + } + + // Test that the function compiles correctly + ast, issues := env.Compile(`ap.was_syscall_used(containerID, syscallName)`) + if issues != nil { + t.Fatalf("failed to compile expression: %v", issues.Err()) + } + + // Test that we can create a program + _, err = env.Program(ast) + if err != nil { + t.Fatalf("failed to create program: %v", err) + } +} diff --git a/pkg/rulemanager/cel/libraries/cache/function_cache.go b/pkg/rulemanager/cel/libraries/cache/function_cache.go new file mode 100644 index 000000000..2d6698ca7 --- /dev/null +++ b/pkg/rulemanager/cel/libraries/cache/function_cache.go @@ -0,0 +1,107 @@ +package cache + +import ( + "fmt" + "strings" + "time" + + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + "github.com/hashicorp/golang-lru/v2/expirable" +) + +type FunctionCacheConfig struct { + MaxSize int `mapstructure:"maxSize"` + TTL time.Duration `mapstructure:"ttl"` +} + +func DefaultFunctionCacheConfig() FunctionCacheConfig { + return FunctionCacheConfig{ + MaxSize: 1000, + TTL: time.Minute, + } +} + +type FunctionCache struct { + cache *expirable.LRU[string, ref.Val] +} + +func NewFunctionCache(config FunctionCacheConfig) *FunctionCache { + if config.MaxSize <= 0 { + config.MaxSize = 1000 + } + if config.TTL <= 0 { + config.TTL = time.Minute + } + + cache := expirable.NewLRU[string, ref.Val](config.MaxSize, nil, config.TTL) + + return &FunctionCache{ + cache: cache, + } +} + +type CelFunction func(...ref.Val) ref.Val + +func (fc *FunctionCache) WithCache(fn CelFunction, functionName string) CelFunction { + return func(values ...ref.Val) ref.Val { + key := fc.generateCacheKey(functionName, values...) + + if cached, found := fc.cache.Get(key); found { + return cached + } + + result := fn(values...) 
+ + if !types.IsError(result) { + fc.cache.Add(key, result) + } + + return result + } +} + +func (fc *FunctionCache) generateCacheKey(functionName string, values ...ref.Val) string { + var keyParts []string + keyParts = append(keyParts, functionName) + + for _, val := range values { + keyParts = append(keyParts, fc.valueToString(val)) + } + + // Don't sort - maintain order with function name first, then arguments in order + return strings.Join(keyParts, "|") +} + +func (fc *FunctionCache) valueToString(val ref.Val) string { + if val == nil { + return "nil" + } + + switch v := val.Value().(type) { + case string: + return v + case bool: + return fmt.Sprintf("%t", v) + case int64: + return fmt.Sprintf("%d", v) + case float64: + return fmt.Sprintf("%f", v) + case []interface{}: + var parts []string + for _, item := range v { + parts = append(parts, fmt.Sprintf("%v", item)) + } + return "[" + strings.Join(parts, ",") + "]" + default: + return fmt.Sprintf("%v", v) + } +} + +func (fc *FunctionCache) ClearCache() { + fc.cache.Purge() +} + +func (fc *FunctionCache) GetCacheStats() (size int) { + return fc.cache.Len() +} diff --git a/pkg/rulemanager/cel/libraries/cache/function_cache_test.go b/pkg/rulemanager/cel/libraries/cache/function_cache_test.go new file mode 100644 index 000000000..49412116b --- /dev/null +++ b/pkg/rulemanager/cel/libraries/cache/function_cache_test.go @@ -0,0 +1,203 @@ +package cache + +import ( + "testing" + "time" + + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + "github.com/stretchr/testify/assert" +) + +func TestFunctionCache_WithCache_BasicUsage(t *testing.T) { + cache := NewFunctionCache(DefaultFunctionCacheConfig()) + + // Mock function that returns a simple boolean + callCount := 0 + mockFunc := func(values ...ref.Val) ref.Val { + callCount++ + // Simple logic: return true if first arg equals "test" + if len(values) > 0 { + if str, ok := values[0].Value().(string); ok && str == "test" { + return types.Bool(true) + } + } + return types.Bool(false) + } + + // Wrap the function with caching + cachedFunc := cache.WithCache(mockFunc, "test_function") + + // First call - should hit the original function + result1 := cachedFunc(types.String("test")) + assert.Equal(t, true, result1.Value().(bool)) + assert.Equal(t, 1, callCount) + + // Second call with same args - should hit cache + result2 := cachedFunc(types.String("test")) + assert.Equal(t, true, result2.Value().(bool)) + assert.Equal(t, 1, callCount) // callCount should not increase + + // Third call with different args - should hit original function + result3 := cachedFunc(types.String("different")) + assert.Equal(t, false, result3.Value().(bool)) + assert.Equal(t, 2, callCount) // callCount should increase +} + +func TestFunctionCache_WithCache_MultipleArguments(t *testing.T) { + cache := NewFunctionCache(DefaultFunctionCacheConfig()) + + callCount := 0 + mockFunc := func(values ...ref.Val) ref.Val { + callCount++ + // Return true if we have exactly 2 arguments + return types.Bool(len(values) == 2) + } + + cachedFunc := cache.WithCache(mockFunc, "multi_arg_function") + + // Call with 2 arguments + result1 := cachedFunc(types.String("arg1"), types.String("arg2")) + assert.Equal(t, true, result1.Value().(bool)) + assert.Equal(t, 1, callCount) + + // Same call should hit cache + result2 := cachedFunc(types.String("arg1"), types.String("arg2")) + assert.Equal(t, true, result2.Value().(bool)) + assert.Equal(t, 1, callCount) + + // Different arguments should miss cache + result3 := 
cachedFunc(types.String("arg3"), types.String("arg4")) + assert.Equal(t, true, result3.Value().(bool)) + assert.Equal(t, 2, callCount) +} + +func TestFunctionCache_WithCache_ErrorsNotCached(t *testing.T) { + cache := NewFunctionCache(DefaultFunctionCacheConfig()) + + callCount := 0 + mockFunc := func(values ...ref.Val) ref.Val { + callCount++ + // Always return an error + return types.NewErr("test error") + } + + cachedFunc := cache.WithCache(mockFunc, "error_function") + + // First call + result1 := cachedFunc(types.String("test")) + assert.True(t, types.IsError(result1)) + assert.Equal(t, 1, callCount) + + // Second call with same args - should NOT hit cache (errors aren't cached) + result2 := cachedFunc(types.String("test")) + assert.True(t, types.IsError(result2)) + assert.Equal(t, 2, callCount) // Should increase since errors aren't cached +} + +func TestFunctionCache_TTL_Expiration(t *testing.T) { + // Create cache with very short TTL for testing + config := FunctionCacheConfig{ + MaxSize: 100, + TTL: 50 * time.Millisecond, + } + cache := NewFunctionCache(config) + + callCount := 0 + mockFunc := func(values ...ref.Val) ref.Val { + callCount++ + return types.Bool(true) + } + + cachedFunc := cache.WithCache(mockFunc, "ttl_function") + + // First call + result1 := cachedFunc(types.String("test")) + assert.Equal(t, true, result1.Value().(bool)) + assert.Equal(t, 1, callCount) + + // Second call immediately - should hit cache + result2 := cachedFunc(types.String("test")) + assert.Equal(t, true, result2.Value().(bool)) + assert.Equal(t, 1, callCount) + + // Wait for TTL to expire + time.Sleep(60 * time.Millisecond) + + // Third call - should miss cache due to expiration + result3 := cachedFunc(types.String("test")) + assert.Equal(t, true, result3.Value().(bool)) + assert.Equal(t, 2, callCount) // Should increase due to expiration +} + +func TestFunctionCache_ClearCache(t *testing.T) { + cache := NewFunctionCache(DefaultFunctionCacheConfig()) + + callCount := 0 + mockFunc := func(values ...ref.Val) ref.Val { + callCount++ + return types.Bool(true) + } + + cachedFunc := cache.WithCache(mockFunc, "clear_test_function") + + // First call + cachedFunc(types.String("test")) + assert.Equal(t, 1, callCount) + assert.Equal(t, 1, cache.GetCacheStats()) + + // Second call - should hit cache + cachedFunc(types.String("test")) + assert.Equal(t, 1, callCount) + + // Clear cache + cache.ClearCache() + assert.Equal(t, 0, cache.GetCacheStats()) + + // Third call - should miss cache due to clear + cachedFunc(types.String("test")) + assert.Equal(t, 2, callCount) +} + +func TestFunctionCache_GenerateCacheKey(t *testing.T) { + cache := NewFunctionCache(DefaultFunctionCacheConfig()) + + tests := []struct { + name string + functionName string + values []ref.Val + expected string + }{ + { + name: "single string argument", + functionName: "test_func", + values: []ref.Val{types.String("hello")}, + expected: "test_func|hello", + }, + { + name: "multiple arguments", + functionName: "multi_func", + values: []ref.Val{types.String("arg1"), types.String("arg2")}, + expected: "multi_func|arg1|arg2", + }, + { + name: "mixed types", + functionName: "mixed_func", + values: []ref.Val{types.String("str"), types.Bool(true), types.Int(42)}, + expected: "mixed_func|str|true|42", + }, + { + name: "no arguments", + functionName: "no_args", + values: []ref.Val{}, + expected: "no_args", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := cache.generateCacheKey(tt.functionName, tt.values...) 
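+ // Keys are expected to be "<functionName>|<arg1>|<arg2>|..." with arguments kept in call order.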
+ assert.Equal(t, tt.expected, result) + }) + } +} diff --git a/pkg/rulemanager/cel/libraries/celparse/parseargs.go b/pkg/rulemanager/cel/libraries/celparse/parseargs.go new file mode 100644 index 000000000..0c9e8b3fd --- /dev/null +++ b/pkg/rulemanager/cel/libraries/celparse/parseargs.go @@ -0,0 +1,42 @@ +package celparse + +import ( + "fmt" + + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/common/types/traits" +) + +type CELListError struct { + Message string +} + +func (e *CELListError) Error() string { + return e.Message +} + +func ParseList[T any](list ref.Val) ([]T, error) { + argsList, ok := list.(traits.Lister) + if !ok { + return nil, &CELListError{Message: "invalid list format: expected list"} + } + + sizeVal := argsList.Size() + size, ok := sizeVal.Value().(int64) + if !ok { + return nil, &CELListError{Message: "invalid list size type"} + } + + result := make([]T, size) + for i := int64(0); i < size; i++ { + val := argsList.Get(types.Int(i)) + typedVal, ok := val.Value().(T) + if !ok { + return nil, &CELListError{Message: fmt.Sprintf("invalid element type in list at index %d", i)} + } + result[i] = typedVal + } + + return result, nil +} diff --git a/pkg/rulemanager/cel/libraries/k8s/k8s.go b/pkg/rulemanager/cel/libraries/k8s/k8s.go new file mode 100644 index 000000000..b468d2de3 --- /dev/null +++ b/pkg/rulemanager/cel/libraries/k8s/k8s.go @@ -0,0 +1,236 @@ +package k8s + +import ( + "github.com/google/cel-go/cel" + "github.com/google/cel-go/checker" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + "github.com/kubescape/node-agent/pkg/config" + "github.com/kubescape/node-agent/pkg/objectcache" + "github.com/kubescape/node-agent/pkg/rulemanager/cel/libraries" + "github.com/kubescape/node-agent/pkg/rulemanager/cel/libraries/cache" +) + +func New(k8sObjCache objectcache.K8sObjectCache, config config.Config) libraries.Library { + return &k8sLibrary{ + k8sObjCache: k8sObjCache, + functionCache: cache.NewFunctionCache(cache.FunctionCacheConfig{ + MaxSize: config.CelConfigCache.MaxSize, + TTL: config.CelConfigCache.TTL, + }), + } +} + +func K8s(k8sObjCache objectcache.K8sObjectCache, config config.Config) cel.EnvOption { + return cel.Lib(New(k8sObjCache, config)) +} + +type k8sLibrary struct { + k8sObjCache objectcache.K8sObjectCache + functionCache *cache.FunctionCache +} + +func (l *k8sLibrary) LibraryName() string { + return "k8s" +} + +func (l *k8sLibrary) Types() []*cel.Type { + return []*cel.Type{} +} + +func (l *k8sLibrary) Declarations() map[string][]cel.FunctionOpt { + return map[string][]cel.FunctionOpt{ + "k8s.get_container_mount_paths": { + cel.Overload( + "k8s_get_container_mount_paths", []*cel.Type{cel.StringType, cel.StringType, cel.StringType}, cel.ListType(cel.StringType), + cel.FunctionBinding(func(values ...ref.Val) ref.Val { + if len(values) != 3 { + return types.NewErr("expected 3 arguments, got %d", len(values)) + } + return l.getContainerMountPaths(values[0], values[1], values[2]) + }), + ), + }, + "k8s.is_api_server_address": { + cel.Overload( + "k8s_is_api_server_address", []*cel.Type{cel.StringType}, cel.BoolType, + cel.FunctionBinding(func(values ...ref.Val) ref.Val { + if len(values) != 1 { + return types.NewErr("expected 1 argument, got %d", len(values)) + } + return l.isApiServerAddress(values[0]) + }), + ), + }, + "k8s.get_container_by_name": { + cel.Overload( + "k8s_get_container_by_name", []*cel.Type{cel.StringType, cel.StringType, 
cel.StringType}, cel.BoolType, + cel.FunctionBinding(func(values ...ref.Val) ref.Val { + if len(values) != 3 { + return types.NewErr("expected 3 arguments, got %d", len(values)) + } + return l.getContainerByName(values[0], values[1], values[2]) + }), + ), + }, + } +} + +func (l *k8sLibrary) getContainerMountPaths(namespace, podName, containerName ref.Val) ref.Val { + if l.k8sObjCache == nil { + return types.NewErr("k8sObjCache is nil") + } + + namespaceStr, ok := namespace.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(namespace) + } + podNameStr, ok := podName.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(podName) + } + containerNameStr, ok := containerName.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(containerName) + } + + podSpec := l.k8sObjCache.GetPodSpec(namespaceStr, podNameStr) + if podSpec == nil { + return types.NewErr("pod spec not available for %s/%s", namespaceStr, podNameStr) + } + + var mountPaths []string + for _, container := range podSpec.Containers { + if container.Name == containerNameStr { + for _, volumeMount := range container.VolumeMounts { + mountPaths = append(mountPaths, volumeMount.MountPath) + } + } + } + + for _, container := range podSpec.InitContainers { + if container.Name == containerNameStr { + for _, volumeMount := range container.VolumeMounts { + mountPaths = append(mountPaths, volumeMount.MountPath) + } + } + } + + for _, container := range podSpec.EphemeralContainers { + if container.Name == containerNameStr { + for _, volumeMount := range container.VolumeMounts { + mountPaths = append(mountPaths, volumeMount.MountPath) + } + } + } + + return types.NewDynamicList(types.DefaultTypeAdapter, mountPaths) +} + +func (l *k8sLibrary) isApiServerAddress(address ref.Val) ref.Val { + if l.k8sObjCache == nil { + return types.NewErr("k8sObjCache is nil") + } + + addressStr, ok := address.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(address) + } + + apiServerAddress := l.k8sObjCache.GetApiServerIpAddress() + if apiServerAddress == "" { + return types.Bool(false) + } + + return types.Bool(addressStr == apiServerAddress) +} + +func (l *k8sLibrary) getContainerByName(namespace, podName, containerName ref.Val) ref.Val { + if l.k8sObjCache == nil { + return types.NewErr("k8sObjCache is nil") + } + + namespaceStr, ok := namespace.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(namespace) + } + podNameStr, ok := podName.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(podName) + } + containerNameStr, ok := containerName.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(containerName) + } + + podSpec := l.k8sObjCache.GetPodSpec(namespaceStr, podNameStr) + if podSpec == nil { + return types.Bool(false) + } + + // Check regular containers + for _, container := range podSpec.Containers { + if container.Name == containerNameStr { + return types.Bool(true) + } + } + + // Check init containers + for _, container := range podSpec.InitContainers { + if container.Name == containerNameStr { + return types.Bool(true) + } + } + + // Check ephemeral containers + for _, container := range podSpec.EphemeralContainers { + if container.Name == containerNameStr { + return types.Bool(true) + } + } + + return types.Bool(false) +} + +func (l *k8sLibrary) CompileOptions() []cel.EnvOption { + options := []cel.EnvOption{} + for name, overloads := range l.Declarations() { + options = append(options, cel.Function(name, overloads...)) + } + return options +} + +func (l 
*k8sLibrary) ProgramOptions() []cel.ProgramOption { + return []cel.ProgramOption{} +} + +func (l *k8sLibrary) CostEstimator() checker.CostEstimator { + return &k8sCostEstimator{} +} + +// k8sCostEstimator implements the checker.CostEstimator for the 'k8s' library. +type k8sCostEstimator struct{} + +func (e *k8sCostEstimator) EstimateCallCost(function, overloadID string, target *checker.AstNode, args []checker.AstNode) *checker.CallEstimate { + cost := int64(0) + switch function { + case "k8s.get_container_mount_paths": + // Cache lookup + O(n) search through 3 container types + O(m) volume mount iteration + cost = 25 + case "k8s.is_api_server_address": + // Cache lookup + simple string comparison - O(1) + cost = 5 + case "k8s.get_container_by_name": + // Cache lookup + O(n) search through 3 container types by name + cost = 15 + } + return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: uint64(cost), Max: uint64(cost)}} +} + +func (e *k8sCostEstimator) EstimateSize(element checker.AstNode) *checker.SizeEstimate { + return nil // Not providing size estimates for now. +} + +// Ensure the implementation satisfies the interface +var _ checker.CostEstimator = (*k8sCostEstimator)(nil) +var _ libraries.Library = (*k8sLibrary)(nil) diff --git a/pkg/rulemanager/cel/libraries/k8s/k8s_test.go b/pkg/rulemanager/cel/libraries/k8s/k8s_test.go new file mode 100644 index 000000000..e8001c2a8 --- /dev/null +++ b/pkg/rulemanager/cel/libraries/k8s/k8s_test.go @@ -0,0 +1,205 @@ +package k8s + +import ( + "context" + "testing" + + "github.com/google/cel-go/cel" + "github.com/kubescape/k8s-interface/k8sinterface" + "github.com/kubescape/node-agent/pkg/config" + "github.com/kubescape/node-agent/pkg/objectcache/k8scache" + "github.com/kubescape/node-agent/pkg/objectcache/v1" + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestK8sLibrary(t *testing.T) { + t.Setenv("KUBERNETES_SERVICE_HOST", "127.0.0.1") + t.Setenv("KUBERNETES_SERVICE_PORT", "6443") + + mockK8sClient := k8sinterface.NewKubernetesApiMock() + + k8sObjCache, err := k8scache.NewK8sObjectCache("test", mockK8sClient) + if err != nil { + t.Fatalf("failed to create k8s object cache: %v", err) + } + + // Create a proper Pod object and add it to the cache + testPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "default", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test-container", + VolumeMounts: []corev1.VolumeMount{ + { + Name: "config-volume", + MountPath: "/etc/config", + }, + { + Name: "data-volume", + MountPath: "/var/data", + }, + }, + }, + { + Name: "test-container-2", + VolumeMounts: []corev1.VolumeMount{ + { + Name: "logs-volume", + MountPath: "/var/logs", + }, + }, + }, + }, + }, + } + + // Add the pod to the cache directly + k8sObjCache.AddHandler(context.Background(), testPod) + objectCache := objectcache.NewObjectCache(k8sObjCache, nil, nil, nil) + env, err := cel.NewEnv( + cel.Variable("event", cel.AnyType), + K8s(objectCache.K8sObjectCache(), config.Config{}), + ) + if err != nil { + t.Fatalf("failed to create env: %v", err) + } + + ast, issues := env.Compile("k8s.get_container_mount_paths('default', 'test-pod', 'test-container')") + if issues != nil { + t.Fatalf("failed to compile expression: %v", issues.Err()) + } + + program, err := env.Program(ast) + if err != nil { + t.Fatalf("failed to create program: %v", err) + } + + result, _, err := program.Eval(map[string]interface{}{ + 
"event": map[string]interface{}{ + "namespace": "default", + "podName": "test-pod", + "containerName": "test-container", + }, + }) + if err != nil { + t.Fatalf("failed to eval program: %v", err) + } + + // Assert the expected mount paths + expectedMountPaths := []string{"/etc/config", "/var/data"} + actualMountPaths := result.Value().([]string) + + assert.Equal(t, expectedMountPaths, actualMountPaths, "mount paths should match expected values") +} + +func TestK8sLibraryGetContainerByName(t *testing.T) { + t.Setenv("KUBERNETES_SERVICE_HOST", "127.0.0.1") + t.Setenv("KUBERNETES_SERVICE_PORT", "6443") + + mockK8sClient := k8sinterface.NewKubernetesApiMock() + + k8sObjCache, err := k8scache.NewK8sObjectCache("test", mockK8sClient) + if err != nil { + t.Fatalf("failed to create k8s object cache: %v", err) + } + + // Create a test Pod with multiple containers + testPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "default", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "main-container", + }, + { + Name: "sidecar-container", + }, + }, + InitContainers: []corev1.Container{ + { + Name: "init-container", + }, + }, + }, + } + + // Add the pod to the cache directly + k8sObjCache.AddHandler(context.Background(), testPod) + objectCache := objectcache.NewObjectCache(k8sObjCache, nil, nil, nil) + env, err := cel.NewEnv( + cel.Variable("event", cel.AnyType), + K8s(objectCache.K8sObjectCache(), config.Config{}), + ) + if err != nil { + t.Fatalf("failed to create env: %v", err) + } + + tests := []struct { + name string + expr string + expectedResult bool + }{ + { + name: "container exists in main containers", + expr: "k8s.get_container_by_name('default', 'test-pod', 'main-container')", + expectedResult: true, + }, + { + name: "container exists in sidecar containers", + expr: "k8s.get_container_by_name('default', 'test-pod', 'sidecar-container')", + expectedResult: true, + }, + { + name: "container exists in init containers", + expr: "k8s.get_container_by_name('default', 'test-pod', 'init-container')", + expectedResult: true, + }, + { + name: "container does not exist", + expr: "k8s.get_container_by_name('default', 'test-pod', 'non-existent-container')", + expectedResult: false, + }, + { + name: "pod does not exist", + expr: "k8s.get_container_by_name('default', 'non-existent-pod', 'main-container')", + expectedResult: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ast, issues := env.Compile(tt.expr) + if issues != nil { + t.Fatalf("failed to compile expression: %v", issues.Err()) + } + + program, err := env.Program(ast) + if err != nil { + t.Fatalf("failed to create program: %v", err) + } + + result, _, err := program.Eval(map[string]interface{}{ + "event": map[string]interface{}{ + "namespace": "default", + "podName": "test-pod", + "containerName": "main-container", + }, + }) + if err != nil { + t.Fatalf("failed to eval program: %v", err) + } + + actualResult := result.Value().(bool) + assert.Equal(t, tt.expectedResult, actualResult, "container existence check should match expected result") + }) + } +} diff --git a/pkg/rulemanager/cel/libraries/libraries.go b/pkg/rulemanager/cel/libraries/libraries.go new file mode 100644 index 000000000..c6d11967a --- /dev/null +++ b/pkg/rulemanager/cel/libraries/libraries.go @@ -0,0 +1,21 @@ +package libraries + +import ( + "github.com/google/cel-go/cel" + "github.com/google/cel-go/checker" +) + +// Library represents a CEL library used by node-agent. 
+type Library interface { + // SingletonLibrary provides the library name and ensures the library can be safely registered into environments. + cel.SingletonLibrary + + // Types provides all custom types introduced by the library. + Types() []*cel.Type + + // Declarations returns all function Declarations provided by the library. + Declarations() map[string][]cel.FunctionOpt + + // CostEstimator provides a cost estimator for the library. + CostEstimator() checker.CostEstimator +} diff --git a/pkg/rulemanager/cel/libraries/net/net.go b/pkg/rulemanager/cel/libraries/net/net.go new file mode 100644 index 000000000..fce5ac34f --- /dev/null +++ b/pkg/rulemanager/cel/libraries/net/net.go @@ -0,0 +1,50 @@ +package net + +import ( + "bytes" + "net" + + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" +) + +func (l *netLibrary) isPrivateIP(ip ref.Val) ref.Val { + ipStr, ok := ip.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(ip) + } + + parsedIP := net.ParseIP(ipStr) + if parsedIP == nil { + return types.Bool(false) + } + + // Check if IP is localhost + if parsedIP.IsLoopback() { + return types.Bool(true) + } + + // Check if IP is in private IP ranges + privateIPRanges := []struct { + start net.IP + end net.IP + }{ + {net.ParseIP("10.0.0.0"), net.ParseIP("10.255.255.255")}, + {net.ParseIP("172.16.0.0"), net.ParseIP("172.31.255.255")}, + {net.ParseIP("192.168.0.0"), net.ParseIP("192.168.255.255")}, + // Class D (Multicast) + {net.ParseIP("224.0.0.0"), net.ParseIP("239.255.255.255")}, + // Class E (Experimental) + {net.ParseIP("240.0.0.0"), net.ParseIP("255.255.255.255")}, + // APIPA (sometimes used for local dns) + {net.ParseIP("169.254.0.0"), net.ParseIP("169.254.255.255")}, + } + + for _, r := range privateIPRanges { + if bytes.Compare(parsedIP, r.start) >= 0 && bytes.Compare(parsedIP, r.end) <= 0 { + return types.Bool(true) + } + } + + return types.Bool(false) +} diff --git a/pkg/rulemanager/cel/libraries/net/net_test.go b/pkg/rulemanager/cel/libraries/net/net_test.go new file mode 100644 index 000000000..acc15e158 --- /dev/null +++ b/pkg/rulemanager/cel/libraries/net/net_test.go @@ -0,0 +1,185 @@ +package net + +import ( + "testing" + + "github.com/google/cel-go/cel" + "github.com/kubescape/node-agent/pkg/config" + "github.com/stretchr/testify/assert" +) + +func TestNetLibrary(t *testing.T) { + env, err := cel.NewEnv( + cel.Variable("event", cel.AnyType), + Net(config.Config{}), + ) + if err != nil { + t.Fatalf("failed to create env: %v", err) + } + + tests := []struct { + name string + expr string + expected bool + }{ + { + name: "localhost IPv4", + expr: "net.is_private_ip('127.0.0.1')", + expected: true, + }, + { + name: "localhost IPv6", + expr: "net.is_private_ip('::1')", + expected: true, + }, + { + name: "private IP 10.x.x.x", + expr: "net.is_private_ip('10.0.0.1')", + expected: true, + }, + { + name: "private IP 172.16.x.x", + expr: "net.is_private_ip('172.16.0.1')", + expected: true, + }, + { + name: "private IP 172.31.x.x", + expr: "net.is_private_ip('172.31.255.255')", + expected: true, + }, + { + name: "private IP 192.168.x.x", + expr: "net.is_private_ip('192.168.1.1')", + expected: true, + }, + { + name: "multicast IP", + expr: "net.is_private_ip('224.0.0.1')", + expected: true, + }, + { + name: "experimental IP", + expr: "net.is_private_ip('240.0.0.1')", + expected: true, + }, + { + name: "APIPA IP", + expr: "net.is_private_ip('169.254.1.1')", + expected: true, + }, + { + name: "public IP", + expr: 
"net.is_private_ip('8.8.8.8')", + expected: false, + }, + { + name: "another public IP", + expr: "net.is_private_ip('1.1.1.1')", + expected: false, + }, + { + name: "invalid IP", + expr: "net.is_private_ip('invalid-ip')", + expected: false, + }, + { + name: "empty string", + expr: "net.is_private_ip('')", + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ast, issues := env.Compile(tt.expr) + if issues != nil { + t.Fatalf("failed to compile expression: %v", issues.Err()) + } + + program, err := env.Program(ast) + if err != nil { + t.Fatalf("failed to create program: %v", err) + } + + result, _, err := program.Eval(map[string]interface{}{ + "event": map[string]interface{}{ + "ip": "test", + }, + }) + if err != nil { + t.Fatalf("failed to eval program: %v", err) + } + + actual, ok := result.Value().(bool) + if !ok { + t.Fatalf("expected bool result, got %T", result.Value()) + } + + assert.Equal(t, tt.expected, actual, "result should match expected value") + }) + } +} + +func TestNetLibraryErrorCases(t *testing.T) { + env, err := cel.NewEnv( + cel.Variable("event", cel.AnyType), + Net(config.Config{}), + ) + if err != nil { + t.Fatalf("failed to create env: %v", err) + } + + tests := []struct { + name string + expr string + expectError bool + }{ + { + name: "wrong number of arguments", + expr: "net.is_private_ip()", + expectError: true, + }, + { + name: "too many arguments", + expr: "net.is_private_ip('127.0.0.1', 'extra')", + expectError: true, + }, + { + name: "wrong argument type", + expr: "net.is_private_ip(123)", + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ast, issues := env.Compile(tt.expr) + if issues != nil { + if tt.expectError { + return // Expected compilation error + } + t.Fatalf("failed to compile expression: %v", issues.Err()) + } + + program, err := env.Program(ast) + if err != nil { + if tt.expectError { + return // Expected program creation error + } + t.Fatalf("failed to create program: %v", err) + } + + _, _, err = program.Eval(map[string]interface{}{ + "event": map[string]interface{}{ + "ip": "test", + }, + }) + if err != nil && tt.expectError { + return // Expected evaluation error + } + if err != nil && !tt.expectError { + t.Fatalf("unexpected error during evaluation: %v", err) + } + }) + } +} diff --git a/pkg/rulemanager/cel/libraries/net/netlib.go b/pkg/rulemanager/cel/libraries/net/netlib.go new file mode 100644 index 000000000..5616ad3a6 --- /dev/null +++ b/pkg/rulemanager/cel/libraries/net/netlib.go @@ -0,0 +1,93 @@ +package net + +import ( + "github.com/google/cel-go/cel" + "github.com/google/cel-go/checker" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + "github.com/kubescape/node-agent/pkg/config" + "github.com/kubescape/node-agent/pkg/rulemanager/cel/libraries" + "github.com/kubescape/node-agent/pkg/rulemanager/cel/libraries/cache" +) + +func New(config config.Config) libraries.Library { + return &netLibrary{ + functionCache: cache.NewFunctionCache(cache.FunctionCacheConfig{ + MaxSize: config.CelConfigCache.MaxSize, + TTL: config.CelConfigCache.TTL, + }), + } +} + +func Net(config config.Config) cel.EnvOption { + return cel.Lib(New(config)) +} + +type netLibrary struct { + functionCache *cache.FunctionCache +} + +func (l *netLibrary) LibraryName() string { + return "net" +} + +func (l *netLibrary) Types() []*cel.Type { + return []*cel.Type{} +} + +func (l *netLibrary) Declarations() map[string][]cel.FunctionOpt { + 
return map[string][]cel.FunctionOpt{ + "net.is_private_ip": { + cel.Overload( + "net_is_private_ip", []*cel.Type{cel.StringType}, cel.BoolType, + cel.FunctionBinding(func(values ...ref.Val) ref.Val { + if len(values) != 1 { + return types.NewErr("expected 1 argument, got %d", len(values)) + } + wrapperFunc := func(args ...ref.Val) ref.Val { + return l.isPrivateIP(args[0]) + } + cachedFunc := l.functionCache.WithCache(wrapperFunc, "net.is_private_ip") + return cachedFunc(values[0]) + }), + ), + }, + } +} + +func (l *netLibrary) CompileOptions() []cel.EnvOption { + options := []cel.EnvOption{} + for name, overloads := range l.Declarations() { + options = append(options, cel.Function(name, overloads...)) + } + return options +} + +func (l *netLibrary) ProgramOptions() []cel.ProgramOption { + return []cel.ProgramOption{} +} + +func (l *netLibrary) CostEstimator() checker.CostEstimator { + return &netCostEstimator{} +} + +// netCostEstimator implements the checker.CostEstimator for the 'net' library. +type netCostEstimator struct{} + +func (e *netCostEstimator) EstimateCallCost(function, overloadID string, target *checker.AstNode, args []checker.AstNode) *checker.CallEstimate { + cost := int64(0) + switch function { + case "net.is_private_ip": + // IP parsing O(1) + byte comparison across 6 IP ranges O(6) = O(1) + cost = 8 + } + return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: uint64(cost), Max: uint64(cost)}} +} + +func (e *netCostEstimator) EstimateSize(element checker.AstNode) *checker.SizeEstimate { + return nil // Not providing size estimates for now. +} + +// Ensure the implementation satisfies the interface +var _ checker.CostEstimator = (*netCostEstimator)(nil) +var _ libraries.Library = (*netLibrary)(nil) diff --git a/pkg/rulemanager/cel/libraries/networkneighborhood/cache_test.go b/pkg/rulemanager/cel/libraries/networkneighborhood/cache_test.go new file mode 100644 index 000000000..d0d5bb79e --- /dev/null +++ b/pkg/rulemanager/cel/libraries/networkneighborhood/cache_test.go @@ -0,0 +1,661 @@ +package networkneighborhood + +import ( + "testing" + "time" + + "github.com/google/cel-go/cel" + "github.com/goradd/maps" + "github.com/kubescape/node-agent/pkg/objectcache" + objectcachev1 "github.com/kubescape/node-agent/pkg/objectcache/v1" + "github.com/kubescape/node-agent/pkg/rulemanager/cel/libraries/cache" + "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" + "github.com/stretchr/testify/assert" + "k8s.io/utils/ptr" +) + +func TestNetworkNeighborhoodCaching(t *testing.T) { + objCache := objectcachev1.RuleObjectCacheMock{ + ContainerIDToSharedData: maps.NewSafeMap[string, *objectcache.WatchedContainerData](), + } + + objCache.SetSharedContainerData("test-container-id", &objectcache.WatchedContainerData{ + ContainerType: objectcache.Container, + ContainerInfos: map[objectcache.ContainerType][]objectcache.ContainerInfo{ + objectcache.Container: { + { + Name: "test-container", + }, + }, + }, + }) + + nn := &v1beta1.NetworkNeighborhood{} + nn.Spec.Containers = append(nn.Spec.Containers, v1beta1.NetworkNeighborhoodContainer{ + Name: "test-container", + Egress: []v1beta1.NetworkNeighbor{ + { + IPAddress: "192.168.1.100", + DNSNames: []string{"api.example.com"}, + Ports: []v1beta1.NetworkPort{ + { + Name: "tcp-80", + Protocol: v1beta1.Protocol("TCP"), + Port: ptr.To(int32(80)), + }, + }, + }, + { + IPAddress: "10.0.0.50", + DNSNames: []string{"database.internal"}, + Ports: []v1beta1.NetworkPort{ + { + Name: "tcp-5432", + Protocol: v1beta1.Protocol("TCP"), + 
Port: ptr.To(int32(5432)), + }, + }, + }, + }, + Ingress: []v1beta1.NetworkNeighbor{ + { + IPAddress: "172.16.0.10", + DNSNames: []string{"loadbalancer.example.com"}, + Ports: []v1beta1.NetworkPort{ + { + Name: "tcp-8080", + Protocol: v1beta1.Protocol("TCP"), + Port: ptr.To(int32(8080)), + }, + }, + }, + }, + }) + objCache.SetNetworkNeighborhood(nn) + + // Create library with cache + lib := &nnLibrary{ + objectCache: &objCache, + functionCache: cache.NewFunctionCache(cache.DefaultFunctionCacheConfig()), + } + + env, err := cel.NewEnv( + cel.Variable("containerID", cel.StringType), + cel.Variable("address", cel.StringType), + cel.Variable("domain", cel.StringType), + cel.Variable("port", cel.IntType), + cel.Variable("protocol", cel.StringType), + cel.Lib(lib), + ) + assert.NoError(t, err) + + testCases := []struct { + name string + expression string + vars map[string]interface{} + expected bool + }{ + { + name: "was_address_in_egress caching", + expression: `nn.was_address_in_egress(containerID, address)`, + vars: map[string]interface{}{ + "containerID": "test-container-id", + "address": "192.168.1.100", + }, + expected: true, + }, + { + name: "was_address_in_ingress caching", + expression: `nn.was_address_in_ingress(containerID, address)`, + vars: map[string]interface{}{ + "containerID": "test-container-id", + "address": "172.16.0.10", + }, + expected: true, + }, + { + name: "is_domain_in_egress caching", + expression: `nn.is_domain_in_egress(containerID, domain)`, + vars: map[string]interface{}{ + "containerID": "test-container-id", + "domain": "api.example.com", + }, + expected: true, + }, + { + name: "is_domain_in_ingress caching", + expression: `nn.is_domain_in_ingress(containerID, domain)`, + vars: map[string]interface{}{ + "containerID": "test-container-id", + "domain": "loadbalancer.example.com", + }, + expected: true, + }, + { + name: "was_address_port_protocol_in_egress caching", + expression: `nn.was_address_port_protocol_in_egress(containerID, address, port, protocol)`, + vars: map[string]interface{}{ + "containerID": "test-container-id", + "address": "192.168.1.100", + "port": int64(80), + "protocol": "TCP", + }, + expected: true, + }, + { + name: "was_address_port_protocol_in_ingress caching", + expression: `nn.was_address_port_protocol_in_ingress(containerID, address, port, protocol)`, + vars: map[string]interface{}{ + "containerID": "test-container-id", + "address": "172.16.0.10", + "port": int64(8080), + "protocol": "TCP", + }, + expected: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ast, issues := env.Compile(tc.expression) + assert.NoError(t, issues.Err()) + + program, err := env.Program(ast) + assert.NoError(t, err) + + // Initial cache should be empty + initialCacheSize := lib.functionCache.GetCacheStats() + + // First call - should cache the result + result1, _, err := program.Eval(tc.vars) + assert.NoError(t, err) + assert.Equal(t, tc.expected, result1.Value()) + + // Cache should have one more entry + cacheSize1 := lib.functionCache.GetCacheStats() + assert.Equal(t, initialCacheSize+1, cacheSize1, "Cache should have one new entry after first call") + + // Second call with same parameters - should use cache + result2, _, err := program.Eval(tc.vars) + assert.NoError(t, err) + assert.Equal(t, tc.expected, result2.Value()) + + // Cache size should remain the same (cache hit) + cacheSize2 := lib.functionCache.GetCacheStats() + assert.Equal(t, cacheSize1, cacheSize2, "Cache size should not increase on cache hit") + + // Verify 
results are identical + assert.Equal(t, result1.Value(), result2.Value(), "Cached and non-cached results should be identical") + }) + } +} + +func TestNetworkNeighborhoodCacheDifferentArguments(t *testing.T) { + objCache := objectcachev1.RuleObjectCacheMock{ + ContainerIDToSharedData: maps.NewSafeMap[string, *objectcache.WatchedContainerData](), + } + + objCache.SetSharedContainerData("test-container-id", &objectcache.WatchedContainerData{ + ContainerType: objectcache.Container, + ContainerInfos: map[objectcache.ContainerType][]objectcache.ContainerInfo{ + objectcache.Container: { + { + Name: "test-container", + }, + }, + }, + }) + + nn := &v1beta1.NetworkNeighborhood{} + nn.Spec.Containers = append(nn.Spec.Containers, v1beta1.NetworkNeighborhoodContainer{ + Name: "test-container", + Egress: []v1beta1.NetworkNeighbor{ + { + IPAddress: "192.168.1.100", + DNSNames: []string{"api.example.com"}, + }, + { + IPAddress: "10.0.0.50", + DNSNames: []string{"database.internal"}, + }, + }, + }) + objCache.SetNetworkNeighborhood(nn) + + lib := &nnLibrary{ + objectCache: &objCache, + functionCache: cache.NewFunctionCache(cache.DefaultFunctionCacheConfig()), + } + + env, err := cel.NewEnv( + cel.Variable("containerID", cel.StringType), + cel.Variable("address", cel.StringType), + cel.Lib(lib), + ) + assert.NoError(t, err) + + ast, issues := env.Compile(`nn.was_address_in_egress(containerID, address)`) + assert.NoError(t, issues.Err()) + + program, err := env.Program(ast) + assert.NoError(t, err) + + // Call with first address + result1, _, err := program.Eval(map[string]interface{}{ + "containerID": "test-container-id", + "address": "192.168.1.100", + }) + assert.NoError(t, err) + assert.True(t, result1.Value().(bool)) + + cacheSize1 := lib.functionCache.GetCacheStats() + assert.Equal(t, 1, cacheSize1, "Cache should have 1 entry") + + // Call with second address - should create new cache entry + result2, _, err := program.Eval(map[string]interface{}{ + "containerID": "test-container-id", + "address": "10.0.0.50", + }) + assert.NoError(t, err) + assert.True(t, result2.Value().(bool)) + + cacheSize2 := lib.functionCache.GetCacheStats() + assert.Equal(t, 2, cacheSize2, "Cache should have 2 entries for different arguments") + + // Call with non-existent address - should create third cache entry + result3, _, err := program.Eval(map[string]interface{}{ + "containerID": "test-container-id", + "address": "1.1.1.1", + }) + assert.NoError(t, err) + assert.False(t, result3.Value().(bool)) + + cacheSize3 := lib.functionCache.GetCacheStats() + assert.Equal(t, 3, cacheSize3, "Cache should have 3 entries for different arguments") +} + +func TestNetworkNeighborhoodCacheExpiration(t *testing.T) { + objCache := objectcachev1.RuleObjectCacheMock{ + ContainerIDToSharedData: maps.NewSafeMap[string, *objectcache.WatchedContainerData](), + } + + objCache.SetSharedContainerData("test-container-id", &objectcache.WatchedContainerData{ + ContainerType: objectcache.Container, + ContainerInfos: map[objectcache.ContainerType][]objectcache.ContainerInfo{ + objectcache.Container: { + { + Name: "test-container", + }, + }, + }, + }) + + nn := &v1beta1.NetworkNeighborhood{} + nn.Spec.Containers = append(nn.Spec.Containers, v1beta1.NetworkNeighborhoodContainer{ + Name: "test-container", + Egress: []v1beta1.NetworkNeighbor{ + { + IPAddress: "192.168.1.100", + DNSNames: []string{"api.example.com"}, + }, + }, + }) + objCache.SetNetworkNeighborhood(nn) + + // Create cache with short TTL for testing + config := cache.FunctionCacheConfig{ + 
MaxSize: 100, + TTL: 50 * time.Millisecond, + } + lib := &nnLibrary{ + objectCache: &objCache, + functionCache: cache.NewFunctionCache(config), + } + + env, err := cel.NewEnv( + cel.Variable("containerID", cel.StringType), + cel.Variable("address", cel.StringType), + cel.Lib(lib), + ) + assert.NoError(t, err) + + ast, issues := env.Compile(`nn.was_address_in_egress(containerID, address)`) + assert.NoError(t, issues.Err()) + + program, err := env.Program(ast) + assert.NoError(t, err) + + vars := map[string]interface{}{ + "containerID": "test-container-id", + "address": "192.168.1.100", + } + + // First call + result1, _, err := program.Eval(vars) + assert.NoError(t, err) + assert.True(t, result1.Value().(bool)) + + cacheSize1 := lib.functionCache.GetCacheStats() + assert.Equal(t, 1, cacheSize1, "Cache should have 1 entry") + + // Wait for cache to expire + time.Sleep(100 * time.Millisecond) + + // Second call after expiration - cache should be empty + result2, _, err := program.Eval(vars) + assert.NoError(t, err) + assert.True(t, result2.Value().(bool)) + + // Cache should have been repopulated + cacheSize2 := lib.functionCache.GetCacheStats() + assert.Equal(t, 1, cacheSize2, "Cache should be repopulated after expiration") +} + +func TestNetworkNeighborhoodCachePerformance(t *testing.T) { + objCache := objectcachev1.RuleObjectCacheMock{ + ContainerIDToSharedData: maps.NewSafeMap[string, *objectcache.WatchedContainerData](), + } + + objCache.SetSharedContainerData("test-container-id", &objectcache.WatchedContainerData{ + ContainerType: objectcache.Container, + ContainerInfos: map[objectcache.ContainerType][]objectcache.ContainerInfo{ + objectcache.Container: { + { + Name: "test-container", + }, + }, + }, + }) + + nn := &v1beta1.NetworkNeighborhood{} + nn.Spec.Containers = append(nn.Spec.Containers, v1beta1.NetworkNeighborhoodContainer{ + Name: "test-container", + Egress: []v1beta1.NetworkNeighbor{ + { + IPAddress: "192.168.1.100", + DNSNames: []string{"api.example.com"}, + }, + }, + }) + objCache.SetNetworkNeighborhood(nn) + + lib := &nnLibrary{ + objectCache: &objCache, + functionCache: cache.NewFunctionCache(cache.DefaultFunctionCacheConfig()), + } + + env, err := cel.NewEnv( + cel.Variable("containerID", cel.StringType), + cel.Variable("address", cel.StringType), + cel.Lib(lib), + ) + assert.NoError(t, err) + + ast, issues := env.Compile(`nn.was_address_in_egress(containerID, address)`) + assert.NoError(t, issues.Err()) + + program, err := env.Program(ast) + assert.NoError(t, err) + + vars := map[string]interface{}{ + "containerID": "test-container-id", + "address": "192.168.1.100", + } + + // Measure time for first call (cache miss) + start1 := time.Now() + result1, _, err := program.Eval(vars) + duration1 := time.Since(start1) + assert.NoError(t, err) + assert.True(t, result1.Value().(bool)) + + // Measure time for second call (cache hit) + start2 := time.Now() + result2, _, err := program.Eval(vars) + duration2 := time.Since(start2) + assert.NoError(t, err) + assert.True(t, result2.Value().(bool)) + + // Cache hit should be faster (though this is not guaranteed in all environments) + t.Logf("First call (cache miss): %v", duration1) + t.Logf("Second call (cache hit): %v", duration2) + + // Verify results are the same + assert.Equal(t, result1.Value(), result2.Value()) + + // Verify cache was used + cacheSize := lib.functionCache.GetCacheStats() + assert.Equal(t, 1, cacheSize, "Cache should contain 1 entry") +} + +func TestNetworkNeighborhoodCacheMultipleFunctions(t *testing.T) { + 
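+ // Each distinct expression below should add exactly one cache entry on its first evaluation and be served from the cache on the second.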
objCache := objectcachev1.RuleObjectCacheMock{ + ContainerIDToSharedData: maps.NewSafeMap[string, *objectcache.WatchedContainerData](), + } + + objCache.SetSharedContainerData("test-container-id", &objectcache.WatchedContainerData{ + ContainerType: objectcache.Container, + ContainerInfos: map[objectcache.ContainerType][]objectcache.ContainerInfo{ + objectcache.Container: { + { + Name: "test-container", + }, + }, + }, + }) + + nn := &v1beta1.NetworkNeighborhood{} + nn.Spec.Containers = append(nn.Spec.Containers, v1beta1.NetworkNeighborhoodContainer{ + Name: "test-container", + Egress: []v1beta1.NetworkNeighbor{ + { + IPAddress: "192.168.1.100", + DNSNames: []string{"api.example.com"}, + }, + }, + Ingress: []v1beta1.NetworkNeighbor{ + { + IPAddress: "172.16.0.10", + DNSNames: []string{"loadbalancer.example.com"}, + }, + }, + }) + objCache.SetNetworkNeighborhood(nn) + + lib := &nnLibrary{ + objectCache: &objCache, + functionCache: cache.NewFunctionCache(cache.DefaultFunctionCacheConfig()), + } + + env, err := cel.NewEnv( + cel.Variable("containerID", cel.StringType), + cel.Lib(lib), + ) + assert.NoError(t, err) + + // Test multiple functions are cached independently + testExpressions := []struct { + expression string + expected bool + }{ + {`nn.was_address_in_egress(containerID, "192.168.1.100")`, true}, + {`nn.was_address_in_ingress(containerID, "172.16.0.10")`, true}, + {`nn.is_domain_in_egress(containerID, "api.example.com")`, true}, + {`nn.is_domain_in_ingress(containerID, "loadbalancer.example.com")`, true}, + } + + for i, tc := range testExpressions { + ast, issues := env.Compile(tc.expression) + assert.NoError(t, issues.Err()) + + program, err := env.Program(ast) + assert.NoError(t, err) + + // First call - should cache the result + result1, _, err := program.Eval(map[string]interface{}{ + "containerID": "test-container-id", + }) + assert.NoError(t, err) + assert.Equal(t, tc.expected, result1.Value()) + + // Cache should have i+1 entries + cacheSize1 := lib.functionCache.GetCacheStats() + assert.Equal(t, i+1, cacheSize1, "Cache should have %d entries", i+1) + + // Second call with same parameters - should use cache + result2, _, err := program.Eval(map[string]interface{}{ + "containerID": "test-container-id", + }) + assert.NoError(t, err) + assert.Equal(t, tc.expected, result2.Value()) + + // Cache size should remain the same (cache hit) + cacheSize2 := lib.functionCache.GetCacheStats() + assert.Equal(t, cacheSize1, cacheSize2, "Cache size should not increase on cache hit") + } +} + +func TestNetworkNeighborhoodCacheClearCache(t *testing.T) { + objCache := objectcachev1.RuleObjectCacheMock{ + ContainerIDToSharedData: maps.NewSafeMap[string, *objectcache.WatchedContainerData](), + } + + objCache.SetSharedContainerData("test-container-id", &objectcache.WatchedContainerData{ + ContainerType: objectcache.Container, + ContainerInfos: map[objectcache.ContainerType][]objectcache.ContainerInfo{ + objectcache.Container: { + { + Name: "test-container", + }, + }, + }, + }) + + nn := &v1beta1.NetworkNeighborhood{} + nn.Spec.Containers = append(nn.Spec.Containers, v1beta1.NetworkNeighborhoodContainer{ + Name: "test-container", + Egress: []v1beta1.NetworkNeighbor{ + { + IPAddress: "192.168.1.100", + DNSNames: []string{"api.example.com"}, + }, + }, + }) + objCache.SetNetworkNeighborhood(nn) + + lib := &nnLibrary{ + objectCache: &objCache, + functionCache: cache.NewFunctionCache(cache.DefaultFunctionCacheConfig()), + } + + env, err := cel.NewEnv( + cel.Variable("containerID", cel.StringType), + 
cel.Variable("address", cel.StringType), + cel.Lib(lib), + ) + assert.NoError(t, err) + + ast, issues := env.Compile(`nn.was_address_in_egress(containerID, address)`) + assert.NoError(t, issues.Err()) + + program, err := env.Program(ast) + assert.NoError(t, err) + + vars := map[string]interface{}{ + "containerID": "test-container-id", + "address": "192.168.1.100", + } + + // First call - populate cache + result1, _, err := program.Eval(vars) + assert.NoError(t, err) + assert.True(t, result1.Value().(bool)) + + cacheSize1 := lib.functionCache.GetCacheStats() + assert.Equal(t, 1, cacheSize1, "Cache should have 1 entry") + + // Clear cache + lib.functionCache.ClearCache() + + cacheSize2 := lib.functionCache.GetCacheStats() + assert.Equal(t, 0, cacheSize2, "Cache should be empty after clear") + + // Call again - should repopulate cache + result2, _, err := program.Eval(vars) + assert.NoError(t, err) + assert.True(t, result2.Value().(bool)) + + cacheSize3 := lib.functionCache.GetCacheStats() + assert.Equal(t, 1, cacheSize3, "Cache should have 1 entry after repopulation") +} + +func TestNetworkNeighborhoodCacheKeyGeneration(t *testing.T) { + objCache := objectcachev1.RuleObjectCacheMock{ + ContainerIDToSharedData: maps.NewSafeMap[string, *objectcache.WatchedContainerData](), + } + + objCache.SetSharedContainerData("test-container-id", &objectcache.WatchedContainerData{ + ContainerType: objectcache.Container, + ContainerInfos: map[objectcache.ContainerType][]objectcache.ContainerInfo{ + objectcache.Container: { + { + Name: "test-container", + }, + }, + }, + }) + + nn := &v1beta1.NetworkNeighborhood{} + nn.Spec.Containers = append(nn.Spec.Containers, v1beta1.NetworkNeighborhoodContainer{ + Name: "test-container", + Egress: []v1beta1.NetworkNeighbor{ + { + IPAddress: "192.168.1.100", + DNSNames: []string{"api.example.com"}, + }, + }, + }) + objCache.SetNetworkNeighborhood(nn) + + lib := &nnLibrary{ + objectCache: &objCache, + functionCache: cache.NewFunctionCache(cache.DefaultFunctionCacheConfig()), + } + + env, err := cel.NewEnv( + cel.Variable("containerID", cel.StringType), + cel.Variable("address", cel.StringType), + cel.Lib(lib), + ) + assert.NoError(t, err) + + ast, issues := env.Compile(`nn.was_address_in_egress(containerID, address)`) + assert.NoError(t, issues.Err()) + + program, err := env.Program(ast) + assert.NoError(t, err) + + // Test that same arguments in different order produce same cache result + // (cache key generation should be order-independent where possible) + vars1 := map[string]interface{}{ + "containerID": "test-container-id", + "address": "192.168.1.100", + } + + // First call + result1, _, err := program.Eval(vars1) + assert.NoError(t, err) + assert.True(t, result1.Value().(bool)) + + cacheSize1 := lib.functionCache.GetCacheStats() + assert.Equal(t, 1, cacheSize1, "Cache should have 1 entry") + + // Second call with same values + result2, _, err := program.Eval(vars1) + assert.NoError(t, err) + assert.True(t, result2.Value().(bool)) + + // Should still be 1 entry (cache hit) + cacheSize2 := lib.functionCache.GetCacheStats() + assert.Equal(t, 1, cacheSize2, "Cache should still have 1 entry after cache hit") +} diff --git a/pkg/rulemanager/cel/libraries/networkneighborhood/integration_test.go b/pkg/rulemanager/cel/libraries/networkneighborhood/integration_test.go new file mode 100644 index 000000000..b09c04138 --- /dev/null +++ b/pkg/rulemanager/cel/libraries/networkneighborhood/integration_test.go @@ -0,0 +1,272 @@ +package networkneighborhood + +import ( + "testing" + 
"time" + + "github.com/google/cel-go/cel" + "github.com/goradd/maps" + "github.com/kubescape/node-agent/pkg/config" + "github.com/kubescape/node-agent/pkg/objectcache" + objectcachev1 "github.com/kubescape/node-agent/pkg/objectcache/v1" + "github.com/kubescape/node-agent/pkg/rulemanager/cel/libraries/cache" + "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" + "github.com/stretchr/testify/assert" + "k8s.io/utils/ptr" +) + +func TestIntegrationWithAllNetworkFunctions(t *testing.T) { + objCache := objectcachev1.RuleObjectCacheMock{ + ContainerIDToSharedData: maps.NewSafeMap[string, *objectcache.WatchedContainerData](), + } + + objCache.SetSharedContainerData("test-container-id", &objectcache.WatchedContainerData{ + ContainerType: objectcache.Container, + ContainerInfos: map[objectcache.ContainerType][]objectcache.ContainerInfo{ + objectcache.Container: { + { + Name: "test-container", + }, + }, + }, + }) + + nn := &v1beta1.NetworkNeighborhood{} + nn.Spec.Containers = append(nn.Spec.Containers, v1beta1.NetworkNeighborhoodContainer{ + Name: "test-container", + Egress: []v1beta1.NetworkNeighbor{ + { + IPAddress: "192.168.1.100", + DNSNames: []string{"api.example.com", "api2.example.com"}, + Ports: []v1beta1.NetworkPort{ + { + Name: "tcp-80", + Protocol: v1beta1.Protocol("TCP"), + Port: ptr.To(int32(80)), + }, + { + Name: "tcp-443", + Protocol: v1beta1.Protocol("TCP"), + Port: ptr.To(int32(443)), + }, + }, + }, + { + IPAddress: "10.0.0.50", + DNSNames: []string{"database.internal"}, + Ports: []v1beta1.NetworkPort{ + { + Name: "tcp-5432", + Protocol: v1beta1.Protocol("TCP"), + Port: ptr.To(int32(5432)), + }, + }, + }, + { + IPAddress: "8.8.8.8", + DNSNames: []string{"dns.google.com"}, + Ports: []v1beta1.NetworkPort{ + { + Name: "udp-53", + Protocol: v1beta1.Protocol("UDP"), + Port: ptr.To(int32(53)), + }, + }, + }, + }, + Ingress: []v1beta1.NetworkNeighbor{ + { + IPAddress: "172.16.0.10", + DNSNames: []string{"loadbalancer.example.com", "lb.example.com"}, + Ports: []v1beta1.NetworkPort{ + { + Name: "tcp-8080", + Protocol: v1beta1.Protocol("TCP"), + Port: ptr.To(int32(8080)), + }, + { + Name: "tcp-9090", + Protocol: v1beta1.Protocol("TCP"), + Port: ptr.To(int32(9090)), + }, + }, + }, + { + IPAddress: "10.0.0.20", + DNSNames: []string{"monitoring.internal"}, + Ports: []v1beta1.NetworkPort{ + { + Name: "tcp-3000", + Protocol: v1beta1.Protocol("TCP"), + Port: ptr.To(int32(3000)), + }, + }, + }, + }, + }) + objCache.SetNetworkNeighborhood(nn) + + env, err := cel.NewEnv( + cel.Variable("containerID", cel.StringType), + NN(&objCache, config.Config{ + CelConfigCache: cache.FunctionCacheConfig{ + MaxSize: 1000, + TTL: 1 * time.Minute, + }, + }), + ) + if err != nil { + t.Fatalf("failed to create env: %v", err) + } + + testCases := []struct { + name string + expression string + expectedResult bool + }{ + { + name: "Check egress address", + expression: `nn.was_address_in_egress(containerID, "192.168.1.100")`, + expectedResult: true, + }, + { + name: "Check ingress address", + expression: `nn.was_address_in_ingress(containerID, "172.16.0.10")`, + expectedResult: true, + }, + { + name: "Check egress domain", + expression: `nn.is_domain_in_egress(containerID, "api.example.com")`, + expectedResult: true, + }, + { + name: "Check ingress domain", + expression: `nn.is_domain_in_ingress(containerID, "loadbalancer.example.com")`, + expectedResult: true, + }, + { + name: "Complex network check - external communication", + expression: `nn.was_address_in_egress(containerID, "8.8.8.8") && 
nn.is_domain_in_egress(containerID, "dns.google.com")`, + expectedResult: true, + }, + { + name: "Complex network check - internal communication", + expression: `nn.was_address_in_egress(containerID, "10.0.0.50") && nn.is_domain_in_egress(containerID, "database.internal")`, + expectedResult: true, + }, + { + name: "Complex network check - load balancer access", + expression: `nn.was_address_in_ingress(containerID, "172.16.0.10") && nn.is_domain_in_ingress(containerID, "lb.example.com")`, + expectedResult: true, + }, + { + name: "Check non-existent network communication", + expression: `nn.was_address_in_egress(containerID, "192.168.1.200") || nn.is_domain_in_ingress(containerID, "nonexistent.example.com")`, + expectedResult: false, + }, + { + name: "Mixed valid and invalid checks", + expression: `nn.was_address_in_egress(containerID, "192.168.1.100") && nn.was_address_in_egress(containerID, "192.168.1.200")`, + expectedResult: false, + }, + { + name: "Multiple valid egress checks", + expression: `nn.was_address_in_egress(containerID, "192.168.1.100") || nn.was_address_in_egress(containerID, "10.0.0.50")`, + expectedResult: true, + }, + { + name: "Check egress address with port and protocol - TCP 80", + expression: `nn.was_address_port_protocol_in_egress(containerID, "192.168.1.100", 80, "TCP")`, + expectedResult: true, + }, + { + name: "Check egress address with port and protocol - TCP 443", + expression: `nn.was_address_port_protocol_in_egress(containerID, "192.168.1.100", 443, "TCP")`, + expectedResult: true, + }, + { + name: "Check egress address with port and protocol - UDP 53", + expression: `nn.was_address_port_protocol_in_egress(containerID, "8.8.8.8", 53, "UDP")`, + expectedResult: true, + }, + { + name: "Check egress address with port and protocol - database", + expression: `nn.was_address_port_protocol_in_egress(containerID, "10.0.0.50", 5432, "TCP")`, + expectedResult: true, + }, + { + name: "Check ingress address with port and protocol - TCP 8080", + expression: `nn.was_address_port_protocol_in_ingress(containerID, "172.16.0.10", 8080, "TCP")`, + expectedResult: true, + }, + { + name: "Check ingress address with port and protocol - TCP 9090", + expression: `nn.was_address_port_protocol_in_ingress(containerID, "172.16.0.10", 9090, "TCP")`, + expectedResult: true, + }, + { + name: "Check ingress address with port and protocol - monitoring", + expression: `nn.was_address_port_protocol_in_ingress(containerID, "10.0.0.20", 3000, "TCP")`, + expectedResult: true, + }, + { + name: "Check non-existent egress address with port and protocol", + expression: `nn.was_address_port_protocol_in_egress(containerID, "192.168.1.100", 9999, "TCP")`, + expectedResult: false, + }, + { + name: "Check non-existent ingress address with port and protocol", + expression: `nn.was_address_port_protocol_in_ingress(containerID, "172.16.0.10", 9999, "TCP")`, + expectedResult: false, + }, + { + name: "Check wrong protocol for existing address and port", + expression: `nn.was_address_port_protocol_in_egress(containerID, "192.168.1.100", 80, "UDP")`, + expectedResult: false, + }, + { + name: "Check wrong protocol for existing ingress address and port", + expression: `nn.was_address_port_protocol_in_ingress(containerID, "172.16.0.10", 8080, "UDP")`, + expectedResult: false, + }, + { + name: "Complex network check with port and protocol - egress", + expression: `nn.was_address_port_protocol_in_egress(containerID, "192.168.1.100", 80, "TCP") && nn.was_address_port_protocol_in_egress(containerID, "8.8.8.8", 53, 
"UDP")`, + expectedResult: true, + }, + { + name: "Complex network check with port and protocol - ingress", + expression: `nn.was_address_port_protocol_in_ingress(containerID, "172.16.0.10", 8080, "TCP") && nn.was_address_port_protocol_in_ingress(containerID, "10.0.0.20", 3000, "TCP")`, + expectedResult: true, + }, + { + name: "Mixed valid and invalid port protocol checks", + expression: `nn.was_address_port_protocol_in_egress(containerID, "192.168.1.100", 80, "TCP") && nn.was_address_port_protocol_in_egress(containerID, "192.168.1.100", 9999, "TCP")`, + expectedResult: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ast, issues := env.Compile(tc.expression) + if issues != nil { + t.Fatalf("failed to compile expression: %v", issues.Err()) + } + + program, err := env.Program(ast) + if err != nil { + t.Fatalf("failed to create program: %v", err) + } + + result, _, err := program.Eval(map[string]interface{}{ + "containerID": "test-container-id", + }) + if err != nil { + t.Fatalf("failed to eval program: %v", err) + } + + actualResult := result.Value().(bool) + assert.Equal(t, tc.expectedResult, actualResult, "Expression result should match expected value for: %s", tc.expression) + }) + } +} diff --git a/pkg/rulemanager/cel/libraries/networkneighborhood/network.go b/pkg/rulemanager/cel/libraries/networkneighborhood/network.go new file mode 100644 index 000000000..7ceba9c80 --- /dev/null +++ b/pkg/rulemanager/cel/libraries/networkneighborhood/network.go @@ -0,0 +1,202 @@ +package networkneighborhood + +import ( + "slices" + + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + "github.com/kubescape/node-agent/pkg/rulemanager/profilehelper" + "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" +) + +func (l *nnLibrary) wasAddressInEgress(containerID, address ref.Val) ref.Val { + if l.objectCache == nil { + return types.NewErr("objectCache is nil") + } + + containerIDStr, ok := containerID.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(containerID) + } + addressStr, ok := address.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(address) + } + + container, err := profilehelper.GetContainerNetworkNeighborhood(l.objectCache, containerIDStr) + if err != nil { + return types.Bool(false) + } + + for _, egress := range container.Egress { + if egress.IPAddress == addressStr { + return types.Bool(true) + } + } + + return types.Bool(false) +} + +func (l *nnLibrary) wasAddressInIngress(containerID, address ref.Val) ref.Val { + if l.objectCache == nil { + return types.NewErr("objectCache is nil") + } + + containerIDStr, ok := containerID.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(containerID) + } + addressStr, ok := address.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(address) + } + + container, err := profilehelper.GetContainerNetworkNeighborhood(l.objectCache, containerIDStr) + if err != nil { + return types.Bool(false) + } + + for _, ingress := range container.Ingress { + if ingress.IPAddress == addressStr { + return types.Bool(true) + } + } + + return types.Bool(false) +} + +func (l *nnLibrary) isDomainInEgress(containerID, domain ref.Val) ref.Val { + if l.objectCache == nil { + return types.NewErr("objectCache is nil") + } + + containerIDStr, ok := containerID.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(containerID) + } + domainStr, ok := domain.Value().(string) + if !ok { + return 
types.MaybeNoSuchOverloadErr(domain) + } + + container, err := profilehelper.GetContainerNetworkNeighborhood(l.objectCache, containerIDStr) + if err != nil { + return types.Bool(false) + } + + for _, egress := range container.Egress { + if slices.Contains(egress.DNSNames, domainStr) || egress.DNS == domainStr { + return types.Bool(true) + } + } + + return types.Bool(false) +} + +func (l *nnLibrary) isDomainInIngress(containerID, domain ref.Val) ref.Val { + if l.objectCache == nil { + return types.NewErr("objectCache is nil") + } + + containerIDStr, ok := containerID.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(containerID) + } + domainStr, ok := domain.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(domain) + } + + container, err := profilehelper.GetContainerNetworkNeighborhood(l.objectCache, containerIDStr) + if err != nil { + return types.Bool(false) + } + + for _, ingress := range container.Ingress { + if slices.Contains(ingress.DNSNames, domainStr) { + return types.Bool(true) + } + } + + return types.Bool(false) +} + +func (l *nnLibrary) wasAddressPortProtocolInEgress(containerID, address, port, protocol ref.Val) ref.Val { + if l.objectCache == nil { + return types.NewErr("objectCache is nil") + } + + containerIDStr, ok := containerID.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(containerID) + } + addressStr, ok := address.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(address) + } + portInt, ok := port.Value().(int64) + if !ok { + return types.MaybeNoSuchOverloadErr(port) + } + protocolStr, ok := protocol.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(protocol) + } + + container, err := profilehelper.GetContainerNetworkNeighborhood(l.objectCache, containerIDStr) + if err != nil { + return types.Bool(false) + } + + for _, egress := range container.Egress { + if egress.IPAddress == addressStr { + for _, portInfo := range egress.Ports { + if portInfo.Protocol == v1beta1.Protocol(protocolStr) && portInfo.Port != nil && *portInfo.Port == int32(portInt) { + return types.Bool(true) + } + } + } + } + + return types.Bool(false) +} + +func (l *nnLibrary) wasAddressPortProtocolInIngress(containerID, address, port, protocol ref.Val) ref.Val { + if l.objectCache == nil { + return types.NewErr("objectCache is nil") + } + + containerIDStr, ok := containerID.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(containerID) + } + addressStr, ok := address.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(address) + } + portInt, ok := port.Value().(int64) + if !ok { + return types.MaybeNoSuchOverloadErr(port) + } + protocolStr, ok := protocol.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(protocol) + } + + container, err := profilehelper.GetContainerNetworkNeighborhood(l.objectCache, containerIDStr) + if err != nil { + return types.Bool(false) + } + + for _, ingress := range container.Ingress { + if ingress.IPAddress == addressStr { + for _, portInfo := range ingress.Ports { + if portInfo.Protocol == v1beta1.Protocol(protocolStr) && portInfo.Port != nil && *portInfo.Port == int32(portInt) { + return types.Bool(true) + } + } + } + } + + return types.Bool(false) +} diff --git a/pkg/rulemanager/cel/libraries/networkneighborhood/network_test.go b/pkg/rulemanager/cel/libraries/networkneighborhood/network_test.go new file mode 100644 index 000000000..a5cb0bbad --- /dev/null +++ b/pkg/rulemanager/cel/libraries/networkneighborhood/network_test.go @@ -0,0 +1,420 @@ 
+package networkneighborhood + +import ( + "testing" + + "github.com/google/cel-go/common/types" + "github.com/goradd/maps" + "github.com/kubescape/node-agent/pkg/objectcache" + objectcachev1 "github.com/kubescape/node-agent/pkg/objectcache/v1" + "github.com/kubescape/node-agent/pkg/rulemanager/cel/libraries/cache" + "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" + "github.com/stretchr/testify/assert" + "k8s.io/utils/ptr" +) + +func TestWasAddressPortProtocolInEgress(t *testing.T) { + objCache := objectcachev1.RuleObjectCacheMock{ + ContainerIDToSharedData: maps.NewSafeMap[string, *objectcache.WatchedContainerData](), + } + + objCache.SetSharedContainerData("test-container-id", &objectcache.WatchedContainerData{ + ContainerType: objectcache.Container, + ContainerInfos: map[objectcache.ContainerType][]objectcache.ContainerInfo{ + objectcache.Container: { + { + Name: "test-container", + }, + }, + }, + }) + + nn := &v1beta1.NetworkNeighborhood{} + nn.Spec.Containers = append(nn.Spec.Containers, v1beta1.NetworkNeighborhoodContainer{ + Name: "test-container", + Egress: []v1beta1.NetworkNeighbor{ + { + IPAddress: "192.168.1.100", + Ports: []v1beta1.NetworkPort{ + { + Name: "tcp-80", + Protocol: v1beta1.Protocol("TCP"), + Port: ptr.To(int32(80)), + }, + { + Name: "tcp-443", + Protocol: v1beta1.Protocol("TCP"), + Port: ptr.To(int32(443)), + }, + }, + }, + { + IPAddress: "8.8.8.8", + Ports: []v1beta1.NetworkPort{ + { + Name: "udp-53", + Protocol: v1beta1.Protocol("UDP"), + Port: ptr.To(int32(53)), + }, + }, + }, + }, + }) + objCache.SetNetworkNeighborhood(nn) + + lib := &nnLibrary{ + objectCache: &objCache, + functionCache: cache.NewFunctionCache(cache.DefaultFunctionCacheConfig()), + } + + testCases := []struct { + name string + containerID string + address string + port int64 + protocol string + expectedResult bool + }{ + { + name: "Valid TCP port 80", + containerID: "test-container-id", + address: "192.168.1.100", + port: 80, + protocol: "TCP", + expectedResult: true, + }, + { + name: "Valid TCP port 443", + containerID: "test-container-id", + address: "192.168.1.100", + port: 443, + protocol: "TCP", + expectedResult: true, + }, + { + name: "Valid UDP port 53", + containerID: "test-container-id", + address: "8.8.8.8", + port: 53, + protocol: "UDP", + expectedResult: true, + }, + { + name: "Invalid port", + containerID: "test-container-id", + address: "192.168.1.100", + port: 9999, + protocol: "TCP", + expectedResult: false, + }, + { + name: "Invalid protocol", + containerID: "test-container-id", + address: "192.168.1.100", + port: 80, + protocol: "UDP", + expectedResult: false, + }, + { + name: "Invalid address", + containerID: "test-container-id", + address: "192.168.1.200", + port: 80, + protocol: "TCP", + expectedResult: false, + }, + { + name: "Invalid container ID", + containerID: "invalid-container-id", + address: "192.168.1.100", + port: 80, + protocol: "TCP", + expectedResult: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result := lib.wasAddressPortProtocolInEgress( + types.String(tc.containerID), + types.String(tc.address), + types.Int(tc.port), + types.String(tc.protocol), + ) + assert.Equal(t, types.Bool(tc.expectedResult), result) + }) + } +} + +func TestWasAddressPortProtocolInIngress(t *testing.T) { + objCache := objectcachev1.RuleObjectCacheMock{ + ContainerIDToSharedData: maps.NewSafeMap[string, *objectcache.WatchedContainerData](), + } + + objCache.SetSharedContainerData("test-container-id", 
&objectcache.WatchedContainerData{ + ContainerType: objectcache.Container, + ContainerInfos: map[objectcache.ContainerType][]objectcache.ContainerInfo{ + objectcache.Container: { + { + Name: "test-container", + }, + }, + }, + }) + + nn := &v1beta1.NetworkNeighborhood{} + nn.Spec.Containers = append(nn.Spec.Containers, v1beta1.NetworkNeighborhoodContainer{ + Name: "test-container", + Ingress: []v1beta1.NetworkNeighbor{ + { + IPAddress: "172.16.0.10", + Ports: []v1beta1.NetworkPort{ + { + Name: "tcp-8080", + Protocol: v1beta1.Protocol("TCP"), + Port: ptr.To(int32(8080)), + }, + { + Name: "tcp-9090", + Protocol: v1beta1.Protocol("TCP"), + Port: ptr.To(int32(9090)), + }, + }, + }, + { + IPAddress: "10.0.0.20", + Ports: []v1beta1.NetworkPort{ + { + Name: "tcp-3000", + Protocol: v1beta1.Protocol("TCP"), + Port: ptr.To(int32(3000)), + }, + }, + }, + }, + }) + objCache.SetNetworkNeighborhood(nn) + + lib := &nnLibrary{ + objectCache: &objCache, + functionCache: cache.NewFunctionCache(cache.DefaultFunctionCacheConfig()), + } + + testCases := []struct { + name string + containerID string + address string + port int64 + protocol string + expectedResult bool + }{ + { + name: "Valid TCP port 8080", + containerID: "test-container-id", + address: "172.16.0.10", + port: 8080, + protocol: "TCP", + expectedResult: true, + }, + { + name: "Valid TCP port 9090", + containerID: "test-container-id", + address: "172.16.0.10", + port: 9090, + protocol: "TCP", + expectedResult: true, + }, + { + name: "Valid TCP port 3000", + containerID: "test-container-id", + address: "10.0.0.20", + port: 3000, + protocol: "TCP", + expectedResult: true, + }, + { + name: "Invalid port", + containerID: "test-container-id", + address: "172.16.0.10", + port: 9999, + protocol: "TCP", + expectedResult: false, + }, + { + name: "Invalid protocol", + containerID: "test-container-id", + address: "172.16.0.10", + port: 8080, + protocol: "UDP", + expectedResult: false, + }, + { + name: "Invalid address", + containerID: "test-container-id", + address: "172.16.0.20", + port: 8080, + protocol: "TCP", + expectedResult: false, + }, + { + name: "Invalid container ID", + containerID: "invalid-container-id", + address: "172.16.0.10", + port: 8080, + protocol: "TCP", + expectedResult: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result := lib.wasAddressPortProtocolInIngress( + types.String(tc.containerID), + types.String(tc.address), + types.Int(tc.port), + types.String(tc.protocol), + ) + assert.Equal(t, types.Bool(tc.expectedResult), result) + }) + } +} + +func TestWasAddressPortProtocolWithNilObjectCache(t *testing.T) { + lib := &nnLibrary{ + objectCache: nil, + functionCache: cache.NewFunctionCache(cache.DefaultFunctionCacheConfig()), + } + + result := lib.wasAddressPortProtocolInEgress( + types.String("test-container-id"), + types.String("192.168.1.100"), + types.Int(80), + types.String("TCP"), + ) + assert.True(t, types.IsError(result)) + + result = lib.wasAddressPortProtocolInIngress( + types.String("test-container-id"), + types.String("172.16.0.10"), + types.Int(8080), + types.String("TCP"), + ) + assert.True(t, types.IsError(result)) +} + +func TestWasAddressPortProtocolWithInvalidTypes(t *testing.T) { + objCache := objectcachev1.RuleObjectCacheMock{ + ContainerIDToSharedData: maps.NewSafeMap[string, *objectcache.WatchedContainerData](), + } + + lib := &nnLibrary{ + objectCache: &objCache, + functionCache: cache.NewFunctionCache(cache.DefaultFunctionCacheConfig()), + } + + // Test with invalid 
containerID type + result := lib.wasAddressPortProtocolInEgress( + types.Int(123), // Should be string + types.String("192.168.1.100"), + types.Int(80), + types.String("TCP"), + ) + assert.True(t, types.IsError(result)) + + // Test with invalid address type + result = lib.wasAddressPortProtocolInEgress( + types.String("test-container-id"), + types.Int(123), // Should be string + types.Int(80), + types.String("TCP"), + ) + assert.True(t, types.IsError(result)) + + // Test with invalid port type + result = lib.wasAddressPortProtocolInEgress( + types.String("test-container-id"), + types.String("192.168.1.100"), + types.String("80"), // Should be int + types.String("TCP"), + ) + assert.True(t, types.IsError(result)) + + // Test with invalid protocol type + result = lib.wasAddressPortProtocolInEgress( + types.String("test-container-id"), + types.String("192.168.1.100"), + types.Int(80), + types.Int(123), // Should be string + ) + assert.True(t, types.IsError(result)) +} + +func TestWasAddressPortProtocolWithNilPort(t *testing.T) { + objCache := objectcachev1.RuleObjectCacheMock{ + ContainerIDToSharedData: maps.NewSafeMap[string, *objectcache.WatchedContainerData](), + } + + objCache.SetSharedContainerData("test-container-id", &objectcache.WatchedContainerData{ + ContainerType: objectcache.Container, + ContainerInfos: map[objectcache.ContainerType][]objectcache.ContainerInfo{ + objectcache.Container: { + { + Name: "test-container", + }, + }, + }, + }) + + nn := &v1beta1.NetworkNeighborhood{} + nn.Spec.Containers = append(nn.Spec.Containers, v1beta1.NetworkNeighborhoodContainer{ + Name: "test-container", + Egress: []v1beta1.NetworkNeighbor{ + { + IPAddress: "192.168.1.100", + Ports: []v1beta1.NetworkPort{ + { + Name: "tcp-80", + Protocol: v1beta1.Protocol("TCP"), + Port: nil, // Nil port + }, + }, + }, + }, + Ingress: []v1beta1.NetworkNeighbor{ + { + IPAddress: "172.16.0.10", + Ports: []v1beta1.NetworkPort{ + { + Name: "tcp-8080", + Protocol: v1beta1.Protocol("TCP"), + Port: nil, // Nil port + }, + }, + }, + }, + }) + objCache.SetNetworkNeighborhood(nn) + + lib := &nnLibrary{ + objectCache: &objCache, + functionCache: cache.NewFunctionCache(cache.DefaultFunctionCacheConfig()), + } + + // Test egress with nil port + result := lib.wasAddressPortProtocolInEgress( + types.String("test-container-id"), + types.String("192.168.1.100"), + types.Int(80), + types.String("TCP"), + ) + assert.Equal(t, types.Bool(false), result) + + // Test ingress with nil port + result = lib.wasAddressPortProtocolInIngress( + types.String("test-container-id"), + types.String("172.16.0.10"), + types.Int(8080), + types.String("TCP"), + ) + assert.Equal(t, types.Bool(false), result) +} diff --git a/pkg/rulemanager/cel/libraries/networkneighborhood/nn.go b/pkg/rulemanager/cel/libraries/networkneighborhood/nn.go new file mode 100644 index 000000000..d4e857a80 --- /dev/null +++ b/pkg/rulemanager/cel/libraries/networkneighborhood/nn.go @@ -0,0 +1,179 @@ +package networkneighborhood + +import ( + "github.com/google/cel-go/cel" + "github.com/google/cel-go/checker" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + "github.com/kubescape/node-agent/pkg/config" + "github.com/kubescape/node-agent/pkg/objectcache" + "github.com/kubescape/node-agent/pkg/rulemanager/cel/libraries" + "github.com/kubescape/node-agent/pkg/rulemanager/cel/libraries/cache" +) + +func New(objectCache objectcache.ObjectCache, config config.Config) libraries.Library { + return &nnLibrary{ + objectCache: objectCache, + 
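// functionCache memoizes results of the nn.* helper functions, keyed per function and arguments and bounded by the configured MaxSize and TTL, so repeated evaluations with the same inputs are served from cache. + 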
functionCache: cache.NewFunctionCache(cache.FunctionCacheConfig{ + MaxSize: config.CelConfigCache.MaxSize, + TTL: config.CelConfigCache.TTL, + }), + } +} + +func NN(objectCache objectcache.ObjectCache, config config.Config) cel.EnvOption { + return cel.Lib(New(objectCache, config)) +} + +type nnLibrary struct { + objectCache objectcache.ObjectCache + functionCache *cache.FunctionCache +} + +func (l *nnLibrary) LibraryName() string { + return "nn" +} + +func (l *nnLibrary) Types() []*cel.Type { + return []*cel.Type{} +} + +func (l *nnLibrary) Declarations() map[string][]cel.FunctionOpt { + return map[string][]cel.FunctionOpt{ + "nn.was_address_in_egress": { + cel.Overload( + "nn_was_address_in_egress", []*cel.Type{cel.StringType, cel.StringType}, cel.BoolType, + cel.FunctionBinding(func(values ...ref.Val) ref.Val { + if len(values) != 2 { + return types.NewErr("expected 2 arguments, got %d", len(values)) + } + wrapperFunc := func(args ...ref.Val) ref.Val { + return l.wasAddressInEgress(args[0], args[1]) + } + cachedFunc := l.functionCache.WithCache(wrapperFunc, "nn.was_address_in_egress") + return cachedFunc(values[0], values[1]) + }), + ), + }, + "nn.was_address_in_ingress": { + cel.Overload( + "nn_was_address_in_ingress", []*cel.Type{cel.StringType, cel.StringType}, cel.BoolType, + cel.FunctionBinding(func(values ...ref.Val) ref.Val { + if len(values) != 2 { + return types.NewErr("expected 2 arguments, got %d", len(values)) + } + wrapperFunc := func(args ...ref.Val) ref.Val { + return l.wasAddressInIngress(args[0], args[1]) + } + cachedFunc := l.functionCache.WithCache(wrapperFunc, "nn.was_address_in_ingress") + return cachedFunc(values[0], values[1]) + }), + ), + }, + "nn.is_domain_in_egress": { + cel.Overload( + "nn_is_domain_in_egress", []*cel.Type{cel.StringType, cel.StringType}, cel.BoolType, + cel.FunctionBinding(func(values ...ref.Val) ref.Val { + if len(values) != 2 { + return types.NewErr("expected 2 arguments, got %d", len(values)) + } + wrapperFunc := func(args ...ref.Val) ref.Val { + result := l.isDomainInEgress(args[0], args[1]) + return result + } + cachedFunc := l.functionCache.WithCache(wrapperFunc, "nn.is_domain_in_egress") + result := cachedFunc(values[0], values[1]) + return result + }), + ), + }, + "nn.is_domain_in_ingress": { + cel.Overload( + "nn_is_domain_in_ingress", []*cel.Type{cel.StringType, cel.StringType}, cel.BoolType, + cel.FunctionBinding(func(values ...ref.Val) ref.Val { + if len(values) != 2 { + return types.NewErr("expected 2 arguments, got %d", len(values)) + } + wrapperFunc := func(args ...ref.Val) ref.Val { + return l.isDomainInIngress(args[0], args[1]) + } + cachedFunc := l.functionCache.WithCache(wrapperFunc, "nn.is_domain_in_ingress") + return cachedFunc(values[0], values[1]) + }), + ), + }, + "nn.was_address_port_protocol_in_egress": { + cel.Overload( + "nn_was_address_port_protocol_in_egress", []*cel.Type{cel.StringType, cel.StringType, cel.IntType, cel.StringType}, cel.BoolType, + cel.FunctionBinding(func(values ...ref.Val) ref.Val { + if len(values) != 4 { + return types.NewErr("expected 4 arguments, got %d", len(values)) + } + wrapperFunc := func(args ...ref.Val) ref.Val { + return l.wasAddressPortProtocolInEgress(args[0], args[1], args[2], args[3]) + } + cachedFunc := l.functionCache.WithCache(wrapperFunc, "nn.was_address_port_protocol_in_egress") + return cachedFunc(values[0], values[1], values[2], values[3]) + }), + ), + }, + "nn.was_address_port_protocol_in_ingress": { + cel.Overload( + "nn_was_address_port_protocol_in_ingress", 
[]*cel.Type{cel.StringType, cel.StringType, cel.IntType, cel.StringType}, cel.BoolType, + cel.FunctionBinding(func(values ...ref.Val) ref.Val { + if len(values) != 4 { + return types.NewErr("expected 4 arguments, got %d", len(values)) + } + wrapperFunc := func(args ...ref.Val) ref.Val { + return l.wasAddressPortProtocolInIngress(args[0], args[1], args[2], args[3]) + } + cachedFunc := l.functionCache.WithCache(wrapperFunc, "nn.was_address_port_protocol_in_ingress") + return cachedFunc(values[0], values[1], values[2], values[3]) + }), + ), + }, + } +} + +func (l *nnLibrary) CompileOptions() []cel.EnvOption { + options := []cel.EnvOption{} + for name, overloads := range l.Declarations() { + options = append(options, cel.Function(name, overloads...)) + } + return options +} + +func (l *nnLibrary) ProgramOptions() []cel.ProgramOption { + return []cel.ProgramOption{} +} + +func (l *nnLibrary) CostEstimator() checker.CostEstimator { + return &nnCostEstimator{} +} + +// nnCostEstimator implements the checker.CostEstimator for the 'nn' library. +type nnCostEstimator struct{} + +func (e *nnCostEstimator) EstimateCallCost(function, overloadID string, target *checker.AstNode, args []checker.AstNode) *checker.CallEstimate { + cost := int64(0) + switch function { + case "nn.was_address_in_egress", "nn.was_address_in_ingress": + // Cache lookup + O(n) linear search through egress/ingress list + cost = 20 + case "nn.is_domain_in_egress", "nn.is_domain_in_ingress": + // Cache lookup + O(n) list iteration + O(m) slice.Contains on DNS names per entry + cost = 35 + case "nn.was_address_port_protocol_in_egress", "nn.was_address_port_protocol_in_ingress": + // Cache lookup + O(n) address search + O(p) nested port/protocol matching + cost = 45 + } + return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: uint64(cost), Max: uint64(cost)}} +} + +func (e *nnCostEstimator) EstimateSize(element checker.AstNode) *checker.SizeEstimate { + return nil // Not providing size estimates for now. 
+} + +// Ensure the implementation satisfies the interface +var _ checker.CostEstimator = (*nnCostEstimator)(nil) +var _ libraries.Library = (*nnLibrary)(nil) diff --git a/pkg/rulemanager/cel/libraries/parse/integration_test.go b/pkg/rulemanager/cel/libraries/parse/integration_test.go new file mode 100644 index 000000000..e7a76c693 --- /dev/null +++ b/pkg/rulemanager/cel/libraries/parse/integration_test.go @@ -0,0 +1,73 @@ +package parse + +import ( + "testing" + + "github.com/google/cel-go/cel" + "github.com/kubescape/node-agent/pkg/config" + "github.com/stretchr/testify/assert" +) + +func TestParseLibraryIntegration(t *testing.T) { + // Create CEL environment with parse library + env, err := cel.NewEnv( + cel.Variable("data", cel.AnyType), + Parse(config.Config{}), + ) + if err != nil { + t.Fatalf("failed to create CEL environment: %v", err) + } + + tests := []struct { + name string + expr string + expected string + }{ + { + name: "get exec path with args", + expr: "parse.get_exec_path(['/bin/ls', '-la'], 'ls')", + expected: "/bin/ls", + }, + { + name: "get exec path with empty first arg", + expr: "parse.get_exec_path(['', '-la'], 'ls')", + expected: "ls", + }, + { + name: "get exec path with empty args", + expr: "parse.get_exec_path([], 'python')", + expected: "python", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ast, issues := env.Compile(tt.expr) + if issues != nil { + t.Fatalf("failed to compile expression: %v", issues.Err()) + } + + program, err := env.Program(ast) + if err != nil { + t.Fatalf("failed to create program: %v", err) + } + + result, _, err := program.Eval(map[string]interface{}{ + "data": map[string]interface{}{ + "args": []string{}, + "comm": "test", + }, + }) + if err != nil { + t.Fatalf("failed to evaluate expression: %v", err) + } + + actual, ok := result.Value().(string) + if !ok { + t.Fatalf("expected string result, got %T", result.Value()) + } + + assert.Equal(t, tt.expected, actual, "result should match expected value") + }) + } +} diff --git a/pkg/rulemanager/cel/libraries/parse/parse.go b/pkg/rulemanager/cel/libraries/parse/parse.go new file mode 100644 index 000000000..ba82f982f --- /dev/null +++ b/pkg/rulemanager/cel/libraries/parse/parse.go @@ -0,0 +1,27 @@ +package parse + +import ( + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + "github.com/kubescape/node-agent/pkg/rulemanager/cel/libraries/celparse" +) + +func (l *parseLibrary) getExecPath(args ref.Val, comm ref.Val) ref.Val { + argsList, err := celparse.ParseList[string](args) + if err != nil { + return types.NewErr("failed to parse args: %v", err) + } + + commStr, ok := comm.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(comm) + } + + // Implement the logic from GetExecPathFromEvent + if len(argsList) > 0 { + if argsList[0] != "" { + return types.String(argsList[0]) + } + } + return types.String(commStr) +} diff --git a/pkg/rulemanager/cel/libraries/parse/parselib.go b/pkg/rulemanager/cel/libraries/parse/parselib.go new file mode 100644 index 000000000..57b05be45 --- /dev/null +++ b/pkg/rulemanager/cel/libraries/parse/parselib.go @@ -0,0 +1,89 @@ +package parse + +import ( + "github.com/google/cel-go/cel" + "github.com/google/cel-go/checker" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + "github.com/kubescape/node-agent/pkg/config" + "github.com/kubescape/node-agent/pkg/rulemanager/cel/libraries" + 
"github.com/kubescape/node-agent/pkg/rulemanager/cel/libraries/cache" +) + +func New(config config.Config) libraries.Library { + return &parseLibrary{ + functionCache: cache.NewFunctionCache(cache.FunctionCacheConfig{ + MaxSize: config.CelConfigCache.MaxSize, + TTL: config.CelConfigCache.TTL, + }), + } +} + +func Parse(config config.Config) cel.EnvOption { + return cel.Lib(New(config)) +} + +type parseLibrary struct { + functionCache *cache.FunctionCache +} + +func (l *parseLibrary) LibraryName() string { + return "parse" +} + +func (l *parseLibrary) Types() []*cel.Type { + return []*cel.Type{} +} + +func (l *parseLibrary) Declarations() map[string][]cel.FunctionOpt { + return map[string][]cel.FunctionOpt{ + "parse.get_exec_path": { + cel.Overload( + "parse_get_exec_path", []*cel.Type{cel.ListType(cel.StringType), cel.StringType}, cel.StringType, + cel.FunctionBinding(func(values ...ref.Val) ref.Val { + if len(values) != 2 { + return types.NewErr("expected 2 arguments, got %d", len(values)) + } + return l.getExecPath(values[0], values[1]) + }), + ), + }, + } +} + +func (l *parseLibrary) CompileOptions() []cel.EnvOption { + options := []cel.EnvOption{} + for name, overloads := range l.Declarations() { + options = append(options, cel.Function(name, overloads...)) + } + return options +} + +func (l *parseLibrary) ProgramOptions() []cel.ProgramOption { + return []cel.ProgramOption{} +} + +func (l *parseLibrary) CostEstimator() checker.CostEstimator { + return &parseCostEstimator{} +} + +// parseCostEstimator implements the checker.CostEstimator for the 'parse' library. +type parseCostEstimator struct{} + +func (e *parseCostEstimator) EstimateCallCost(function, overloadID string, target *checker.AstNode, args []checker.AstNode) *checker.CallEstimate { + cost := int64(0) + switch function { + case "parse.get_exec_path": + // List parsing + simple array access + string comparison - O(1) operation + cost = 5 + } + return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: uint64(cost), Max: uint64(cost)}} +} + +func (e *parseCostEstimator) EstimateSize(element checker.AstNode) *checker.SizeEstimate { + return nil // Not providing size estimates for now. 
+} + +// Ensure the implementation satisfies the interface +var _ checker.CostEstimator = (*parseCostEstimator)(nil) +var _ libraries.Library = (*parseLibrary)(nil) diff --git a/pkg/rulemanager/cel/libraries/parse/parsing_test.go b/pkg/rulemanager/cel/libraries/parse/parsing_test.go new file mode 100644 index 000000000..5677c8b56 --- /dev/null +++ b/pkg/rulemanager/cel/libraries/parse/parsing_test.go @@ -0,0 +1,137 @@ +package parse + +import ( + "testing" + + "github.com/google/cel-go/cel" + "github.com/kubescape/node-agent/pkg/config" + "github.com/stretchr/testify/assert" +) + +func TestParseLibrary(t *testing.T) { + env, err := cel.NewEnv( + cel.Variable("event", cel.AnyType), + Parse(config.Config{}), + ) + if err != nil { + t.Fatalf("failed to create env: %v", err) + } + + tests := []struct { + name string + expr string + expected string + }{ + { + name: "args with first element", + expr: "parse.get_exec_path(['/bin/ls', '-la'], 'ls')", + expected: "/bin/ls", + }, + { + name: "args with empty first element", + expr: "parse.get_exec_path(['', '-la'], 'ls')", + expected: "ls", + }, + { + name: "empty args list", + expr: "parse.get_exec_path([], 'ls')", + expected: "ls", + }, + { + name: "single element in args", + expr: "parse.get_exec_path(['/usr/bin/python'], 'python')", + expected: "/usr/bin/python", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ast, issues := env.Compile(tt.expr) + if issues != nil { + t.Fatalf("failed to compile expression: %v", issues.Err()) + } + + program, err := env.Program(ast) + if err != nil { + t.Fatalf("failed to create program: %v", err) + } + + result, _, err := program.Eval(map[string]interface{}{ + "event": map[string]interface{}{ + "args": []string{}, + "comm": "test", + }, + }) + if err != nil { + t.Fatalf("failed to eval program: %v", err) + } + + actual, ok := result.Value().(string) + if !ok { + t.Fatalf("expected string result, got %T", result.Value()) + } + + assert.Equal(t, tt.expected, actual, "result should match expected value") + }) + } +} + +func TestParseLibraryErrorCases(t *testing.T) { + env, err := cel.NewEnv( + cel.Variable("event", cel.AnyType), + Parse(config.Config{}), + ) + if err != nil { + t.Fatalf("failed to create env: %v", err) + } + + tests := []struct { + name string + expr string + expectError bool + }{ + { + name: "wrong number of arguments", + expr: "parse.get_exec_path(['/bin/ls'])", + expectError: true, + }, + { + name: "wrong argument types", + expr: "parse.get_exec_path('not a list', 'ls')", + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ast, issues := env.Compile(tt.expr) + if issues != nil { + if tt.expectError { + return // Expected compilation error + } + t.Fatalf("failed to compile expression: %v", issues.Err()) + } + + program, err := env.Program(ast) + if err != nil { + if tt.expectError { + return // Expected program creation error + } + t.Fatalf("failed to create program: %v", err) + } + + _, _, err = program.Eval(map[string]interface{}{ + "event": map[string]interface{}{ + "args": []string{}, + "comm": "test", + }, + }) + if err != nil && tt.expectError { + return // Expected evaluation error + } + if err != nil && !tt.expectError { + t.Fatalf("unexpected error during evaluation: %v", err) + } + }) + } +} diff --git a/pkg/rulemanager/cel/libraries/process/process.go b/pkg/rulemanager/cel/libraries/process/process.go new file mode 100644 index 000000000..d62accecc --- /dev/null +++ 
b/pkg/rulemanager/cel/libraries/process/process.go @@ -0,0 +1,109 @@ +package process + +import ( + "strings" + + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + "github.com/prometheus/procfs" +) + +// LD_PRELOAD_ENV_VARS lists the dynamic-linker environment variables that can be used to preload or hook libraries +var LD_PRELOAD_ENV_VARS = []string{ + "LD_PRELOAD", + "LD_LIBRARY_PATH", + "LD_AUDIT", + "LD_BIND_NOW", + "LD_DEBUG", + "LD_PROFILE", + "LD_USE_LOAD_BIAS", + "LD_SHOW_AUXV", + "LD_ORIGIN_PATH", + "LD_LIBRARY_PATH_FDS", + "LD_ASSUME_KERNEL", + "LD_VERBOSE", + "LD_WARN", + "LD_TRACE_LOADED_OBJECTS", + "LD_BIND_NOT", + "LD_NOWARN", + "LD_HWCAP_MASK", +} + +func (l *processLibrary) getProcessEnv(pid ref.Val) ref.Val { + pidInt, ok := pid.Value().(int64) + if !ok { + return types.MaybeNoSuchOverloadErr(pid) + } + + envMap, err := GetProcessEnv(int(pidInt)) + if err != nil { + return types.NewErr("failed to get process environment: %v", err) + } + + // Convert map[string]string to map[string]interface{} for CEL + result := make(map[string]interface{}) + for k, v := range envMap { + result[k] = v + } + + return types.NewDynamicMap(types.DefaultTypeAdapter, result) +} + +func (l *processLibrary) getLdHookVar(pid ref.Val) ref.Val { + pidUint, ok := pid.Value().(uint64) + if !ok { + return types.MaybeNoSuchOverloadErr(pid) + } + + // Get process environment variables + envMap, err := GetProcessEnv(int(pidUint)) + if err != nil { + return types.String("") + } + + // Check for LD hook variables + envVar, found := GetLdHookVar(envMap) + if !found { + return types.String("") + } + + return types.String(envVar) +} + +// GetProcessEnv retrieves the environment variables for a given process ID +func GetProcessEnv(pid int) (map[string]string, error) { + fs, err := procfs.NewFS("/proc") + if err != nil { + return nil, err + } + + proc, err := fs.Proc(pid) + if err != nil { + return nil, err + } + + env, err := proc.Environ() + if err != nil { + return nil, err + } + + envMap := make(map[string]string) + for _, e := range env { + parts := strings.SplitN(e, "=", 2) + if len(parts) == 2 { + envMap[parts[0]] = parts[1] + } + } + + return envMap, nil +} + +// GetLdHookVar checks if any LD_PRELOAD environment variables are set +func GetLdHookVar(envVars map[string]string) (string, bool) { + for _, envVar := range LD_PRELOAD_ENV_VARS { + if _, ok := envVars[envVar]; ok { + return envVar, true + } + } + return "", false +} diff --git a/pkg/rulemanager/cel/libraries/process/process_test.go b/pkg/rulemanager/cel/libraries/process/process_test.go new file mode 100644 index 000000000..21d86d71f --- /dev/null +++ b/pkg/rulemanager/cel/libraries/process/process_test.go @@ -0,0 +1,244 @@ +package process + +import ( + "fmt" + "testing" + + "github.com/google/cel-go/cel" + "github.com/kubescape/node-agent/pkg/config" + "github.com/stretchr/testify/assert" +) + +func TestProcessLibrary(t *testing.T) { + env, err := cel.NewEnv( + cel.Variable("event", cel.AnyType), + Process(config.Config{}), + ) + if err != nil { + t.Fatalf("failed to create env: %v", err) + } + + // Use PID 1 (the init process) so the test targets a PID that always exists + currentPID := 1 + + tests := []struct { + name string + expr string + expected interface{} + }{ + { + name: "get_process_env with current process 
PID", + expr: fmt.Sprintf("process.get_process_env(%d)", currentPID), + expected: map[string]interface{}{}, // This will be empty for PID 1, but the function should work + }, + { + name: "get_ld_hook_var with current process PID", + expr: fmt.Sprintf("process.get_ld_hook_var(%du)", currentPID), + expected: "", // This will be empty for PID 1, but the function should work + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ast, issues := env.Compile(tt.expr) + if issues != nil { + t.Fatalf("failed to compile expression: %v", issues.Err()) + } + + program, err := env.Program(ast) + if err != nil { + t.Fatalf("failed to create program: %v", err) + } + + result, _, err := program.Eval(map[string]interface{}{ + "event": map[string]interface{}{ + "pid": 1234, + }, + }) + if err != nil { + // If we get a permission error, that's expected for PID 1 + // Just check that the function compiled and ran correctly + if tt.name == "get_process_env with current process PID" { + t.Logf("Permission denied for PID 1 (expected): %v", err) + return + } else if tt.name == "get_ld_hook_var with current process PID" { + t.Logf("Permission denied for PID 1 (expected): %v", err) + return + } + t.Fatalf("failed to eval program: %v", err) + } + + actual := result.Value() + // For get_process_env, we expect a map (could be empty) + // For get_ld_hook_var, we expect a string (could be empty) + if tt.name == "get_process_env with current process PID" { + _, isMap := actual.(map[string]interface{}) + assert.True(t, isMap, "get_process_env should return a map") + } else if tt.name == "get_ld_hook_var with current process PID" { + _, isString := actual.(string) + assert.True(t, isString, "get_ld_hook_var should return a string") + } + }) + } +} + +func TestProcessLibraryErrorCases(t *testing.T) { + env, err := cel.NewEnv( + cel.Variable("event", cel.AnyType), + Process(config.Config{}), + ) + if err != nil { + t.Fatalf("failed to create env: %v", err) + } + + tests := []struct { + name string + expr string + expectError bool + }{ + { + name: "get_ld_hook_var wrong number of arguments", + expr: "process.get_ld_hook_var()", + expectError: true, + }, + { + name: "get_ld_hook_var too many arguments", + expr: "process.get_ld_hook_var(123, 'extra')", + expectError: true, + }, + { + name: "get_ld_hook_var wrong argument type", + expr: "process.get_ld_hook_var('not_a_number')", + expectError: true, + }, + { + name: "get_process_env wrong number of arguments", + expr: "process.get_process_env()", + expectError: true, + }, + { + name: "get_process_env too many arguments", + expr: "process.get_process_env(123, 'extra')", + expectError: true, + }, + { + name: "get_process_env wrong argument type", + expr: "process.get_process_env('not_a_number')", + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ast, issues := env.Compile(tt.expr) + if issues != nil { + if tt.expectError { + return // Expected compilation error + } + t.Fatalf("failed to compile expression: %v", issues.Err()) + } + + program, err := env.Program(ast) + if err != nil { + if tt.expectError { + return // Expected program creation error + } + t.Fatalf("failed to create program: %v", err) + } + + _, _, err = program.Eval(map[string]interface{}{ + "event": map[string]interface{}{ + "pid": 1234, + }, + }) + if err != nil && tt.expectError { + return // Expected evaluation error + } + if err != nil && !tt.expectError { + t.Fatalf("unexpected error during evaluation: %v", err) + } + }) + } +} + +func 
TestGetLdHookVar(t *testing.T) { + tests := []struct { + name string + envVars map[string]string + expected string + found bool + }{ + { + name: "LD_PRELOAD present", + envVars: map[string]string{"LD_PRELOAD": "/path/to/lib.so", "PATH": "/usr/bin"}, + expected: "LD_PRELOAD", + found: true, + }, + { + name: "LD_LIBRARY_PATH present", + envVars: map[string]string{"LD_LIBRARY_PATH": "/usr/lib", "HOME": "/home/user"}, + expected: "LD_LIBRARY_PATH", + found: true, + }, + { + name: "LD_AUDIT present", + envVars: map[string]string{"LD_AUDIT": "/path/to/audit.so", "PATH": "/usr/bin"}, + expected: "LD_AUDIT", + found: true, + }, + { + name: "no LD variables present", + envVars: map[string]string{"PATH": "/usr/bin", "HOME": "/home/user"}, + expected: "", + found: false, + }, + { + name: "empty environment", + envVars: map[string]string{}, + expected: "", + found: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, found := GetLdHookVar(tt.envVars) + assert.Equal(t, tt.expected, result) + assert.Equal(t, tt.found, found) + }) + } +} + +func TestLD_PRELOAD_ENV_VARS(t *testing.T) { + // Test that all expected LD environment variables are included + expectedVars := []string{ + "LD_PRELOAD", + "LD_LIBRARY_PATH", + "LD_AUDIT", + "LD_BIND_NOW", + "LD_DEBUG", + "LD_PROFILE", + "LD_USE_LOAD_BIAS", + "LD_SHOW_AUXV", + "LD_ORIGIN_PATH", + "LD_LIBRARY_PATH_FDS", + "LD_ASSUME_KERNEL", + "LD_VERBOSE", + "LD_WARN", + "LD_TRACE_LOADED_OBJECTS", + "LD_BIND_NOT", + "LD_NOWARN", + "LD_HWCAP_MASK", + } + + for _, expectedVar := range expectedVars { + found := false + for _, actualVar := range LD_PRELOAD_ENV_VARS { + if actualVar == expectedVar { + found = true + break + } + } + assert.True(t, found, "Expected LD environment variable %s not found in LD_PRELOAD_ENV_VARS", expectedVar) + } +} diff --git a/pkg/rulemanager/cel/libraries/process/processlib.go b/pkg/rulemanager/cel/libraries/process/processlib.go new file mode 100644 index 000000000..56e31c87e --- /dev/null +++ b/pkg/rulemanager/cel/libraries/process/processlib.go @@ -0,0 +1,111 @@ +package process + +import ( + "github.com/google/cel-go/cel" + "github.com/google/cel-go/checker" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + "github.com/kubescape/node-agent/pkg/config" + "github.com/kubescape/node-agent/pkg/rulemanager/cel/libraries" + "github.com/kubescape/node-agent/pkg/rulemanager/cel/libraries/cache" +) + +func New(config config.Config) libraries.Library { + return &processLibrary{ + functionCache: cache.NewFunctionCache(cache.FunctionCacheConfig{ + MaxSize: config.CelConfigCache.MaxSize, + TTL: config.CelConfigCache.TTL, + }), + } +} + +func Process(config config.Config) cel.EnvOption { + return cel.Lib(New(config)) +} + +type processLibrary struct { + functionCache *cache.FunctionCache +} + +func (l *processLibrary) LibraryName() string { + return "process" +} + +func (l *processLibrary) Types() []*cel.Type { + return []*cel.Type{} +} + +func (l *processLibrary) Declarations() map[string][]cel.FunctionOpt { + return map[string][]cel.FunctionOpt{ + "process.get_process_env": { + cel.Overload( + "process_get_process_env", []*cel.Type{cel.IntType}, cel.MapType(cel.StringType, cel.StringType), + cel.FunctionBinding(func(values ...ref.Val) ref.Val { + if len(values) != 1 { + return types.NewErr("expected 1 argument, got %d", len(values)) + } + wrapperFunc := func(args ...ref.Val) ref.Val { + return l.getProcessEnv(args[0]) + } + cachedFunc := 
l.functionCache.WithCache(wrapperFunc, "process.get_process_env") + return cachedFunc(values[0]) + }), + ), + }, + "process.get_ld_hook_var": { + cel.Overload( + "process_get_ld_hook_var", []*cel.Type{cel.UintType}, cel.StringType, + cel.FunctionBinding(func(values ...ref.Val) ref.Val { + if len(values) != 1 { + return types.NewErr("expected 1 argument, got %d", len(values)) + } + wrapperFunc := func(args ...ref.Val) ref.Val { + return l.getLdHookVar(args[0]) + } + cachedFunc := l.functionCache.WithCache(wrapperFunc, "process.get_ld_hook_var") + return cachedFunc(values[0]) + }), + ), + }, + } +} + +func (l *processLibrary) CompileOptions() []cel.EnvOption { + options := []cel.EnvOption{} + for name, overloads := range l.Declarations() { + options = append(options, cel.Function(name, overloads...)) + } + return options +} + +func (l *processLibrary) ProgramOptions() []cel.ProgramOption { + return []cel.ProgramOption{} +} + +func (l *processLibrary) CostEstimator() checker.CostEstimator { + return &processCostEstimator{} +} + +// processCostEstimator implements the checker.CostEstimator for the 'process' library. +type processCostEstimator struct{} + +func (e *processCostEstimator) EstimateCallCost(function, overloadID string, target *checker.AstNode, args []checker.AstNode) *checker.CallEstimate { + cost := int64(0) + switch function { + case "process.get_process_env": + // File I/O to read /proc/{pid}/environ + O(n) parsing of environment variables + cost = 50 + case "process.get_ld_hook_var": + // File I/O + O(n) environment parsing + O(m) LD_PRELOAD array search (m=41 constants) + cost = 60 + } + return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: uint64(cost), Max: uint64(cost)}} +} + +func (e *processCostEstimator) EstimateSize(element checker.AstNode) *checker.SizeEstimate { + return nil // Not providing size estimates for now. +} + +// Ensure the implementation satisfies the interface +var _ checker.CostEstimator = (*processCostEstimator)(nil) +var _ libraries.Library = (*processLibrary)(nil) diff --git a/pkg/rulemanager/cel/serialize.go b/pkg/rulemanager/cel/serialize.go new file mode 100644 index 000000000..2cd8bcd6c --- /dev/null +++ b/pkg/rulemanager/cel/serialize.go @@ -0,0 +1,44 @@ +package cel + +import ( + "encoding/json" + + "github.com/kubescape/go-logger" + "github.com/kubescape/go-logger/helpers" +) + +// CelSerializer is an interface that serializes events for CEL evaluation. +type CelSerializer interface { + Serialize(event any) map[string]any +} + +// CelEventSerializer is a default implementation of CelSerializer. 
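+// It marshals the event to JSON and unmarshals it back into a map, so CEL expressions can address fields by their JSON names; when the marshaled map wraps an inner "Event" object that itself contains an "Event" field, that wrapper value is exposed as the top-level "event" variable, otherwise the whole map is.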
+type CelEventSerializer struct{} + +func (ces *CelEventSerializer) Serialize(event any) map[string]any { + bytes, err := json.Marshal(event) + if err != nil { + logger.L().Error("Error marshaling event to JSON", helpers.Error(err)) + // Fallback or return an error map + return map[string]any{"error": "serialization failed"} + } + + var eventMap map[string]any + if err := json.Unmarshal(bytes, &eventMap); err != nil { + logger.L().Error("Error unmarshaling JSON to map", helpers.Error(err)) + // Fallback or return an error map + return map[string]any{"error": "deserialization failed"} + } + + if eventMap["Event"] != nil { + if nestedEvent, ok := eventMap["Event"].(map[string]any); ok && nestedEvent["Event"] != nil { + return map[string]any{"event": eventMap["Event"]} + } + } + + return map[string]any{ + "event": eventMap, + } +} + +var _ CelSerializer = (*CelEventSerializer)(nil) diff --git a/pkg/rulemanager/containercallbacks.go b/pkg/rulemanager/containercallbacks.go new file mode 100644 index 000000000..594f1b4dc --- /dev/null +++ b/pkg/rulemanager/containercallbacks.go @@ -0,0 +1,186 @@ +package rulemanager + +import ( + "context" + "fmt" + "strings" + "time" + + apitypes "github.com/armosec/armoapi-go/armotypes" + backoffv5 "github.com/cenkalti/backoff/v5" + containercollection "github.com/inspektor-gadget/inspektor-gadget/pkg/container-collection" + eventtypes "github.com/inspektor-gadget/inspektor-gadget/pkg/types" + "github.com/kubescape/go-logger" + "github.com/kubescape/go-logger/helpers" + "github.com/kubescape/node-agent/pkg/ebpf/events" + "github.com/kubescape/node-agent/pkg/objectcache" + "github.com/kubescape/node-agent/pkg/rulemanager/types" + "github.com/kubescape/node-agent/pkg/utils" +) + +func (rm *RuleManager) monitorContainer(container *containercollection.Container, k8sContainerID string) error { + logger.L().Debug("RuleManager - start monitor on container", + helpers.String("container ID", container.Runtime.ContainerID), + helpers.String("k8s container id", k8sContainerID)) + + syscallTicker := time.NewTicker(syscallPeriod) + + for { + select { + case <-rm.ctx.Done(): + logger.L().Debug("RuleManager - stop monitor on container", + helpers.String("container ID", container.Runtime.ContainerID), + helpers.String("k8s container id", k8sContainerID)) + return nil + case <-syscallTicker.C: + if rm.syscallPeekFunc == nil { + logger.L().Debug("RuleManager - syscallPeekFunc is not set", helpers.String("container ID", container.Runtime.ContainerID)) + continue + } + + if container.Mntns == 0 { + logger.L().Debug("RuleManager - mount namespace ID is not set", helpers.String("container ID", container.Runtime.ContainerID)) + } + + if !rm.trackedContainers.Contains(k8sContainerID) { + logger.L().Debug("RuleManager - container is not tracked", helpers.String("container ID", container.Runtime.ContainerID)) + return nil + } + + var syscalls []string + if syscallsFromFunc, err := rm.syscallPeekFunc(container.Mntns); err == nil { + syscalls = syscallsFromFunc + } + + if len(syscalls) == 0 { + continue + } + + for _, syscall := range syscalls { + event := types.SyscallEvent{ + Event: eventtypes.Event{ + Timestamp: eventtypes.Time(time.Now().UnixNano()), + Type: eventtypes.NORMAL, + CommonData: eventtypes.CommonData{ + Runtime: eventtypes.BasicRuntimeMetadata{ + ContainerID: container.Runtime.ContainerID, + RuntimeName: container.Runtime.RuntimeName, + }, + K8s: eventtypes.K8sMetadata{ + Node: rm.cfg.NodeName, + BasicK8sMetadata: eventtypes.BasicK8sMetadata{ + Namespace: 
container.K8s.Namespace, + PodName: container.K8s.PodName, + PodLabels: container.K8s.PodLabels, + ContainerName: container.K8s.ContainerName, + }, + }, + }, + }, + WithMountNsID: eventtypes.WithMountNsID{ + MountNsID: container.Mntns, + }, + Pid: container.ContainerPid(), + // TODO: Figure out how to get UID, GID and comm from the syscall. + // Uid: container.OciConfig.Process.User.UID, + // Gid: container.OciConfig.Process.User.GID, + // Comm: container.OciConfig.Process.Args[0], + SyscallName: syscall, + } + + tree, err := rm.processManager.GetContainerProcessTree(container.Runtime.ContainerID, event.Pid, true) + if err != nil { + process := apitypes.Process{ + PID: event.Pid, + } + + tree = process + } + + rm.ReportEnrichedEvent(&events.EnrichedEvent{ + EventType: utils.SyscallEventType, + Event: &event, + ContainerID: container.Runtime.ContainerID, + ProcessTree: tree, + }) + + } + } + } +} + +func (rm *RuleManager) ContainerCallback(notif containercollection.PubSubEvent) { + // check if the container should be ignored + if rm.cfg.IgnoreContainer(notif.Container.K8s.Namespace, notif.Container.K8s.PodName, notif.Container.K8s.PodLabels) { + return + } + + k8sContainerID := utils.CreateK8sContainerID(notif.Container.K8s.Namespace, notif.Container.K8s.PodName, notif.Container.K8s.ContainerName) + + switch notif.Type { + case containercollection.EventTypeAddContainer: + logger.L().Debug("RuleManager - add container", + helpers.String("container ID", notif.Container.Runtime.ContainerID), + helpers.String("k8s workload", k8sContainerID)) + + if rm.trackedContainers.Contains(k8sContainerID) { + logger.L().Debug("RuleManager - container already exist in memory", + helpers.String("container ID", notif.Container.Runtime.ContainerID), + helpers.String("k8s workload", k8sContainerID)) + return + } + + rm.trackedContainers.Add(k8sContainerID) + shim, err := utils.GetProcessStat(int(notif.Container.ContainerPid())) + if err != nil { + logger.L().Warning("RuleManager - failed to get shim process", helpers.Error(err)) + } else { + rm.containerIdToShimPid.Set(notif.Container.Runtime.ContainerID, uint32(shim.PPID)) + } + rm.containerIdToPid.Set(notif.Container.Runtime.ContainerID, notif.Container.ContainerPid()) + go rm.startRuleManager(notif.Container, k8sContainerID) + case containercollection.EventTypeRemoveContainer: + logger.L().Debug("RuleManager - remove container", + helpers.String("container ID", notif.Container.Runtime.ContainerID), + helpers.String("k8s workload", k8sContainerID)) + + rm.trackedContainers.Remove(k8sContainerID) + namespace := notif.Container.K8s.Namespace + podName := notif.Container.K8s.PodName + podID := utils.CreateK8sPodID(namespace, podName) + + time.AfterFunc(10*time.Minute, func() { + stillTracked := false + rm.trackedContainers.Each(func(id string) bool { + // Parse the container ID to reliably extract the pod info + parts := strings.Split(id, "/") + if len(parts) == 3 && parts[0] == namespace && parts[1] == podName { + stillTracked = true + return true // We found a match, can stop iteration + } + return false // No match yet, continue looking + }) + + if !stillTracked { + logger.L().Debug("RuleManager - removing pod from podToWlid map", + helpers.String("podID", podID)) + rm.podToWlid.Delete(podID) + } else { + logger.L().Debug("RuleManager - keeping pod in podToWlid map due to active containers", + helpers.String("podID", podID)) + } + }) + + rm.containerIdToShimPid.Delete(notif.Container.Runtime.ContainerID) + 
rm.containerIdToPid.Delete(notif.Container.Runtime.ContainerID) + } +} + +func (rm *RuleManager) waitForSharedContainerData(containerID string) (*objectcache.WatchedContainerData, error) { + return backoffv5.Retry(context.Background(), func() (*objectcache.WatchedContainerData, error) { + if sharedData := rm.objectCache.K8sObjectCache().GetSharedContainerData(containerID); sharedData != nil { + return sharedData, nil + } + return nil, fmt.Errorf("container %s not found in shared data", containerID) + }, backoffv5.WithBackOff(backoffv5.NewExponentialBackOff())) +} diff --git a/pkg/rulemanager/profilehelper/profilehelper.go b/pkg/rulemanager/profilehelper/profilehelper.go new file mode 100644 index 000000000..20756675f --- /dev/null +++ b/pkg/rulemanager/profilehelper/profilehelper.go @@ -0,0 +1,115 @@ +package profilehelper + +import ( + "errors" + + "github.com/kubescape/node-agent/pkg/objectcache" + "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" +) + +func GetApplicationProfile(containerID string, objectCache objectcache.ObjectCache) (*v1beta1.ApplicationProfile, error) { + ap := objectCache.ApplicationProfileCache().GetApplicationProfile(containerID) + if ap == nil { + return nil, errors.New("no profile available") + } + return ap, nil +} + +func GetNetworkNeighborhood(containerID string, objectCache objectcache.ObjectCache) (*v1beta1.NetworkNeighborhood, error) { + nn := objectCache.NetworkNeighborhoodCache().GetNetworkNeighborhood(containerID) + if nn == nil { + return nil, errors.New("no profile available") + } + return nn, nil +} + +func GetContainerFromApplicationProfile(ap *v1beta1.ApplicationProfile, containerName string) (v1beta1.ApplicationProfileContainer, error) { + for _, s := range ap.Spec.Containers { + if s.Name == containerName { + return s, nil + } + } + for _, s := range ap.Spec.InitContainers { + if s.Name == containerName { + return s, nil + } + } + for _, s := range ap.Spec.EphemeralContainers { + if s.Name == containerName { + return s, nil + } + } + return v1beta1.ApplicationProfileContainer{}, errors.New("container not found") +} + +func GetContainerFromNetworkNeighborhood(nn *v1beta1.NetworkNeighborhood, containerName string) (v1beta1.NetworkNeighborhoodContainer, error) { + for _, c := range nn.Spec.Containers { + if c.Name == containerName { + return c, nil + } + } + for _, c := range nn.Spec.InitContainers { + if c.Name == containerName { + return c, nil + } + } + for _, c := range nn.Spec.EphemeralContainers { + if c.Name == containerName { + return c, nil + } + } + return v1beta1.NetworkNeighborhoodContainer{}, errors.New("container not found") +} + +func GetContainerName(objectCache objectcache.ObjectCache, containerID string) string { + sharedData := objectCache.K8sObjectCache().GetSharedContainerData(containerID) + if sharedData == nil { + + return "" + } + + containerInfos, exists := sharedData.ContainerInfos[objectcache.ContainerType(sharedData.ContainerType)] + if !exists || len(containerInfos) == 0 { + return "" + } + + return containerInfos[sharedData.ContainerIndex].Name +} + +func GetContainerApplicationProfile(objectCache objectcache.ObjectCache, containerID string) (v1beta1.ApplicationProfileContainer, error) { + ap, err := GetApplicationProfile(containerID, objectCache) + if err != nil { + return v1beta1.ApplicationProfileContainer{}, err + } + + containerName := GetContainerName(objectCache, containerID) + if containerName == "" { + return v1beta1.ApplicationProfileContainer{}, errors.New("container name not found") + } + + 
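// Narrow the profile to the entry matching this container's name (regular, init, and ephemeral containers are all searched). + 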
container, err := GetContainerFromApplicationProfile(ap, containerName) + if err != nil { + return v1beta1.ApplicationProfileContainer{}, err + } + + return container, nil +} + +func GetContainerNetworkNeighborhood(objectCache objectcache.ObjectCache, containerID string) (v1beta1.NetworkNeighborhoodContainer, error) { + nn, err := GetNetworkNeighborhood(containerID, objectCache) + if err != nil { + return v1beta1.NetworkNeighborhoodContainer{}, err + } + + containerName := GetContainerName(objectCache, containerID) + if containerName == "" { + return v1beta1.NetworkNeighborhoodContainer{}, errors.New("container name not found") + } + + container, err := GetContainerFromNetworkNeighborhood(nn, containerName) + if err != nil { + return v1beta1.NetworkNeighborhoodContainer{}, err + } + + return container, nil +} diff --git a/pkg/rulemanager/rule_manager.go b/pkg/rulemanager/rule_manager.go new file mode 100644 index 000000000..a751c2701 --- /dev/null +++ b/pkg/rulemanager/rule_manager.go @@ -0,0 +1,379 @@ +package rulemanager + +import ( + "context" + "crypto/md5" + "fmt" + "time" + + "github.com/armosec/armoapi-go/armotypes" + mapset "github.com/deckarep/golang-set/v2" + "github.com/goradd/maps" + containercollection "github.com/inspektor-gadget/inspektor-gadget/pkg/container-collection" + "github.com/kubescape/go-logger" + "github.com/kubescape/go-logger/helpers" + helpersv1 "github.com/kubescape/k8s-interface/instanceidhandler/v1/helpers" + "github.com/kubescape/node-agent/pkg/config" + "github.com/kubescape/node-agent/pkg/dnsmanager" + "github.com/kubescape/node-agent/pkg/ebpf/events" + "github.com/kubescape/node-agent/pkg/exporters" + "github.com/kubescape/node-agent/pkg/k8sclient" + "github.com/kubescape/node-agent/pkg/metricsmanager" + "github.com/kubescape/node-agent/pkg/objectcache" + "github.com/kubescape/node-agent/pkg/processtree" + bindingcache "github.com/kubescape/node-agent/pkg/rulebindingmanager" + "github.com/kubescape/node-agent/pkg/rulemanager/profilehelper" + "github.com/kubescape/node-agent/pkg/rulemanager/ruleadapters" + "github.com/kubescape/node-agent/pkg/rulemanager/ruleadapters/adapters" + "github.com/kubescape/node-agent/pkg/rulemanager/rulecooldown" + "github.com/kubescape/node-agent/pkg/rulemanager/types" + typesv1 "github.com/kubescape/node-agent/pkg/rulemanager/types/v1" + "github.com/kubescape/node-agent/pkg/utils" + + cel "github.com/kubescape/node-agent/pkg/rulemanager/cel" + corev1 "k8s.io/api/core/v1" +) + +const ( + syscallPeriod = 5 * time.Second +) + +type RuleManager struct { + cfg config.Config + ruleBindingCache bindingcache.RuleBindingCache + trackedContainers mapset.Set[string] // key is k8sContainerID + k8sClient k8sclient.K8sClientInterface + ctx context.Context + objectCache objectcache.ObjectCache + exporter exporters.Exporter + metrics metricsmanager.MetricsManager + syscallPeekFunc func(nsMountId uint64) ([]string, error) + podToWlid maps.SafeMap[string, string] // key is namespace/podName + containerIdToShimPid maps.SafeMap[string, uint32] + containerIdToPid maps.SafeMap[string, uint32] + enricher types.Enricher + processManager processtree.ProcessTreeManager + celEvaluator cel.CELRuleEvaluator + ruleCooldown *rulecooldown.RuleCooldown + adapterFactory *ruleadapters.EventRuleAdapterFactory + ruleFailureCreator ruleadapters.RuleFailureCreatorInterface + rulePolicyValidator *RulePolicyValidator +} + +var _ RuleManagerClient = (*RuleManager)(nil) + +func CreateRuleManager( + ctx context.Context, + cfg config.Config, + k8sClient 
k8sclient.K8sClientInterface, + ruleBindingCache bindingcache.RuleBindingCache, + objectCache objectcache.ObjectCache, + exporter exporters.Exporter, + metrics metricsmanager.MetricsManager, + processManager processtree.ProcessTreeManager, + dnsManager dnsmanager.DNSResolver, + enricher types.Enricher, + ruleCooldown *rulecooldown.RuleCooldown, + adapterFactory *ruleadapters.EventRuleAdapterFactory, + celEvaluator cel.CELRuleEvaluator, +) (*RuleManager, error) { + ruleFailureCreator := ruleadapters.NewRuleFailureCreator(enricher, dnsManager, adapterFactory) + rulePolicyValidator := NewRulePolicyValidator(objectCache) + + r := &RuleManager{ + cfg: cfg, + ctx: ctx, + k8sClient: k8sClient, + trackedContainers: mapset.NewSet[string](), + ruleBindingCache: ruleBindingCache, + objectCache: objectCache, + exporter: exporter, + metrics: metrics, + adapterFactory: adapterFactory, + enricher: enricher, + processManager: processManager, + ruleCooldown: ruleCooldown, + celEvaluator: celEvaluator, + ruleFailureCreator: ruleFailureCreator, + rulePolicyValidator: rulePolicyValidator, + } + + return r, nil +} + +func (rm *RuleManager) startRuleManager(container *containercollection.Container, k8sContainerID string) { + sharedData, err := rm.waitForSharedContainerData(container.Runtime.ContainerID) + if err != nil { + logger.L().Error("RuleManager - failed to get shared container data", helpers.Error(err)) + return + } + + podID := utils.CreateK8sPodID(container.K8s.Namespace, container.K8s.PodName) + if !rm.podToWlid.Has(podID) { + w := sharedData.Wlid + if w != "" { + rm.podToWlid.Set(podID, w) + } else { + logger.L().Debug("RuleManager - failed to get workload identifier", helpers.String("k8s workload", container.K8s.PodName)) + } + } + + if err := rm.monitorContainer(container, k8sContainerID); err != nil { + logger.L().Debug("RuleManager - stop monitor on container", helpers.String("reason", err.Error()), + helpers.String("container ID", container.Runtime.ContainerID), + helpers.String("k8s container id", k8sContainerID)) + } +} + +func (rm *RuleManager) RegisterPeekFunc(peek func(mntns uint64) ([]string, error)) { + rm.syscallPeekFunc = peek +} + +func (rm *RuleManager) ReportEnrichedEvent(enrichedEvent *events.EnrichedEvent) { + var profileExists bool + podId := utils.CreateK8sPodID(enrichedEvent.Event.GetNamespace(), enrichedEvent.Event.GetPod()) + details, ok := rm.podToWlid.Load(podId) + if !ok { + return + } + + if enrichedEvent.Event.GetPod() == "" || enrichedEvent.Event.GetNamespace() == "" { + return + } + + rules := rm.ruleBindingCache.ListRulesForPod(enrichedEvent.Event.GetNamespace(), enrichedEvent.Event.GetPod()) + if len(rules) == 0 { + return + } + + if !isSupportedEventType(rules, enrichedEvent) { + return + } + + _, err := profilehelper.GetContainerApplicationProfile(rm.objectCache, enrichedEvent.ContainerID) + profileExists = err == nil + + for _, rule := range rules { + if !rule.Enabled { + continue + } + if !profileExists && rule.ProfileDependency == armotypes.Required { + continue + } + + ruleExpressions := rm.getRuleExpressions(rule, enrichedEvent.EventType) + if len(ruleExpressions) == 0 { + continue + } + + if rule.SupportPolicy && rm.validateRulePolicy(rule, enrichedEvent.Event, enrichedEvent.ContainerID) { + continue + } + + startTime := time.Now() + shouldAlert, err := rm.evaluateRule(enrichedEvent, enrichedEvent.EventType, rule) + evaluationTime := time.Since(startTime) + rm.metrics.ReportRuleEvaluationTime(rule.Name, enrichedEvent.EventType, evaluationTime) + + if err != 
nil { + logger.L().Error("RuleManager - failed to evaluate rule", helpers.Error(err)) + continue + } + + if shouldAlert { + rm.metrics.ReportRuleAlert(rule.Name) + message, uniqueID, err := rm.getUniqueIdAndMessage(enrichedEvent, rule) + if err != nil { + logger.L().Error("RuleManager - failed to get unique ID and message", helpers.Error(err)) + continue + } + + if shouldCooldown, _ := rm.ruleCooldown.ShouldCooldown(uniqueID, enrichedEvent.ContainerID, rule.ID); shouldCooldown { + continue + } + + ruleFailure := rm.ruleFailureCreator.CreateRuleFailure(rule, enrichedEvent, rm.objectCache, message, uniqueID) + if ruleFailure == nil { + logger.L().Error("RuleManager - failed to create rule failure", helpers.String("rule", rule.Name), + helpers.String("message", message), + helpers.String("uniqueID", uniqueID), + helpers.String("enrichedEvent.EventType", string(enrichedEvent.EventType)), + ) + continue + } + + ruleFailure.SetWorkloadDetails(details) + rm.exporter.SendRuleAlert(ruleFailure) + } + rm.metrics.ReportRuleProcessed(rule.Name) + } +} + +func (rm *RuleManager) HasApplicableRuleBindings(namespace, name string) bool { + return len(rm.ruleBindingCache.ListRulesForPod(namespace, name)) > 0 +} + +func (rm *RuleManager) HasFinalApplicationProfile(pod *corev1.Pod) bool { + for _, c := range utils.GetContainerStatuses(pod.Status) { + ap := rm.objectCache.ApplicationProfileCache().GetApplicationProfile(utils.TrimRuntimePrefix(c.ContainerID)) + if ap != nil { + if status, ok := ap.Annotations[helpersv1.StatusMetadataKey]; ok { + // in theory, only completed profiles are stored in cache, but we check anyway + return status == helpersv1.Completed + } + } + } + return false +} + +func (rm *RuleManager) IsContainerMonitored(k8sContainerID string) bool { + return rm.trackedContainers.Contains(k8sContainerID) +} + +func (rm *RuleManager) IsPodMonitored(namespace, pod string) bool { + return rm.podToWlid.Has(utils.CreateK8sPodID(namespace, pod)) +} + +func (rm *RuleManager) EvaluatePolicyRulesForEvent(eventType utils.EventType, event utils.K8sEvent) []string { + results := []string{} + + creator := rm.ruleBindingCache.GetRuleCreator() + rules := creator.CreateRulePolicyRulesByEventType(eventType) + + for _, rule := range rules { + if !rule.SupportPolicy { + continue + } + + enrichedEvent := &events.EnrichedEvent{Event: event, EventType: eventType} + ruleExpressions := rm.getRuleExpressions(rule, eventType) + if len(ruleExpressions) == 0 { + continue + } + + startTime := time.Now() + shouldAlert, err := rm.celEvaluator.EvaluateRule(enrichedEvent, ruleExpressions) + evaluationTime := time.Since(startTime) + rm.metrics.ReportRuleEvaluationTime(rule.ID, eventType, evaluationTime) + + if err != nil { + logger.L().Error("RuleManager - failed to evaluate rule", helpers.Error(err)) + continue + } + + if shouldAlert { + results = append(results, rule.ID) + } + } + + return results +} + +func (rm *RuleManager) evaluateRule(enrichedEvent *events.EnrichedEvent, eventType utils.EventType, rule typesv1.Rule) (bool, error) { + // Special event types are evaluated by map because we're doing parsing optimizations + // TODO: Manage special event types in a better way + if eventType == utils.HTTPEventType { + eventAdapter, ok := rm.adapterFactory.GetAdapter(enrichedEvent.EventType) + if !ok { + logger.L().Error("RuleManager - no adapter registered for event type", helpers.String("eventType", string(enrichedEvent.EventType))) + return false, nil + } + + eventMap := eventAdapter.ToMap(enrichedEvent) + defer 
adapters.ReleaseEventMap(eventMap) + + shouldAlert, err := rm.celEvaluator.EvaluateRuleByMap(eventMap, eventType, rule.Expressions.RuleExpression) + if err != nil { + logger.L().Error("RuleManager - failed to evaluate rule", helpers.Error(err)) + return false, err + } + return shouldAlert, nil + } else { + shouldAlert, err := rm.celEvaluator.EvaluateRule(enrichedEvent, rule.Expressions.RuleExpression) + if err != nil { + logger.L().Error("RuleManager - failed to evaluate rule", helpers.Error(err)) + return false, err + } + return shouldAlert, nil + } +} + +func (rm *RuleManager) validateRulePolicy(rule typesv1.Rule, event utils.K8sEvent, containerID string) bool { + ap, err := profilehelper.GetContainerApplicationProfile(rm.objectCache, containerID) + if err != nil { + return false + } + + allowed, err := rm.rulePolicyValidator.Validate(rule.ID, utils.GetCommFromEvent(event), &ap) + if err != nil { + logger.L().Error("RuleManager - failed to validate rule policy", helpers.Error(err)) + return false + } + + return allowed +} + +func (rm *RuleManager) getRuleExpressions(rule typesv1.Rule, eventType utils.EventType) []typesv1.RuleExpression { + var ruleExpressions []typesv1.RuleExpression + for _, expression := range rule.Expressions.RuleExpression { + if string(expression.EventType) == string(eventType) { + ruleExpressions = append(ruleExpressions, expression) + } + } + return ruleExpressions +} + +func (rm *RuleManager) getUniqueIdAndMessage(enrichedEvent *events.EnrichedEvent, rule typesv1.Rule) (string, string, error) { + // Special event types are evaluated by map because we're doing parsing optimizations + // TODO: Manage special event types in a better way + if enrichedEvent.EventType == utils.HTTPEventType { + eventAdapter, ok := rm.adapterFactory.GetAdapter(enrichedEvent.EventType) + if !ok { + logger.L().Error("RuleManager - no adapter registered for event type", helpers.String("eventType", string(enrichedEvent.EventType))) + return "", "", nil + } + eventMap := eventAdapter.ToMap(enrichedEvent) + defer adapters.ReleaseEventMap(eventMap) + + message, err := rm.celEvaluator.EvaluateExpressionByMap(eventMap, rule.Expressions.Message, enrichedEvent.EventType) + if err != nil { + logger.L().Error("RuleManager - failed to evaluate message", helpers.Error(err)) + } + uniqueID, err := rm.celEvaluator.EvaluateExpressionByMap(eventMap, rule.Expressions.UniqueID, enrichedEvent.EventType) + if err != nil { + logger.L().Error("RuleManager - failed to evaluate unique ID", helpers.Error(err)) + } + uniqueID = hashStringToMD5(uniqueID) + return message, uniqueID, err + } else { + message, err := rm.celEvaluator.EvaluateExpression(enrichedEvent, rule.Expressions.Message) + if err != nil { + logger.L().Error("RuleManager - failed to evaluate message", helpers.Error(err)) + } + uniqueID, err := rm.celEvaluator.EvaluateExpression(enrichedEvent, rule.Expressions.UniqueID) + if err != nil { + logger.L().Error("RuleManager - failed to evaluate unique ID", helpers.Error(err)) + } + + uniqueID = hashStringToMD5(uniqueID) + + return message, uniqueID, err + } +} + +func isSupportedEventType(rules []typesv1.Rule, enrichedEvent *events.EnrichedEvent) bool { + for _, rule := range rules { + for _, expression := range rule.Expressions.RuleExpression { + if string(expression.EventType) == string(enrichedEvent.EventType) { + return true + } + } + } + return false +} + +func hashStringToMD5(str string) string { + hash := md5.Sum([]byte(str)) + hashString := fmt.Sprintf("%x", hash) + return hashString +} diff --git 
a/pkg/rulemanager/rule_manager_interface.go b/pkg/rulemanager/rule_manager_interface.go index 7ebcbb40b..f12cec412 100644 --- a/pkg/rulemanager/rule_manager_interface.go +++ b/pkg/rulemanager/rule_manager_interface.go @@ -11,7 +11,6 @@ import ( type RuleManagerClient interface { ContainerCallback(notif containercollection.PubSubEvent) RegisterPeekFunc(peek func(mntns uint64) ([]string, error)) - ReportEvent(eventType utils.EventType, event utils.K8sEvent) HasApplicableRuleBindings(namespace, name string) bool HasFinalApplicationProfile(pod *v1.Pod) bool IsContainerMonitored(k8sContainerID string) bool diff --git a/pkg/rulemanager/rule_manager_mock.go b/pkg/rulemanager/rule_manager_mock.go index 85769c3c6..2860fc507 100644 --- a/pkg/rulemanager/rule_manager_mock.go +++ b/pkg/rulemanager/rule_manager_mock.go @@ -1,49 +1,51 @@ package rulemanager import ( - "github.com/kubescape/node-agent/pkg/utils" - containercollection "github.com/inspektor-gadget/inspektor-gadget/pkg/container-collection" - v1 "k8s.io/api/core/v1" + "github.com/kubescape/node-agent/pkg/containerwatcher" + "github.com/kubescape/node-agent/pkg/ebpf/events" + "github.com/kubescape/node-agent/pkg/utils" + corev1 "k8s.io/api/core/v1" ) type RuleManagerMock struct { } var _ RuleManagerClient = (*RuleManagerMock)(nil) +var _ containerwatcher.EnrichedEventReceiver = (*RuleManagerMock)(nil) -func CreateRuleManagerMock() *RuleManagerMock { +func CreateRuleManagerMock() RuleManagerClient { return &RuleManagerMock{} } -func (r *RuleManagerMock) ContainerCallback(_ containercollection.PubSubEvent) { +func (r RuleManagerMock) ContainerCallback(notif containercollection.PubSubEvent) { // noop } -func (r *RuleManagerMock) RegisterPeekFunc(_ func(mntns uint64) ([]string, error)) { +func (r RuleManagerMock) RegisterPeekFunc(peek func(mntns uint64) ([]string, error)) { // noop } -func (r *RuleManagerMock) ReportEvent(_ utils.EventType, _ utils.K8sEvent) { - // noop -} - -func (r *RuleManagerMock) HasApplicableRuleBindings(_, _ string) bool { +func (r RuleManagerMock) HasApplicableRuleBindings(namespace, name string) bool { return false } -func (r *RuleManagerMock) HasFinalApplicationProfile(_ *v1.Pod) bool { +func (r RuleManagerMock) HasFinalApplicationProfile(pod *corev1.Pod) bool { return false } -func (r *RuleManagerMock) IsContainerMonitored(_ string) bool { +func (r RuleManagerMock) IsContainerMonitored(k8sContainerID string) bool { return false } -func (r *RuleManagerMock) IsPodMonitored(_, _ string) bool { +func (r RuleManagerMock) IsPodMonitored(namespace, pod string) bool { return false } -func (r *RuleManagerMock) EvaluatePolicyRulesForEvent(_ utils.EventType, _ utils.K8sEvent) []string { +func (r RuleManagerMock) EvaluatePolicyRulesForEvent(eventType utils.EventType, event utils.K8sEvent) []string { return []string{} } + +func (r RuleManagerMock) ReportEnrichedEvent(enrichedEvent *events.EnrichedEvent) { + // noop +} diff --git a/pkg/rulemanager/ruleadapters/adapters/capabilities.go b/pkg/rulemanager/ruleadapters/adapters/capabilities.go new file mode 100644 index 000000000..5c199aafc --- /dev/null +++ b/pkg/rulemanager/ruleadapters/adapters/capabilities.go @@ -0,0 +1,83 @@ +package adapters + +import ( + tracercapabilitiestype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/capabilities/types" + "github.com/kubescape/node-agent/pkg/ebpf/events" + "github.com/kubescape/node-agent/pkg/rulemanager/types" + + apitypes "github.com/armosec/armoapi-go/armotypes" + "github.com/armosec/armoapi-go/armotypes/common" +) + 
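+// CapabilitiesAdapter converts capability-tracer events into rule-failure metadata and into a flat map representation for the rule engine.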
+type CapabilitiesAdapter struct { +} + +func NewCapabilitiesAdapter() *CapabilitiesAdapter { + return &CapabilitiesAdapter{} +} + +func (c *CapabilitiesAdapter) SetFailureMetadata(failure types.RuleFailure, enrichedEvent *events.EnrichedEvent) { + capEvent, ok := enrichedEvent.Event.(*tracercapabilitiestype.Event) + if !ok { + return + } + + baseRuntimeAlert := failure.GetBaseRuntimeAlert() + baseRuntimeAlert.InfectedPID = capEvent.Pid + baseRuntimeAlert.Arguments = map[string]interface{}{ + "syscall": capEvent.Syscall, + "capability": capEvent.CapName, + } + baseRuntimeAlert.Identifiers = &common.Identifiers{ + Process: &common.ProcessEntity{ + Name: capEvent.Comm, + }, + } + failure.SetBaseRuntimeAlert(baseRuntimeAlert) + + runtimeProcessDetails := apitypes.ProcessTree{ + ProcessTree: apitypes.Process{ + Comm: capEvent.Comm, + Gid: &capEvent.Gid, + PID: capEvent.Pid, + Uid: &capEvent.Uid, + }, + ContainerID: capEvent.Runtime.ContainerID, + } + failure.SetRuntimeProcessDetails(runtimeProcessDetails) + + failure.SetTriggerEvent(capEvent.Event) + + runtimeAlertK8sDetails := apitypes.RuntimeAlertK8sDetails{ + PodName: capEvent.GetPod(), + } + failure.SetRuntimeAlertK8sDetails(runtimeAlertK8sDetails) +} + +func (c *CapabilitiesAdapter) ToMap(enrichedEvent *events.EnrichedEvent) map[string]interface{} { + capEvent, ok := enrichedEvent.Event.(*tracercapabilitiestype.Event) + if !ok { + return nil + } + + result := ConvertToMap(&capEvent.Event) + + result["pid"] = capEvent.Pid + result["comm"] = capEvent.Comm + result["syscall"] = capEvent.Syscall + result["uid"] = capEvent.Uid + result["gid"] = capEvent.Gid + result["cap"] = capEvent.Cap + result["capName"] = capEvent.CapName + result["audit"] = capEvent.Audit + result["verdict"] = capEvent.Verdict + result["insetid"] = capEvent.InsetID + result["targetuserns"] = capEvent.TargetUserNs + result["currentuserns"] = capEvent.CurrentUserNs + result["caps"] = capEvent.Caps + result["capsNames"] = capEvent.CapsNames + + result["mountnsid"] = capEvent.MountNsID + + return result +} diff --git a/pkg/rulemanager/ruleadapters/adapters/common.go b/pkg/rulemanager/ruleadapters/adapters/common.go new file mode 100644 index 000000000..8c3c1c15d --- /dev/null +++ b/pkg/rulemanager/ruleadapters/adapters/common.go @@ -0,0 +1,40 @@ +package adapters + +import ( + "github.com/inspektor-gadget/inspektor-gadget/pkg/types" +) + +func ConvertToMap(e *types.Event) map[string]interface{} { + result := AcquireMap() + + result["timestamp"] = e.Timestamp + result["type"] = e.Type + result["message"] = e.Message + + runtime := AcquireMap() + runtime["runtimeName"] = e.Runtime.RuntimeName + runtime["containerId"] = e.Runtime.ContainerID + runtime["containerName"] = e.Runtime.ContainerName + runtime["containerPid"] = e.Runtime.ContainerPID + runtime["containerImageName"] = e.Runtime.ContainerImageName + runtime["containerImageDigest"] = e.Runtime.ContainerImageDigest + runtime["containerStartedAt"] = e.Runtime.ContainerStartedAt + result["runtime"] = runtime + + k8s := AcquireMap() + k8s["node"] = e.K8s.Node + k8s["namespace"] = e.K8s.Namespace + k8s["podName"] = e.K8s.PodName + k8s["podLabels"] = e.K8s.PodLabels + k8s["containerName"] = e.K8s.ContainerName + k8s["hostNetwork"] = e.K8s.HostNetwork + + owner := AcquireMap() + owner["kind"] = e.K8s.Owner.Kind + owner["name"] = e.K8s.Owner.Name + k8s["owner"] = owner + + result["k8s"] = k8s + + return result +} diff --git a/pkg/rulemanager/ruleadapters/adapters/dns.go b/pkg/rulemanager/ruleadapters/adapters/dns.go new file mode 
100644 index 000000000..f210cd248 --- /dev/null +++ b/pkg/rulemanager/ruleadapters/adapters/dns.go @@ -0,0 +1,112 @@ +package adapters + +import ( + tracerdnstype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/dns/types" + "github.com/kubescape/node-agent/pkg/ebpf/events" + "github.com/kubescape/node-agent/pkg/rulemanager/types" + + apitypes "github.com/armosec/armoapi-go/armotypes" + "github.com/armosec/armoapi-go/armotypes/common" +) + +type DnsAdapter struct { +} + +func NewDnsAdapter() *DnsAdapter { + return &DnsAdapter{} +} + +func (c *DnsAdapter) SetFailureMetadata(failure types.RuleFailure, enrichedEvent *events.EnrichedEvent) { + dnsEvent, ok := enrichedEvent.Event.(*tracerdnstype.Event) + if !ok { + return + } + + dstIP := "" + if len(dnsEvent.Addresses) > 0 { + dstIP = dnsEvent.Addresses[0] + } + + baseRuntimeAlert := failure.GetBaseRuntimeAlert() + baseRuntimeAlert.InfectedPID = dnsEvent.Pid + baseRuntimeAlert.Arguments = map[string]interface{}{ + "domain": dnsEvent.DNSName, + "addresses": dnsEvent.Addresses, + "protocol": dnsEvent.Protocol, + "port": dnsEvent.DstPort, + } + baseRuntimeAlert.Identifiers = &common.Identifiers{ + Process: &common.ProcessEntity{ + Name: dnsEvent.Comm, + }, + Dns: &common.DnsEntity{ + Domain: dnsEvent.DNSName, + }, + Network: &common.NetworkEntity{ + DstIP: dstIP, + Protocol: dnsEvent.Protocol, + }, + } + failure.SetBaseRuntimeAlert(baseRuntimeAlert) + + runtimeProcessDetails := apitypes.ProcessTree{ + ProcessTree: apitypes.Process{ + Comm: dnsEvent.Comm, + Gid: &dnsEvent.Gid, + PID: dnsEvent.Pid, + Uid: &dnsEvent.Uid, + Pcomm: dnsEvent.Pcomm, + Path: dnsEvent.Exepath, + Cwd: dnsEvent.Cwd, + PPID: dnsEvent.Ppid, + }, + ContainerID: dnsEvent.Runtime.ContainerID, + } + failure.SetRuntimeProcessDetails(runtimeProcessDetails) + + failure.SetTriggerEvent(dnsEvent.Event) + + runtimeAlertK8sDetails := apitypes.RuntimeAlertK8sDetails{ + PodName: dnsEvent.GetPod(), + PodLabels: dnsEvent.K8s.PodLabels, + } + failure.SetRuntimeAlertK8sDetails(runtimeAlertK8sDetails) +} + +func (c *DnsAdapter) ToMap(enrichedEvent *events.EnrichedEvent) map[string]interface{} { + dnsEvent, ok := enrichedEvent.Event.(*tracerdnstype.Event) + if !ok { + return nil + } + + result := ConvertToMap(&dnsEvent.Event) + + result["pid"] = dnsEvent.Pid + result["tid"] = dnsEvent.Tid + result["ppid"] = dnsEvent.Ppid + result["comm"] = dnsEvent.Comm + result["pcomm"] = dnsEvent.Pcomm + result["cwd"] = dnsEvent.Cwd + result["exepath"] = dnsEvent.Exepath + result["uid"] = dnsEvent.Uid + result["gid"] = dnsEvent.Gid + result["srcIP"] = dnsEvent.SrcIP + result["dstIP"] = dnsEvent.DstIP + result["srcPort"] = dnsEvent.SrcPort + result["dstPort"] = dnsEvent.DstPort + result["protocol"] = dnsEvent.Protocol + result["id"] = dnsEvent.ID + result["qr"] = dnsEvent.Qr + result["nameserver"] = dnsEvent.Nameserver + result["pktType"] = dnsEvent.PktType + result["qtype"] = dnsEvent.QType + result["name"] = dnsEvent.DNSName + result["rcode"] = dnsEvent.Rcode + result["latency"] = dnsEvent.Latency + result["numAnswers"] = dnsEvent.NumAnswers + result["addresses"] = dnsEvent.Addresses + + result["mountnsid"] = dnsEvent.MountNsID + + return result +} diff --git a/pkg/rulemanager/ruleadapters/adapters/exec.go b/pkg/rulemanager/ruleadapters/adapters/exec.go new file mode 100644 index 000000000..dab20c5af --- /dev/null +++ b/pkg/rulemanager/ruleadapters/adapters/exec.go @@ -0,0 +1,126 @@ +package adapters + +import ( + "fmt" + "path/filepath" + "strings" + + 
"github.com/kubescape/node-agent/pkg/ebpf/events" + "github.com/kubescape/node-agent/pkg/rulemanager/types" + "github.com/kubescape/node-agent/pkg/utils" + + apitypes "github.com/armosec/armoapi-go/armotypes" + "github.com/armosec/armoapi-go/armotypes/common" +) + +type ExecAdapter struct { +} + +func NewExecAdapter() *ExecAdapter { + return &ExecAdapter{} +} + +func (c *ExecAdapter) SetFailureMetadata(failure types.RuleFailure, enrichedEvent *events.EnrichedEvent) { + execEvent, ok := enrichedEvent.Event.(*events.ExecEvent) + if !ok { + return + } + + failure.SetExtra(execEvent.GetExtra()) + + execPath := GetExecPathFromEvent(execEvent) + execFullPath := GetExecFullPathFromEvent(execEvent) + upperLayer := execEvent.UpperLayer || execEvent.PupperLayer + + baseRuntimeAlert := failure.GetBaseRuntimeAlert() + baseRuntimeAlert.InfectedPID = execEvent.Pid + baseRuntimeAlert.Arguments = map[string]interface{}{ + "retval": execEvent.Retval, + "exec": execPath, + "args": execEvent.Args, + } + baseRuntimeAlert.Identifiers = &common.Identifiers{ + Process: &common.ProcessEntity{ + Name: execEvent.Comm, + CommandLine: fmt.Sprintf("%s %s", execPath, strings.Join(utils.GetExecArgsFromEvent(&execEvent.Event), " ")), + }, + File: &common.FileEntity{ + Name: filepath.Base(execFullPath), + Directory: filepath.Dir(execFullPath), + }, + } + failure.SetBaseRuntimeAlert(baseRuntimeAlert) + + runtimeProcessDetails := apitypes.ProcessTree{ + ProcessTree: apitypes.Process{ + Comm: execEvent.Comm, + Gid: &execEvent.Gid, + PID: execEvent.Pid, + Uid: &execEvent.Uid, + UpperLayer: &upperLayer, + PPID: execEvent.Ppid, + Pcomm: execEvent.Pcomm, + Cwd: execEvent.Cwd, + Hardlink: execEvent.ExePath, + Path: execFullPath, + Cmdline: fmt.Sprintf("%s %s", execPath, strings.Join(utils.GetExecArgsFromEvent(&execEvent.Event), " ")), + }, + ContainerID: execEvent.Runtime.ContainerID, + } + failure.SetRuntimeProcessDetails(runtimeProcessDetails) + + failure.SetTriggerEvent(execEvent.Event.Event) + + runtimeAlertK8sDetails := apitypes.RuntimeAlertK8sDetails{ + PodName: execEvent.GetPod(), + PodLabels: execEvent.K8s.PodLabels, + } + failure.SetRuntimeAlertK8sDetails(runtimeAlertK8sDetails) +} + +func GetExecPathFromEvent(execEvent *events.ExecEvent) string { + if len(execEvent.Args) > 0 { + return execEvent.Args[0] + } + return execEvent.Comm +} + +func GetExecFullPathFromEvent(execEvent *events.ExecEvent) string { + if execEvent.ExePath != "" { + return execEvent.ExePath + } + return GetExecPathFromEvent(execEvent) +} + +func (c *ExecAdapter) ToMap(enrichedEvent *events.EnrichedEvent) map[string]interface{} { + execEvent, ok := enrichedEvent.Event.(*events.ExecEvent) + if !ok { + return nil + } + + result := ConvertToMap(&execEvent.Event.Event) + + result["pid"] = execEvent.Pid + result["tid"] = execEvent.Tid + result["ppid"] = execEvent.Ppid + result["ptid"] = execEvent.Ptid + result["comm"] = execEvent.Comm + result["pcomm"] = execEvent.Pcomm + result["ret"] = execEvent.Retval + result["args"] = execEvent.Args + result["uid"] = execEvent.Uid + result["user"] = execEvent.Username + result["gid"] = execEvent.Gid + result["group"] = execEvent.Groupname + result["upperlayer"] = execEvent.UpperLayer + result["pupperlayer"] = execEvent.PupperLayer + result["loginuid"] = execEvent.LoginUid + result["sessionid"] = execEvent.SessionId + result["cwd"] = execEvent.Cwd + result["exepath"] = execEvent.ExePath + result["file"] = execEvent.File + + result["mountnsid"] = execEvent.MountNsID + + return result +} diff --git 
a/pkg/rulemanager/ruleadapters/adapters/hardlink.go b/pkg/rulemanager/ruleadapters/adapters/hardlink.go new file mode 100644 index 000000000..ac39e506d --- /dev/null +++ b/pkg/rulemanager/ruleadapters/adapters/hardlink.go @@ -0,0 +1,92 @@ +package adapters + +import ( + "path/filepath" + + "github.com/kubescape/node-agent/pkg/ebpf/events" + tracerhardlinktype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/hardlink/types" + "github.com/kubescape/node-agent/pkg/rulemanager/types" + + apitypes "github.com/armosec/armoapi-go/armotypes" + "github.com/armosec/armoapi-go/armotypes/common" +) + +type HardlinkAdapter struct { +} + +func NewHardlinkAdapter() *HardlinkAdapter { + return &HardlinkAdapter{} +} + +func (c *HardlinkAdapter) SetFailureMetadata(failure types.RuleFailure, enrichedEvent *events.EnrichedEvent) { + hardlinkEvent, ok := enrichedEvent.Event.(*tracerhardlinktype.Event) + if !ok { + return + } + + failure.SetExtra(hardlinkEvent.GetExtra()) + + baseRuntimeAlert := failure.GetBaseRuntimeAlert() + baseRuntimeAlert.InfectedPID = hardlinkEvent.Pid + baseRuntimeAlert.Arguments = map[string]interface{}{ + "oldPath": hardlinkEvent.OldPath, + "newPath": hardlinkEvent.NewPath, + } + baseRuntimeAlert.Identifiers = &common.Identifiers{ + Process: &common.ProcessEntity{ + Name: hardlinkEvent.Comm, + }, + File: &common.FileEntity{ + Name: filepath.Base(hardlinkEvent.OldPath), + Directory: filepath.Dir(hardlinkEvent.OldPath), + }, + } + failure.SetBaseRuntimeAlert(baseRuntimeAlert) + + runtimeProcessDetails := apitypes.ProcessTree{ + ProcessTree: apitypes.Process{ + Comm: hardlinkEvent.Comm, + PPID: hardlinkEvent.PPid, + PID: hardlinkEvent.Pid, + UpperLayer: &hardlinkEvent.UpperLayer, + Uid: &hardlinkEvent.Uid, + Gid: &hardlinkEvent.Gid, + Path: hardlinkEvent.ExePath, + Hardlink: hardlinkEvent.ExePath, + }, + ContainerID: hardlinkEvent.Runtime.ContainerID, + } + failure.SetRuntimeProcessDetails(runtimeProcessDetails) + + failure.SetTriggerEvent(hardlinkEvent.Event) + + runtimeAlertK8sDetails := apitypes.RuntimeAlertK8sDetails{ + PodName: hardlinkEvent.GetPod(), + PodLabels: hardlinkEvent.K8s.PodLabels, + } + failure.SetRuntimeAlertK8sDetails(runtimeAlertK8sDetails) +} + +func (c *HardlinkAdapter) ToMap(enrichedEvent *events.EnrichedEvent) map[string]interface{} { + hardlinkEvent, ok := enrichedEvent.Event.(*tracerhardlinktype.Event) + if !ok { + return nil + } + + result := ConvertToMap(&hardlinkEvent.Event) + + result["pid"] = hardlinkEvent.Pid + result["tid"] = hardlinkEvent.Tid + result["ppid"] = hardlinkEvent.PPid + result["uid"] = hardlinkEvent.Uid + result["gid"] = hardlinkEvent.Gid + result["upperlayer"] = hardlinkEvent.UpperLayer + result["comm"] = hardlinkEvent.Comm + result["exe_path"] = hardlinkEvent.ExePath + result["oldpath"] = hardlinkEvent.OldPath + result["newpath"] = hardlinkEvent.NewPath + + result["mountnsid"] = hardlinkEvent.MountNsID + + return result +} diff --git a/pkg/rulemanager/ruleadapters/adapters/http.go b/pkg/rulemanager/ruleadapters/adapters/http.go new file mode 100644 index 000000000..219a469d0 --- /dev/null +++ b/pkg/rulemanager/ruleadapters/adapters/http.go @@ -0,0 +1,93 @@ +package adapters + +import ( + "github.com/kubescape/node-agent/pkg/ebpf/events" + tracerhttptype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/http/types" + "github.com/kubescape/node-agent/pkg/rulemanager/types" + + apitypes "github.com/armosec/armoapi-go/armotypes" + "github.com/armosec/armoapi-go/armotypes/common" +) + +type HTTPAdapter struct { +} + +func NewHTTPAdapter() 
*HTTPAdapter { + return &HTTPAdapter{} +} + +func (c *HTTPAdapter) SetFailureMetadata(failure types.RuleFailure, enrichedEvent *events.EnrichedEvent) { + httpEvent, ok := enrichedEvent.Event.(*tracerhttptype.Event) + if !ok { + return + } + + baseRuntimeAlert := failure.GetBaseRuntimeAlert() + baseRuntimeAlert.InfectedPID = httpEvent.Pid + baseRuntimeAlert.Arguments = map[string]interface{}{ + "other_ip": httpEvent.OtherIp, + "other_port": httpEvent.OtherPort, + "internal": httpEvent.Internal, + "direction": httpEvent.Direction, + } + baseRuntimeAlert.Identifiers = &common.Identifiers{ + Network: &common.NetworkEntity{ + DstIP: httpEvent.OtherIp, + DstPort: int(httpEvent.OtherPort), + Protocol: "http", + }, + Http: &common.HttpEntity{ + Method: httpEvent.Request.Method, + Domain: httpEvent.Request.Host, + UserAgent: httpEvent.Request.UserAgent(), + Endpoint: httpEvent.Request.URL.Path, + }, + } + failure.SetBaseRuntimeAlert(baseRuntimeAlert) + + runtimeProcessDetails := apitypes.ProcessTree{ + ProcessTree: apitypes.Process{ + PID: httpEvent.Pid, + Uid: &httpEvent.Uid, + Gid: &httpEvent.Gid, + }, + ContainerID: httpEvent.Runtime.ContainerID, + } + failure.SetRuntimeProcessDetails(runtimeProcessDetails) + + failure.SetTriggerEvent(httpEvent.Event) + + runtimeAlertK8sDetails := apitypes.RuntimeAlertK8sDetails{ + PodName: httpEvent.GetPod(), + PodLabels: httpEvent.K8s.PodLabels, + } + failure.SetRuntimeAlertK8sDetails(runtimeAlertK8sDetails) +} + +func (c *HTTPAdapter) ToMap(enrichedEvent *events.EnrichedEvent) map[string]interface{} { + httpEvent, ok := enrichedEvent.Event.(*tracerhttptype.Event) + if !ok { + return nil + } + + result := ConvertToMap(&httpEvent.Event) + + result["pid"] = httpEvent.Pid + result["uid"] = httpEvent.Uid + result["gid"] = httpEvent.Gid + result["other_port"] = httpEvent.OtherPort + result["other_ip"] = httpEvent.OtherIp + result["internal"] = httpEvent.Internal + result["direction"] = httpEvent.Direction + + if httpEvent.Request != nil { + result["request"] = httpEvent.Request + } + if httpEvent.Response != nil { + result["response"] = httpEvent.Response + } + + result["mountnsid"] = httpEvent.MountNsID + + return result +} diff --git a/pkg/ruleengine/v1/helpers/iouring/iouring.go b/pkg/rulemanager/ruleadapters/adapters/iouring.go similarity index 57% rename from pkg/ruleengine/v1/helpers/iouring/iouring.go rename to pkg/rulemanager/ruleadapters/adapters/iouring.go index 401c618a1..d5539f8cc 100644 --- a/pkg/ruleengine/v1/helpers/iouring/iouring.go +++ b/pkg/rulemanager/ruleadapters/adapters/iouring.go @@ -1,8 +1,67 @@ -package iouring +package adapters -import iouringsyscall "github.com/iceber/iouring-go/syscall" +import ( + iouringsyscall "github.com/iceber/iouring-go/syscall" + "github.com/kubescape/node-agent/pkg/ebpf/events" + traceriouringtype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/iouring/tracer/types" + "github.com/kubescape/node-agent/pkg/rulemanager/types" + + apitypes "github.com/armosec/armoapi-go/armotypes" + "github.com/armosec/armoapi-go/armotypes/common" +) + +type IoUringAdapter struct { +} + +func NewIoUringAdapter() *IoUringAdapter { + return &IoUringAdapter{} +} + +func (c *IoUringAdapter) SetFailureMetadata(failure types.RuleFailure, enrichedEvent *events.EnrichedEvent) { + iouringEvent, ok := enrichedEvent.Event.(*traceriouringtype.Event) + if !ok { + return + } + + ok, name := GetOpcodeName(uint8(iouringEvent.Opcode)) + if !ok { + return + } + + baseRuntimeAlert := failure.GetBaseRuntimeAlert() + baseRuntimeAlert.InfectedPID = 
iouringEvent.Pid + baseRuntimeAlert.Arguments = map[string]interface{}{ + "opcode": iouringEvent.Opcode, + "flags": iouringEvent.Flags, + "operation": name, + } + baseRuntimeAlert.Identifiers = &common.Identifiers{ + Process: &common.ProcessEntity{ + Name: iouringEvent.Comm, + }, + } + failure.SetBaseRuntimeAlert(baseRuntimeAlert) + + runtimeProcessDetails := apitypes.ProcessTree{ + ProcessTree: apitypes.Process{ + Comm: iouringEvent.Comm, + PID: iouringEvent.Pid, + Uid: &iouringEvent.Uid, + Gid: &iouringEvent.Gid, + }, + ContainerID: iouringEvent.Runtime.ContainerID, + } + failure.SetRuntimeProcessDetails(runtimeProcessDetails) + + failure.SetTriggerEvent(iouringEvent.Event) + + runtimeAlertK8sDetails := apitypes.RuntimeAlertK8sDetails{ + PodName: iouringEvent.GetPod(), + PodLabels: iouringEvent.K8s.PodLabels, + } + failure.SetRuntimeAlertK8sDetails(runtimeAlertK8sDetails) +} -// OpcodeMap maps opcodes to their string descriptions var OpcodeMap = map[uint8]string{ iouringsyscall.IORING_OP_NOP: "No operation", iouringsyscall.IORING_OP_READV: "Vector read", @@ -54,7 +113,6 @@ var OpcodeMap = map[uint8]string{ iouringsyscall.IORING_OP_SEND_ZC: "Zero-copy send", } -// GetOpcodeName returns the string description of an opcode func GetOpcodeName(opcode uint8) (bool, string) { if name, ok := OpcodeMap[opcode]; ok { return true, name @@ -62,21 +120,25 @@ func GetOpcodeName(opcode uint8) (bool, string) { return false, "Unknown operation" } -// GetSuspiciousOpcodes returns a list of opcodes that might be considered suspicious -// These are operations that could potentially be used maliciously -func GetSuspiciousOpcodes() []uint8 { - return []uint8{ - iouringsyscall.IORING_OP_OPENAT, // File operations that could be suspicious - iouringsyscall.IORING_OP_OPENAT2, - iouringsyscall.IORING_OP_UNLINKAT, - iouringsyscall.IORING_OP_RENAMEAT, - iouringsyscall.IORING_OP_SYMLINKAT, - iouringsyscall.IORING_OP_LINKAT, - iouringsyscall.IORING_OP_SOCKET, // Network operations that could be suspicious - iouringsyscall.IORING_OP_CONNECT, - iouringsyscall.IORING_OP_SEND_ZC, - iouringsyscall.IORING_OP_SETXATTR, // Extended attribute operations - iouringsyscall.IORING_OP_FSETXATTR, - iouringsyscall.IORING_OP_URING_CMD, // Direct device access +func (c *IoUringAdapter) ToMap(enrichedEvent *events.EnrichedEvent) map[string]interface{} { + iouringEvent, ok := enrichedEvent.Event.(*traceriouringtype.Event) + if !ok { + return nil } + + result := ConvertToMap(&iouringEvent.Event) + + result["opcode"] = iouringEvent.Opcode + result["pid"] = iouringEvent.Pid + result["tid"] = iouringEvent.Tid + result["uid"] = iouringEvent.Uid + result["gid"] = iouringEvent.Gid + result["comm"] = iouringEvent.Comm + result["flags"] = iouringEvent.Flags + result["user_data"] = iouringEvent.UserData + result["identifier"] = iouringEvent.Identifier + + result["mountnsid"] = iouringEvent.MountNsID + + return result } diff --git a/pkg/rulemanager/ruleadapters/adapters/network.go b/pkg/rulemanager/ruleadapters/adapters/network.go new file mode 100644 index 000000000..82a463add --- /dev/null +++ b/pkg/rulemanager/ruleadapters/adapters/network.go @@ -0,0 +1,97 @@ +package adapters + +import ( + tracernetworktype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/network/types" + "github.com/kubescape/node-agent/pkg/ebpf/events" + "github.com/kubescape/node-agent/pkg/rulemanager/types" + + apitypes "github.com/armosec/armoapi-go/armotypes" + "github.com/armosec/armoapi-go/armotypes/common" +) + +type NetworkAdapter struct { +} + +func 
NewNetworkAdapter() *NetworkAdapter { + return &NetworkAdapter{} +} + +func (c *NetworkAdapter) SetFailureMetadata(failure types.RuleFailure, enrichedEvent *events.EnrichedEvent) { + networkEvent, ok := enrichedEvent.Event.(*tracernetworktype.Event) + if !ok { + return + } + + baseRuntimeAlert := failure.GetBaseRuntimeAlert() + baseRuntimeAlert.InfectedPID = networkEvent.Pid + baseRuntimeAlert.Arguments = map[string]interface{}{ + "ip": networkEvent.DstEndpoint.Addr, + "port": networkEvent.Port, + "proto": networkEvent.Proto, + } + baseRuntimeAlert.Identifiers = &common.Identifiers{ + Process: &common.ProcessEntity{ + Name: networkEvent.Comm, + }, + Network: &common.NetworkEntity{ + DstIP: networkEvent.DstEndpoint.Addr, + DstPort: int(networkEvent.Port), + Protocol: networkEvent.Proto, + }, + } + failure.SetBaseRuntimeAlert(baseRuntimeAlert) + + runtimeProcessDetails := apitypes.ProcessTree{ + ProcessTree: apitypes.Process{ + Comm: networkEvent.Comm, + Gid: &networkEvent.Gid, + PID: networkEvent.Pid, + Uid: &networkEvent.Uid, + }, + ContainerID: networkEvent.Runtime.ContainerID, + } + failure.SetRuntimeProcessDetails(runtimeProcessDetails) + + failure.SetTriggerEvent(networkEvent.Event) + + runtimeAlertK8sDetails := apitypes.RuntimeAlertK8sDetails{ + PodName: networkEvent.GetPod(), + PodLabels: networkEvent.K8s.PodLabels, + } + failure.SetRuntimeAlertK8sDetails(runtimeAlertK8sDetails) +} + +func (c *NetworkAdapter) ToMap(enrichedEvent *events.EnrichedEvent) map[string]interface{} { + networkEvent, ok := enrichedEvent.Event.(*tracernetworktype.Event) + if !ok { + return nil + } + + result := ConvertToMap(&networkEvent.Event) + + result["pid"] = networkEvent.Pid + result["tid"] = networkEvent.Tid + result["comm"] = networkEvent.Comm + result["uid"] = networkEvent.Uid + result["gid"] = networkEvent.Gid + result["pktType"] = networkEvent.PktType + result["proto"] = networkEvent.Proto + result["port"] = networkEvent.Port + result["podHostIP"] = networkEvent.PodHostIP + result["podIP"] = networkEvent.PodIP + result["podOwner"] = networkEvent.PodOwner + result["podLabels"] = networkEvent.PodLabels + + dst := AcquireMap() + dst["addr"] = networkEvent.DstEndpoint.Addr + dst["version"] = networkEvent.DstEndpoint.Version + dst["namespace"] = networkEvent.DstEndpoint.Namespace + dst["podname"] = networkEvent.DstEndpoint.Name + dst["kind"] = networkEvent.DstEndpoint.Kind + dst["podlabels"] = networkEvent.DstEndpoint.PodLabels + result["dst"] = dst + + result["mountnsid"] = networkEvent.MountNsID + + return result +} diff --git a/pkg/rulemanager/ruleadapters/adapters/open.go b/pkg/rulemanager/ruleadapters/adapters/open.go new file mode 100644 index 000000000..279d44202 --- /dev/null +++ b/pkg/rulemanager/ruleadapters/adapters/open.go @@ -0,0 +1,91 @@ +package adapters + +import ( + "path/filepath" + + "github.com/kubescape/node-agent/pkg/ebpf/events" + "github.com/kubescape/node-agent/pkg/rulemanager/types" + + apitypes "github.com/armosec/armoapi-go/armotypes" + "github.com/armosec/armoapi-go/armotypes/common" +) + +type OpenAdapter struct { +} + +func NewOpenAdapter() *OpenAdapter { + return &OpenAdapter{} +} + +func (c *OpenAdapter) SetFailureMetadata(failure types.RuleFailure, enrichedEvent *events.EnrichedEvent) { + openEvent, ok := enrichedEvent.Event.(*events.OpenEvent) + if !ok { + return + } + + failure.SetExtra(openEvent.GetExtra()) + + openEventTyped := openEvent.Event + + baseRuntimeAlert := failure.GetBaseRuntimeAlert() + baseRuntimeAlert.InfectedPID = openEventTyped.Pid + 
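+	// Attach the open flags and resolved full path as alert arguments, along with file and process identifiers.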
baseRuntimeAlert.Arguments = map[string]interface{}{ + "flags": openEventTyped.Flags, + "path": openEventTyped.FullPath, + } + baseRuntimeAlert.Identifiers = &common.Identifiers{ + Process: &common.ProcessEntity{ + Name: openEventTyped.Comm, + }, + File: &common.FileEntity{ + Name: filepath.Base(openEventTyped.FullPath), + Directory: filepath.Dir(openEventTyped.FullPath), + }, + } + failure.SetBaseRuntimeAlert(baseRuntimeAlert) + + runtimeProcessDetails := apitypes.ProcessTree{ + ProcessTree: apitypes.Process{ + Comm: openEventTyped.Comm, + Gid: &openEventTyped.Gid, + PID: openEventTyped.Pid, + Uid: &openEventTyped.Uid, + }, + ContainerID: openEventTyped.Runtime.ContainerID, + } + failure.SetRuntimeProcessDetails(runtimeProcessDetails) + + failure.SetTriggerEvent(openEventTyped.Event) + + runtimeAlertK8sDetails := apitypes.RuntimeAlertK8sDetails{ + PodName: openEventTyped.GetPod(), + } + failure.SetRuntimeAlertK8sDetails(runtimeAlertK8sDetails) +} + +func (c *OpenAdapter) ToMap(enrichedEvent *events.EnrichedEvent) map[string]interface{} { + openEvent, ok := enrichedEvent.Event.(*events.OpenEvent) + if !ok { + return nil + } + + result := ConvertToMap(&openEvent.Event.Event) + + result["pid"] = openEvent.Pid + result["tid"] = openEvent.Tid + result["uid"] = openEvent.Uid + result["gid"] = openEvent.Gid + result["comm"] = openEvent.Comm + result["fd"] = openEvent.Fd + result["err"] = openEvent.Err + result["flags"] = openEvent.Flags + result["flagsRaw"] = openEvent.FlagsRaw + result["mode"] = openEvent.Mode + result["modeRaw"] = openEvent.ModeRaw + result["path"] = openEvent.Path + result["fullPath"] = openEvent.FullPath + + result["mountnsid"] = openEvent.MountNsID + + return result +} diff --git a/pkg/rulemanager/ruleadapters/adapters/pool.go b/pkg/rulemanager/ruleadapters/adapters/pool.go new file mode 100644 index 000000000..778cf46e5 --- /dev/null +++ b/pkg/rulemanager/ruleadapters/adapters/pool.go @@ -0,0 +1,45 @@ +package adapters + +import "sync" + +// mapPool is a pool for map[string]interface{} to reduce allocations. +// We pre-allocate a capacity of 32, which should be a reasonable starting point +// for the number of keys in your event maps. +var mapPool = sync.Pool{ + New: func() interface{} { + return make(map[string]interface{}, 32) + }, +} + +// AcquireMap gets a map from the pool. +func AcquireMap() map[string]interface{} { + return mapPool.Get().(map[string]interface{}) +} + +// ReleaseMap returns a map to the pool after clearing it for reuse. +func ReleaseMap(m map[string]interface{}) { + // Clear all keys from the map to prevent old data from leaking. + clear(m) + mapPool.Put(m) +} + +// ReleaseEventMap releases the main event map and all its nested maps back to the pool. +// This function specifically handles the structure created by ConvertToMap and adapter-specific nested maps. 
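+// Callers must not keep references to the map or its nested maps after releasing them back to the pool.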
+func ReleaseEventMap(eventMap map[string]interface{}) { + // Release nested maps first + if runtime, ok := eventMap["runtime"].(map[string]interface{}); ok { + ReleaseMap(runtime) + } + if k8s, ok := eventMap["k8s"].(map[string]interface{}); ok { + if owner, ok := k8s["owner"].(map[string]interface{}); ok { + ReleaseMap(owner) + } + ReleaseMap(k8s) + } + // Release adapter-specific nested maps + if dst, ok := eventMap["dst"].(map[string]interface{}); ok { + ReleaseMap(dst) + } + // Release the main map + ReleaseMap(eventMap) +} diff --git a/pkg/rulemanager/ruleadapters/adapters/procfs.go b/pkg/rulemanager/ruleadapters/adapters/procfs.go new file mode 100644 index 000000000..d1e962556 --- /dev/null +++ b/pkg/rulemanager/ruleadapters/adapters/procfs.go @@ -0,0 +1,61 @@ +package adapters + +import ( + "fmt" + + "github.com/kubescape/node-agent/pkg/ebpf/events" + "github.com/kubescape/node-agent/pkg/rulemanager/types" + + apitypes "github.com/armosec/armoapi-go/armotypes" + "github.com/armosec/armoapi-go/armotypes/common" +) + +type ProcfsFailureSetter struct { +} + +func NewProcfsCreator() *ProcfsFailureSetter { + return &ProcfsFailureSetter{} +} + +func (c *ProcfsFailureSetter) SetFailureMetadata(failure types.RuleFailure, enrichedEvent *events.EnrichedEvent) { + procfsEvent, ok := enrichedEvent.Event.(*events.ProcfsEvent) + if !ok { + return + } + + baseRuntimeAlert := failure.GetBaseRuntimeAlert() + baseRuntimeAlert.InfectedPID = procfsEvent.PID + baseRuntimeAlert.Arguments = map[string]interface{}{ + "ppid": procfsEvent.PPID, + "start_time_ns": procfsEvent.StartTimeNs, + } + baseRuntimeAlert.Identifiers = &common.Identifiers{ + Process: &common.ProcessEntity{ + Name: procfsEvent.Comm, + }, + } + failure.SetBaseRuntimeAlert(baseRuntimeAlert) + + runtimeProcessDetails := apitypes.ProcessTree{ + ProcessTree: apitypes.Process{ + Comm: procfsEvent.Comm, + PID: procfsEvent.PID, + PPID: procfsEvent.PPID, + Uid: procfsEvent.Uid, + Gid: procfsEvent.Gid, + Path: procfsEvent.Path, + }, + ContainerID: procfsEvent.ContainerID, + } + failure.SetRuntimeProcessDetails(runtimeProcessDetails) + + ruleAlert := apitypes.RuleAlert{ + RuleDescription: fmt.Sprintf("Procfs event detected for process %s (PID: %d)", procfsEvent.Comm, procfsEvent.PID), + } + failure.SetRuleAlert(ruleAlert) + + runtimeAlertK8sDetails := apitypes.RuntimeAlertK8sDetails{ + PodName: procfsEvent.GetPod(), + } + failure.SetRuntimeAlertK8sDetails(runtimeAlertK8sDetails) +} diff --git a/pkg/rulemanager/ruleadapters/adapters/ptrace.go b/pkg/rulemanager/ruleadapters/adapters/ptrace.go new file mode 100644 index 000000000..9857da5aa --- /dev/null +++ b/pkg/rulemanager/ruleadapters/adapters/ptrace.go @@ -0,0 +1,81 @@ +package adapters + +import ( + "path/filepath" + + "github.com/kubescape/node-agent/pkg/ebpf/events" + tracerptracetype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/ptrace/tracer/types" + "github.com/kubescape/node-agent/pkg/rulemanager/types" + + apitypes "github.com/armosec/armoapi-go/armotypes" + "github.com/armosec/armoapi-go/armotypes/common" +) + +type PtraceAdapter struct { +} + +func NewPtraceAdapter() *PtraceAdapter { + return &PtraceAdapter{} +} + +func (c *PtraceAdapter) SetFailureMetadata(failure types.RuleFailure, enrichedEvent *events.EnrichedEvent) { + ptraceEvent, ok := enrichedEvent.Event.(*tracerptracetype.Event) + if !ok { + return + } + + baseRuntimeAlert := failure.GetBaseRuntimeAlert() + baseRuntimeAlert.InfectedPID = ptraceEvent.Pid + baseRuntimeAlert.Identifiers = &common.Identifiers{ + Process: 
&common.ProcessEntity{ + Name: ptraceEvent.Comm, + }, + File: &common.FileEntity{ + Name: filepath.Base(ptraceEvent.ExePath), + Directory: filepath.Dir(ptraceEvent.ExePath), + }, + } + failure.SetBaseRuntimeAlert(baseRuntimeAlert) + + runtimeProcessDetails := apitypes.ProcessTree{ + ProcessTree: apitypes.Process{ + Comm: ptraceEvent.Comm, + PPID: ptraceEvent.PPid, + PID: ptraceEvent.Pid, + Uid: &ptraceEvent.Uid, + Gid: &ptraceEvent.Gid, + Path: ptraceEvent.ExePath, + }, + ContainerID: ptraceEvent.Runtime.ContainerID, + } + failure.SetRuntimeProcessDetails(runtimeProcessDetails) + + failure.SetTriggerEvent(ptraceEvent.Event) + + runtimeAlertK8sDetails := apitypes.RuntimeAlertK8sDetails{ + PodName: ptraceEvent.GetPod(), + PodLabels: ptraceEvent.K8s.PodLabels, + } + failure.SetRuntimeAlertK8sDetails(runtimeAlertK8sDetails) +} + +func (c *PtraceAdapter) ToMap(enrichedEvent *events.EnrichedEvent) map[string]interface{} { + ptraceEvent, ok := enrichedEvent.Event.(*tracerptracetype.Event) + if !ok { + return nil + } + + result := ConvertToMap(&ptraceEvent.Event) + + result["pid"] = ptraceEvent.Pid + result["ppid"] = ptraceEvent.PPid + result["uid"] = ptraceEvent.Uid + result["gid"] = ptraceEvent.Gid + result["request"] = ptraceEvent.Request + result["comm"] = ptraceEvent.Comm + result["exe_path"] = ptraceEvent.ExePath + + result["mountnsid"] = ptraceEvent.MountNsID + + return result +} diff --git a/pkg/rulemanager/ruleadapters/adapters/randomx.go b/pkg/rulemanager/ruleadapters/adapters/randomx.go new file mode 100644 index 000000000..55b4103b1 --- /dev/null +++ b/pkg/rulemanager/ruleadapters/adapters/randomx.go @@ -0,0 +1,76 @@ +package adapters + +import ( + "github.com/kubescape/node-agent/pkg/ebpf/events" + tracerrandomxtype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/randomx/types" + "github.com/kubescape/node-agent/pkg/rulemanager/types" + + apitypes "github.com/armosec/armoapi-go/armotypes" + "github.com/armosec/armoapi-go/armotypes/common" +) + +type RandomXAdapter struct { +} + +func NewRandomXAdapter() *RandomXAdapter { + return &RandomXAdapter{} +} + +func (c *RandomXAdapter) SetFailureMetadata(failure types.RuleFailure, enrichedEvent *events.EnrichedEvent) { + randomXEvent, ok := enrichedEvent.Event.(*tracerrandomxtype.Event) + if !ok { + return + } + + baseRuntimeAlert := failure.GetBaseRuntimeAlert() + baseRuntimeAlert.InfectedPID = randomXEvent.Pid + baseRuntimeAlert.Arguments = map[string]interface{}{ + "ppid": randomXEvent.PPid, + } + baseRuntimeAlert.Identifiers = &common.Identifiers{ + Process: &common.ProcessEntity{ + Name: randomXEvent.Comm, + }, + } + failure.SetBaseRuntimeAlert(baseRuntimeAlert) + + runtimeProcessDetails := apitypes.ProcessTree{ + ProcessTree: apitypes.Process{ + Comm: randomXEvent.Comm, + PID: randomXEvent.Pid, + Uid: &randomXEvent.Uid, + Gid: &randomXEvent.Gid, + }, + ContainerID: randomXEvent.Runtime.ContainerID, + } + failure.SetRuntimeProcessDetails(runtimeProcessDetails) + + failure.SetTriggerEvent(randomXEvent.Event) + + runtimeAlertK8sDetails := apitypes.RuntimeAlertK8sDetails{ + PodName: randomXEvent.GetPod(), + PodLabels: randomXEvent.K8s.PodLabels, + } + failure.SetRuntimeAlertK8sDetails(runtimeAlertK8sDetails) +} + +func (c *RandomXAdapter) ToMap(enrichedEvent *events.EnrichedEvent) map[string]interface{} { + randomXEvent, ok := enrichedEvent.Event.(*tracerrandomxtype.Event) + if !ok { + return nil + } + + result := ConvertToMap(&randomXEvent.Event) + + result["pid"] = randomXEvent.Pid + result["ppid"] = randomXEvent.PPid + result["uid"] = 
randomXEvent.Uid + result["gid"] = randomXEvent.Gid + result["upperlayer"] = randomXEvent.UpperLayer + result["comm"] = randomXEvent.Comm + result["exe_path"] = randomXEvent.ExePath + + result["mountnsid"] = randomXEvent.MountNsID + + return result +} diff --git a/pkg/rulemanager/ruleadapters/adapters/ssh.go b/pkg/rulemanager/ruleadapters/adapters/ssh.go new file mode 100644 index 000000000..ece7aa1ce --- /dev/null +++ b/pkg/rulemanager/ruleadapters/adapters/ssh.go @@ -0,0 +1,85 @@ +package adapters + +import ( + "github.com/kubescape/node-agent/pkg/ebpf/events" + tracersshtype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/ssh/types" + "github.com/kubescape/node-agent/pkg/rulemanager/types" + + apitypes "github.com/armosec/armoapi-go/armotypes" + "github.com/armosec/armoapi-go/armotypes/common" +) + +type SSHAdapter struct { +} + +func NewSSHAdapter() *SSHAdapter { + return &SSHAdapter{} +} + +func (c *SSHAdapter) SetFailureMetadata(failure types.RuleFailure, enrichedEvent *events.EnrichedEvent) { + sshEvent, ok := enrichedEvent.Event.(*tracersshtype.Event) + if !ok { + return + } + + baseRuntimeAlert := failure.GetBaseRuntimeAlert() + baseRuntimeAlert.InfectedPID = sshEvent.Pid + baseRuntimeAlert.Arguments = map[string]interface{}{ + "src_ip": sshEvent.SrcIP, + "dst_ip": sshEvent.DstIP, + "src_port": sshEvent.SrcPort, + "dst_port": sshEvent.DstPort, + } + baseRuntimeAlert.Identifiers = &common.Identifiers{ + Process: &common.ProcessEntity{ + Name: sshEvent.Comm, + }, + Network: &common.NetworkEntity{ + DstIP: sshEvent.DstIP, + DstPort: int(sshEvent.DstPort), + Protocol: "ssh", + }, + } + failure.SetBaseRuntimeAlert(baseRuntimeAlert) + + runtimeProcessDetails := apitypes.ProcessTree{ + ProcessTree: apitypes.Process{ + Comm: sshEvent.Comm, + PID: sshEvent.Pid, + Uid: &sshEvent.Uid, + Gid: &sshEvent.Gid, + }, + ContainerID: sshEvent.Runtime.ContainerID, + } + failure.SetRuntimeProcessDetails(runtimeProcessDetails) + + failure.SetTriggerEvent(sshEvent.Event) + + runtimeAlertK8sDetails := apitypes.RuntimeAlertK8sDetails{ + PodName: sshEvent.GetPod(), + PodLabels: sshEvent.K8s.PodLabels, + } + failure.SetRuntimeAlertK8sDetails(runtimeAlertK8sDetails) +} + +func (c *SSHAdapter) ToMap(enrichedEvent *events.EnrichedEvent) map[string]interface{} { + sshEvent, ok := enrichedEvent.Event.(*tracersshtype.Event) + if !ok { + return nil + } + + result := ConvertToMap(&sshEvent.Event) + + result["pid"] = sshEvent.Pid + result["uid"] = sshEvent.Uid + result["gid"] = sshEvent.Gid + result["comm"] = sshEvent.Comm + result["src_port"] = sshEvent.SrcPort + result["dst_port"] = sshEvent.DstPort + result["src_ip"] = sshEvent.SrcIP + result["dst_ip"] = sshEvent.DstIP + + result["mountnsid"] = sshEvent.MountNsID + + return result +} diff --git a/pkg/rulemanager/ruleadapters/adapters/symlink.go b/pkg/rulemanager/ruleadapters/adapters/symlink.go new file mode 100644 index 000000000..f3340aace --- /dev/null +++ b/pkg/rulemanager/ruleadapters/adapters/symlink.go @@ -0,0 +1,92 @@ +package adapters + +import ( + "path/filepath" + + "github.com/kubescape/node-agent/pkg/ebpf/events" + tracersymlinktype "github.com/kubescape/node-agent/pkg/ebpf/gadgets/symlink/types" + "github.com/kubescape/node-agent/pkg/rulemanager/types" + + apitypes "github.com/armosec/armoapi-go/armotypes" + "github.com/armosec/armoapi-go/armotypes/common" +) + +type SymlinkAdapter struct { +} + +func NewSymlinkAdapter() *SymlinkAdapter { + return &SymlinkAdapter{} +} + +func (c *SymlinkAdapter) SetFailureMetadata(failure types.RuleFailure, 
enrichedEvent *events.EnrichedEvent) { + symlinkEvent, ok := enrichedEvent.Event.(*tracersymlinktype.Event) + if !ok { + return + } + + failure.SetExtra(symlinkEvent.GetExtra()) + + baseRuntimeAlert := failure.GetBaseRuntimeAlert() + baseRuntimeAlert.InfectedPID = symlinkEvent.Pid + baseRuntimeAlert.Arguments = map[string]interface{}{ + "oldPath": symlinkEvent.OldPath, + "newPath": symlinkEvent.NewPath, + } + baseRuntimeAlert.Identifiers = &common.Identifiers{ + Process: &common.ProcessEntity{ + Name: symlinkEvent.Comm, + }, + File: &common.FileEntity{ + Name: filepath.Base(symlinkEvent.OldPath), + Directory: filepath.Dir(symlinkEvent.OldPath), + }, + } + failure.SetBaseRuntimeAlert(baseRuntimeAlert) + + runtimeProcessDetails := apitypes.ProcessTree{ + ProcessTree: apitypes.Process{ + Comm: symlinkEvent.Comm, + PPID: symlinkEvent.PPid, + PID: symlinkEvent.Pid, + UpperLayer: &symlinkEvent.UpperLayer, + Uid: &symlinkEvent.Uid, + Gid: &symlinkEvent.Gid, + Hardlink: symlinkEvent.ExePath, + Path: symlinkEvent.ExePath, + }, + ContainerID: symlinkEvent.Runtime.ContainerID, + } + failure.SetRuntimeProcessDetails(runtimeProcessDetails) + + failure.SetTriggerEvent(symlinkEvent.Event) + + runtimeAlertK8sDetails := apitypes.RuntimeAlertK8sDetails{ + PodName: symlinkEvent.GetPod(), + PodLabels: symlinkEvent.K8s.PodLabels, + } + failure.SetRuntimeAlertK8sDetails(runtimeAlertK8sDetails) +} + +func (c *SymlinkAdapter) ToMap(enrichedEvent *events.EnrichedEvent) map[string]interface{} { + symlinkEvent, ok := enrichedEvent.Event.(*tracersymlinktype.Event) + if !ok { + return nil + } + + result := ConvertToMap(&symlinkEvent.Event) + + result["pid"] = symlinkEvent.Pid + result["tid"] = symlinkEvent.Tid + result["ppid"] = symlinkEvent.PPid + result["uid"] = symlinkEvent.Uid + result["gid"] = symlinkEvent.Gid + result["upperlayer"] = symlinkEvent.UpperLayer + result["comm"] = symlinkEvent.Comm + result["exe_path"] = symlinkEvent.ExePath + result["oldpath"] = symlinkEvent.OldPath + result["newpath"] = symlinkEvent.NewPath + + result["mountnsid"] = symlinkEvent.MountNsID + + return result +} diff --git a/pkg/rulemanager/ruleadapters/adapters/syscall.go b/pkg/rulemanager/ruleadapters/adapters/syscall.go new file mode 100644 index 000000000..c231601e9 --- /dev/null +++ b/pkg/rulemanager/ruleadapters/adapters/syscall.go @@ -0,0 +1,73 @@ +package adapters + +import ( + "github.com/kubescape/node-agent/pkg/ebpf/events" + "github.com/kubescape/node-agent/pkg/rulemanager/types" + + apitypes "github.com/armosec/armoapi-go/armotypes" + "github.com/armosec/armoapi-go/armotypes/common" +) + +type SyscallAdapter struct { +} + +func NewSyscallAdapter() *SyscallAdapter { + return &SyscallAdapter{} +} + +func (c *SyscallAdapter) SetFailureMetadata(failure types.RuleFailure, enrichedEvent *events.EnrichedEvent) { + syscallEvent, ok := enrichedEvent.Event.(*types.SyscallEvent) + if !ok { + return + } + + baseRuntimeAlert := failure.GetBaseRuntimeAlert() + baseRuntimeAlert.InfectedPID = syscallEvent.Pid + baseRuntimeAlert.Arguments = map[string]interface{}{ + "syscall": syscallEvent.SyscallName, + } + baseRuntimeAlert.Identifiers = &common.Identifiers{ + Process: &common.ProcessEntity{ + Name: syscallEvent.Comm, + }, + } + failure.SetBaseRuntimeAlert(baseRuntimeAlert) + + runtimeProcessDetails := apitypes.ProcessTree{ + ProcessTree: apitypes.Process{ + Comm: syscallEvent.Comm, + Gid: &syscallEvent.Gid, + PID: syscallEvent.Pid, + Uid: &syscallEvent.Uid, + }, + ContainerID: syscallEvent.Runtime.ContainerID, + } + 
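+	// Attach the offending process, the raw trigger event, and the pod details to the failure.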
failure.SetRuntimeProcessDetails(runtimeProcessDetails) + + failure.SetTriggerEvent(syscallEvent.Event) + + runtimeAlertK8sDetails := apitypes.RuntimeAlertK8sDetails{ + PodName: syscallEvent.GetPod(), + PodLabels: syscallEvent.K8s.PodLabels, + } + failure.SetRuntimeAlertK8sDetails(runtimeAlertK8sDetails) +} + +func (c *SyscallAdapter) ToMap(enrichedEvent *events.EnrichedEvent) map[string]interface{} { + syscallEvent, ok := enrichedEvent.Event.(*types.SyscallEvent) + if !ok { + return nil + } + + result := ConvertToMap(&syscallEvent.Event) + + result["pid"] = syscallEvent.Pid + result["comm"] = syscallEvent.Comm + result["uid"] = syscallEvent.Uid + result["gid"] = syscallEvent.Gid + result["syscallName"] = syscallEvent.SyscallName + + result["mountnsid"] = syscallEvent.MountNsID + + return result +} diff --git a/pkg/rulemanager/ruleadapters/creator.go b/pkg/rulemanager/ruleadapters/creator.go new file mode 100644 index 000000000..16025baa9 --- /dev/null +++ b/pkg/rulemanager/ruleadapters/creator.go @@ -0,0 +1,271 @@ +package ruleadapters + +import ( + "errors" + "fmt" + "path/filepath" + "reflect" + "time" + + "github.com/armosec/armoapi-go/armotypes" + apitypes "github.com/armosec/armoapi-go/armotypes" + "github.com/dustin/go-humanize" + "github.com/goradd/maps" + expirable "github.com/hashicorp/golang-lru/v2/expirable" + "github.com/kubescape/go-logger" + "github.com/kubescape/go-logger/helpers" + helpersv1 "github.com/kubescape/k8s-interface/instanceidhandler/v1/helpers" + "github.com/kubescape/node-agent/pkg/dnsmanager" + "github.com/kubescape/node-agent/pkg/ebpf/events" + "github.com/kubescape/node-agent/pkg/objectcache" + "github.com/kubescape/node-agent/pkg/rulemanager/types" + typesv1 "github.com/kubescape/node-agent/pkg/rulemanager/types/v1" + "github.com/kubescape/node-agent/pkg/utils" +) + +const ( + maxFileSize = 50 * 1024 * 1024 // 50MB + hashCacheTTL = 1 * time.Minute + hashCacheMaxSize = 50000 +) + +var ErrRuleShouldNotBeAlerted = errors.New("rule should not be alerted") + +type FileHashCache struct { + SHA1Hash string + MD5Hash string +} + +type RuleFailureCreator struct { + adapterFactory *EventRuleAdapterFactory + containerIdToPid *maps.SafeMap[string, uint32] + dnsManager dnsmanager.DNSResolver + enricher types.Enricher + hashCache *expirable.LRU[string, *FileHashCache] +} + +func NewRuleFailureCreator(enricher types.Enricher, dnsManager dnsmanager.DNSResolver, adapterFactory *EventRuleAdapterFactory) *RuleFailureCreator { + hashCache := expirable.NewLRU[string, *FileHashCache](hashCacheMaxSize, nil, hashCacheTTL) + return &RuleFailureCreator{ + adapterFactory: adapterFactory, + dnsManager: dnsManager, + enricher: enricher, + hashCache: hashCache, + } +} + +func (r *RuleFailureCreator) CreateRuleFailure(rule typesv1.Rule, enrichedEvent *events.EnrichedEvent, objectCache objectcache.ObjectCache, message, uniqueID string) types.RuleFailure { + eventAdapter, ok := r.adapterFactory.GetAdapter(enrichedEvent.EventType) + if !ok { + logger.L().Error("RuleFailureCreator - no adapter registered for event type", helpers.String("eventType", string(enrichedEvent.EventType))) + return nil + } + + ruleFailure := &types.GenericRuleFailure{ + BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ + UniqueID: uniqueID, + AlertName: rule.Name, + Severity: rule.Severity, + Arguments: map[string]interface{}{ + "message": message, + }, + Timestamp: enrichedEvent.Timestamp, + InfectedPID: enrichedEvent.ProcessTree.PID, + }, + RuleAlert: apitypes.RuleAlert{ + RuleDescription: message, + }, + RuleID: 
rule.ID, + AlertPlatform: apitypes.AlertSourcePlatformK8s, + } + + eventAdapter.SetFailureMetadata(ruleFailure, enrichedEvent) + + r.setBaseRuntimeAlert(ruleFailure) + r.setRuntimeAlertK8sDetails(ruleFailure) + r.setCloudServices(ruleFailure) + r.setProfileMetadata(rule, ruleFailure, objectCache) + r.enrichRuleFailure(ruleFailure) + + if enrichedEvent.ProcessTree.PID != 0 { + ruleFailure.SetRuntimeProcessDetails(apitypes.ProcessTree{ + ProcessTree: enrichedEvent.ProcessTree, + ContainerID: enrichedEvent.ContainerID, + }) + } + + return ruleFailure +} + +func (r *RuleFailureCreator) enrichRuleFailure(ruleFailure *types.GenericRuleFailure) { + if r.enricher != nil && !reflect.ValueOf(r.enricher).IsNil() { + if err := r.enricher.EnrichRuleFailure(ruleFailure); err != nil { + if errors.Is(err, ErrRuleShouldNotBeAlerted) { // TODO: @amitschendel - I think this check doesn't work. + return + } + } + } +} + +func (r *RuleFailureCreator) setProfileMetadata(rule typesv1.Rule, ruleFailure *types.GenericRuleFailure, objectCache objectcache.ObjectCache) { + var profileType armotypes.ProfileType + baseRuntimeAlert := ruleFailure.GetBaseRuntimeAlert() + profileRequirment := rule.ProfileDependency + if !(profileRequirment == armotypes.Required || profileRequirment == armotypes.Optional) { + return + } + + for _, tag := range rule.Tags { + switch tag { + case types.ApplicationProfile: + profileType = armotypes.ApplicationProfile + case types.NetworkProfile: + profileType = armotypes.NetworkProfile + } + } + + switch profileType { + case armotypes.ApplicationProfile: + state := objectCache.ApplicationProfileCache().GetApplicationProfileState(ruleFailure.GetTriggerEvent().Runtime.ContainerID) + if state != nil { + profileMetadata := &armotypes.ProfileMetadata{ + Status: state.Status, + Completion: state.Completion, + Name: state.Name, + FailOnProfile: state.Status == helpersv1.Completed, + Type: armotypes.ApplicationProfile, + ProfileDependency: profileRequirment, + Error: state.Error, + } + baseRuntimeAlert.ProfileMetadata = profileMetadata + } + + case armotypes.NetworkProfile: + state := objectCache.NetworkNeighborhoodCache().GetNetworkNeighborhoodState(ruleFailure.GetTriggerEvent().Runtime.ContainerID) + if state != nil { + profileMetadata := &armotypes.ProfileMetadata{ + Status: state.Status, + Completion: state.Completion, + Name: state.Name, + FailOnProfile: state.Status == helpersv1.Completed, + Type: armotypes.NetworkProfile, + ProfileDependency: profileRequirment, + Error: state.Error, + } + baseRuntimeAlert.ProfileMetadata = profileMetadata + } + default: + profileMetadata := &armotypes.ProfileMetadata{ + ProfileDependency: profileRequirment, + FailOnProfile: false, + Error: fmt.Errorf("profile type %d not supported", profileRequirment), + } + baseRuntimeAlert.ProfileMetadata = profileMetadata + } + ruleFailure.SetBaseRuntimeAlert(baseRuntimeAlert) +} + +func (r *RuleFailureCreator) setCloudServices(ruleFailure *types.GenericRuleFailure) { + if cloudServices := r.dnsManager.ResolveContainerProcessToCloudServices(ruleFailure.GetTriggerEvent().Runtime.ContainerID, ruleFailure.GetBaseRuntimeAlert().InfectedPID); cloudServices != nil { + ruleFailure.SetCloudServices(cloudServices.ToSlice()) + } + +} + +func (r *RuleFailureCreator) setBaseRuntimeAlert(ruleFailure *types.GenericRuleFailure) { + var hostPath string + var err error + var path string + + if ruleFailure.GetRuntimeProcessDetails().ProcessTree.Path == "" { + path, err = 
utils.GetPathFromPid(ruleFailure.GetRuntimeProcessDetails().ProcessTree.PID) + if err != nil { + return + } + hostPath = filepath.Join("/proc", fmt.Sprintf("/%d/root/%s", ruleFailure.GetRuntimeProcessDetails().ProcessTree.PID, path)) + } + + if err != nil { + if ruleFailure.GetRuntimeProcessDetails().ProcessTree.Path != "" { + hostPath = filepath.Join("/proc", fmt.Sprintf("/%d/root/%s", r.containerIdToPid.Get(ruleFailure.GetTriggerEvent().Runtime.ContainerID), + ruleFailure.GetRuntimeProcessDetails().ProcessTree.Path)) + } + } else { + hostPath = filepath.Join("/proc", fmt.Sprintf("/%d/root/%s", ruleFailure.GetRuntimeProcessDetails().ProcessTree.PID, path)) + } + + baseRuntimeAlert := ruleFailure.GetBaseRuntimeAlert() + + baseRuntimeAlert.Timestamp = time.Unix(0, int64(ruleFailure.GetTriggerEvent().Timestamp)) + var size int64 = 0 + if hostPath != "" { + size, err = utils.GetFileSize(hostPath) + if err != nil { + size = 0 + } + } + + if baseRuntimeAlert.Size == "" && hostPath != "" && size != 0 { + baseRuntimeAlert.Size = humanize.Bytes(uint64(size)) + } + + if size != 0 && size < maxFileSize && hostPath != "" { + if baseRuntimeAlert.MD5Hash == "" || baseRuntimeAlert.SHA1Hash == "" { + if cached, found := r.hashCache.Get(hostPath); found { + baseRuntimeAlert.MD5Hash = cached.MD5Hash + baseRuntimeAlert.SHA1Hash = cached.SHA1Hash + } else { + sha1hash, md5hash, err := utils.CalculateFileHashes(hostPath) + if err == nil { + baseRuntimeAlert.MD5Hash = md5hash + baseRuntimeAlert.SHA1Hash = sha1hash + r.hashCache.Add(hostPath, &FileHashCache{ + SHA1Hash: sha1hash, + MD5Hash: md5hash, + }) + } + } + } + } + + ruleFailure.SetBaseRuntimeAlert(baseRuntimeAlert) + +} + +func (r *RuleFailureCreator) setRuntimeAlertK8sDetails(ruleFailure *types.GenericRuleFailure) { + runtimek8sdetails := ruleFailure.GetRuntimeAlertK8sDetails() + if runtimek8sdetails.Image == "" { + runtimek8sdetails.Image = ruleFailure.GetTriggerEvent().Runtime.ContainerImageName + } + + if runtimek8sdetails.ImageDigest == "" { + runtimek8sdetails.ImageDigest = ruleFailure.GetTriggerEvent().Runtime.ContainerImageDigest + } + + if runtimek8sdetails.Namespace == "" { + runtimek8sdetails.Namespace = ruleFailure.GetTriggerEvent().K8s.Namespace + } + + if runtimek8sdetails.PodName == "" { + runtimek8sdetails.PodName = ruleFailure.GetTriggerEvent().K8s.PodName + } + + if runtimek8sdetails.PodNamespace == "" { + runtimek8sdetails.PodNamespace = ruleFailure.GetTriggerEvent().K8s.Namespace + } + + if runtimek8sdetails.ContainerName == "" { + runtimek8sdetails.ContainerName = ruleFailure.GetTriggerEvent().K8s.ContainerName + } + + if runtimek8sdetails.ContainerID == "" { + runtimek8sdetails.ContainerID = ruleFailure.GetTriggerEvent().Runtime.ContainerID + } + + if runtimek8sdetails.HostNetwork == nil { + hostNetwork := ruleFailure.GetTriggerEvent().K8s.HostNetwork + runtimek8sdetails.HostNetwork = &hostNetwork + } + + ruleFailure.SetRuntimeAlertK8sDetails(runtimek8sdetails) +} diff --git a/pkg/rulemanager/ruleadapters/creator_interface.go b/pkg/rulemanager/ruleadapters/creator_interface.go new file mode 100644 index 000000000..918a5538a --- /dev/null +++ b/pkg/rulemanager/ruleadapters/creator_interface.go @@ -0,0 +1,16 @@ +package ruleadapters + +import ( + "github.com/kubescape/node-agent/pkg/ebpf/events" + "github.com/kubescape/node-agent/pkg/objectcache" + types "github.com/kubescape/node-agent/pkg/rulemanager/types" + typesv1 "github.com/kubescape/node-agent/pkg/rulemanager/types/v1" +) + +type RuleFailureCreatorInterface interface { + 
CreateRuleFailure(rule typesv1.Rule, enrichedEvent *events.EnrichedEvent, objectCache objectcache.ObjectCache, message, uniqueID string) types.RuleFailure +} + +type EventMetadataSetter interface { + SetFailureMetadata(failure types.RuleFailure, enrichedEvent *events.EnrichedEvent) +} diff --git a/pkg/rulemanager/ruleadapters/factory.go b/pkg/rulemanager/ruleadapters/factory.go new file mode 100644 index 000000000..03d9af96f --- /dev/null +++ b/pkg/rulemanager/ruleadapters/factory.go @@ -0,0 +1,43 @@ +package ruleadapters + +import ( + "github.com/kubescape/node-agent/pkg/rulemanager/ruleadapters/adapters" + "github.com/kubescape/node-agent/pkg/utils" +) + +type EventRuleAdapterFactory struct { + adapters map[utils.EventType]EventRuleAdapter +} + +func NewEventRuleAdapterFactory() *EventRuleAdapterFactory { + factory := &EventRuleAdapterFactory{ + adapters: make(map[utils.EventType]EventRuleAdapter), + } + factory.registerAllAdapters() + return factory +} + +func (f *EventRuleAdapterFactory) GetAdapter(eventType utils.EventType) (EventRuleAdapter, bool) { + adapter, exists := f.adapters[eventType] + return adapter, exists +} + +func (f *EventRuleAdapterFactory) RegisterAdapter(eventType utils.EventType, adapter EventRuleAdapter) { + f.adapters[eventType] = adapter +} + +func (f *EventRuleAdapterFactory) registerAllAdapters() { + f.RegisterAdapter(utils.ExecveEventType, adapters.NewExecAdapter()) + f.RegisterAdapter(utils.OpenEventType, adapters.NewOpenAdapter()) + f.RegisterAdapter(utils.CapabilitiesEventType, adapters.NewCapabilitiesAdapter()) + f.RegisterAdapter(utils.DnsEventType, adapters.NewDnsAdapter()) + f.RegisterAdapter(utils.NetworkEventType, adapters.NewNetworkAdapter()) + f.RegisterAdapter(utils.SyscallEventType, adapters.NewSyscallAdapter()) + f.RegisterAdapter(utils.SymlinkEventType, adapters.NewSymlinkAdapter()) + f.RegisterAdapter(utils.HardlinkEventType, adapters.NewHardlinkAdapter()) + f.RegisterAdapter(utils.SSHEventType, adapters.NewSSHAdapter()) + f.RegisterAdapter(utils.HTTPEventType, adapters.NewHTTPAdapter()) + f.RegisterAdapter(utils.PtraceEventType, adapters.NewPtraceAdapter()) + f.RegisterAdapter(utils.IoUringEventType, adapters.NewIoUringAdapter()) + f.RegisterAdapter(utils.RandomXEventType, adapters.NewRandomXAdapter()) +} diff --git a/pkg/rulemanager/ruleadapters/rule_event_expansion.go b/pkg/rulemanager/ruleadapters/rule_event_expansion.go new file mode 100644 index 000000000..2645cb49b --- /dev/null +++ b/pkg/rulemanager/ruleadapters/rule_event_expansion.go @@ -0,0 +1,12 @@ +package ruleadapters + +import ( + "github.com/kubescape/node-agent/pkg/ebpf/events" + "github.com/kubescape/node-agent/pkg/rulemanager/types" +) + +type EventRuleAdapter interface { + SetFailureMetadata(failure types.RuleFailure, enrichedEvent *events.EnrichedEvent) + + ToMap(enrichedEvent *events.EnrichedEvent) map[string]interface{} +} diff --git a/pkg/rulemanager/v1/rulecooldown/rulecooldown.go b/pkg/rulemanager/rulecooldown/rulecooldown.go similarity index 71% rename from pkg/rulemanager/v1/rulecooldown/rulecooldown.go rename to pkg/rulemanager/rulecooldown/rulecooldown.go index a1b443666..cc8e61cb6 100644 --- a/pkg/rulemanager/v1/rulecooldown/rulecooldown.go +++ b/pkg/rulemanager/rulecooldown/rulecooldown.go @@ -5,7 +5,6 @@ import ( "github.com/hashicorp/golang-lru/v2/expirable" "github.com/kubescape/go-logger" - "github.com/kubescape/node-agent/pkg/ruleengine" ) type RuleCooldownConfig struct { @@ -33,12 +32,10 @@ func NewRuleCooldown(config RuleCooldownConfig) *RuleCooldown { } } 
-func (rc *RuleCooldown) ShouldCooldown(ruleFailures ruleengine.RuleFailure) (bool, int) { - alert := ruleFailures.GetBaseRuntimeAlert() - key := alert.UniqueID + ruleFailures.GetRuntimeProcessDetails().ContainerID + ruleFailures.GetRuleId() +func (rc *RuleCooldown) ShouldCooldown(uniqueID string, containerID string, ruleID string) (bool, int) { + key := uniqueID + containerID + ruleID - // If we're not on profile failure, and the profile failed, don't cooldown - if !rc.cooldownConfig.OnProfileFailure && alert.ProfileMetadata.FailOnProfile { + if !rc.cooldownConfig.OnProfileFailure { return false, 1 } diff --git a/pkg/rulemanager/v1/rulecooldown/rulecooldown_test.go b/pkg/rulemanager/rulecooldown/rulecooldown_test.go similarity index 73% rename from pkg/rulemanager/v1/rulecooldown/rulecooldown_test.go rename to pkg/rulemanager/rulecooldown/rulecooldown_test.go index 392282129..ded2dd121 100644 --- a/pkg/rulemanager/v1/rulecooldown/rulecooldown_test.go +++ b/pkg/rulemanager/rulecooldown/rulecooldown_test.go @@ -6,7 +6,7 @@ import ( "time" apitypes "github.com/armosec/armoapi-go/armotypes" - "github.com/kubescape/node-agent/pkg/ruleengine/v1" + "github.com/kubescape/node-agent/pkg/rulemanager/types" "github.com/stretchr/testify/assert" ) @@ -14,7 +14,7 @@ func TestShouldCooldown(t *testing.T) { tests := []struct { name string config RuleCooldownConfig - ruleFailure *ruleengine.GenericRuleFailure + ruleFailure *types.GenericRuleFailure expectedCooldown bool expectedCount int iterations int @@ -29,7 +29,7 @@ func TestShouldCooldown(t *testing.T) { OnProfileFailure: true, MaxSize: 1000, }, - ruleFailure: &ruleengine.GenericRuleFailure{ + ruleFailure: &types.GenericRuleFailure{ BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ UniqueID: "test-alert-1", }, @@ -50,7 +50,7 @@ func TestShouldCooldown(t *testing.T) { OnProfileFailure: true, MaxSize: 1000, }, - ruleFailure: &ruleengine.GenericRuleFailure{ + ruleFailure: &types.GenericRuleFailure{ BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ UniqueID: "test-alert-2", }, @@ -71,7 +71,7 @@ func TestShouldCooldown(t *testing.T) { OnProfileFailure: false, MaxSize: 1000, }, - ruleFailure: &ruleengine.GenericRuleFailure{ + ruleFailure: &types.GenericRuleFailure{ BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ UniqueID: "test-alert-3", ProfileMetadata: &apitypes.ProfileMetadata{ @@ -95,7 +95,7 @@ func TestShouldCooldown(t *testing.T) { OnProfileFailure: true, MaxSize: 1000, }, - ruleFailure: &ruleengine.GenericRuleFailure{ + ruleFailure: &types.GenericRuleFailure{ BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ UniqueID: "test-alert-4", }, @@ -117,7 +117,7 @@ func TestShouldCooldown(t *testing.T) { OnProfileFailure: true, MaxSize: 1000, }, - ruleFailure: &ruleengine.GenericRuleFailure{ + ruleFailure: &types.GenericRuleFailure{ BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ UniqueID: "test-alert-3", ProfileMetadata: &apitypes.ProfileMetadata{ @@ -150,7 +150,7 @@ func TestShouldCooldown(t *testing.T) { if i > 0 && tt.waitBetweenCalls > 0 { time.Sleep(tt.waitBetweenCalls) } - lastCooldown, lastCount = rc.ShouldCooldown(tt.ruleFailure) + lastCooldown, lastCount = rc.ShouldCooldown(tt.ruleFailure.BaseRuntimeAlert.UniqueID, tt.ruleFailure.RuntimeProcessDetails.ContainerID, tt.ruleFailure.GetRuleId()) } assert.Equal(t, tt.expectedCooldown, lastCooldown) @@ -167,7 +167,7 @@ func TestShouldCooldownImmediate(t *testing.T) { MaxSize: 1000, }) - ruleFailure := &ruleengine.GenericRuleFailure{ + ruleFailure := &types.GenericRuleFailure{ BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ 
UniqueID: "test-alert-immediate", }, @@ -177,12 +177,12 @@ func TestShouldCooldownImmediate(t *testing.T) { } // First call should trigger cooldown immediately - cooldown, count := rc.ShouldCooldown(ruleFailure) + cooldown, count := rc.ShouldCooldown(ruleFailure.BaseRuntimeAlert.UniqueID, ruleFailure.RuntimeProcessDetails.ContainerID, ruleFailure.GetRuleId()) assert.False(t, cooldown) assert.Equal(t, 1, count) // Second call should still be in cooldown - cooldown, count = rc.ShouldCooldown(ruleFailure) + cooldown, count = rc.ShouldCooldown(ruleFailure.BaseRuntimeAlert.UniqueID, ruleFailure.RuntimeProcessDetails.ContainerID, ruleFailure.GetRuleId()) assert.True(t, cooldown) assert.Equal(t, 2, count) } @@ -195,7 +195,7 @@ func TestShouldCooldownOnProfileFailure(t *testing.T) { MaxSize: 1000, }) - ruleFailure := &ruleengine.GenericRuleFailure{ + ruleFailure := &types.GenericRuleFailure{ BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ UniqueID: "test-alert-profile", ProfileMetadata: &apitypes.ProfileMetadata{ @@ -208,17 +208,17 @@ func TestShouldCooldownOnProfileFailure(t *testing.T) { } // First call should not cooldown - cooldown, count := rc.ShouldCooldown(ruleFailure) + cooldown, count := rc.ShouldCooldown(ruleFailure.BaseRuntimeAlert.UniqueID, ruleFailure.RuntimeProcessDetails.ContainerID, ruleFailure.GetRuleId()) assert.False(t, cooldown) assert.Equal(t, 1, count) // Second call should not cooldown - cooldown, count = rc.ShouldCooldown(ruleFailure) + cooldown, count = rc.ShouldCooldown(ruleFailure.BaseRuntimeAlert.UniqueID, ruleFailure.RuntimeProcessDetails.ContainerID, ruleFailure.GetRuleId()) assert.False(t, cooldown) assert.Equal(t, 2, count) // Third call should cooldown - cooldown, count = rc.ShouldCooldown(ruleFailure) + cooldown, count = rc.ShouldCooldown(ruleFailure.BaseRuntimeAlert.UniqueID, ruleFailure.RuntimeProcessDetails.ContainerID, ruleFailure.GetRuleId()) assert.True(t, cooldown) assert.Equal(t, 3, count) } @@ -232,7 +232,7 @@ func TestShouldCooldownDifferentKeys(t *testing.T) { }) // First rule failure - ruleFailure1 := &ruleengine.GenericRuleFailure{ + ruleFailure1 := &types.GenericRuleFailure{ BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ UniqueID: "test-alert-1", }, @@ -242,7 +242,7 @@ func TestShouldCooldownDifferentKeys(t *testing.T) { } // Second rule failure with different key - ruleFailure2 := &ruleengine.GenericRuleFailure{ + ruleFailure2 := &types.GenericRuleFailure{ BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ UniqueID: "test-alert-2", }, @@ -252,22 +252,22 @@ func TestShouldCooldownDifferentKeys(t *testing.T) { } // First failure - first call - cooldown, count := rc.ShouldCooldown(ruleFailure1) + cooldown, count := rc.ShouldCooldown(ruleFailure1.BaseRuntimeAlert.UniqueID, ruleFailure1.RuntimeProcessDetails.ContainerID, ruleFailure1.GetRuleId()) assert.False(t, cooldown) assert.Equal(t, 1, count) // Second failure - first call - cooldown, count = rc.ShouldCooldown(ruleFailure2) + cooldown, count = rc.ShouldCooldown(ruleFailure2.BaseRuntimeAlert.UniqueID, ruleFailure2.RuntimeProcessDetails.ContainerID, ruleFailure2.GetRuleId()) assert.False(t, cooldown) assert.Equal(t, 1, count) // First failure - second call - cooldown, count = rc.ShouldCooldown(ruleFailure1) + cooldown, count = rc.ShouldCooldown(ruleFailure1.BaseRuntimeAlert.UniqueID, ruleFailure1.RuntimeProcessDetails.ContainerID, ruleFailure1.GetRuleId()) assert.False(t, cooldown) assert.Equal(t, 2, count) // Second failure - second call - cooldown, count = rc.ShouldCooldown(ruleFailure2) + cooldown, count = 
rc.ShouldCooldown(ruleFailure2.BaseRuntimeAlert.UniqueID, ruleFailure2.RuntimeProcessDetails.ContainerID, ruleFailure2.GetRuleId()) assert.False(t, cooldown) assert.Equal(t, 2, count) } @@ -283,7 +283,7 @@ func TestShouldCooldownMaxSize(t *testing.T) { // Fill up the cache for i := 0; i < maxSize; i++ { - failure := &ruleengine.GenericRuleFailure{ + failure := &types.GenericRuleFailure{ BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ UniqueID: fmt.Sprintf("test-alert-%d", i), }, @@ -291,11 +291,11 @@ func TestShouldCooldownMaxSize(t *testing.T) { ContainerID: fmt.Sprintf("test-container-%d", i), }, } - rc.ShouldCooldown(failure) + rc.ShouldCooldown(failure.BaseRuntimeAlert.UniqueID, failure.RuntimeProcessDetails.ContainerID, failure.GetRuleId()) } // Add one more to trigger eviction - newFailure := &ruleengine.GenericRuleFailure{ + newFailure := &types.GenericRuleFailure{ BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ UniqueID: "test-alert-new", }, @@ -305,12 +305,12 @@ func TestShouldCooldownMaxSize(t *testing.T) { } // Should not be in cooldown since it's a new entry - cooldown, count := rc.ShouldCooldown(newFailure) + cooldown, count := rc.ShouldCooldown(newFailure.BaseRuntimeAlert.UniqueID, newFailure.RuntimeProcessDetails.ContainerID, newFailure.GetRuleId()) assert.False(t, cooldown) assert.Equal(t, 1, count) // Verify the oldest entry was evicted by trying to access it - oldFailure := &ruleengine.GenericRuleFailure{ + oldFailure := &types.GenericRuleFailure{ BaseRuntimeAlert: apitypes.BaseRuntimeAlert{ UniqueID: "test-alert-0", }, @@ -320,7 +320,7 @@ func TestShouldCooldownMaxSize(t *testing.T) { } // Should not be in cooldown since it was evicted - cooldown, count = rc.ShouldCooldown(oldFailure) + cooldown, count = rc.ShouldCooldown(oldFailure.BaseRuntimeAlert.UniqueID, oldFailure.RuntimeProcessDetails.ContainerID, oldFailure.GetRuleId()) assert.False(t, cooldown) assert.Equal(t, 1, count) } diff --git a/pkg/rulemanager/rulecreator/factory.go b/pkg/rulemanager/rulecreator/factory.go new file mode 100644 index 000000000..7ad18cfc2 --- /dev/null +++ b/pkg/rulemanager/rulecreator/factory.go @@ -0,0 +1,174 @@ +package rulecreator + +import ( + "slices" + "sync" + + typesv1 "github.com/kubescape/node-agent/pkg/rulemanager/types/v1" + "github.com/kubescape/node-agent/pkg/utils" +) + +var _ RuleCreator = (*RuleCreatorImpl)(nil) + +type RuleCreatorImpl struct { + mutex sync.RWMutex + Rules []typesv1.Rule +} + +func NewRuleCreator() *RuleCreatorImpl { + return &RuleCreatorImpl{} +} + +func (r *RuleCreatorImpl) CreateRulesByTags(tags []string) []typesv1.Rule { + var rules []typesv1.Rule + for _, rule := range r.Rules { + for _, tag := range tags { + if slices.Contains(rule.Tags, tag) { + rules = append(rules, rule) + break + } + } + } + return rules +} + +func (r *RuleCreatorImpl) CreateRuleByID(id string) typesv1.Rule { + for _, rule := range r.Rules { + if rule.ID == id { + return rule + } + } + return typesv1.Rule{} +} + +func (r *RuleCreatorImpl) CreateRuleByName(name string) typesv1.Rule { + for _, rule := range r.Rules { + if rule.Name == name { + return rule + } + } + return typesv1.Rule{} +} + +func (r *RuleCreatorImpl) RegisterRule(rule typesv1.Rule) { + r.Rules = append(r.Rules, rule) +} + +func (r *RuleCreatorImpl) CreateRulesByEventType(eventType utils.EventType) []typesv1.Rule { + var rules []typesv1.Rule + for _, rule := range r.Rules { + for _, expression := range rule.Expressions.RuleExpression { + if expression.EventType == eventType { + rules = append(rules, rule) + break + } + } 
+ } + return rules +} + +func (r *RuleCreatorImpl) CreateRulePolicyRulesByEventType(eventType utils.EventType) []typesv1.Rule { + rules := r.CreateRulesByEventType(eventType) + for _, rule := range rules { + if rule.SupportPolicy { + rules = append(rules, rule) + } + } + + return rules +} + +func (r *RuleCreatorImpl) GetAllRuleIDs() []string { + r.mutex.RLock() + defer r.mutex.RUnlock() + + var ruleIDs []string + for _, rule := range r.Rules { + ruleIDs = append(ruleIDs, rule.ID) + } + return ruleIDs +} + +func (r *RuleCreatorImpl) CreateAllRules() []typesv1.Rule { + var rules []typesv1.Rule + for _, rule := range r.Rules { + rules = append(rules, rule) + } + return rules +} + +// SyncRules replaces the current rules with the new set of rules +// It removes rules that are no longer present and adds/updates existing ones +func (r *RuleCreatorImpl) SyncRules(newRules []typesv1.Rule) { + r.mutex.Lock() + defer r.mutex.Unlock() + + // Create a map of new rules by ID for quick lookup + newRuleMap := make(map[string]typesv1.Rule) + for _, rule := range newRules { + newRuleMap[rule.ID] = rule + } + + // Remove rules that are no longer present + var updatedRules []typesv1.Rule + for _, existingRule := range r.Rules { + if newRule, exists := newRuleMap[existingRule.ID]; exists { + // Rule still exists, use the new version + updatedRules = append(updatedRules, newRule) + delete(newRuleMap, existingRule.ID) // Mark as processed + } + // If rule doesn't exist in newRuleMap, it's removed (not added to updatedRules) + } + + // Add any completely new rules + for _, newRule := range newRuleMap { + updatedRules = append(updatedRules, newRule) + } + + r.Rules = updatedRules +} + +// RemoveRuleByID removes a rule with the given ID and returns true if found +func (r *RuleCreatorImpl) RemoveRuleByID(id string) bool { + r.mutex.Lock() + defer r.mutex.Unlock() + + for i, rule := range r.Rules { + if rule.ID == id { + // Remove the rule by slicing + r.Rules = append(r.Rules[:i], r.Rules[i+1:]...) 
+ return true + } + } + return false +} + +// UpdateRule updates an existing rule or adds it if it doesn't exist +func (r *RuleCreatorImpl) UpdateRule(rule typesv1.Rule) bool { + r.mutex.Lock() + defer r.mutex.Unlock() + + for i, existingRule := range r.Rules { + if existingRule.ID == rule.ID { + r.Rules[i] = rule + return true + } + } + + // Rule not found, add it + r.Rules = append(r.Rules, rule) + return false +} + +// HasRule checks if a rule with the given ID exists +func (r *RuleCreatorImpl) HasRule(id string) bool { + r.mutex.RLock() + defer r.mutex.RUnlock() + + for _, rule := range r.Rules { + if rule.ID == id { + return true + } + } + return false +} diff --git a/pkg/rulemanager/rulecreator/ruleengine_interface.go b/pkg/rulemanager/rulecreator/ruleengine_interface.go new file mode 100644 index 000000000..fe3efeeba --- /dev/null +++ b/pkg/rulemanager/rulecreator/ruleengine_interface.go @@ -0,0 +1,35 @@ +package rulecreator + +import ( + typesv1 "github.com/kubescape/node-agent/pkg/rulemanager/types/v1" + "github.com/kubescape/node-agent/pkg/utils" + + apitypes "github.com/armosec/armoapi-go/armotypes" +) + +// ProfileRequirement indicates how a rule uses profiles +type ProfileRequirement struct { + // ProfileDependency indicates if the rule requires a profile + ProfileDependency apitypes.ProfileDependency + + // ProfileType indicates what type of profile is needed (Application, Network, etc) + ProfileType apitypes.ProfileType +} + +// RuleCreator is an interface for creating rules by tags, IDs, and names +type RuleCreator interface { + CreateRulesByTags(tags []string) []typesv1.Rule + CreateRuleByID(id string) typesv1.Rule + CreateRuleByName(name string) typesv1.Rule + RegisterRule(rule typesv1.Rule) + CreateRulesByEventType(eventType utils.EventType) []typesv1.Rule + CreateRulePolicyRulesByEventType(eventType utils.EventType) []typesv1.Rule + CreateAllRules() []typesv1.Rule + GetAllRuleIDs() []string + + // Dynamic rule management methods for CRD sync + SyncRules(newRules []typesv1.Rule) + RemoveRuleByID(id string) bool + UpdateRule(rule typesv1.Rule) bool + HasRule(id string) bool +} diff --git a/pkg/rulemanager/rulecreator/ruleengine_mock.go b/pkg/rulemanager/rulecreator/ruleengine_mock.go new file mode 100644 index 000000000..a56f82f8b --- /dev/null +++ b/pkg/rulemanager/rulecreator/ruleengine_mock.go @@ -0,0 +1,93 @@ +package rulecreator + +import ( + typesv1 "github.com/kubescape/node-agent/pkg/rulemanager/types/v1" + "github.com/kubescape/node-agent/pkg/utils" +) + +var _ RuleCreator = (*RuleCreatorMock)(nil) + +type RuleCreatorMock struct { + Rules []typesv1.Rule +} + +func (r *RuleCreatorMock) CreateRulesByTags(tags []string) []typesv1.Rule { + var rl []typesv1.Rule + for _, t := range tags { + rl = append(rl, typesv1.Rule{ + Name: t, + Tags: []string{t}, + }) + } + return rl +} + +func (r *RuleCreatorMock) CreateRuleByID(id string) typesv1.Rule { + return typesv1.Rule{ + ID: id, + } +} + +func (r *RuleCreatorMock) CreateRuleByName(name string) typesv1.Rule { + return typesv1.Rule{ + Name: name, + } +} + +func (r *RuleCreatorMock) RegisterRule(rule typesv1.Rule) { +} + +func (r *RuleCreatorMock) CreateRulesByEventType(eventType utils.EventType) []typesv1.Rule { + return []typesv1.Rule{} +} + +func (r *RuleCreatorMock) CreateRulePolicyRulesByEventType(eventType utils.EventType) []typesv1.Rule { + return []typesv1.Rule{} +} + +func (r *RuleCreatorMock) CreateAllRules() []typesv1.Rule { + return []typesv1.Rule{} +} + +func (r *RuleCreatorMock) GetAllRuleIDs() []string { + 
var ids []string + for _, rule := range r.Rules { + ids = append(ids, rule.ID) + } + return ids +} + +// Dynamic rule management methods for CRD sync +func (r *RuleCreatorMock) SyncRules(newRules []typesv1.Rule) { + r.Rules = newRules +} + +func (r *RuleCreatorMock) RemoveRuleByID(id string) bool { + for i, rule := range r.Rules { + if rule.ID == id { + r.Rules = append(r.Rules[:i], r.Rules[i+1:]...) + return true + } + } + return false +} + +func (r *RuleCreatorMock) UpdateRule(rule typesv1.Rule) bool { + for i, existingRule := range r.Rules { + if existingRule.ID == rule.ID { + r.Rules[i] = rule + return true + } + } + r.Rules = append(r.Rules, rule) + return false +} + +func (r *RuleCreatorMock) HasRule(id string) bool { + for _, rule := range r.Rules { + if rule.ID == id { + return true + } + } + return false +} diff --git a/pkg/rulemanager/rulepolicy.go b/pkg/rulemanager/rulepolicy.go new file mode 100644 index 000000000..b155d7a09 --- /dev/null +++ b/pkg/rulemanager/rulepolicy.go @@ -0,0 +1,32 @@ +package rulemanager + +import ( + "slices" + + "github.com/kubescape/node-agent/pkg/objectcache" + "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" +) + +type RulePolicyValidator struct { + objectCache objectcache.ObjectCache +} + +func NewRulePolicyValidator(objectCache objectcache.ObjectCache) *RulePolicyValidator { + return &RulePolicyValidator{ + objectCache: objectCache, + } +} + +func (v *RulePolicyValidator) Validate(ruleId string, process string, ap *v1beta1.ApplicationProfileContainer) (bool, error) { + if _, ok := ap.PolicyByRuleId[ruleId]; !ok { + return false, nil + } + + if policy, ok := ap.PolicyByRuleId[ruleId]; ok { + if policy.AllowedContainer || slices.Contains(policy.AllowedProcesses, process) { + return true, nil + } + } + + return false, nil +} diff --git a/pkg/rulemanager/ruleswatcher/README.md b/pkg/rulemanager/ruleswatcher/README.md new file mode 100644 index 000000000..9f265516c --- /dev/null +++ b/pkg/rulemanager/ruleswatcher/README.md @@ -0,0 +1,131 @@ +# Rule CRD Watcher + +This package implements a watcher for the Rule Custom Resource Definition (CRD) that automatically synchronizes rule definitions from Kubernetes CRDs with the RuleCreator component. + +## Overview + +The Rule CRD watcher provides the following functionality: + +1. **Full Sync Approach**: On any rule change (add/modify/delete), fetches all rules from cluster and syncs the complete set +2. **Enabled/Disabled Support**: Only processes rules that are marked as enabled +3. **Initial Sync**: Loads all existing rules from the cluster on startup +4. 
**Callback Notifications**: Notifies other components when rules are updated + +## Components + +### RulesWatcher Interface + +```go +type RulesWatcher interface { + watcher.Adaptor + InitialSync(ctx context.Context) error +} +``` + +### RulesWatcherImpl + +The main implementation that: +- Watches Rule CRDs for any changes (add/modify/delete) +- On any change, fetches ALL rules from the cluster +- Replaces all rules in RuleCreator with enabled rules from cluster +- Provides callback notifications + +## Usage + +```go +// Create a rule creator +ruleCreator := rulecreator.NewRuleCreator() + +// Define callback for rule updates +callback := func(rules []typesv1.Rule) { + // Handle rule updates + log.Printf("Updated %d rules", len(rules)) +} + +// Create the watcher +rulesWatcher := NewRulesWatcher(k8sClient, ruleCreator, callback) + +// Perform initial sync +if err := rulesWatcher.InitialSync(ctx); err != nil { + return err +} + +// Register with dynamic watcher +watchHandler.AddAdaptor(rulesWatcher) +``` + +## Rule CRD Structure + +The watcher expects Rule CRDs with the following structure: + +```yaml +apiVersion: kubescape.io/v1 +kind: Rule +metadata: + name: example-rule + namespace: default +spec: + enabled: true + id: "rule-001" + name: "Example Rule" + description: "An example security rule" + expressions: + message: "Security violation detected" + unique_id: "example-rule-001" + rule_expression: + - event_type: "exec" + expression: "process.name == 'suspicious'" + profile_dependency: + required: 0 + severity: 5 + support_policy: true + tags: ["security", "example"] + state: {} +``` + +## Features + +### Simple Full Sync Strategy + +Instead of tracking individual rule changes, the watcher uses a simple and reliable approach: + +1. **Any Change Detected** → Fetch all rules from cluster +2. **Filter Enabled Rules** → Only include rules with `enabled: true` +3. **Replace All Rules** → Use `SyncRules()` to replace the complete rule set in RuleCreator + +This approach is: +- **Simple**: No complex change tracking logic +- **Reliable**: Always consistent with cluster state +- **Safe**: No risk of missing updates or partial states + +### Thread Safety + +The RuleCreator implementation includes proper mutex locking to handle concurrent access safely. + +### Extended RuleCreator Interface + +Key method used for dynamic rule management: + +```go +type RuleCreator interface { + // ... existing methods ... + + // Dynamic rule management + SyncRules(newRules []typesv1.Rule) // Replaces all rules with new set + // ... other helper methods ... +} +``` + +## Integration + +The watcher integrates with the existing dynamic watcher system and can be used alongside other watchers like the RuleBinding cache. + +### Event Flow + +``` +Rule CRD Change → AddHandler/ModifyHandler/DeleteHandler → +Fetch All Rules from Cluster → Filter Enabled Rules → +SyncRules() → Callback Notification +``` + +This ensures that any rule change (including enable/disable) is immediately reflected in the RuleCreator. 
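+
+### Agent Version Compatibility
+
+Rules may also carry an `agent_version_requirement` (see the `Rule` type and `watcher.go`). When it is set, the watcher compares it against the `AGENT_VERSION` environment variable using semver constraints and skips rules the running agent does not satisfy; if the variable is unset or either side fails to parse, the rule is allowed for backward compatibility.
+
+Below is a minimal, self-contained sketch of the same constraint check the watcher performs with `github.com/Masterminds/semver/v3`. The version strings and package layout here are illustrative only and are not part of the watcher's API:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/Masterminds/semver/v3"
+)
+
+func main() {
+	// Stand-in for the AGENT_VERSION environment variable.
+	agent, err := semver.NewVersion("1.3.0")
+	if err != nil {
+		panic(err)
+	}
+
+	// Stand-in for a rule's agent_version_requirement field.
+	constraint, err := semver.NewConstraint(">=1.2.0, <2.0.0")
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Println(constraint.Check(agent)) // prints: true
+}
+```
+
+Rules skipped this way are logged at debug level, and the sync summary reports them in the `skippedByVersion` count.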
\ No newline at end of file diff --git a/pkg/rulemanager/ruleswatcher/version_test.go b/pkg/rulemanager/ruleswatcher/version_test.go new file mode 100644 index 000000000..d767a5588 --- /dev/null +++ b/pkg/rulemanager/ruleswatcher/version_test.go @@ -0,0 +1,301 @@ +package ruleswatcher + +import ( + "os" + "testing" +) + +func TestIsAgentVersionCompatible(t *testing.T) { + // Store original value to restore later + originalVersion := os.Getenv("AGENT_VERSION") + defer func() { + if originalVersion == "" { + os.Unsetenv("AGENT_VERSION") + } else { + os.Setenv("AGENT_VERSION", originalVersion) + } + }() + + tests := []struct { + name string + agentVersion string + requirement string + expected bool + description string + }{ + { + name: "exact version match", + agentVersion: "1.2.3", + requirement: "1.2.3", + expected: true, + description: "Should match exact version", + }, + { + name: "greater than requirement satisfied", + agentVersion: "1.3.0", + requirement: ">=1.2.0", + expected: true, + description: "Should satisfy greater than or equal requirement", + }, + { + name: "greater than requirement not satisfied", + agentVersion: "1.1.0", + requirement: ">=1.2.0", + expected: false, + description: "Should not satisfy greater than or equal requirement", + }, + { + name: "less than requirement satisfied", + agentVersion: "1.1.5", + requirement: "<1.2.0", + expected: true, + description: "Should satisfy less than requirement", + }, + { + name: "less than requirement not satisfied", + agentVersion: "1.2.0", + requirement: "<1.2.0", + expected: false, + description: "Should not satisfy less than requirement", + }, + { + name: "range requirement satisfied", + agentVersion: "1.5.2", + requirement: ">=1.2.0, <2.0.0", + expected: true, + description: "Should satisfy range requirement", + }, + { + name: "range requirement not satisfied - too low", + agentVersion: "1.1.0", + requirement: ">=1.2.0, <2.0.0", + expected: false, + description: "Should not satisfy range requirement when version is too low", + }, + { + name: "range requirement not satisfied - too high", + agentVersion: "2.1.0", + requirement: ">=1.2.0, <2.0.0", + expected: false, + description: "Should not satisfy range requirement when version is too high", + }, + { + name: "tilde constraint satisfied", + agentVersion: "1.2.5", + requirement: "~1.2.0", + expected: true, + description: "Should satisfy tilde constraint (patch level changes)", + }, + { + name: "tilde constraint not satisfied", + agentVersion: "1.3.0", + requirement: "~1.2.0", + expected: false, + description: "Should not satisfy tilde constraint for minor version change", + }, + { + name: "caret constraint satisfied", + agentVersion: "1.5.0", + requirement: "^1.2.0", + expected: true, + description: "Should satisfy caret constraint (compatible changes)", + }, + { + name: "caret constraint not satisfied", + agentVersion: "2.0.0", + requirement: "^1.2.0", + expected: false, + description: "Should not satisfy caret constraint for major version change", + }, + { + name: "prerelease version - less than normal", + agentVersion: "1.2.3-alpha.1", + requirement: ">=1.2.0", + expected: false, + description: "Prerelease versions are considered less than normal versions in semver", + }, + { + name: "prerelease version - explicit prerelease constraint", + agentVersion: "1.2.3-alpha.1", + requirement: ">=1.2.3-alpha.0", + expected: true, + description: "Prerelease versions can satisfy explicit prerelease constraints", + }, + { + name: "build metadata version", + agentVersion: "1.2.3+build.1", 
+ requirement: ">=1.2.0", + expected: true, + description: "Should handle build metadata in versions", + }, + { + name: "empty agent version should allow all", + agentVersion: "", + requirement: ">=1.2.0", + expected: true, + description: "Should allow all rules when AGENT_VERSION is not set", + }, + { + name: "invalid agent version should allow all", + agentVersion: "invalid-version", + requirement: ">=1.2.0", + expected: true, + description: "Should allow all rules when AGENT_VERSION is invalid", + }, + { + name: "invalid requirement should allow all", + agentVersion: "1.2.3", + requirement: "invalid-requirement", + expected: true, + description: "Should allow all rules when requirement is invalid", + }, + { + name: "empty requirement string", + agentVersion: "1.2.3", + requirement: "", + expected: true, + description: "Should allow rule when requirement is empty", + }, + { + name: "complex version comparison with prereleases", + agentVersion: "2.1.0-beta.1", + requirement: ">=2.0.0-alpha, <3.0.0-0", + expected: true, + description: "Should handle complex version comparisons with explicit prerelease bounds", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Set the environment variable + if tt.agentVersion == "" { + os.Unsetenv("AGENT_VERSION") + } else { + os.Setenv("AGENT_VERSION", tt.agentVersion) + } + + result := isAgentVersionCompatible(tt.requirement) + if result != tt.expected { + t.Errorf("isAgentVersionCompatible(%q) with AGENT_VERSION=%q = %v, want %v\nDescription: %s", + tt.requirement, tt.agentVersion, result, tt.expected, tt.description) + } + }) + } +} + +func TestIsAgentVersionCompatible_EdgeCases(t *testing.T) { + originalVersion := os.Getenv("AGENT_VERSION") + defer func() { + if originalVersion == "" { + os.Unsetenv("AGENT_VERSION") + } else { + os.Setenv("AGENT_VERSION", originalVersion) + } + }() + + // Test with various malformed versions + malformedTests := []struct { + name string + agentVersion string + requirement string + expected bool + }{ + { + name: "version with v prefix", + agentVersion: "v1.2.3", + requirement: ">=1.2.0", + expected: true, // semver library should handle v prefix + }, + { + name: "version without patch", + agentVersion: "1.2", + requirement: ">=1.2.0", + expected: true, // should work with missing patch + }, + { + name: "requirement with spaces", + agentVersion: "1.2.3", + requirement: " >= 1.2.0 ", + expected: true, // should handle whitespace + }, + { + name: "version with extra dots", + agentVersion: "1.2.3.4", + requirement: ">=1.2.0", + expected: true, // should handle build versions + }, + } + + for _, tt := range malformedTests { + t.Run(tt.name, func(t *testing.T) { + os.Setenv("AGENT_VERSION", tt.agentVersion) + result := isAgentVersionCompatible(tt.requirement) + if result != tt.expected { + t.Errorf("isAgentVersionCompatible(%q) with AGENT_VERSION=%q = %v, want %v", + tt.requirement, tt.agentVersion, result, tt.expected) + } + }) + } +} + +// Benchmark the version compatibility function +func BenchmarkIsAgentVersionCompatible(b *testing.B) { + os.Setenv("AGENT_VERSION", "1.2.3") + requirement := ">=1.2.0, <2.0.0" + + b.ResetTimer() + for i := 0; i < b.N; i++ { + isAgentVersionCompatible(requirement) + } +} + +// Test that ensures the function doesn't panic with various inputs +func TestIsAgentVersionCompatible_NoPanic(t *testing.T) { + originalVersion := os.Getenv("AGENT_VERSION") + defer func() { + if originalVersion == "" { + os.Unsetenv("AGENT_VERSION") + } else { + os.Setenv("AGENT_VERSION", 
originalVersion) + } + }() + + // Test inputs that might cause panics + testInputs := []struct { + agentVersion string + requirement string + }{ + {"", ""}, + {"1.2.3", ""}, + {"", ">=1.2.0"}, + {"null", "null"}, + {"1.2.3", "!@#$%"}, + {"!@#$%", ">=1.2.0"}, + {"1.2.3", ">="}, + {"1.2.3", "<"}, + {"1.2.3", "~"}, + {"1.2.3", "^"}, + {"999999999999999999999999999.0.0", ">=1.0.0"}, + {"1.2.3", ">=999999999999999999999999999.0.0"}, + } + + for _, input := range testInputs { + t.Run(input.agentVersion+"_"+input.requirement, func(t *testing.T) { + defer func() { + if r := recover(); r != nil { + t.Errorf("isAgentVersionCompatible panicked with agentVersion=%q, requirement=%q: %v", + input.agentVersion, input.requirement, r) + } + }() + + if input.agentVersion == "" { + os.Unsetenv("AGENT_VERSION") + } else { + os.Setenv("AGENT_VERSION", input.agentVersion) + } + + // Should not panic regardless of input + _ = isAgentVersionCompatible(input.requirement) + }) + } +} diff --git a/pkg/rulemanager/ruleswatcher/watcher.go b/pkg/rulemanager/ruleswatcher/watcher.go new file mode 100644 index 000000000..45782beb2 --- /dev/null +++ b/pkg/rulemanager/ruleswatcher/watcher.go @@ -0,0 +1,166 @@ +package ruleswatcher + +import ( + "context" + "os" + + "github.com/Masterminds/semver/v3" + "github.com/kubescape/go-logger" + "github.com/kubescape/go-logger/helpers" + "github.com/kubescape/node-agent/pkg/k8sclient" + "github.com/kubescape/node-agent/pkg/rulemanager/rulecreator" + typesv1 "github.com/kubescape/node-agent/pkg/rulemanager/types/v1" + "github.com/kubescape/node-agent/pkg/watcher" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + k8sruntime "k8s.io/apimachinery/pkg/runtime" +) + +var _ RulesWatcher = (*RulesWatcherImpl)(nil) + +type RulesWatcherImpl struct { + ruleCreator rulecreator.RuleCreator + k8sClient k8sclient.K8sClientInterface + callback RulesWatcherCallback + watchResources []watcher.WatchResource +} + +func NewRulesWatcher(k8sClient k8sclient.K8sClientInterface, ruleCreator rulecreator.RuleCreator, callback RulesWatcherCallback) *RulesWatcherImpl { + return &RulesWatcherImpl{ + ruleCreator: ruleCreator, + k8sClient: k8sClient, + callback: callback, + watchResources: []watcher.WatchResource{ + watcher.NewWatchResource(typesv1.RuleGvr, metav1.ListOptions{}), + }, + } +} + +func (w *RulesWatcherImpl) WatchResources() []watcher.WatchResource { + return w.watchResources +} + +func (w *RulesWatcherImpl) AddHandler(ctx context.Context, obj runtime.Object) { + logger.L().Debug("RulesWatcher - rule added, syncing all rules") + w.syncAllRulesAndNotify(ctx) +} + +func (w *RulesWatcherImpl) ModifyHandler(ctx context.Context, obj runtime.Object) { + logger.L().Debug("RulesWatcher - rule modified, syncing all rules") + w.syncAllRulesAndNotify(ctx) +} + +func (w *RulesWatcherImpl) DeleteHandler(ctx context.Context, obj runtime.Object) { + logger.L().Debug("RulesWatcher - rule deleted, syncing all rules") + w.syncAllRulesAndNotify(ctx) +} + +func (w *RulesWatcherImpl) syncAllRulesAndNotify(ctx context.Context) { + if err := w.syncAllRulesFromCluster(ctx); err != nil { + logger.L().Warning("RulesWatcher - failed to sync all rules from cluster", helpers.Error(err)) + return + } + + if w.callback != nil { + w.callback() + logger.L().Debug("RulesWatcher - notified callback with updated rules") + } +} + +// syncAllRulesFromCluster fetches all rules from the cluster and syncs them with the rule creator. 
+// Rules are filtered by: +// 1. Enabled status - only enabled rules are considered +// 2. Agent version compatibility - rules with AgentVersionRequirement are checked against AGENT_VERSION env var using semver +func (w *RulesWatcherImpl) syncAllRulesFromCluster(ctx context.Context) error { + unstructuredList, err := w.k8sClient.GetDynamicClient().Resource(typesv1.RuleGvr).List(ctx, metav1.ListOptions{}) + if err != nil { + return err + } + + var enabledRules []typesv1.Rule + var skippedVersionCount int + for _, item := range unstructuredList.Items { + rules, err := unstructuredToRules(&item) + if err != nil { + logger.L().Warning("RulesWatcher - failed to convert rule during sync", helpers.Error(err)) + continue + } + for _, rule := range rules.Spec.Rules { + if rule.Enabled { + // Check agent version requirement if specified + if rule.AgentVersionRequirement != "" { + if !isAgentVersionCompatible(rule.AgentVersionRequirement) { + logger.L().Debug("RulesWatcher - skipping rule due to agent version requirement", + helpers.String("ruleID", rule.ID), + helpers.String("requirement", rule.AgentVersionRequirement), + helpers.String("agentVersion", os.Getenv("AGENT_VERSION"))) + skippedVersionCount++ + continue + } + } + enabledRules = append(enabledRules, rule) + } + } + } + + w.ruleCreator.SyncRules(enabledRules) + + logger.L().Info("RulesWatcher - synced rules from cluster", + helpers.Int("enabledRules", len(enabledRules)), + helpers.Int("totalRules", len(unstructuredList.Items)), + helpers.Int("skippedByVersion", skippedVersionCount)) + return nil +} + +func (w *RulesWatcherImpl) InitialSync(ctx context.Context) error { + return w.syncAllRulesFromCluster(ctx) +} + +func unstructuredToRules(obj *unstructured.Unstructured) (*typesv1.Rules, error) { + rule := &typesv1.Rules{} + if err := k8sruntime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &rule); err != nil { + return nil, err + } + + return rule, nil +} + +// isAgentVersionCompatible checks if the current agent version satisfies the given requirement +// using semantic versioning constraints. Returns true if compatible, false otherwise. 
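+//
+// Illustrative examples (the agent version here is hypothetical; behaviour follows the
+// fallback rules described below and the cases covered in version_test.go). Assuming
+// AGENT_VERSION="1.3.0":
+//
+//	isAgentVersionCompatible(">=1.2.0, <2.0.0")  // true: constraint satisfied
+//	isAgentVersionCompatible(">=2.0.0")          // false: agent version is too low
+//	isAgentVersionCompatible("not-a-constraint") // true: unparsable constraints allow the rule
+//
+// When AGENT_VERSION is unset or is not valid semver, the function also returns true so that
+// rules are not silently dropped on misconfigured agents.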
+func isAgentVersionCompatible(requirement string) bool { + agentVersion := os.Getenv("AGENT_VERSION") + if agentVersion == "" { + // If AGENT_VERSION is not set, log a warning and allow all rules for backward compatibility + logger.L().Warning("RulesWatcher - AGENT_VERSION environment variable not set, allowing all rules") + return true + } + + // Parse the agent version + currentVersion, err := semver.NewVersion(agentVersion) + if err != nil { + logger.L().Warning("RulesWatcher - invalid agent version format", + helpers.String("agentVersion", agentVersion), + helpers.Error(err)) + return true // Allow rule if we can't parse current version + } + + // Parse the requirement constraint + constraint, err := semver.NewConstraint(requirement) + if err != nil { + logger.L().Warning("RulesWatcher - invalid version constraint in rule", + helpers.String("constraint", requirement), + helpers.Error(err)) + return true // Allow rule if we can't parse the constraint + } + + // Check if current version satisfies the constraint + compatible := constraint.Check(currentVersion) + + logger.L().Debug("RulesWatcher - version compatibility check", + helpers.String("agentVersion", agentVersion), + helpers.String("requirement", requirement), + helpers.Interface("compatible", compatible)) + + return compatible +} diff --git a/pkg/rulemanager/ruleswatcher/watcher_interface.go b/pkg/rulemanager/ruleswatcher/watcher_interface.go new file mode 100644 index 000000000..3a72966bb --- /dev/null +++ b/pkg/rulemanager/ruleswatcher/watcher_interface.go @@ -0,0 +1,14 @@ +package ruleswatcher + +import ( + "context" + + "github.com/kubescape/node-agent/pkg/watcher" +) + +type RulesWatcher interface { + watcher.Adaptor + InitialSync(ctx context.Context) error +} + +type RulesWatcherCallback = func() diff --git a/pkg/rulemanager/types/api.go b/pkg/rulemanager/types/api.go new file mode 100644 index 000000000..9bf50c8a9 --- /dev/null +++ b/pkg/rulemanager/types/api.go @@ -0,0 +1,7 @@ +package types + +const ( + RuleGroup string = "kubescape.io" + RuleKind string = "Rule" + RulePlural string = "rules" +) diff --git a/pkg/ruleengine/v1/failureobj.go b/pkg/rulemanager/types/failure.go similarity index 63% rename from pkg/ruleengine/v1/failureobj.go rename to pkg/rulemanager/types/failure.go index cbba475ac..5a27acf45 100644 --- a/pkg/ruleengine/v1/failureobj.go +++ b/pkg/rulemanager/types/failure.go @@ -1,14 +1,15 @@ -package ruleengine +package types import ( - "github.com/kubescape/node-agent/pkg/ruleengine" - apitypes "github.com/armosec/armoapi-go/armotypes" "github.com/armosec/utils-k8s-go/wlid" igtypes "github.com/inspektor-gadget/inspektor-gadget/pkg/types" ) -var _ ruleengine.RuleFailure = (*GenericRuleFailure)(nil) +const ( + ApplicationProfile = "applicationprofile" + NetworkProfile = "networkprofile" +) type GenericRuleFailure struct { BaseRuntimeAlert apitypes.BaseRuntimeAlert @@ -24,6 +25,52 @@ type GenericRuleFailure struct { Extra interface{} } +type RuleFailure interface { + // Get Base Runtime Alert + GetBaseRuntimeAlert() apitypes.BaseRuntimeAlert + // Get Alert Type + GetAlertType() apitypes.AlertType + // Get Runtime Process Details + GetRuntimeProcessDetails() apitypes.ProcessTree + // Get Trigger Event + GetTriggerEvent() igtypes.Event + // Get Rule Description + GetRuleAlert() apitypes.RuleAlert + // Get K8s Runtime Details + GetRuntimeAlertK8sDetails() apitypes.RuntimeAlertK8sDetails + // Get Rule ID + GetRuleId() string + // Get Cloud Services + GetCloudServices() []string + // Get Http Details + 
GetHttpRuleAlert() apitypes.HttpRuleAlert + // Get Alert Platform + GetAlertPlatform() apitypes.AlertSourcePlatform + // Get Extra + GetExtra() interface{} + + // Set Workload Details + SetWorkloadDetails(workloadDetails string) + // Set Base Runtime Alert + SetBaseRuntimeAlert(baseRuntimeAlert apitypes.BaseRuntimeAlert) + // Set Runtime Process Details + SetRuntimeProcessDetails(runtimeProcessDetails apitypes.ProcessTree) + // Set Trigger Event + SetTriggerEvent(triggerEvent igtypes.Event) + // Set Rule Description + SetRuleAlert(ruleAlert apitypes.RuleAlert) + // Set K8s Runtime Details + SetRuntimeAlertK8sDetails(runtimeAlertK8sDetails apitypes.RuntimeAlertK8sDetails) + // Set Cloud Services + SetCloudServices(cloudServices []string) + // Set Alert Platform + SetAlertPlatform(alertPlatform apitypes.AlertSourcePlatform) + // Set Http Rule Alert + SetHttpRuleAlert(httpRuleAlert apitypes.HttpRuleAlert) + // Set Extra + SetExtra(extra interface{}) +} + func (rule *GenericRuleFailure) GetBaseRuntimeAlert() apitypes.BaseRuntimeAlert { return rule.BaseRuntimeAlert } @@ -106,3 +153,11 @@ func (rule *GenericRuleFailure) SetWorkloadDetails(workloadDetails string) { func (rule *GenericRuleFailure) SetAlertPlatform(alertPlatform apitypes.AlertSourcePlatform) { rule.AlertPlatform = alertPlatform } + +func (rule *GenericRuleFailure) SetHttpRuleAlert(httpRuleAlert apitypes.HttpRuleAlert) { + rule.HttpRuleAlert = httpRuleAlert +} + +func (rule *GenericRuleFailure) SetExtra(extra interface{}) { + rule.Extra = extra +} diff --git a/pkg/ruleengine/types/types.go b/pkg/rulemanager/types/syscall.go similarity index 80% rename from pkg/ruleengine/types/types.go rename to pkg/rulemanager/types/syscall.go index 6f85073fb..baba25cb3 100644 --- a/pkg/ruleengine/types/types.go +++ b/pkg/rulemanager/types/syscall.go @@ -2,7 +2,6 @@ package types import ( eventtypes "github.com/inspektor-gadget/inspektor-gadget/pkg/types" - "github.com/kubescape/node-agent/pkg/ruleengine" ) type SyscallEvent struct { @@ -18,5 +17,9 @@ type SyscallEvent struct { } type Enricher interface { - EnrichRuleFailure(rule ruleengine.RuleFailure) error + EnrichRuleFailure(rule RuleFailure) error +} + +func (e SyscallEvent) GetNamespace() string { + return e.Event.K8s.Namespace } diff --git a/pkg/rulemanager/types/v1/api.go b/pkg/rulemanager/types/v1/api.go new file mode 100644 index 000000000..7242b4bd6 --- /dev/null +++ b/pkg/rulemanager/types/v1/api.go @@ -0,0 +1,17 @@ +package types + +import ( + "github.com/kubescape/node-agent/pkg/rulemanager/types" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const ( + // RuleVersion is the version of Rule + RuleVersion string = "v1" +) + +var RuleGvr = schema.GroupVersionResource{ + Group: types.RuleGroup, + Version: RuleVersion, + Resource: types.RulePlural, +} diff --git a/pkg/rulemanager/types/v1/types.go b/pkg/rulemanager/types/v1/types.go new file mode 100644 index 000000000..1269ddbb3 --- /dev/null +++ b/pkg/rulemanager/types/v1/types.go @@ -0,0 +1,43 @@ +package types + +import ( + apitypes "github.com/armosec/armoapi-go/armotypes" + "github.com/kubescape/node-agent/pkg/utils" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type Rules struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec RulesSpec `json:"spec,omitempty"` +} + +type RulesSpec struct { + Rules []Rule `json:"rules" yaml:"rules"` +} + +type Rule struct { + Enabled bool `json:"enabled" yaml:"enabled"` + ID string `json:"id" yaml:"id"` + Name string `json:"name" yaml:"name"` + 
Description string `json:"description" yaml:"description"` + Expressions RuleExpressions `json:"expressions" yaml:"expressions"` + ProfileDependency apitypes.ProfileDependency `json:"profile_dependency" yaml:"profile_dependency"` + Severity int `json:"severity" yaml:"severity"` + SupportPolicy bool `json:"support_policy" yaml:"support_policy"` + Tags []string `json:"tags" yaml:"tags"` + State map[string]any `json:"state,omitempty" yaml:"state,omitempty"` + AgentVersionRequirement string `json:"agent_version_requirement" yaml:"agent_version_requirement"` +} + +type RuleExpressions struct { + Message string `json:"message" yaml:"message"` + UniqueID string `json:"unique_id" yaml:"unique_id"` + RuleExpression []RuleExpression `json:"rule_expression" yaml:"rule_expression"` +} + +type RuleExpression struct { + EventType utils.EventType `json:"event_type" yaml:"event_type"` + Expression string `json:"expression" yaml:"expression"` +} diff --git a/pkg/rulemanager/v1/rule_manager.go b/pkg/rulemanager/v1/rule_manager.go deleted file mode 100644 index cf8b2b946..000000000 --- a/pkg/rulemanager/v1/rule_manager.go +++ /dev/null @@ -1,510 +0,0 @@ -package rulemanager - -import ( - "context" - "errors" - "fmt" - "path/filepath" - "reflect" - "strings" - "time" - - apitypes "github.com/armosec/armoapi-go/armotypes" - - "github.com/armosec/armoapi-go/armotypes" - backoffv5 "github.com/cenkalti/backoff/v5" - mapset "github.com/deckarep/golang-set/v2" - "github.com/dustin/go-humanize" - "github.com/goradd/maps" - containercollection "github.com/inspektor-gadget/inspektor-gadget/pkg/container-collection" - eventtypes "github.com/inspektor-gadget/inspektor-gadget/pkg/types" - "github.com/kubescape/go-logger" - "github.com/kubescape/go-logger/helpers" - helpersv1 "github.com/kubescape/k8s-interface/instanceidhandler/v1/helpers" - "github.com/kubescape/node-agent/pkg/config" - "github.com/kubescape/node-agent/pkg/dnsmanager" - "github.com/kubescape/node-agent/pkg/ebpf/events" - "github.com/kubescape/node-agent/pkg/exporters" - "github.com/kubescape/node-agent/pkg/k8sclient" - "github.com/kubescape/node-agent/pkg/metricsmanager" - "github.com/kubescape/node-agent/pkg/objectcache" - "github.com/kubescape/node-agent/pkg/processtree" - bindingcache "github.com/kubescape/node-agent/pkg/rulebindingmanager" - "github.com/kubescape/node-agent/pkg/ruleengine" - ruleenginetypes "github.com/kubescape/node-agent/pkg/ruleengine/types" - "github.com/kubescape/node-agent/pkg/rulemanager" - "github.com/kubescape/node-agent/pkg/rulemanager/v1/rulecooldown" - "github.com/kubescape/node-agent/pkg/rulemanager/v1/ruleprocess" - "github.com/kubescape/node-agent/pkg/utils" - corev1 "k8s.io/api/core/v1" -) - -const ( - maxFileSize = 50 * 1024 * 1024 // 50MB - syscallPeriod = 5 * time.Second -) - -type RuleManager struct { - cfg config.Config - ruleBindingCache bindingcache.RuleBindingCache - trackedContainers mapset.Set[string] // key is k8sContainerID - k8sClient k8sclient.K8sClientInterface - ctx context.Context - objectCache objectcache.ObjectCache - exporter exporters.Exporter - metrics metricsmanager.MetricsManager - syscallPeekFunc func(nsMountId uint64) ([]string, error) - podToWlid maps.SafeMap[string, string] // key is namespace/podName - nodeName string - clusterName string - containerIdToShimPid maps.SafeMap[string, uint32] - containerIdToPid maps.SafeMap[string, uint32] - enricher ruleenginetypes.Enricher - processManager processtree.ProcessTreeManager - dnsManager dnsmanager.DNSResolver - ruleCooldown 
*rulecooldown.RuleCooldown -} - -var _ rulemanager.RuleManagerClient = (*RuleManager)(nil) - -func CreateRuleManager(ctx context.Context, cfg config.Config, k8sClient k8sclient.K8sClientInterface, ruleBindingCache bindingcache.RuleBindingCache, objectCache objectcache.ObjectCache, exporter exporters.Exporter, metrics metricsmanager.MetricsManager, nodeName string, clusterName string, processManager processtree.ProcessTreeManager, dnsManager dnsmanager.DNSResolver, enricher ruleenginetypes.Enricher, ruleCooldown *rulecooldown.RuleCooldown) (*RuleManager, error) { - return &RuleManager{ - cfg: cfg, - ctx: ctx, - k8sClient: k8sClient, - trackedContainers: mapset.NewSet[string](), - ruleBindingCache: ruleBindingCache, - objectCache: objectCache, - exporter: exporter, - metrics: metrics, - nodeName: nodeName, - clusterName: clusterName, - enricher: enricher, - processManager: processManager, - dnsManager: dnsManager, - ruleCooldown: ruleCooldown, - }, nil -} - -func (rm *RuleManager) monitorContainer(container *containercollection.Container, k8sContainerID string) error { - logger.L().Debug("RuleManager - start monitor on container", - helpers.String("container ID", container.Runtime.ContainerID), - helpers.String("k8s container id", k8sContainerID)) - - syscallTicker := time.NewTicker(syscallPeriod) - - for { - select { - case <-rm.ctx.Done(): - logger.L().Debug("RuleManager - stop monitor on container", - helpers.String("container ID", container.Runtime.ContainerID), - helpers.String("k8s container id", k8sContainerID)) - return nil - case <-syscallTicker.C: - if rm.syscallPeekFunc == nil { - logger.L().Debug("RuleManager - syscallPeekFunc is not set", helpers.String("container ID", container.Runtime.ContainerID)) - continue - } - - if container.Mntns == 0 { - logger.L().Debug("RuleManager - mount namespace ID is not set", helpers.String("container ID", container.Runtime.ContainerID)) - } - - if !rm.trackedContainers.Contains(k8sContainerID) { - logger.L().Debug("RuleManager - container is not tracked", helpers.String("container ID", container.Runtime.ContainerID)) - return nil - } - - var syscalls []string - if syscallsFromFunc, err := rm.syscallPeekFunc(container.Mntns); err == nil { - syscalls = syscallsFromFunc - } - - if len(syscalls) == 0 { - continue - } - - rules := rm.ruleBindingCache.ListRulesForPod(container.K8s.Namespace, container.K8s.PodName) - for _, syscall := range syscalls { - event := ruleenginetypes.SyscallEvent{ - Event: eventtypes.Event{ - Timestamp: eventtypes.Time(time.Now().UnixNano()), - Type: eventtypes.NORMAL, - CommonData: eventtypes.CommonData{ - Runtime: eventtypes.BasicRuntimeMetadata{ - ContainerID: container.Runtime.ContainerID, - RuntimeName: container.Runtime.RuntimeName, - }, - K8s: eventtypes.K8sMetadata{ - Node: rm.nodeName, - BasicK8sMetadata: eventtypes.BasicK8sMetadata{ - Namespace: container.K8s.Namespace, - PodName: container.K8s.PodName, - PodLabels: container.K8s.PodLabels, - ContainerName: container.K8s.ContainerName, - }, - }, - }, - }, - WithMountNsID: eventtypes.WithMountNsID{ - MountNsID: container.Mntns, - }, - Pid: container.ContainerPid(), - // TODO: Figure out how to get UID, GID and comm from the syscall. 
- // Uid: container.OciConfig.Process.User.UID, - // Gid: container.OciConfig.Process.User.GID, - // Comm: container.OciConfig.Process.Args[0], - SyscallName: syscall, - } - failures := rm.processEvent(utils.SyscallEventType, &event, rules) - for _, failure := range failures { - - tree, err := rm.processManager.GetContainerProcessTree(container.Runtime.ContainerID, event.Pid, true) - if err != nil { - tree = apitypes.Process{ - PID: event.Pid, - } - } - - runtimeProcessDetails := failure.GetRuntimeProcessDetails() - runtimeProcessDetails.ProcessTree = tree - failure.SetRuntimeProcessDetails(runtimeProcessDetails) - - rm.exporter.SendRuleAlert(failure) - } - } - } - } -} - -func (rm *RuleManager) startRuleManager(container *containercollection.Container, k8sContainerID string) { - sharedData, err := rm.waitForSharedContainerData(container.Runtime.ContainerID) - if err != nil { - logger.L().Error("RuleManager - failed to get shared container data", helpers.Error(err)) - return - } - - podID := utils.CreateK8sPodID(container.K8s.Namespace, container.K8s.PodName) - if !rm.podToWlid.Has(podID) { - w := sharedData.Wlid - if w != "" { - rm.podToWlid.Set(podID, w) - } else { - logger.L().Debug("RuleManager - failed to get workload identifier", helpers.String("k8s workload", container.K8s.PodName)) - } - } - - if err := rm.monitorContainer(container, k8sContainerID); err != nil { - logger.L().Debug("RuleManager - stop monitor on container", helpers.String("reason", err.Error()), - helpers.String("container ID", container.Runtime.ContainerID), - helpers.String("k8s container id", k8sContainerID)) - } -} - -func (rm *RuleManager) ContainerCallback(notif containercollection.PubSubEvent) { - // check if the container should be ignored - if rm.cfg.IgnoreContainer(notif.Container.K8s.Namespace, notif.Container.K8s.PodName, notif.Container.K8s.PodLabels) { - return - } - - k8sContainerID := utils.CreateK8sContainerID(notif.Container.K8s.Namespace, notif.Container.K8s.PodName, notif.Container.K8s.ContainerName) - - switch notif.Type { - case containercollection.EventTypeAddContainer: - logger.L().Debug("RuleManager - add container", - helpers.String("container ID", notif.Container.Runtime.ContainerID), - helpers.String("k8s workload", k8sContainerID)) - - if rm.trackedContainers.Contains(k8sContainerID) { - logger.L().Debug("RuleManager - container already exist in memory", - helpers.String("container ID", notif.Container.Runtime.ContainerID), - helpers.String("k8s workload", k8sContainerID)) - return - } - - rm.trackedContainers.Add(k8sContainerID) - shim, err := utils.GetProcessStat(int(notif.Container.ContainerPid())) - if err != nil { - logger.L().Warning("RuleManager - failed to get shim process", helpers.Error(err)) - } else { - rm.containerIdToShimPid.Set(notif.Container.Runtime.ContainerID, uint32(shim.PPID)) - } - rm.containerIdToPid.Set(notif.Container.Runtime.ContainerID, notif.Container.ContainerPid()) - go rm.startRuleManager(notif.Container, k8sContainerID) - case containercollection.EventTypeRemoveContainer: - logger.L().Debug("RuleManager - remove container", - helpers.String("container ID", notif.Container.Runtime.ContainerID), - helpers.String("k8s workload", k8sContainerID)) - - rm.trackedContainers.Remove(k8sContainerID) - namespace := notif.Container.K8s.Namespace - podName := notif.Container.K8s.PodName - podID := utils.CreateK8sPodID(namespace, podName) - - time.AfterFunc(10*time.Minute, func() { - stillTracked := false - rm.trackedContainers.Each(func(id string) bool { - // Parse 
the container ID to reliably extract the pod info - parts := strings.Split(id, "/") - if len(parts) == 3 && parts[0] == namespace && parts[1] == podName { - stillTracked = true - return true // We found a match, can stop iteration - } - return false // No match yet, continue looking - }) - - if !stillTracked { - logger.L().Debug("RuleManager - removing pod from podToWlid map", - helpers.String("podID", podID)) - rm.podToWlid.Delete(podID) - } else { - logger.L().Debug("RuleManager - keeping pod in podToWlid map due to active containers", - helpers.String("podID", podID)) - } - }) - - rm.containerIdToShimPid.Delete(notif.Container.Runtime.ContainerID) - rm.containerIdToPid.Delete(notif.Container.Runtime.ContainerID) - } -} - -func (rm *RuleManager) waitForSharedContainerData(containerID string) (*objectcache.WatchedContainerData, error) { - return backoffv5.Retry(context.Background(), func() (*objectcache.WatchedContainerData, error) { - if sharedData := rm.objectCache.K8sObjectCache().GetSharedContainerData(containerID); sharedData != nil { - return sharedData, nil - } - return nil, fmt.Errorf("container %s not found in shared data", containerID) - }, backoffv5.WithBackOff(backoffv5.NewExponentialBackOff())) -} - -func (rm *RuleManager) RegisterPeekFunc(peek func(mntns uint64) ([]string, error)) { - rm.syscallPeekFunc = peek -} - -func (rm *RuleManager) ReportEnrichedEvent(enrichedEvent *events.EnrichedEvent) { - event := enrichedEvent.Event - if event.GetNamespace() == "" || event.GetPod() == "" { - logger.L().Warning("RuleManager - failed to get namespace and pod name from custom event", - helpers.String("event", fmt.Sprintf("%+v", enrichedEvent)), - helpers.String("eventType", string(enrichedEvent.EventType))) - return - } - - // list custom rules - rules := rm.ruleBindingCache.ListRulesForPod(event.GetNamespace(), event.GetPod()) - - failures := rm.processEvent(enrichedEvent.EventType, event, rules) - for _, failure := range failures { - if enrichedEvent.ProcessTree.PID == 0 { - process := apitypes.Process{ - PID: enrichedEvent.PID, - PPID: enrichedEvent.PPID, - } - - if tree, err := utils.CreateProcessTree(&process, - rm.containerIdToShimPid.Get(enrichedEvent.ContainerID)); err == nil { - enrichedEvent.ProcessTree = tree - } else { - logger.L().Error("RuleManager - failed to create process tree fallback", helpers.Error(err)) - enrichedEvent.ProcessTree = process - } - } - runtimeProcessDetails := failure.GetRuntimeProcessDetails() - runtimeProcessDetails.ProcessTree = enrichedEvent.ProcessTree - failure.SetRuntimeProcessDetails(runtimeProcessDetails) - rm.exporter.SendRuleAlert(failure) - } -} - -func (rm *RuleManager) ReportEvent(eventType utils.EventType, event utils.K8sEvent) { - logger.L().Info("RuleManager - Deprecated - ReportEvent") -} - -func (rm *RuleManager) processEvent(eventType utils.EventType, event utils.K8sEvent, rules []ruleengine.RuleEvaluator) []ruleengine.RuleFailure { - results := []ruleengine.RuleFailure{} - podId := utils.CreateK8sPodID(event.GetNamespace(), event.GetPod()) - details, ok := rm.podToWlid.Load(podId) - if !ok { - logger.L().Debug("RuleManager - pod not present in podToWlid, skipping event", helpers.String("podId", podId)) - return nil - } - for _, rule := range rules { - if rule == nil { - continue - } - - if !isEventRelevant(rule.Requirements(), eventType) { - continue - } - - res := ruleprocess.ProcessRule(rule, eventType, event, rm.objectCache) - if res != nil { - shouldCooldown, count := rm.ruleCooldown.ShouldCooldown(res) - if shouldCooldown 
{ - logger.L().Debug("RuleManager - rule cooldown", helpers.String("rule", rule.Name()), helpers.Int("seen_count", count)) - continue - } - - res = rm.enrichRuleFailure(res) - if res != nil { - res.SetWorkloadDetails(details) - results = append(results, res) - } - rm.metrics.ReportRuleAlert(rule.Name()) - } - rm.metrics.ReportRuleProcessed(rule.Name()) - } - return results -} - -func (rm *RuleManager) enrichRuleFailure(ruleFailure ruleengine.RuleFailure) ruleengine.RuleFailure { - var err error - var path string - var hostPath string - ruleFailure.SetAlertPlatform(armotypes.AlertSourcePlatformK8s) - if ruleFailure.GetRuntimeProcessDetails().ProcessTree.Path == "" { - path, err = utils.GetPathFromPid(ruleFailure.GetRuntimeProcessDetails().ProcessTree.PID) - } - - if err != nil { - if ruleFailure.GetRuntimeProcessDetails().ProcessTree.Path != "" { - hostPath = filepath.Join("/proc", fmt.Sprintf("/%d/root/%s", rm.containerIdToPid.Get(ruleFailure.GetTriggerEvent().Runtime.ContainerID), ruleFailure.GetRuntimeProcessDetails().ProcessTree.Path)) - } - } else { - hostPath = filepath.Join("/proc", fmt.Sprintf("/%d/root/%s", ruleFailure.GetRuntimeProcessDetails().ProcessTree.PID, path)) - } - - // Enrich BaseRuntimeAlert - baseRuntimeAlert := ruleFailure.GetBaseRuntimeAlert() - - baseRuntimeAlert.Timestamp = time.Unix(0, int64(ruleFailure.GetTriggerEvent().Timestamp)) - var size int64 = 0 - if hostPath != "" { - size, err = utils.GetFileSize(hostPath) - if err != nil { - size = 0 - } - } - - if baseRuntimeAlert.Size == "" && hostPath != "" && size != 0 { - baseRuntimeAlert.Size = humanize.Bytes(uint64(size)) - } - - if size != 0 && size < maxFileSize && hostPath != "" { - if baseRuntimeAlert.MD5Hash == "" || baseRuntimeAlert.SHA1Hash == "" { - sha1hash, md5hash, err := utils.CalculateFileHashes(hostPath) - if err == nil { - baseRuntimeAlert.MD5Hash = md5hash - baseRuntimeAlert.SHA1Hash = sha1hash - } - } - } - - ruleFailure.SetBaseRuntimeAlert(baseRuntimeAlert) - - // Enrich RuntimeAlertK8sDetails - runtimek8sdetails := ruleFailure.GetRuntimeAlertK8sDetails() - if runtimek8sdetails.Image == "" { - runtimek8sdetails.Image = ruleFailure.GetTriggerEvent().Runtime.ContainerImageName - } - - if runtimek8sdetails.ImageDigest == "" { - runtimek8sdetails.ImageDigest = ruleFailure.GetTriggerEvent().Runtime.ContainerImageDigest - } - - if runtimek8sdetails.Namespace == "" { - runtimek8sdetails.Namespace = ruleFailure.GetTriggerEvent().K8s.Namespace - } - - if runtimek8sdetails.PodName == "" { - runtimek8sdetails.PodName = ruleFailure.GetTriggerEvent().K8s.PodName - } - - if runtimek8sdetails.PodNamespace == "" { - runtimek8sdetails.PodNamespace = ruleFailure.GetTriggerEvent().K8s.Namespace - } - - if runtimek8sdetails.ContainerName == "" { - runtimek8sdetails.ContainerName = ruleFailure.GetTriggerEvent().K8s.ContainerName - } - - if runtimek8sdetails.ContainerID == "" { - runtimek8sdetails.ContainerID = ruleFailure.GetTriggerEvent().Runtime.ContainerID - } - - if runtimek8sdetails.HostNetwork == nil { - hostNetwork := ruleFailure.GetTriggerEvent().K8s.HostNetwork - runtimek8sdetails.HostNetwork = &hostNetwork - } - - ruleFailure.SetRuntimeAlertK8sDetails(runtimek8sdetails) - - if cloudServices := rm.dnsManager.ResolveContainerProcessToCloudServices(ruleFailure.GetTriggerEvent().Runtime.ContainerID, ruleFailure.GetBaseRuntimeAlert().InfectedPID); cloudServices != nil { - ruleFailure.SetCloudServices(cloudServices.ToSlice()) - } - - if rm.enricher != nil && !reflect.ValueOf(rm.enricher).IsNil() { - if err := 
rm.enricher.EnrichRuleFailure(ruleFailure); err != nil { - if errors.Is(err, ruleprocess.ErrRuleShouldNotBeAlerted) { - return nil - } - } - } - - return ruleFailure -} - -// Checks if the event type is relevant to the rule. -func isEventRelevant(ruleSpec ruleengine.RuleSpec, eventType utils.EventType) bool { - for _, i := range ruleSpec.RequiredEventTypes() { - if i == eventType { - return true - } - } - return false -} - -func (rm *RuleManager) HasApplicableRuleBindings(namespace, name string) bool { - return len(rm.ruleBindingCache.ListRulesForPod(namespace, name)) > 0 -} - -func (rm *RuleManager) HasFinalApplicationProfile(pod *corev1.Pod) bool { - for _, c := range utils.GetContainerStatuses(pod.Status) { - ap := rm.objectCache.ApplicationProfileCache().GetApplicationProfile(utils.TrimRuntimePrefix(c.ContainerID)) - if ap != nil { - if status, ok := ap.Annotations[helpersv1.StatusMetadataKey]; ok { - // in theory, only completed profiles are stored in cache, but we check anyway - return status == helpersv1.Completed - } - } - } - return false -} - -func (rm *RuleManager) IsContainerMonitored(k8sContainerID string) bool { - return rm.trackedContainers.Contains(k8sContainerID) -} - -func (rm *RuleManager) IsPodMonitored(namespace, pod string) bool { - return rm.podToWlid.Has(utils.CreateK8sPodID(namespace, pod)) -} - -func (rm *RuleManager) EvaluatePolicyRulesForEvent(eventType utils.EventType, event utils.K8sEvent) []string { - results := []string{} - - creator := rm.ruleBindingCache.GetRuleCreator() - rules := creator.CreateRulePolicyRulesByEventType(eventType) - - for _, rule := range rules { - rule, ok := rule.(ruleengine.RuleCondition) - if !ok { - continue - } - - if detectionResult := rule.EvaluateRule(eventType, event, rm.objectCache.K8sObjectCache()); detectionResult.IsFailure { - results = append(results, rule.ID()) - } - } - - return results -} diff --git a/pkg/rulemanager/v1/ruleprocess/errors.go b/pkg/rulemanager/v1/ruleprocess/errors.go deleted file mode 100644 index 9ae5be92d..000000000 --- a/pkg/rulemanager/v1/ruleprocess/errors.go +++ /dev/null @@ -1,6 +0,0 @@ -package ruleprocess - -import "fmt" - -var ErrRuleShouldNotBeAlerted = fmt.Errorf("rule should not be alerted") -var NoProfileAvailable = fmt.Errorf("no profile available") diff --git a/pkg/rulemanager/v1/ruleprocess/process.go b/pkg/rulemanager/v1/ruleprocess/process.go deleted file mode 100644 index aa187e394..000000000 --- a/pkg/rulemanager/v1/ruleprocess/process.go +++ /dev/null @@ -1,114 +0,0 @@ -package ruleprocess - -import ( - "errors" - "fmt" - - "github.com/armosec/armoapi-go/armotypes" - "github.com/kubescape/node-agent/pkg/objectcache" - "github.com/kubescape/node-agent/pkg/ruleengine" - "github.com/kubescape/node-agent/pkg/utils" -) - -func ProcessRule(rule ruleengine.RuleEvaluator, eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache) ruleengine.RuleFailure { - profileDependency := rule.Requirements().GetProfileRequirements().ProfileDependency - - // Handle profile-based evaluation - if profileDependency == armotypes.Required || profileDependency == armotypes.Optional { - return processWithProfile(rule, eventType, event, objCache, profileDependency) - } - - // Handle basic evaluation (no profile dependency) - return processBasicRule(rule, eventType, event, objCache) -} - -func processWithProfile(rule ruleengine.RuleEvaluator, eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache, profileDependency armotypes.ProfileDependency) 
ruleengine.RuleFailure { - result, err := rule.EvaluateRuleWithProfile(eventType, event, objCache) - - // Handle profile evaluation results - switch { - case errors.Is(err, NoProfileAvailable): - if profileDependency == armotypes.Required { - return nil // Required profile not available - no failure - } - // Optional profile not available - fall back to basic evaluation - ruleFailure := processBasicRule(rule, eventType, event, objCache) - if ruleFailure != nil { - setProfileMetadata(rule, ruleFailure, objCache, false) - return ruleFailure - } - return nil // No failure from basic evaluation - case result.IsFailure: - // Profile evaluation failed - create failure with profile metadata - return createRuleFailureWithProfile(rule, eventType, event, objCache, result, true) - - default: - // Profile evaluation passed - no failure - return nil - } -} - -func processBasicRule(rule ruleengine.RuleEvaluator, eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache) ruleengine.RuleFailure { - result := rule.EvaluateRule(eventType, event, objCache.K8sObjectCache()) - if !result.IsFailure { - return nil - } - - ruleFailure := rule.CreateRuleFailure(eventType, event, objCache, result) - return ruleFailure -} - -func createRuleFailureWithProfile(rule ruleengine.RuleEvaluator, eventType utils.EventType, event utils.K8sEvent, objCache objectcache.ObjectCache, payload ruleengine.DetectionResult, failOnProfile bool) ruleengine.RuleFailure { - ruleFailure := rule.CreateRuleFailure(eventType, event, objCache, payload) - if ruleFailure == nil { - return nil - } - - setProfileMetadata(rule, ruleFailure, objCache, failOnProfile) - return ruleFailure -} - -func setProfileMetadata(rule ruleengine.RuleEvaluator, ruleFailure ruleengine.RuleFailure, objectCache objectcache.ObjectCache, failOnProfile bool) { - baseRuntimeAlert := ruleFailure.GetBaseRuntimeAlert() - profileReq := rule.Requirements().GetProfileRequirements() - - switch profileReq.ProfileType { - case armotypes.ApplicationProfile: - state := objectCache.ApplicationProfileCache().GetApplicationProfileState(ruleFailure.GetTriggerEvent().Runtime.ContainerID) - if state != nil { - profileMetadata := &armotypes.ProfileMetadata{ - Status: state.Status, - Completion: state.Completion, - Name: state.Name, - FailOnProfile: failOnProfile, - Type: armotypes.ApplicationProfile, - ProfileDependency: profileReq.ProfileDependency, - Error: state.Error, - } - baseRuntimeAlert.ProfileMetadata = profileMetadata - } - - case armotypes.NetworkProfile: - state := objectCache.NetworkNeighborhoodCache().GetNetworkNeighborhoodState(ruleFailure.GetTriggerEvent().Runtime.ContainerID) - if state != nil { - profileMetadata := &armotypes.ProfileMetadata{ - Status: state.Status, - Completion: state.Completion, - Name: state.Name, - FailOnProfile: failOnProfile, - Type: armotypes.NetworkProfile, - ProfileDependency: profileReq.ProfileDependency, - Error: state.Error, - } - baseRuntimeAlert.ProfileMetadata = profileMetadata - } - default: - profileMetadata := &armotypes.ProfileMetadata{ - ProfileDependency: profileReq.ProfileDependency, - FailOnProfile: failOnProfile, - Error: fmt.Errorf("profile type %d not supported", profileReq.ProfileType), - } - baseRuntimeAlert.ProfileMetadata = profileMetadata - } - ruleFailure.SetBaseRuntimeAlert(baseRuntimeAlert) -} diff --git a/pkg/sbommanager/v1/sbom_manager.go b/pkg/sbommanager/v1/sbom_manager.go index 26b9e835e..4589d185f 100644 --- a/pkg/sbommanager/v1/sbom_manager.go +++ b/pkg/sbommanager/v1/sbom_manager.go @@ 
-157,13 +157,34 @@ func (s *SbomManager) ContainerCallback(notif containercollection.PubSubEvent) { if s.cfg.IgnoreContainer(notif.Container.K8s.Namespace, notif.Container.K8s.PodName, notif.Container.K8s.PodLabels) { return } + // get container mounts + pid := strconv.Itoa(int(notif.Container.ContainerPid())) + mounts, err := s.getMountedVolumes(pid) + if err != nil { + logger.L().Ctx(s.ctx).Error("SbomManager - failed to get mounted volumes", + helpers.Error(err), + helpers.String("namespace", notif.Container.K8s.Namespace), + helpers.String("pod", notif.Container.K8s.PodName), + helpers.String("container", notif.Container.K8s.ContainerName)) + return + } + // get image layers + imageStatus, err := s.getImageStatus(notif.Container.Runtime.ContainerImageName) // use original name to ask the CRI + if err != nil { + logger.L().Ctx(s.ctx).Error("SbomManager - failed to get image layers", + helpers.Error(err), + helpers.String("namespace", notif.Container.K8s.Namespace), + helpers.String("pod", notif.Container.K8s.PodName), + helpers.String("container", notif.Container.K8s.ContainerName)) + return + } // enqueue the container for processing s.pool.Submit(func() { - s.processContainer(notif) + s.processContainer(notif, mounts, imageStatus) }, utils.FuncName(s.processContainer)) } -func (s *SbomManager) processContainer(notif containercollection.PubSubEvent) { +func (s *SbomManager) processContainer(notif containercollection.PubSubEvent, mounts []string, imageStatus *runtime.ImageStatusResponse) { sharedData, err := s.waitForSharedContainerData(notif.Container.Runtime.ContainerID) if err != nil { logger.L().Error("SbomManager - container not found in shared data", @@ -283,29 +304,6 @@ func (s *SbomManager) processContainer(notif containercollection.PubSubEvent) { // track SBOM as processing in internal state to prevent concurrent processing s.processing.Add(sbomName) defer s.processing.Remove(sbomName) - // get container mounts - pid := strconv.Itoa(int(notif.Container.ContainerPid())) - mounts, err := s.getMountedVolumes(pid) - if err != nil { - logger.L().Ctx(s.ctx).Error("SbomManager - failed to get mounted volumes", - helpers.Error(err), - helpers.String("namespace", notif.Container.K8s.Namespace), - helpers.String("pod", notif.Container.K8s.PodName), - helpers.String("container", notif.Container.K8s.ContainerName), - helpers.String("sbomName", sbomName)) - return - } - // get image layers - imageStatus, err := s.getImageStatus(notif.Container.Runtime.ContainerImageName) // use original name to ask the CRI - if err != nil { - logger.L().Ctx(s.ctx).Error("SbomManager - failed to get image layers", - helpers.Error(err), - helpers.String("namespace", notif.Container.K8s.Namespace), - helpers.String("pod", notif.Container.K8s.PodName), - helpers.String("container", notif.Container.K8s.ContainerName), - helpers.String("sbomName", sbomName)) - return - } // prepare image source src, err := NewSource(sharedData.ImageTag, sharedData.ImageID, imageID, imageStatus, mounts, s.cfg.MaxImageSize) if err != nil { diff --git a/pkg/utils/events.go b/pkg/utils/events.go index deaa6e925..ae3392e00 100644 --- a/pkg/utils/events.go +++ b/pkg/utils/events.go @@ -3,6 +3,7 @@ package utils import ( "fmt" "path/filepath" + "reflect" tracerexectype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/exec/types" traceropentype "github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/trace/open/types" @@ -28,23 +29,23 @@ type EnrichEvent interface { type EventType string const ( - ExecveEventType 
EventType = "exec" - OpenEventType EventType = "open" + AllEventType EventType = "all" CapabilitiesEventType EventType = "capabilities" DnsEventType EventType = "dns" - NetworkEventType EventType = "network" - SyscallEventType EventType = "syscall" - RandomXEventType EventType = "randomx" - SymlinkEventType EventType = "symlink" - HardlinkEventType EventType = "hardlink" - SSHEventType EventType = "ssh" + ExecveEventType EventType = "exec" + ExitEventType EventType = "exit" + ForkEventType EventType = "fork" HTTPEventType EventType = "http" - PtraceEventType EventType = "ptrace" + HardlinkEventType EventType = "hardlink" IoUringEventType EventType = "iouring" - ForkEventType EventType = "fork" - ExitEventType EventType = "exit" + NetworkEventType EventType = "network" + OpenEventType EventType = "open" ProcfsEventType EventType = "procfs" - AllEventType EventType = "all" + PtraceEventType EventType = "ptrace" + RandomXEventType EventType = "randomx" + SSHEventType EventType = "ssh" + SymlinkEventType EventType = "symlink" + SyscallEventType EventType = "syscall" ) // Get the path of the file on the node. @@ -79,3 +80,62 @@ func GetExecArgsFromEvent(event *tracerexectype.Event) []string { } return []string{} } + +func GetCommFromEvent(event any) string { + if event == nil { + return "" + } + + v := reflect.ValueOf(event) + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + + // Only proceed if it's a struct + if v.Kind() != reflect.Struct { + return "" + } + + if commField := v.FieldByName("Comm"); commField.IsValid() && commField.Kind() == reflect.String { + return commField.String() + } + + return "" +} + +// GetContainerIDFromEvent uses reflection to extract the ContainerID from any event type +// without requiring type conversion. Returns empty string if ContainerID field is not found. 
+func GetContainerIDFromEvent(event interface{}) string { + if event == nil { + return "" + } + + v := reflect.ValueOf(event) + // Handle pointer types + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + + // Only proceed if it's a struct + if v.Kind() != reflect.Struct { + return "" + } + + // Try to get the Runtime field first + if runtimeField := v.FieldByName("Runtime"); runtimeField.IsValid() { + runtimeValue := runtimeField + if runtimeValue.Kind() == reflect.Ptr { + runtimeValue = runtimeValue.Elem() + } + + // Only proceed if Runtime is a struct + if runtimeValue.Kind() == reflect.Struct { + // Try to get the ContainerID field from Runtime + if containerIDField := runtimeValue.FieldByName("ContainerID"); containerIDField.IsValid() && containerIDField.Kind() == reflect.String { + return containerIDField.String() + } + } + } + + return "" +} diff --git a/pkg/utils/events_test.go b/pkg/utils/events_test.go new file mode 100644 index 000000000..a4f81f12f --- /dev/null +++ b/pkg/utils/events_test.go @@ -0,0 +1,99 @@ +package utils + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +// Test struct that mimics the structure of events with Comm field +type testEventWithComm struct { + Comm string +} + +// Test struct that mimics the structure of events with Runtime field +type testEventWithRuntime struct { + Runtime struct { + ContainerID string + } +} + +func TestGetCommFromEvent(t *testing.T) { + tests := []struct { + name string + event interface{} + expected string + }{ + { + name: "Event with Comm", + event: &testEventWithComm{ + Comm: "test-comm", + }, + expected: "test-comm", + }, + { + name: "nil event", + event: nil, + expected: "", + }, + { + name: "event without Comm field", + event: "string", + expected: "", + }, + { + name: "event with empty Comm field", + event: &testEventWithComm{}, + expected: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := GetCommFromEvent(tt.event) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestGetContainerIDFromEvent(t *testing.T) { + tests := []struct { + name string + event interface{} + expected string + }{ + { + name: "Event with ContainerID", + event: &testEventWithRuntime{ + Runtime: struct { + ContainerID string + }{ + ContainerID: "test-container-id", + }, + }, + expected: "test-container-id", + }, + { + name: "nil event", + event: nil, + expected: "", + }, + { + name: "event without Runtime field", + event: "string", + expected: "", + }, + { + name: "event with Runtime but no ContainerID", + event: &testEventWithRuntime{}, + expected: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := GetContainerIDFromEvent(tt.event) + assert.Equal(t, tt.expected, result) + }) + } +} diff --git a/pkg/watcher/dynamicwatcher/watch.go b/pkg/watcher/dynamicwatcher/watch.go index 45e58336f..59b450c59 100644 --- a/pkg/watcher/dynamicwatcher/watch.go +++ b/pkg/watcher/dynamicwatcher/watch.go @@ -144,6 +144,8 @@ func (wh *WatchHandler) chooseWatcher(res schema.GroupVersionResource, opts meta return wh.k8sClient.GetKubernetesClient().CoreV1().Pods("").Watch(context.Background(), opts) case "runtimerulealertbindings": return wh.k8sClient.GetDynamicClient().Resource(res).Namespace("").Watch(context.Background(), opts) + case "rules": + return wh.k8sClient.GetDynamicClient().Resource(res).Namespace("").Watch(context.Background(), opts) case "seccompprofiles": opts.ResourceVersion = softwarecomposition.ResourceVersionFullSpec return 
wh.storageClient.SeccompProfiles("").Watch(context.Background(), opts) diff --git a/tests/chart/crds/rules.crd.yaml b/tests/chart/crds/rules.crd.yaml new file mode 100644 index 000000000..252573465 --- /dev/null +++ b/tests/chart/crds/rules.crd.yaml @@ -0,0 +1,124 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: rules.kubescape.io +spec: + group: kubescape.io + names: + kind: Rules + listKind: RulesList + plural: rules + singular: rule + scope: Namespaced + versions: + - name: v1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + type: object + properties: + rules: + type: array + items: + type: object + properties: + enabled: + type: boolean + description: "Whether the rule is enabled" + id: + type: string + description: "Unique identifier for the rule" + name: + type: string + description: "Name of the rule" + description: + type: string + description: "Description of the rule" + expressions: + type: object + properties: + message: + type: string + description: "Message expression" + unique_id: + type: string + description: "Unique identifier expression" + rule_expression: + type: array + items: + type: object + properties: + event_type: + type: string + enum: + - "exec" + - "open" + - "capabilities" + - "dns" + - "network" + - "syscall" + - "randomx" + - "symlink" + - "hardlink" + - "ssh" + - "http" + - "ptrace" + - "iouring" + - "fork" + - "exit" + - "procfs" + description: "Type of event this expression handles" + expression: + type: string + description: "The rule expression string" + required: + - event_type + - expression + required: + - message + - unique_id + - rule_expression + profile_dependency: + type: integer + enum: [0, 1, 2] + description: "Profile dependency level (0=Required, 1=Optional, 2=NotRequired)" + severity: + type: integer + description: "Severity level of the rule" + support_policy: + type: boolean + description: "Whether the rule supports rule policy enforcement" + default: false + tags: + type: array + items: + type: string + description: "Tags associated with the rule" + state: + type: object + additionalProperties: true + description: "State information for the rule" + required: + - enabled + - id + - name + - description + - expressions + - profile_dependency + - severity + - support_policy + - tags + required: + - rules + subresources: + status: {} \ No newline at end of file diff --git a/tests/chart/templates/node-agent/clusterrole.yaml b/tests/chart/templates/node-agent/clusterrole.yaml index 80ed6f9cf..03d513755 100644 --- a/tests/chart/templates/node-agent/clusterrole.yaml +++ b/tests/chart/templates/node-agent/clusterrole.yaml @@ -23,3 +23,6 @@ rules: - apiGroups: ["kubescape.io"] resources: ["runtimerulealertbindings"] verbs: ["list", "watch"] +- apiGroups: ["kubescape.io"] + resources: ["rules"] + verbs: ["list", "watch"] diff --git a/tests/chart/templates/node-agent/configmap.yaml b/tests/chart/templates/node-agent/configmap.yaml index 8be1a0a2f..11cccc3ee 100644 --- a/tests/chart/templates/node-agent/configmap.yaml +++ b/tests/chart/templates/node-agent/configmap.yaml @@ -32,6 +32,10 @@ data: "ruleCooldownAfterCount": {{ .Values.nodeAgent.config.ruleCooldown.ruleCooldownAfterCount }}, "ruleCooldownOnProfileFailure": {{ .Values.nodeAgent.config.ruleCooldown.ruleCooldownOnProfileFailure }}, "ruleCooldownMaxSize": {{ .Values.nodeAgent.config.ruleCooldown.ruleCooldownMaxSize }} + }, + 
"celConfigCache": { + "maxSize": {{ .Values.nodeAgent.config.celConfigCache.maxSize }}, + "ttl": "{{ .Values.nodeAgent.config.celConfigCache.ttl }}" } } --- diff --git a/tests/chart/templates/node-agent/default-rule-binding.yaml b/tests/chart/templates/node-agent/default-rule-binding.yaml index 01a237963..26367de97 100644 --- a/tests/chart/templates/node-agent/default-rule-binding.yaml +++ b/tests/chart/templates/node-agent/default-rule-binding.yaml @@ -15,29 +15,26 @@ spec: - "kubeconfig" rules: - ruleName: "Unexpected process launched" - - ruleName: "Unexpected file access" - parameters: - ignoreMounts: true - ignorePrefixes: ["/proc", "/run/secrets/kubernetes.io/serviceaccount", "/var/run/secrets/kubernetes.io/serviceaccount", "/tmp"] - - ruleName: "Unexpected system call" - - ruleName: "Unexpected capability used" - - ruleName: "Unexpected domain request" - - ruleName: "Unexpected Service Account Token Access" - - ruleName: "Kubernetes Client Executed" - - ruleName: "Exec from malicious source" - - ruleName: "Kernel Module Load" - - ruleName: "Exec Binary Not In Base Image" - - ruleName: "Malicious SSH Connection" - - ruleName: "Fileless Execution" - - ruleName: "XMR Crypto Mining Detection" - - ruleName: "Exec from mount" + - ruleName: "Files Access Anomalies in container" + - ruleName: "Syscalls Anomalies in container" + - ruleName: "Linux Capabilities Anomalies in container" + - ruleName: "DNS Anomalies in container" + - ruleName: "Unexpected service account token access" + - ruleName: "Workload uses Kubernetes API unexpectedly" + - ruleName: "Process executed from malicious source" + - ruleName: "Process tries to load a kernel module" + - ruleName: "Drifted process executed" + - ruleName: "Disallowed ssh connection" + - ruleName: "Fileless execution detected" + - ruleName: "Crypto miner launched" + - ruleName: "Process executed from mount" - ruleName: "Crypto Mining Related Port Communication" - ruleName: "Crypto Mining Domain Communication" - ruleName: "Read Environment Variables from procfs" - ruleName: "eBPF Program Load" - - ruleName: "Symlink Created Over Sensitive File" + - ruleName: "Soft link created over sensitive file" - ruleName: "Unexpected Sensitive File Access" - - ruleName: "Hardlink Created Over Sensitive File" + - ruleName: "Hard link created over sensitive file" - ruleName: "Exec to pod" - ruleName: "Port forward" - ruleName: "Unexpected Egress Network Traffic" diff --git a/tests/chart/templates/node-agent/default-rules.yaml b/tests/chart/templates/node-agent/default-rules.yaml new file mode 100644 index 000000000..b098351c3 --- /dev/null +++ b/tests/chart/templates/node-agent/default-rules.yaml @@ -0,0 +1,489 @@ +apiVersion: kubescape.io/v1 +kind: Rules +metadata: + name: kubescape-rules + namespace: kubescape + annotations: + kubescape.io/namespace: kubescape + labels: + app: kubescape +spec: + rules: + - name: "Unexpected process launched" + enabled: true + id: "R0001" + description: "Detects unexpected process launches that are not in the baseline" + expressions: + message: "'Unexpected process launched: ' + exec.comm + ' with PID ' + string(exec.pid)" + unique_id: "exec.comm + '_' + exec.exe_path" + rule_expression: + - event_type: "exec" + expression: "!ap.was_executed(exec.runtime.container_id, parse.get_exec_path(exec.args, exec.comm))" + profile_dependency: 0 + severity: 1 + support_policy: false + tags: + - "anomaly" + - "process" + - "exec" + - "applicationprofile" + - name: "Files Access Anomalies in container" + enabled: true + id: "R0002" + 
description: "Detects unexpected file access that is not in the baseline" + expressions: + message: "'Unexpected file access detected: ' + open.comm + ' with PID ' + string(open.pid) + ' to ' + open.full_path" + unique_id: "open.comm + '_' + open.full_path" + rule_expression: + - event_type: "open" + expression: > + (open.full_path.startsWith('/etc/') || open.full_path.startsWith('/var/log/') || open.full_path.startsWith('/var/run/') || open.full_path.startsWith('/run/') || open.full_path.startsWith('/var/spool/cron/') || open.full_path.startsWith('/var/www/') || open.full_path.startsWith('/var/lib/') || open.full_path.startsWith('/opt/') || open.full_path.startsWith('/usr/local/') || open.full_path.startsWith('/app/') || open.full_path == '/.dockerenv' || open.full_path == '/proc/self/environ') && !(open.full_path.startsWith('/run/secrets/kubernetes.io/serviceaccount') || + + open.full_path.startsWith('/var/run/secrets/kubernetes.io/serviceaccount') || + open.full_path.startsWith('/tmp')) + && !ap.was_path_opened(open.runtime.container_id, open.full_path) + + profile_dependency: 0 + severity: 1 + support_policy: false + tags: + - "anomaly" + - "file" + - "open" + - "applicationprofile" + - name: "Syscalls Anomalies in container" + enabled: true + id: "R0003" + description: "Detects unexpected system calls that are not whitelisted by application profile" + expressions: + message: "'Unexpected system call detected: ' + syscall.syscall_name + ' with PID ' + string(syscall.pid)" + unique_id: "syscall.syscall_name" + rule_expression: + - event_type: "syscall" + expression: "!ap.was_syscall_used(syscall.runtime.container_id, syscall.syscall_name)" + profile_dependency: 0 + severity: 1 + support_policy: false + tags: + - "anomaly" + - "syscall" + - "applicationprofile" + - name: "Linux Capabilities Anomalies in container" + enabled: true + id: "R0004" + description: "Detects unexpected capabilities that are not whitelisted by application profile" + expressions: + message: "'Unexpected capability used: ' + capabilities.cap_name + ' in syscall ' + capabilities.syscall + ' with PID ' + string(capabilities.pid)" + unique_id: "capabilities.comm + '_' + capabilities.cap_name" + rule_expression: + - event_type: "capabilities" + expression: "!ap.was_capability_used(capabilities.runtime.container_id, capabilities.cap_name)" + profile_dependency: 0 + severity: 1 + support_policy: false + tags: + - "anomaly" + - "capabilities" + - "applicationprofile" + - name: "DNS Anomalies in container" + enabled: true + id: "R0005" + description: "Detecting unexpected domain requests that are not whitelisted by application profile." + expressions: + message: "'Unexpected domain communication: ' + dns.dns_name" + unique_id: "dns.comm + '_' + dns.dns_name" + rule_expression: + - event_type: "dns" + expression: "!dns.dns_name.endsWith('.svc.cluster.local.') && !nn.is_domain_in_egress(dns.runtime.container_id, dns.dns_name)" + profile_dependency: 0 + severity: 1 + support_policy: false + tags: + - "dns" + - "anomaly" + - "networkprofile" + - name: "Unexpected service account token access" + enabled: true + id: "R0006" + description: "Detecting unexpected access to service account token." 
+ expressions: + message: "'Unexpected access to service account token: ' + open.full_path + ' with flags: ' + open.flags.join(',')" + unique_id: "open.comm" + rule_expression: + - event_type: "open" + expression: "((open.full_path.startsWith('/run/secrets/kubernetes.io/serviceaccount') && open.full_path.endsWith('/token')) || \n (open.full_path.startsWith('/var/run/secrets/kubernetes.io/serviceaccount') && open.full_path.endsWith('/token')) ||\n (open.full_path.startsWith('/run/secrets/eks.amazonaws.com/serviceaccount') && open.full_path.endsWith('/token')) ||\n (open.full_path.startsWith('/var/run/secrets/eks.amazonaws.com/serviceaccount') && open.full_path.endsWith('/token'))) &&\n!ap.was_path_opened_with_prefix(open.runtime.container_id, '/run/secrets/kubernetes.io/serviceaccount') && !ap.was_path_opened_with_prefix(open.runtime.container_id, '/var/run/secrets/kubernetes.io/serviceaccount') && !ap.was_path_opened_with_prefix(open.runtime.container_id, '/run/secrets/eks.amazonaws.com/serviceaccount') && !ap.was_path_opened_with_prefix(open.runtime.container_id, '/var/run/secrets/eks.amazonaws.com/serviceaccount')\n" + profile_dependency: 1 + severity: 5 + support_policy: false + tags: + - "anomaly" + - "serviceaccount" + - "applicationprofile" + - name: "Workload uses Kubernetes API unexpectedly" + enabled: true + id: "R0007" + description: "Detecting execution of kubernetes client" + expressions: + message: "event_type == 'exec' ? 'Kubernetes client (' + exec.comm + ') was executed with PID ' + string(exec.pid) : 'Network connection to Kubernetes API server from ' + network.runtime.container_id" + unique_id: "event_type == 'exec' ? 'exec_' + exec.comm : 'network_' + network.dst_endpoint.addr" + rule_expression: + - event_type: "exec" + expression: "(exec.comm == 'kubectl' || exec.exe_path.endsWith('/kubectl')) && !ap.was_executed(exec.runtime.container_id, parse.get_exec_path(exec.args, exec.comm))" + - event_type: "network" + expression: "network.pkt_type == 'OUTGOING' && k8s.is_api_server_address(network.dst_endpoint.addr) && !nn.was_address_in_egress(network.runtime.container_id, network.dst_endpoint.addr)" + profile_dependency: 0 + severity: 5 # Medium + support_policy: false + tags: + - "exec" + - "network" + - "anomaly" + - "applicationprofile" + - name: "Read Environment Variables from procfs" + enabled: true + id: "R0008" + description: "Detecting reading environment variables from procfs." + expressions: + message: "'Reading environment variables from procfs: ' + open.full_path + ' by process ' + open.comm" + unique_id: "open.comm" + rule_expression: + - event_type: "open" + expression: > + open.full_path.startsWith('/proc/') && open.full_path.endsWith('/environ') && !ap.was_path_opened_with_suffix(open.runtime.container_id, '/environ') + + profile_dependency: 0 # Required + severity: 5 # Medium + support_policy: false + tags: + - "anomaly" + - "procfs" + - "environment" + - "applicationprofile" + - name: "eBPF Program Load" + enabled: true + id: "R0009" + description: "Detecting eBPF program load." 
+ expressions: + message: "'bpf system call executed in ' + syscall.k8s.container_name" + unique_id: "syscall.comm + '_' + syscall.syscall_name" + rule_expression: + - event_type: "syscall" + expression: "syscall.syscall_name == 'bpf' && !ap.was_syscall_used(syscall.runtime.container_id, syscall.syscall_name)" + profile_dependency: 1 + severity: 5 + support_policy: false + tags: + - "syscall" + - "ebpf" + - "applicationprofile" + - name: "Unexpected Sensitive File Access" + enabled: true + id: "R0010" + description: "Detecting access to sensitive files." + expressions: + message: "'Unexpected sensitive file access: ' + open.full_path + ' by process ' + open.comm" + unique_id: "open.comm + '_' + open.full_path" + rule_expression: + - event_type: "open" + expression: "open.full_path.startsWith('/etc/shadow') && !ap.was_path_opened(open.runtime.container_id, open.full_path)" + profile_dependency: 1 + severity: 5 + support_policy: false + tags: + - "files" + - "anomaly" + - "applicationprofile" + - name: "Unexpected Egress Network Traffic" + enabled: false + id: "R0011" + description: "Detecting unexpected egress network traffic that is not whitelisted by application profile." + expressions: + message: "'Unexpected egress network communication to: ' + network.dst_endpoint.addr + ':' + string(dyn(network.port)) + ' using ' + network.proto + ' from: ' + network.k8s.container_name" + unique_id: "network.dst_endpoint.addr + '_' + string(dyn(network.port)) + '_' + network.proto" + rule_expression: + - event_type: "network" + expression: "network.pkt_type == 'OUTGOING' && !net.is_private_ip(network.dst_endpoint.addr) && !nn.was_address_in_egress(network.runtime.container_id, network.dst_endpoint.addr)" + profile_dependency: 0 + severity: 5 # Medium + support_policy: false + tags: + - "whitelisted" + - "network" + - "anomaly" + - "networkprofile" + - name: "Process executed from malicious source" + enabled: true + id: "R1000" + description: "Detecting exec calls that are from malicious source like: /dev/shm" + expressions: + message: "'Execution from malicious source: ' + exec.exe_path + ' in directory ' + exec.cwd" + unique_id: "exec.comm + '_' + exec.exe_path + '_' + exec.pcomm" + rule_expression: + - event_type: "exec" + expression: > + (exec.exe_path == '/dev/shm' || exec.exe_path.startsWith('/dev/shm/')) || (exec.cwd == '/dev/shm' || exec.cwd.startsWith('/dev/shm/') || (parse.get_exec_path(exec.args, exec.comm).startsWith('/dev/shm/'))) + + profile_dependency: 2 + severity: 8 + support_policy: false + tags: + - "exec" + - "signature" + - "malicious" + - name: "Drifted process executed" + enabled: true + id: "R1001" + description: "Detecting exec calls of binaries that are not included in the base image" + expressions: + message: "'Process (' + exec.comm + ') was executed and is not part of the image'" + unique_id: "exec.comm + '_' + exec.exe_path + '_' + exec.pcomm" + rule_expression: + - event_type: "exec" + expression: > + (exec.upper_layer == true || + + exec.pupper_layer == true) && + !ap.was_executed(exec.runtime.container_id, parse.get_exec_path(exec.args, exec.comm)) + + profile_dependency: 1 + severity: 8 + support_policy: false + tags: + - "exec" + - "malicious" + - "binary" + - "base image" + - "applicationprofile" + - name: "Process tries to load a kernel module" + enabled: true + id: "R1002" + description: "Detecting Kernel Module Load." 
+ expressions: + message: "'Kernel module load syscall (' + syscall.syscall_name + ') was called'" + unique_id: "syscall.syscall_name" + rule_expression: + - event_type: "syscall" + expression: > + syscall.syscall_name == 'init_module' || syscall.syscall_name == 'finit_module' + + profile_dependency: 2 + severity: 10 + support_policy: false + tags: + - "syscall" + - "kernel" + - "module" + - "load" + - name: "Disallowed ssh connection" + enabled: true + id: "R1003" + description: "Detecting ssh connection to disallowed port" + expressions: + message: "'Malicious SSH connection attempt to ' + ssh.dst_ip + ':' + string(dyn(ssh.dst_port))" + unique_id: "ssh.comm + '_' + ssh.dst_ip + '_' + string(dyn(ssh.dst_port))" + rule_expression: + - event_type: "ssh" + expression: "dyn(ssh.src_port) >= 32768 && dyn(ssh.src_port) <= 60999 && !(dyn(ssh.dst_port) in [22, 2022]) && !nn.was_address_in_egress(ssh.runtime.container_id, ssh.dst_ip)" + profile_dependency: 1 + severity: 5 + support_policy: false + tags: + - "ssh" + - "connection" + - "port" + - "malicious" + - "networkprofile" + - name: "Process executed from mount" + enabled: true + id: "R1004" + description: "Detecting exec calls from mounted paths." + expressions: + message: "'Process (' + exec.comm + ') was executed from a mounted path'" + unique_id: "exec.comm + '_' + exec.exe_path" + rule_expression: + - event_type: "exec" + expression: "!ap.was_executed(exec.runtime.container_id, parse.get_exec_path(exec.args, exec.comm)) && k8s.get_container_mount_paths(exec.k8s.namespace, exec.k8s.pod_name, exec.k8s.container_name).exists(mount, exec.exe_path.startsWith(mount) || parse.get_exec_path(exec.args, exec.comm).startsWith(mount))" + profile_dependency: 1 + severity: 5 + support_policy: false + tags: + - "exec" + - "mount" + - "applicationprofile" + - name: "Fileless execution detected" + enabled: true + id: "R1005" + description: "Detecting Fileless Execution" + expressions: + message: "'Fileless execution detected: exec call \"' + exec.comm + '\" is from a malicious source'" + unique_id: "exec.comm + '_' + exec.exe_path + '_' + exec.pcomm" + rule_expression: + - event_type: "exec" + expression: "exec.exe_path.contains('memfd') || exec.exe_path.startsWith('/proc/self/fd') || exec.exe_path.matches('/proc/[0-9]+/fd/[0-9]+')" + profile_dependency: 2 + severity: 8 + support_policy: false + tags: + - "fileless" + - "execution" + - "malicious" + - name: "Process tries to escape container" + enabled: true + id: "R1006" + description: "Detecting Unshare System Call usage, which can be used to escape container." + expressions: + message: "'Unshare system call detected: ' + syscall.syscall_name + ' with PID ' + string(syscall.pid)" + unique_id: "syscall.syscall_name" + rule_expression: + - event_type: "syscall" + expression: "syscall.syscall_name == 'unshare' && !ap.was_syscall_used(syscall.runtime.container_id, syscall.syscall_name)" + profile_dependency: 2 + severity: 5 + support_policy: false + tags: + - "syscall" + - "escape" + - "unshare" + - "anomaly" + - "applicationprofile" + - name: "Crypto miner launched" + enabled: true + id: "R1007" + description: "Detecting XMR Crypto Miners by randomx algorithm usage." 
+ expressions: + message: "'XMR Crypto Miner process: (' + randomx.exe_path + ') executed'" + unique_id: "randomx.exe_path + '_' + randomx.comm" + rule_expression: + - event_type: "randomx" + expression: "true" + profile_dependency: 2 + severity: 10 + support_policy: false + tags: + - "crypto" + - "miners" + - "malicious" + - name: "Crypto Mining Domain Communication" + enabled: true + id: "R1008" + description: "Detecting Crypto miners communication by domain" + expressions: + message: "'Communication with a known crypto mining domain: ' + dns.dns_name" + unique_id: "dns.dns_name + '_' + dns.comm" + rule_expression: + - event_type: "dns" + expression: "dns.dns_name in ['2cryptocalc.com.', '2miners.com.', 'antpool.com.', 'asia1.ethpool.org.', 'bohemianpool.com.', 'botbox.dev.', 'btm.antpool.com.', 'c3pool.com.', 'c4pool.org.', 'ca.minexmr.com.', 'cn.stratum.slushpool.com.', 'dash.antpool.com.', 'data.miningpoolstats.stream.', 'de.minexmr.com.', 'eth-ar.dwarfpool.com.', 'eth-asia.dwarfpool.com.', 'eth-asia1.nanopool.org.', 'eth-au.dwarfpool.com.', 'eth-au1.nanopool.org.', 'eth-br.dwarfpool.com.', 'eth-cn.dwarfpool.com.', 'eth-cn2.dwarfpool.com.', 'eth-eu.dwarfpool.com.', 'eth-eu1.nanopool.org.', 'eth-eu2.nanopool.org.', 'eth-hk.dwarfpool.com.', 'eth-jp1.nanopool.org.', 'eth-ru.dwarfpool.com.', 'eth-ru2.dwarfpool.com.', 'eth-sg.dwarfpool.com.', 'eth-us-east1.nanopool.org.', 'eth-us-west1.nanopool.org.', 'eth-us.dwarfpool.com.', 'eth-us2.dwarfpool.com.', 'eth.antpool.com.', 'eu.stratum.slushpool.com.', 'eu1.ethermine.org.', 'eu1.ethpool.org.', 'fastpool.xyz.', 'fr.minexmr.com.', 'kriptokyng.com.', 'mine.moneropool.com.', 'mine.xmrpool.net.', 'miningmadness.com.', 'monero.cedric-crispin.com.', 'monero.crypto-pool.fr.', 'monero.fairhash.org.', 'monero.hashvault.pro.', 'monero.herominers.com.', 'monerod.org.', 'monerohash.com.', 'moneroocean.stream.', 'monerop.com.', 'multi-pools.com.', 'p2pool.io.', 'pool.kryptex.com.', 'pool.minexmr.com.', 'pool.monero.hashvault.pro.', 'pool.rplant.xyz.', 'pool.supportxmr.com.', 'pool.xmr.pt.', 'prohashing.com.', 'rx.unmineable.com.', 'sg.minexmr.com.', 'sg.stratum.slushpool.com.', 'skypool.org.', 'solo-xmr.2miners.com.', 'ss.antpool.com.', 'stratum-btm.antpool.com.', 'stratum-dash.antpool.com.', 'stratum-eth.antpool.com.', 'stratum-ltc.antpool.com.', 'stratum-xmc.antpool.com.', 'stratum-zec.antpool.com.', 'stratum.antpool.com.', 'supportxmr.com.', 'trustpool.cc.', 'us-east.stratum.slushpool.com.', 'us1.ethermine.org.', 'us1.ethpool.org.', 'us2.ethermine.org.', 'us2.ethpool.org.', 'web.xmrpool.eu.', 'www.domajorpool.com.', 'www.dxpool.com.', 'www.mining-dutch.nl.', 'xmc.antpool.com.', 'xmr-asia1.nanopool.org.', 'xmr-au1.nanopool.org.', 'xmr-eu1.nanopool.org.', 'xmr-eu2.nanopool.org.', 'xmr-jp1.nanopool.org.', 'xmr-us-east1.nanopool.org.', 'xmr-us-west1.nanopool.org.', 'xmr.2miners.com.', 'xmr.crypto-pool.fr.', 'xmr.gntl.uk.', 'xmr.nanopool.org.', 'xmr.pool-pay.com.', 'xmr.pool.minergate.com.', 'xmr.solopool.org.', 'xmr.volt-mine.com.', 'xmr.zeropool.io.', 'zec.antpool.com.', 'zergpool.com.', 'auto.c3pool.org.', 'us.monero.herominers.com.']" + profile_dependency: 2 + severity: 10 + support_policy: false + tags: + - "network" + - "crypto" + - "miners" + - "malicious" + - "dns" + - name: "Crypto Mining Related Port Communication" + enabled: true + id: "R1009" + description: "Detecting Crypto Miners by suspicious port usage." 
+ expressions: + message: "'Detected crypto mining related port communication on port ' + string(dyn(network.port)) + ' to ' + network.dst_endpoint.addr + ' with protocol ' + network.proto" + unique_id: "network.comm + '_' + string(dyn(network.port))" + rule_expression: + - event_type: "network" + expression: "network.proto == 'TCP' && network.pkt_type == 'OUTGOING' && dyn(network.port) in [3333, 45700] && !nn.was_address_in_egress(network.runtime.container_id, network.dst_endpoint.addr)" + profile_dependency: 1 + severity: 3 + support_policy: false + tags: + - "network" + - "crypto" + - "miners" + - "malicious" + - "networkprofile" + - name: "Soft link created over sensitive file" + enabled: true + id: "R1010" + description: "Detects symlink creation over sensitive files" + expressions: + message: "'Symlink created over sensitive file: ' + symlink.old_path + ' -> ' + symlink.new_path" + unique_id: "symlink.comm + '_' + symlink.old_path" + rule_expression: + - event_type: "symlink" + expression: "(symlink.old_path.startsWith('/etc/shadow') || symlink.old_path.startsWith('/etc/sudoers')) && !ap.was_path_opened(symlink.runtime.container_id, symlink.old_path)" + profile_dependency: 1 + severity: 5 + support_policy: true + tags: + - "anomaly" + - "symlink" + - "applicationprofile" + - name: "ld_preload hooks technique detected" + enabled: false + id: "R1011" + description: "Detecting ld_preload hook techniques." + expressions: + message: "event_type == 'exec' ? 'Process (' + exec.comm + ') is using a dynamic linker hook: ' + process.get_ld_hook_var(exec.pid) : 'The dynamic linker configuration file (' + open.full_path + ') was modified by process (' + open.comm + ')'" + unique_id: "event_type == 'exec' ? 'exec_' + exec.comm : 'open_' + open.full_path" + rule_expression: + - event_type: "exec" + expression: "exec.comm != 'java' && exec.k8s.container_name != 'matlab' && process.get_ld_hook_var(exec.pid) != ''" + - event_type: "open" + expression: "open.full_path == '/etc/ld.so.preload' && has(open.flags_raw) && open.flags_raw != 0" + profile_dependency: 1 + severity: 5 + support_policy: true + tags: + - "exec" + - "malicious" + - "applicationprofile" + - name: "Hard link created over sensitive file" + enabled: true + id: "R1012" + description: "Detecting hardlink creation over sensitive files." + expressions: + message: "'Hardlink created over sensitive file: ' + hardlink.old_path + ' - ' + hardlink.new_path" + unique_id: "hardlink.comm + '_' + hardlink.old_path" + rule_expression: + - event_type: "hardlink" + expression: "(hardlink.old_path.startsWith('/etc/shadow') || hardlink.old_path.startsWith('/etc/sudoers')) && !ap.was_path_opened(hardlink.runtime.container_id, hardlink.old_path)" + profile_dependency: 1 + severity: 5 + support_policy: true + tags: + - "files" + - "malicious" + - "applicationprofile" + - name: "Malicious Ptrace Usage" + enabled: true + id: "R1015" + description: "Detecting potentially malicious ptrace usage." + expressions: + message: "'Malicious ptrace usage detected from: ' + ptrace.comm" + unique_id: "ptrace.exe_path + '_' + ptrace.comm" + rule_expression: + - event_type: "ptrace" + expression: "true" + profile_dependency: 2 + severity: 5 + support_policy: false + tags: + - "process" + - "malicious" + - name: "Unexpected io_uring Operation Detected" + enabled: true + id: "R1030" + description: "Detects io_uring operations that were not recorded during the initial observation period, indicating potential unauthorized activity." 
+ expressions: + message: "'Unexpected io_uring operation detected: (opcode=' + string(iouring.opcode) + ') flags=0x' + (has(iouring.flags) ? string(iouring.flags) : '0') + ' in ' + iouring.comm + '.'" + unique_id: "string(iouring.opcode) + '_' + iouring.comm" + rule_expression: + - event_type: "iouring" + expression: "true" + profile_dependency: 0 + severity: 5 + support_policy: true + tags: + - "syscalls" + - "io_uring" + - "applicationprofile" diff --git a/tests/chart/values.yaml b/tests/chart/values.yaml index 4cbaf6183..7cf029c4c 100644 --- a/tests/chart/values.yaml +++ b/tests/chart/values.yaml @@ -71,7 +71,9 @@ nodeAgent: ruleCooldownAfterCount: 10 # for tests ruleCooldownOnProfileFailure: true ruleCooldownMaxSize: 20000 - + celConfigCache: + maxSize: 250000 + ttl: 1s serviceMonitor: enabled: true diff --git a/tests/component_test.go b/tests/component_test.go index cf7d5366f..9b17f7be2 100644 --- a/tests/component_test.go +++ b/tests/component_test.go @@ -18,7 +18,6 @@ import ( "github.com/kubescape/go-logger/helpers" helpersv1 "github.com/kubescape/k8s-interface/instanceidhandler/v1/helpers" "github.com/kubescape/k8s-interface/k8sinterface" - "github.com/kubescape/node-agent/pkg/ruleengine/v1" "github.com/kubescape/node-agent/pkg/utils" "github.com/kubescape/node-agent/tests/testutils" "github.com/kubescape/storage/pkg/apis/softwarecomposition/v1beta1" @@ -98,8 +97,8 @@ func Test_01_BasicAlertTest(t *testing.T) { testutils.AssertContains(t, alerts, "Unexpected process launched", "ls", "server", []bool{true}) testutils.AssertNotContains(t, alerts, "Unexpected process launched", "ls", "nginx", []bool{true}) - testutils.AssertContains(t, alerts, "Unexpected domain request", "curl", "nginx", []bool{true}) - testutils.AssertNotContains(t, alerts, "Unexpected domain request", "wget", "server", []bool{true}) + testutils.AssertContains(t, alerts, "DNS Anomalies in container", "curl", "nginx", []bool{true}) + testutils.AssertNotContains(t, alerts, "DNS Anomalies in container", "wget", "server", []bool{true}) // check network neighborhood nn, _ := wl.GetNetworkNeighborhood() @@ -138,35 +137,35 @@ func Test_02_AllAlertsFromMaliciousApp(t *testing.T) { // Validate that all alerts are signaled expectedAlerts := map[string]bool{ - "Unexpected process launched": false, - "Unexpected file access": false, - "Unexpected system call": false, - "Unexpected capability used": false, - "Kubernetes Client Executed": false, - "Exec from malicious source": false, - "Kernel Module Load": false, - "Exec Binary Not In Base Image": false, - "Exec from mount": false, - "Unexpected Service Account Token Access": false, - "Unexpected domain request": false, - "Crypto Mining Related Port Communication": false, - "Crypto Mining Domain Communication": false, + "Unexpected process launched": false, + "Files Access Anomalies in container": false, + "Syscalls Anomalies in container": false, + "Linux Capabilities Anomalies in container": false, + "Workload uses Kubernetes API unexpectedly": false, + "Process executed from malicious source": false, + "Process tries to load a kernel module": false, + "Drifted process executed": false, + "Process executed from mount": false, + "Unexpected service account token access": false, + "DNS Anomalies in container": false, + "Crypto Mining Related Port Communication": false, + "Crypto Mining Domain Communication": false, } expectedFailOnProfile := map[string][]bool{ - "Unexpected process launched": {true}, - "Unexpected file access": {true}, - "Unexpected system call": {true}, - 
"Unexpected capability used": {true}, - "Kubernetes Client Executed": {true}, - "Exec from malicious source": {false}, - "Kernel Module Load": {false}, - "Exec Binary Not In Base Image": {true}, - "Exec from mount": {true}, - "Unexpected Service Account Token Access": {true}, - "Unexpected domain request": {true}, - "Crypto Mining Related Port Communication": {true}, - "Crypto Mining Domain Communication": {false}, + "Unexpected process launched": {true}, + "Files Access Anomalies in container": {true}, + "Syscalls Anomalies in container": {true}, + "Linux Capabilities Anomalies in container": {true}, + "Workload uses Kubernetes API unexpectedly": {true}, + "Process executed from malicious source": {false}, + "Process tries to load a kernel module": {false}, + "Drifted process executed": {true}, + "Process executed from mount": {true}, + "Unexpected service account token access": {true}, + "DNS Anomalies in container": {true}, + "Crypto Mining Related Port Communication": {true}, + "Crypto Mining Domain Communication": {false}, } for _, alert := range alerts { @@ -863,14 +862,14 @@ func Test_13_MergingNetworkNeighborhoodTest(t *testing.T) { // Record initial alert count initialAlertCount := 0 for _, alert := range initialAlerts { - if ruleName, ok := alert.Labels["rule_name"]; ok && ruleName == "Unexpected domain request" && alert.Labels["container_name"] == "server" { + if ruleName, ok := alert.Labels["rule_name"]; ok && ruleName == "DNS Anomalies in container" && alert.Labels["container_name"] == "server" { initialAlertCount++ } } // Verify initial alerts - testutils.AssertContains(t, initialAlerts, "Unexpected domain request", "wget", "server", []bool{true}) - testutils.AssertContains(t, initialAlerts, "Unexpected domain request", "curl", "nginx", []bool{true}) + testutils.AssertContains(t, initialAlerts, "DNS Anomalies in container", "wget", "server", []bool{true}) + testutils.AssertContains(t, initialAlerts, "DNS Anomalies in container", "curl", "nginx", []bool{true}) // PHASE 3: Apply user-managed network neighborhood t.Log("Applying user-managed network neighborhood...") @@ -963,7 +962,7 @@ func Test_13_MergingNetworkNeighborhoodTest(t *testing.T) { // Count new alerts after merge newAlertCount := 0 for _, alert := range mergedAlerts { - if ruleName, ok := alert.Labels["rule_name"]; ok && ruleName == "Unexpected domain request" && alert.Labels["container_name"] == "server" { + if ruleName, ok := alert.Labels["rule_name"]; ok && ruleName == "DNS Anomalies in container" && alert.Labels["container_name"] == "server" { newAlertCount++ } } @@ -973,7 +972,7 @@ func Test_13_MergingNetworkNeighborhoodTest(t *testing.T) { if newAlertCount > initialAlertCount { t.Logf("Full alert details:") for _, alert := range mergedAlerts { - if ruleName, ok := alert.Labels["rule_name"]; ok && ruleName == "Unexpected domain request" && alert.Labels["container_name"] == "server" { + if ruleName, ok := alert.Labels["rule_name"]; ok && ruleName == "DNS Anomalies in container" && alert.Labels["container_name"] == "server" { t.Logf("Alert: %+v", alert) } } @@ -1012,7 +1011,7 @@ func Test_13_MergingNetworkNeighborhoodTest(t *testing.T) { // Count final alerts finalAlertCount := 0 for _, alert := range finalAlerts { - if ruleName, ok := alert.Labels["rule_name"]; ok && ruleName == "Unexpected domain request" && alert.Labels["container_name"] == "server" { + if ruleName, ok := alert.Labels["rule_name"]; ok && ruleName == "DNS Anomalies in container" && alert.Labels["container_name"] == "server" { finalAlertCount++ 
 		}
 	}
@@ -1022,7 +1021,7 @@ func Test_13_MergingNetworkNeighborhoodTest(t *testing.T) {
 	if finalAlertCount <= initialAlertCount {
 		t.Logf("Full alert details:")
 		for _, alert := range finalAlerts {
-			if ruleName, ok := alert.Labels["rule_name"]; ok && ruleName == "Unexpected domain request" && alert.Labels["container_name"] == "server" {
+			if ruleName, ok := alert.Labels["rule_name"]; ok && ruleName == "DNS Anomalies in container" && alert.Labels["container_name"] == "server" {
 				t.Logf("Alert: %+v", alert)
 			}
 		}
@@ -1034,39 +1033,47 @@ func Test_14_RulePoliciesTest(t *testing.T) {
 
 	ns := testutils.NewRandomNamespace()
 	endpointTraffic, err := testutils.NewTestWorkload(ns.Name, path.Join(utils.CurrentDir(), "resources/endpoint-traffic.yaml"))
-	require.NoError(t, err, "Error creating workload")
+	if err != nil {
+		t.Errorf("Error creating workload: %v", err)
+	}
 
 	err = endpointTraffic.WaitForReady(80)
-	require.NoError(t, err, "Error waiting for workload to be ready")
+	if err != nil {
+		t.Errorf("Error waiting for workload to be ready: %v", err)
+	}
 
 	// Wait for application profile to be ready
-	require.NoError(t, endpointTraffic.WaitForApplicationProfile(80, "ready"))
+	assert.NoError(t, endpointTraffic.WaitForApplicationProfile(80, "ready"))
 
 	time.Sleep(10 * time.Second)
 
 	// Add to rule policy symlink
 	_, _, err = endpointTraffic.ExecIntoPod([]string{"ln", "-s", "/etc/shadow", "/tmp/a"}, "")
-	require.NoError(t, err)
+	assert.NoError(t, err)
 	_, _, err = endpointTraffic.ExecIntoPod([]string{"rm", "/tmp/a"}, "")
-	require.NoError(t, err)
+	assert.NoError(t, err)
 
 	// Not add to rule policy
 	_, _, err = endpointTraffic.ExecIntoPod([]string{"ln", "/bin/sh", "/tmp/a"}, "")
-	require.NoError(t, err)
+	assert.NoError(t, err)
 	_, _, err = endpointTraffic.ExecIntoPod([]string{"rm", "/tmp/a"}, "")
-	require.NoError(t, err)
+	assert.NoError(t, err)
 
 	err = endpointTraffic.WaitForApplicationProfileCompletion(80)
-	require.NoError(t, err, "Error waiting for application profile to be completed")
+	if err != nil {
+		t.Errorf("Error waiting for application profile to be completed: %v", err)
+	}
 
 	applicationProfile, err := endpointTraffic.GetApplicationProfile()
-	require.NoError(t, err, "Error getting application profile")
+	if err != nil {
+		t.Errorf("Error getting application profile: %v", err)
+	}
 
-	symlinkPolicy := applicationProfile.Spec.Containers[0].PolicyByRuleId[ruleengine.R1010ID]
-	require.Equal(t, []string{"ln"}, symlinkPolicy.AllowedProcesses)
+	symlinkPolicy := applicationProfile.Spec.Containers[0].PolicyByRuleId["R1010"]
+	assert.Equal(t, []string{"ln"}, symlinkPolicy.AllowedProcesses)
 
-	hardlinkPolicy := applicationProfile.Spec.Containers[0].PolicyByRuleId[ruleengine.R1012ID]
-	require.Len(t, hardlinkPolicy.AllowedProcesses, 0)
+	hardlinkPolicy := applicationProfile.Spec.Containers[0].PolicyByRuleId["R1012"]
+	assert.Len(t, hardlinkPolicy.AllowedProcesses, 0)
 
 	fmt.Println("After completed....")
 
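The hunk above replaces require.* calls with assert.* and plain if err != nil { t.Errorf(...) } checks. The behavioural difference matters in these long component tests: require calls t.FailNow() and aborts the test at the first failed step, while assert and t.Errorf only mark the test as failed and let the remaining steps run. A minimal, generic illustration of that difference follows (not part of component_test.go):

package example

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestFailFast(t *testing.T) {
	err := errors.New("boom")
	require.NoError(t, err) // calls t.FailNow(): the next line never runs on failure
	t.Log("unreachable when err != nil")
}

func TestFailSoft(t *testing.T) {
	err := errors.New("boom")
	assert.NoError(t, err) // calls t.Errorf(): the test is failed but continues
	if err != nil {
		t.Errorf("step failed: %v", err) // same "soft" failure, without testify
	}
	t.Log("still runs, so later assertions and cleanup execute")
}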
@@ -1076,25 +1083,27 @@ func Test_14_RulePoliciesTest(t *testing.T) {
 	// generate hardlink alert
 	_, _, err = endpointTraffic.ExecIntoPod([]string{"ln", "/etc/shadow", "/tmp/a"}, "")
 	_, _, err = endpointTraffic.ExecIntoPod([]string{"rm", "/tmp/a"}, "")
-	require.NoError(t, err)
+	assert.NoError(t, err)
 
 	// not generate alert
 	_, _, err = endpointTraffic.ExecIntoPod([]string{"ln", "-s", "/etc/shadow", "/tmp/a"}, "")
 	_, _, err = endpointTraffic.ExecIntoPod([]string{"rm", "/tmp/a"}, "")
-	require.NoError(t, err)
+	assert.NoError(t, err)
 
 	// Wait for the alert to be signaled
 	time.Sleep(30 * time.Second)
 
 	alerts, err := testutils.GetAlerts(endpointTraffic.Namespace)
-	require.NoError(t, err, "Error getting alerts")
+	if err != nil {
+		t.Errorf("Error getting alerts: %v", err)
+	}
 
-	testutils.AssertContains(t, alerts, "Hardlink Created Over Sensitive File", "ln", "endpoint-traffic", []bool{true})
-	testutils.AssertNotContains(t, alerts, "Symlink Created Over Sensitive File", "ln", "endpoint-traffic", []bool{true})
+	testutils.AssertContains(t, alerts, "Hard link created over sensitive file", "ln", "endpoint-traffic", []bool{true})
+	testutils.AssertNotContains(t, alerts, "Soft link created over sensitive file", "ln", "endpoint-traffic", []bool{true})
 
 	// Also check for learning mode
-	testutils.AssertContains(t, alerts, "Symlink Created Over Sensitive File", "ln", "endpoint-traffic", []bool{false})
-	testutils.AssertNotContains(t, alerts, "Hardlink Created Over Sensitive File", "ln", "endpoint-traffic", []bool{false})
+	testutils.AssertContains(t, alerts, "Soft link created over sensitive file", "ln", "endpoint-traffic", []bool{false})
+	testutils.AssertNotContains(t, alerts, "Hard link created over sensitive file", "ln", "endpoint-traffic", []bool{false})
 
 }
 
@@ -1357,7 +1366,7 @@ func Test_21_AlertOnPartialThenLearnNetworkTest(t *testing.T) {
 	time.Sleep(15 * time.Second)
 	alerts, err := testutils.GetAlerts(ns.Name)
 	require.NoError(t, err, "Error getting alerts")
-	testutils.AssertContains(t, alerts, "Unexpected domain request", "curl", "nginx", []bool{true})
+	testutils.AssertContains(t, alerts, "DNS Anomalies in container", "curl", "nginx", []bool{true})
 
 	nn, err := wl.GetNetworkNeighborhood()
 	require.NoError(t, err, "Error getting network neighborhood")
@@ -1407,7 +1416,7 @@ func Test_21_AlertOnPartialThenLearnNetworkTest(t *testing.T) {
 	// Should not contain new alert for curl command after learning
 	count := 0
 	for _, alert := range alertsAfter {
-		if alert.Labels["rule_name"] == "Unexpected domain request" && alert.Labels["container_name"] == "nginx" && alert.Labels["process_name"] == "curl" {
+		if alert.Labels["rule_name"] == "DNS Anomalies in container" && alert.Labels["container_name"] == "nginx" && alert.Labels["process_name"] == "curl" {
 			count++
 		}
 	}
@@ -1449,7 +1458,7 @@ func Test_22_AlertOnPartialNetworkProfileTest(t *testing.T) {
 	time.Sleep(15 * time.Second)
 	alerts, err := testutils.GetAlerts(ns.Name)
 	require.NoError(t, err, "Error getting alerts")
-	testutils.AssertContains(t, alerts, "Unexpected domain request", "curl", "nginx", []bool{true})
+	testutils.AssertContains(t, alerts, "DNS Anomalies in container", "curl", "nginx", []bool{true})
 }
 
 func Test_23_RuleCooldownTest(t *testing.T) {