diff --git a/deployments/get/objects.go b/deployments/get/objects.go index ed9d7abe3f..faa80a5df0 100644 --- a/deployments/get/objects.go +++ b/deployments/get/objects.go @@ -612,13 +612,8 @@ func GetKubeArmorControllerClusterRole() *rbacv1.ClusterRole { }, { APIGroups: []string{"apps"}, - Resources: []string{"deployments", "statefulsets", "daemonsets"}, - Verbs: []string{"get", "list", "watch", "update"}, - }, - { - APIGroups: []string{"apps"}, - Resources: []string{"replicasets"}, - Verbs: []string{"get", "list"}, + Resources: []string{"deployments", "statefulsets", "daemonsets", "replicasets"}, + Verbs: []string{"get", "update"}, }, { diff --git a/deployments/helm/KubeArmor/templates/RBAC/roles.yaml b/deployments/helm/KubeArmor/templates/RBAC/roles.yaml index 68d8bbefca..770a64d404 100644 --- a/deployments/helm/KubeArmor/templates/RBAC/roles.yaml +++ b/deployments/helm/KubeArmor/templates/RBAC/roles.yaml @@ -104,10 +104,9 @@ rules: - deployments - statefulsets - daemonsets + - replicasets verbs: - get - - list - - watch - update - apiGroups: - security.kubearmor.com diff --git a/deployments/helm/KubeArmorOperator/templates/clusterrole-rbac.yaml b/deployments/helm/KubeArmorOperator/templates/clusterrole-rbac.yaml index b01dd2d8ca..dda225054c 100644 --- a/deployments/helm/KubeArmorOperator/templates/clusterrole-rbac.yaml +++ b/deployments/helm/KubeArmorOperator/templates/clusterrole-rbac.yaml @@ -161,18 +161,10 @@ rules: - deployments - statefulsets - daemonsets - verbs: - - get - - list - - watch - - update -- apiGroups: - - "apps" - resources: - replicasets verbs: - get - - list + - update - apiGroups: - "" resources: diff --git a/pkg/KubeArmorController/cmd/main.go b/pkg/KubeArmorController/cmd/main.go index 4b3b65eb10..f358360505 100644 --- a/pkg/KubeArmorController/cmd/main.go +++ b/pkg/KubeArmorController/cmd/main.go @@ -170,10 +170,10 @@ func main() { }) setupLog.Info("Adding pod refresher controller") if err = (&controllers.PodRefresherReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Cluster: &cluster, - Corev1: client, + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Cluster: &cluster, + ClientSet: client, }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "Pod") os.Exit(1) diff --git a/pkg/KubeArmorController/common/common.go b/pkg/KubeArmorController/common/common.go index 9755fcdaaa..3562e7030b 100644 --- a/pkg/KubeArmorController/common/common.go +++ b/pkg/KubeArmorController/common/common.go @@ -15,11 +15,10 @@ import ( const k8sVisibility = "process,file,network,capabilities" const appArmorAnnotation = "container.apparmor.security.beta.kubernetes.io/" -const KubeArmorRestartedAnnotation = "kubearmor.io/restarted" -const KubeArmorForceAppArmorAnnotation = "kubearmor.io/force-apparmor" +const KubeArmorRestartedAnnotation = "kubearmor.kubernetes.io/restartedAt" // == Add AppArmor annotations == // -func AppArmorAnnotator(pod *corev1.Pod) { +func AppArmorAnnotator(pod *corev1.Pod, binding *corev1.Binding, isBinding bool) { podAnnotations := map[string]string{} var podOwnerName string @@ -67,146 +66,57 @@ func AppArmorAnnotator(pod *corev1.Pod) { if v == "unconfined" { continue } - pod.Annotations[appArmorAnnotation+k] = "localhost/" + v - } -} -func AddCommonAnnotations(pod *corev1.Pod) { - if pod.Annotations == nil { - pod.Annotations = map[string]string{} - } - - // == Policy == // - - if _, ok := pod.Annotations["kubearmor-policy"]; !ok { - // if no annotation is set enable kubearmor by default - 
pod.Annotations["kubearmor-policy"] = "enabled" - } else if pod.Annotations["kubearmor-policy"] != "enabled" && pod.Annotations["kubearmor-policy"] != "disabled" && pod.Annotations["kubearmor-policy"] != "audited" { - // if kubearmor policy is not set correctly, default it to enabled - pod.Annotations["kubearmor-policy"] = "enabled" - } - // == Exception == // - - // exception: kubernetes app - if pod.Namespace == "kube-system" { - if _, ok := pod.Labels["k8s-app"]; ok { - pod.Annotations["kubearmor-policy"] = "audited" + if isBinding { + binding.Annotations[appArmorAnnotation+k] = "localhost/" + v + } else { + pod.Annotations[appArmorAnnotation+k] = "localhost/" + v } - - if value, ok := pod.Labels["component"]; ok { - if value == "etcd" || value == "kube-apiserver" || value == "kube-controller-manager" || value == "kube-scheduler" || value == "kube-proxy" { - pod.Annotations["kubearmor-policy"] = "audited" - } - } - } - - // exception: cilium-operator - if _, ok := pod.Labels["io.cilium/app"]; ok { - pod.Annotations["kubearmor-policy"] = "audited" - } - - // exception: kubearmor - if _, ok := pod.Labels["kubearmor-app"]; ok { - pod.Annotations["kubearmor-policy"] = "audited" - } - - // == Visibility == // - - if _, ok := pod.Annotations["kubearmor-visibility"]; !ok { - pod.Annotations["kubearmor-visibility"] = k8sVisibility } } -func AddCommonAnnotationsbinding(pod *corev1.Binding) { - if pod.Annotations == nil { - pod.Annotations = map[string]string{} +func AddCommonAnnotations(obj *metav1.ObjectMeta) { + + if obj.Annotations == nil { + obj.Annotations = map[string]string{} } // == Policy == // - if _, ok := pod.Annotations["kubearmor-policy"]; !ok { + if _, ok := obj.Annotations["kubearmor-policy"]; !ok { // if no annotation is set enable kubearmor by default - pod.Annotations["kubearmor-policy"] = "enabled" - } else if pod.Annotations["kubearmor-policy"] != "enabled" && pod.Annotations["kubearmor-policy"] != "disabled" && pod.Annotations["kubearmor-policy"] != "audited" { + obj.Annotations["kubearmor-policy"] = "enabled" + } else if obj.Annotations["kubearmor-policy"] != "enabled" && obj.Annotations["kubearmor-policy"] != "disabled" && obj.Annotations["kubearmor-policy"] != "audited" { // if kubearmor policy is not set correctly, default it to enabled - pod.Annotations["kubearmor-policy"] = "enabled" + obj.Annotations["kubearmor-policy"] = "enabled" } // == Exception == // // exception: kubernetes app - if pod.Namespace == "kube-system" { - if _, ok := pod.Labels["k8s-app"]; ok { - pod.Annotations["kubearmor-policy"] = "audited" + if obj.Namespace == "kube-system" { + if _, ok := obj.Labels["k8s-app"]; ok { + obj.Annotations["kubearmor-policy"] = "audited" } - if value, ok := pod.Labels["component"]; ok { + if value, ok := obj.Labels["component"]; ok { if value == "etcd" || value == "kube-apiserver" || value == "kube-controller-manager" || value == "kube-scheduler" || value == "kube-proxy" { - pod.Annotations["kubearmor-policy"] = "audited" + obj.Annotations["kubearmor-policy"] = "audited" } } } // exception: cilium-operator - if _, ok := pod.Labels["io.cilium/app"]; ok { - pod.Annotations["kubearmor-policy"] = "audited" + if _, ok := obj.Labels["io.cilium/app"]; ok { + obj.Annotations["kubearmor-policy"] = "audited" } // exception: kubearmor - if _, ok := pod.Labels["kubearmor-app"]; ok { - pod.Annotations["kubearmor-policy"] = "audited" + if _, ok := obj.Labels["kubearmor-app"]; ok { + obj.Annotations["kubearmor-policy"] = "audited" } // == Visibility == // - if _, ok := 
pod.Annotations["kubearmor-visibility"]; !ok { - pod.Annotations["kubearmor-visibility"] = k8sVisibility - } -} -func AppArmorAnnotatorBinding(binding *corev1.Binding, pod *corev1.Pod) { - podAnnotations := map[string]string{} - var podOwnerName string - - // podOwnerName is the pod name for static pods and parent object's name - // in other cases - for _, ownerRef := range pod.ObjectMeta.OwnerReferences { - // pod is owned by a replicaset, daemonset etc thus we use the managing - // controller's name - if ownerRef.Controller != nil && *ownerRef.Controller { - podOwnerName = ownerRef.Name - - if ownerRef.Kind == "ReplicaSet" { - // if it belongs to a replicaset, we also remove the pod template hash - podOwnerName = strings.TrimSuffix(podOwnerName, fmt.Sprintf("-%s", pod.ObjectMeta.Labels["pod-template-hash"])) - } - } - } - if podOwnerName == "" { - // pod is standalone, name remains constant - podOwnerName = pod.ObjectMeta.Name - } - // Get existant kubearmor annotations - for k, v := range pod.Annotations { - if strings.HasPrefix(k, appArmorAnnotation) { - if v == "unconfined" { - containerName := strings.Split(k, "/")[1] - podAnnotations[containerName] = v - } else { - containerName := strings.Split(k, "/")[1] - podAnnotations[containerName] = strings.Split(v, "/")[1] - } - } - } - - // Get the remaining containers / not addressed explecitly in the annotation - for _, container := range pod.Spec.Containers { - if _, ok := podAnnotations[container.Name]; !ok { - podAnnotations[container.Name] = "kubearmor-" + pod.Namespace + "-" + podOwnerName + "-" + container.Name - } - } - // Add kubearmor annotations to the pod - for k, v := range podAnnotations { - if v == "unconfined" { - continue - } - binding.Annotations[appArmorAnnotation+k] = "localhost/" + v + if _, ok := obj.Annotations["kubearmor-visibility"]; !ok { + obj.Annotations["kubearmor-visibility"] = k8sVisibility } } @@ -230,13 +140,13 @@ func CheckKubearmorStatus(nodeName string, c *kubernetes.Clientset) (bool, error if err != nil { return false, fmt.Errorf("failed to list pods: %v", err) } - // Filter Pods by nodeName and return their status.phase for _, pod := range pods.Items { - if pod.Spec.NodeName == nodeName && pod.Status.Phase == "Running" { + if pod.Spec.NodeName == nodeName { return true, nil } } + return false, nil } diff --git a/pkg/KubeArmorController/handlers/pod_mutation.go b/pkg/KubeArmorController/handlers/pod_mutation.go index f73c289827..2d356407d4 100644 --- a/pkg/KubeArmorController/handlers/pod_mutation.go +++ b/pkg/KubeArmorController/handlers/pod_mutation.go @@ -46,7 +46,7 @@ func (a *PodAnnotator) Handle(ctx context.Context, req admission.Request) admiss } // == common annotations == // - common.AddCommonAnnotationsbinding(binding) + common.AddCommonAnnotations(&binding.ObjectMeta) pod, err := a.ClientSet.CoreV1().Pods(binding.Namespace).Get(context.TODO(), binding.Name, metav1.GetOptions{}) if err != nil { @@ -64,7 +64,7 @@ func (a *PodAnnotator) Handle(ctx context.Context, req admission.Request) admiss } a.Cluster.ClusterLock.RUnlock() if annotate { - common.AppArmorAnnotatorBinding(binding, pod) + common.AppArmorAnnotator(pod, binding, true) } // == // // send the mutation response @@ -87,7 +87,7 @@ func (a *PodAnnotator) Handle(ctx context.Context, req admission.Request) admiss pod.Namespace = req.Namespace } // == common annotations == // - common.AddCommonAnnotations(pod) + common.AddCommonAnnotations(&pod.ObjectMeta) nodename := pod.Spec.NodeName annotate := false // == Apparmor annotations == // @@ 
-100,7 +100,7 @@ func (a *PodAnnotator) Handle(ctx context.Context, req admission.Request) admiss } a.Cluster.ClusterLock.RUnlock() if annotate { - common.AppArmorAnnotator(pod) + common.AppArmorAnnotator(pod, nil, false) } } diff --git a/pkg/KubeArmorController/informer/nodewatcher.go b/pkg/KubeArmorController/informer/nodewatcher.go index 53e2a38002..9d9c5d0391 100644 --- a/pkg/KubeArmorController/informer/nodewatcher.go +++ b/pkg/KubeArmorController/informer/nodewatcher.go @@ -54,7 +54,7 @@ func NodeWatcher(c *kubernetes.Clientset, cluster *types.Cluster, log logr.Logge } cluster.Nodes[node.Name].KubeArmorActive = kubearmorStatus if !cluster.Nodes[node.Name].KubeArmorActive { - log.Info(fmt.Sprintf("kubearmor not found on node %s", node.Name)) + log.Info(fmt.Sprintf("kubearmor not found on node %s :", node.Name)) } } // re-compute homogeneous status @@ -95,8 +95,10 @@ func NodeWatcher(c *kubernetes.Clientset, cluster *types.Cluster, log logr.Logge delete(cluster.Nodes, node.Name) } } - if enforcer == "apparmor" { + if _, ok := cluster.Nodes[node.Name]; !ok { + cluster.Nodes[node.Name] = &types.NodeInfo{} + } cluster.Nodes[node.Name].Enforcer = enforcer var err error kubearmorStatus, err := common.CheckKubearmorStatus(node.Name, c) diff --git a/pkg/KubeArmorController/internal/controller/podrefresh_controller.go b/pkg/KubeArmorController/internal/controller/podrefresh_controller.go index 57bcc7543c..adbba01e80 100644 --- a/pkg/KubeArmorController/internal/controller/podrefresh_controller.go +++ b/pkg/KubeArmorController/internal/controller/podrefresh_controller.go @@ -22,9 +22,9 @@ import ( type PodRefresherReconciler struct { client.Client - Scheme *runtime.Scheme - Cluster *types.Cluster - Corev1 *kubernetes.Clientset + Scheme *runtime.Scheme + Cluster *types.Cluster + ClientSet *kubernetes.Clientset } type ResourceInfo struct { kind string @@ -66,13 +66,14 @@ func (r *PodRefresherReconciler) Reconcile(ctx context.Context, req ctrl.Request } else { enforcer = "bpf" } - r.Cluster.ClusterLock.RUnlock() if _, ok := pod.Annotations["kubearmor-policy"]; !ok { orginalPod := pod.DeepCopy() - common.AddCommonAnnotations(&pod) + common.AddCommonAnnotations(&pod.ObjectMeta) patch := client.MergeFrom(orginalPod) + fmt.Println("patch:", patch) + fmt.Println("pod", pod) err := r.Patch(ctx, &pod, patch) if err != nil { if !errors.IsNotFound(err) { @@ -89,12 +90,11 @@ func (r *PodRefresherReconciler) Reconcile(ctx context.Context, req ctrl.Request // the pod is managed by a controller (e.g: replicaset) if pod.OwnerReferences != nil && len(pod.OwnerReferences) != 0 { // log.Info("Deleting pod " + pod.Name + "in namespace " + pod.Namespace + " as it is managed") - log.Info(fmt.Sprintf("deployment for pod %s will be restarted", pod.Name)) for _, ref := range pod.OwnerReferences { if *ref.Controller { if ref.Kind == "ReplicaSet" { - replicaSet, err := r.Corev1.AppsV1().ReplicaSets(pod.Namespace).Get(ctx, ref.Name, metav1.GetOptions{}) + replicaSet, err := r.ClientSet.AppsV1().ReplicaSets(pod.Namespace).Get(ctx, ref.Name, metav1.GetOptions{}) if err != nil { log.Error(err, fmt.Sprintf("Failed to get ReplicaSet %s:", ref.Name)) continue @@ -147,7 +147,7 @@ func (r *PodRefresherReconciler) Reconcile(ctx context.Context, req ctrl.Request } } - restartResources(deploymentMap, r.Corev1) + restartResources(deploymentMap, r.ClientSet) // give time for pods to be deleted if poddeleted { @@ -184,7 +184,6 @@ func requireRestart(pod corev1.Pod, enforcer string) bool { } func restartResources(resourcesMap 
map[string]ResourceInfo, corev1 *kubernetes.Clientset) error { - patchannotation := "kubearmor.kubernetes.io/restartedAt" ctx := context.Background() log := log.FromContext(ctx) for name, resInfo := range resourcesMap { @@ -193,13 +192,14 @@ func restartResources(resourcesMap map[string]ResourceInfo, corev1 *kubernetes.C dep, err := corev1.AppsV1().Deployments(resInfo.namespaceName).Get(ctx, name, metav1.GetOptions{}) if err != nil { log.Error(err, fmt.Sprintf("error geting deployment %s in namespace %s", name, resInfo.namespaceName)) + continue } - log.Info("restarting deployment %s in namespace %s", name, resInfo.namespaceName) + log.Info(fmt.Sprintf("restarting deployment %s in namespace %s", name, resInfo.namespaceName)) // Update the Pod template's annotations to trigger a rolling restart if dep.Spec.Template.Annotations == nil { dep.Spec.Template.Annotations = make(map[string]string) } - dep.Spec.Template.Annotations[patchannotation] = time.Now().Format(time.RFC3339) + dep.Spec.Template.Annotations[common.KubeArmorRestartedAnnotation] = time.Now().Format(time.RFC3339) // Patch the Deployment _, err = corev1.AppsV1().Deployments(resInfo.namespaceName).Update(ctx, dep, metav1.UpdateOptions{}) if err != nil { @@ -209,13 +209,14 @@ func restartResources(resourcesMap map[string]ResourceInfo, corev1 *kubernetes.C statefulSet, err := corev1.AppsV1().StatefulSets(resInfo.namespaceName).Get(ctx, name, metav1.GetOptions{}) if err != nil { log.Error(err, fmt.Sprintf("error geting statefulset %s in namespace %s", name, resInfo.namespaceName)) + continue } log.Info("restarting statefulset " + name + " in namespace " + resInfo.namespaceName) // Update the Pod template's annotations to trigger a rolling restart if statefulSet.Spec.Template.Annotations == nil { statefulSet.Spec.Template.Annotations = make(map[string]string) } - statefulSet.Spec.Template.Annotations[patchannotation] = time.Now().Format(time.RFC3339) + statefulSet.Spec.Template.Annotations[common.KubeArmorRestartedAnnotation] = time.Now().Format(time.RFC3339) // Patch the Deployment _, err = corev1.AppsV1().StatefulSets(resInfo.namespaceName).Update(ctx, statefulSet, metav1.UpdateOptions{}) if err != nil { @@ -226,13 +227,14 @@ func restartResources(resourcesMap map[string]ResourceInfo, corev1 *kubernetes.C daemonSet, err := corev1.AppsV1().DaemonSets(resInfo.namespaceName).Get(ctx, name, metav1.GetOptions{}) if err != nil { log.Error(err, fmt.Sprintf("error geting daemonset %s in namespace %s", name, resInfo.namespaceName)) + continue } log.Info("restarting daemonset " + name + " in namespace " + resInfo.namespaceName) // Update the Pod template's annotations to trigger a rolling restart if daemonSet.Spec.Template.Annotations == nil { daemonSet.Spec.Template.Annotations = make(map[string]string) } - daemonSet.Spec.Template.Annotations[patchannotation] = time.Now().Format(time.RFC3339) + daemonSet.Spec.Template.Annotations[common.KubeArmorRestartedAnnotation] = time.Now().Format(time.RFC3339) // Patch the Deployment _, err = corev1.AppsV1().DaemonSets(resInfo.namespaceName).Update(ctx, daemonSet, metav1.UpdateOptions{}) if err != nil { diff --git a/tests/k8s_env/ksp/pre-run-pod.yaml b/tests/k8s_env/ksp/pre-run-pod.yaml index 4177c817c0..d143f95bc3 100644 --- a/tests/k8s_env/ksp/pre-run-pod.yaml +++ b/tests/k8s_env/ksp/pre-run-pod.yaml @@ -4,9 +4,9 @@ metadata: name: nginx --- apiVersion: apps/v1 -kind: ReplicaSet +kind: Deployment metadata: - name: nginx-replicaset + name: nginx-deployment namespace: nginx spec: replicas: 3 @@ -22,6 
+22,7 @@ spec: containers: - name: my-container image: nginx + --- apiVersion: apps/v1 kind: StatefulSet
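
Note (not part of the diff): the rolling-restart path in podrefresh_controller.go reduces to re-stamping the workload's pod template with the relocated KubeArmorRestartedAnnotation constant and calling Update, which is consistent with the ClusterRole changes that narrow the apps-group verbs to get and update. Below is a minimal sketch of that pattern for a single Deployment, assuming a plain client-go Clientset; the helper name restartDeployment and the package name are hypothetical, while the annotation key and API calls mirror the ones in the diff.

```go
// Package example: illustrative sketch only, not code from this PR.
package example

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// Matches the constant the PR moves into pkg/KubeArmorController/common/common.go.
const KubeArmorRestartedAnnotation = "kubearmor.kubernetes.io/restartedAt"

// restartDeployment (hypothetical helper) stamps the pod template with a fresh
// timestamp so the Deployment controller performs a rolling restart, the same
// effect as `kubectl rollout restart deployment/<name>`. Only get and update
// on the apps group are needed, matching the trimmed RBAC rules.
func restartDeployment(ctx context.Context, cs *kubernetes.Clientset, namespace, name string) error {
	dep, err := cs.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return fmt.Errorf("getting deployment %s/%s: %w", namespace, name, err)
	}
	if dep.Spec.Template.Annotations == nil {
		dep.Spec.Template.Annotations = make(map[string]string)
	}
	dep.Spec.Template.Annotations[KubeArmorRestartedAnnotation] = time.Now().Format(time.RFC3339)
	_, err = cs.AppsV1().Deployments(namespace).Update(ctx, dep, metav1.UpdateOptions{})
	return err
}
```

The same Get / annotate-template / Update sequence is what restartResources applies to StatefulSets and DaemonSets as well, keyed by the owning controller discovered through the pod's OwnerReferences.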