diff --git a/.github/workflows/ci-test-ginkgo.yml b/.github/workflows/ci-test-ginkgo.yml index 9fbb96ef5c..c3648e0fc9 100644 --- a/.github/workflows/ci-test-ginkgo.yml +++ b/.github/workflows/ci-test-ginkgo.yml @@ -133,7 +133,7 @@ jobs: fi docker system prune -a -f docker buildx prune -a -f - helm upgrade --install kubearmor-operator ./deployments/helm/KubeArmorOperator -n kubearmor --create-namespace --set kubearmorOperator.image.tag=latest + helm upgrade --install kubearmor-operator ./deployments/helm/KubeArmorOperator -n kubearmor --create-namespace --set kubearmorOperator.image.tag=latest --set kubearmorOperator.annotateExisting=true kubectl wait --for=condition=ready --timeout=5m -n kubearmor pod -l kubearmor-app=kubearmor-operator kubectl get pods -A if [[ ${{ steps.filter.outputs.controller }} == 'true' ]]; then diff --git a/deployments/controller/kubearmor-controller-mutating-webhook-config.yaml b/deployments/controller/kubearmor-controller-mutating-webhook-config.yaml index 6e4d611fd8..7c6d1dbf7b 100644 --- a/deployments/controller/kubearmor-controller-mutating-webhook-config.yaml +++ b/deployments/controller/kubearmor-controller-mutating-webhook-config.yaml @@ -47,6 +47,7 @@ webhooks: - UPDATE resources: - pods + - pods/binding sideEffects: NoneOnDryRun objectSelector: matchExpressions: diff --git a/deployments/get/objects.go b/deployments/get/objects.go index 68f61e6754..b0068f1381 100644 --- a/deployments/get/objects.go +++ b/deployments/get/objects.go @@ -526,6 +526,7 @@ func GetKubeArmorControllerDeployment(namespace string) *appsv1.Deployment { Args: []string{ "--leader-elect", "--health-probe-bind-address=:8081", + "--annotateExisting=false", }, Command: []string{"/manager"}, Ports: []corev1.ContainerPort{ @@ -769,7 +770,7 @@ func GetKubeArmorControllerMutationAdmissionConfiguration(namespace string, caCe Rule: admissionregistrationv1.Rule{ APIGroups: []string{""}, APIVersions: []string{"v1"}, - Resources: []string{"pods"}, + Resources: []string{"pods", "pods/binding"}, }, Operations: []admissionregistrationv1.OperationType{ admissionregistrationv1.Create, diff --git a/deployments/helm/KubeArmor/templates/RBAC/roles.yaml b/deployments/helm/KubeArmor/templates/RBAC/roles.yaml index 2719348f1d..770a64d404 100644 --- a/deployments/helm/KubeArmor/templates/RBAC/roles.yaml +++ b/deployments/helm/KubeArmor/templates/RBAC/roles.yaml @@ -97,7 +97,17 @@ rules: verbs: - get - list - - watch + - watch +- apiGroups: + - "apps" + resources: + - deployments + - statefulsets + - daemonsets + - replicasets + verbs: + - get + - update - apiGroups: - security.kubearmor.com resources: diff --git a/deployments/helm/KubeArmor/templates/deployment.yaml b/deployments/helm/KubeArmor/templates/deployment.yaml index 3f4901cf16..3109672d68 100644 --- a/deployments/helm/KubeArmor/templates/deployment.yaml +++ b/deployments/helm/KubeArmor/templates/deployment.yaml @@ -82,6 +82,7 @@ spec: - args: - --health-probe-bind-address=:8081 - --leader-elect + - --annotateExisting=false command: - /manager image: {{printf "%s:%s" .Values.kubearmorController.image.repository .Values.kubearmorController.image.tag}} diff --git a/deployments/helm/KubeArmor/templates/secrets.yaml b/deployments/helm/KubeArmor/templates/secrets.yaml index a580370d2c..a0b15f3b42 100644 --- a/deployments/helm/KubeArmor/templates/secrets.yaml +++ b/deployments/helm/KubeArmor/templates/secrets.yaml @@ -43,6 +43,7 @@ webhooks: - CREATE - UPDATE resources: - - pods + - pods + - pods/binding scope: '*' sideEffects: NoneOnDryRun diff --git
a/deployments/helm/KubeArmorOperator/templates/NOTES.txt b/deployments/helm/KubeArmorOperator/templates/NOTES.txt new file mode 100644 index 0000000000..d50f877c73 --- /dev/null +++ b/deployments/helm/KubeArmorOperator/templates/NOTES.txt @@ -0,0 +1,12 @@ +{{- if not .Values.kubearmorOperator.annotateExisting }} +⚠️ WARNING: Pre-existing pods will not be annotated. Policy enforcement for pre-existing pods on AppArmor nodes will not work. + • To check the enforcer present on each node, use: + ➤ kubectl get nodes -o jsonpath='{range .items[*]}{.metadata.name} {.metadata.labels.kubearmor\.io/enforcer}{"\n"}{end}' + • To annotate existing pods, use: + ➤ helm upgrade --install {{ .Values.kubearmorOperator.name }} kubearmor/kubearmor-operator -n kubearmor --create-namespace --set kubearmorOperator.annotateExisting=true + Our controller will automatically rollout-restart deployments during the Helm upgrade so that the admission controller can add the annotations. + • Alternatively, if you prefer manual control, you can restart your deployments yourself: + ➤ kubectl rollout restart deployment -n <namespace> +{{- end }} +ℹ️ Your release is named {{ .Release.Name }}. +💙 Thank you for installing KubeArmor. diff --git a/deployments/helm/KubeArmorOperator/templates/clusterrole-rbac.yaml b/deployments/helm/KubeArmorOperator/templates/clusterrole-rbac.yaml index f4eea29378..f453eddd2f 100644 --- a/deployments/helm/KubeArmorOperator/templates/clusterrole-rbac.yaml +++ b/deployments/helm/KubeArmorOperator/templates/clusterrole-rbac.yaml @@ -28,6 +28,7 @@ rules: verbs: - create - get + - update - apiGroups: - operator.kubearmor.com resources: @@ -155,6 +156,18 @@ rules: - list - watch - update +{{- if .Values.kubearmorOperator.annotateExisting }} +- apiGroups: + - "apps" + resources: + - deployments + - statefulsets + - daemonsets + - replicasets + verbs: + - get + - update +{{- end }} - apiGroups: - "" resources: diff --git a/deployments/helm/KubeArmorOperator/templates/deployment.yaml b/deployments/helm/KubeArmorOperator/templates/deployment.yaml index 6624bb36a2..59b59f9795 100644 --- a/deployments/helm/KubeArmorOperator/templates/deployment.yaml +++ b/deployments/helm/KubeArmorOperator/templates/deployment.yaml @@ -46,6 +46,8 @@ spec: image: {{ include "operatorImage" . }} imagePullPolicy: {{ .Values.kubearmorOperator.imagePullPolicy }} args: + - --annotateExisting={{ .Values.kubearmorOperator.annotateExisting }} + - --annotateResource={{ .Values.kubearmorOperator.annotateResource }} {{- if .Values.kubearmorOperator.args -}} {{- toYaml .Values.kubearmorOperator.args | trim | nindent 8 }} {{- end }} diff --git a/deployments/helm/KubeArmorOperator/values.yaml b/deployments/helm/KubeArmorOperator/values.yaml index fc5fe53e91..66aa8afe89 100644 --- a/deployments/helm/KubeArmorOperator/values.yaml +++ b/deployments/helm/KubeArmorOperator/values.yaml @@ -29,6 +29,7 @@ oci_meta: # in case if image pinning is disabled kubearmorOperator: annotateResource: false + annotateExisting: false name: kubearmor-operator image: repository: kubearmor/kubearmor-operator diff --git a/pkg/KubeArmorController/cmd/main.go b/pkg/KubeArmorController/cmd/main.go index a8ec814895..8117a186a6 100644 --- a/pkg/KubeArmorController/cmd/main.go +++ b/pkg/KubeArmorController/cmd/main.go @@ -49,6 +49,7 @@ func main() { var probeAddr string var secureMetrics bool var enableHTTP2 bool + var annotateExisting bool var tlsOpts []func(*tls.Config) flag.StringVar(&metricsAddr, "metrics-bind-address", "0", "The address the metrics endpoint binds to. 
"+ "Use :8443 for HTTPS or :8080 for HTTP, or leave as 0 to disable the metrics service.") @@ -60,6 +61,8 @@ func main() { "If set, the metrics endpoint is served securely via HTTPS. Use --metrics-secure=false to use HTTP instead.") flag.BoolVar(&enableHTTP2, "enable-http2", false, "If set, HTTP/2 will be enabled for the metrics and webhook servers") + flag.BoolVar(&annotateExisting, "annotateExisting", false, + "If 'true', controller will restart and annotate existing resources with required annotations") opts := zap.Options{ Development: true, } @@ -157,24 +160,24 @@ func main() { cluster := informer.InitCluster() setupLog.Info("Starting node watcher") go informer.NodeWatcher(client, &cluster, ctrl.Log.WithName("informer").WithName("NodeWatcher")) - setupLog.Info("Starting pod watcher") - go informer.PodWatcher(client, &cluster, ctrl.Log.WithName("informer").WithName("PodWatcher")) setupLog.Info("Adding mutation webhook") mgr.GetWebhookServer().Register("/mutate-pods", &webhook.Admission{ Handler: &handlers.PodAnnotator{ - Client: mgr.GetClient(), - Logger: setupLog, - Decoder: admission.NewDecoder(mgr.GetScheme()), - Cluster: &cluster, + Client: mgr.GetClient(), + Logger: setupLog, + Decoder: admission.NewDecoder(mgr.GetScheme()), + Cluster: &cluster, + ClientSet: client, }, }) - setupLog.Info("Adding pod refresher controller") if err = (&controllers.PodRefresherReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Cluster: &cluster, + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Cluster: &cluster, + ClientSet: client, + AnnotateExisting: annotateExisting, }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "Pod") os.Exit(1) diff --git a/pkg/KubeArmorController/common/common.go b/pkg/KubeArmorController/common/common.go index ff0e4c13b4..3562e7030b 100644 --- a/pkg/KubeArmorController/common/common.go +++ b/pkg/KubeArmorController/common/common.go @@ -4,19 +4,21 @@ package common import ( + "context" "fmt" "strings" corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" ) const k8sVisibility = "process,file,network,capabilities" const appArmorAnnotation = "container.apparmor.security.beta.kubernetes.io/" -const KubeArmorRestartedAnnotation = "kubearmor.io/restarted" -const KubeArmorForceAppArmorAnnotation = "kubearmor.io/force-apparmor" +const KubeArmorRestartedAnnotation = "kubearmor.kubernetes.io/restartedAt" // == Add AppArmor annotations == // -func AppArmorAnnotator(pod *corev1.Pod) { +func AppArmorAnnotator(pod *corev1.Pod, binding *corev1.Binding, isBinding bool) { podAnnotations := map[string]string{} var podOwnerName string @@ -64,52 +66,57 @@ func AppArmorAnnotator(pod *corev1.Pod) { if v == "unconfined" { continue } - pod.Annotations[appArmorAnnotation+k] = "localhost/" + v + if isBinding { + binding.Annotations[appArmorAnnotation+k] = "localhost/" + v + } else { + pod.Annotations[appArmorAnnotation+k] = "localhost/" + v + } } } -func AddCommonAnnotations(pod *corev1.Pod) { - if pod.Annotations == nil { - pod.Annotations = map[string]string{} +func AddCommonAnnotations(obj *metav1.ObjectMeta) { + + if obj.Annotations == nil { + obj.Annotations = map[string]string{} } // == Policy == // - if _, ok := pod.Annotations["kubearmor-policy"]; !ok { + if _, ok := obj.Annotations["kubearmor-policy"]; !ok { // if no annotation is set enable kubearmor by default - pod.Annotations["kubearmor-policy"] = "enabled" - } else if pod.Annotations["kubearmor-policy"] != 
"enabled" && pod.Annotations["kubearmor-policy"] != "disabled" && pod.Annotations["kubearmor-policy"] != "audited" { + obj.Annotations["kubearmor-policy"] = "enabled" + } else if obj.Annotations["kubearmor-policy"] != "enabled" && obj.Annotations["kubearmor-policy"] != "disabled" && obj.Annotations["kubearmor-policy"] != "audited" { // if kubearmor policy is not set correctly, default it to enabled - pod.Annotations["kubearmor-policy"] = "enabled" + obj.Annotations["kubearmor-policy"] = "enabled" } // == Exception == // // exception: kubernetes app - if pod.Namespace == "kube-system" { - if _, ok := pod.Labels["k8s-app"]; ok { - pod.Annotations["kubearmor-policy"] = "audited" + if obj.Namespace == "kube-system" { + if _, ok := obj.Labels["k8s-app"]; ok { + obj.Annotations["kubearmor-policy"] = "audited" } - if value, ok := pod.Labels["component"]; ok { + if value, ok := obj.Labels["component"]; ok { if value == "etcd" || value == "kube-apiserver" || value == "kube-controller-manager" || value == "kube-scheduler" || value == "kube-proxy" { - pod.Annotations["kubearmor-policy"] = "audited" + obj.Annotations["kubearmor-policy"] = "audited" } } } // exception: cilium-operator - if _, ok := pod.Labels["io.cilium/app"]; ok { - pod.Annotations["kubearmor-policy"] = "audited" + if _, ok := obj.Labels["io.cilium/app"]; ok { + obj.Annotations["kubearmor-policy"] = "audited" } // exception: kubearmor - if _, ok := pod.Labels["kubearmor-app"]; ok { - pod.Annotations["kubearmor-policy"] = "audited" + if _, ok := obj.Labels["kubearmor-app"]; ok { + obj.Annotations["kubearmor-policy"] = "audited" } // == Visibility == // - if _, ok := pod.Annotations["kubearmor-visibility"]; !ok { - pod.Annotations["kubearmor-visibility"] = k8sVisibility + if _, ok := obj.Annotations["kubearmor-visibility"]; !ok { + obj.Annotations["kubearmor-visibility"] = k8sVisibility } } @@ -125,3 +132,64 @@ func RemoveApparmorAnnotation(pod *corev1.Pod) { delete(pod.Annotations, key) } } + +func CheckKubearmorStatus(nodeName string, c *kubernetes.Clientset) (bool, error) { + pods, err := c.CoreV1().Pods("kubearmor").List(context.TODO(), metav1.ListOptions{ + LabelSelector: "kubearmor-app=kubearmor", + }) + if err != nil { + return false, fmt.Errorf("failed to list pods: %v", err) + } + // Filter Pods by nodeName and return their status.phase + for _, pod := range pods.Items { + if pod.Spec.NodeName == nodeName { + return true, nil + } + } + + return false, nil + +} +func hasApparmorAnnotation(annotations map[string]string) bool { + for key := range annotations { + if strings.HasPrefix(key, "container.apparmor.security.beta.kubernetes.io/") { + return true + } + } + return false +} + +func HandleAppArmor(annotations map[string]string) bool { + return !hasApparmorAnnotation(annotations) +} + +func HandleBPF(annotations map[string]string) bool { + return hasApparmorAnnotation(annotations) +} + +func IsAppArmorExempt(labels map[string]string, namespace string) bool { + + // exception: kubernetes app + if namespace == "kube-system" { + if _, ok := labels["k8s-app"]; ok { + return true + } + + if value, ok := labels["component"]; ok { + if value == "etcd" || value == "kube-apiserver" || value == "kube-controller-manager" || value == "kube-scheduler" || value == "kube-proxy" { + return true + } + } + } + + // exception: cilium-operator + if _, ok := labels["io.cilium/app"]; ok { + return true + } + + // exception: kubearmor + if _, ok := labels["kubearmor-app"]; ok { + return true + } + return false +} diff --git 
a/pkg/KubeArmorController/config/webhook/manifests.yaml b/pkg/KubeArmorController/config/webhook/manifests.yaml index 537d93cd04..4eb8863050 100644 --- a/pkg/KubeArmorController/config/webhook/manifests.yaml +++ b/pkg/KubeArmorController/config/webhook/manifests.yaml @@ -23,4 +23,5 @@ webhooks: - UPDATE resources: - pods + - pods/binding sideEffects: NoneOnDryRun diff --git a/pkg/KubeArmorController/handlers/pod_mutation.go b/pkg/KubeArmorController/handlers/pod_mutation.go index 74d67f6b25..2d356407d4 100644 --- a/pkg/KubeArmorController/handlers/pod_mutation.go +++ b/pkg/KubeArmorController/handlers/pod_mutation.go @@ -12,45 +12,98 @@ import ( "github.com/kubearmor/KubeArmor/pkg/KubeArmorController/common" "github.com/kubearmor/KubeArmor/pkg/KubeArmorController/types" corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/kubernetes" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // PodAnnotator Structure type PodAnnotator struct { - Client client.Client - Decoder admission.Decoder - Logger logr.Logger - Cluster *types.Cluster + Client client.Client + Decoder admission.Decoder + Logger logr.Logger + Cluster *types.Cluster + ClientSet *kubernetes.Clientset } -// +kubebuilder:webhook:path=/mutate-pods,mutating=true,failurePolicy=Ignore,groups="",resources=pods,verbs=create;update,versions=v1,name=annotation.kubearmor.com,admissionReviewVersions=v1,sideEffects=NoneOnDryRun +// +kubebuilder:webhook:path=/mutate-pods,mutating=true,failurePolicy=Ignore,groups="",resources=pods;pods/binding,verbs=create;update,versions=v1,name=annotation.kubearmor.com,admissionReviewVersions=v1,sideEffects=NoneOnDryRun // Handle Pod Annotation func (a *PodAnnotator) Handle(ctx context.Context, req admission.Request) admission.Response { - pod := &corev1.Pod{} - if err := a.Decoder.Decode(req, pod); err != nil { - return admission.Errored(http.StatusBadRequest, err) - } + // if it is pod/binding event + if req.Kind.Kind == "Binding" { + binding := &corev1.Binding{} + if err := a.Decoder.Decode(req, binding); err != nil { + return admission.Errored(http.StatusBadRequest, err) + } - // Decode will omit sometimes the namespace value for some reason copying it manually - if pod.Namespace == "" { - pod.Namespace = req.Namespace - } + // Decode will omit sometimes the namespace value for some reason copying it manually + if binding.Namespace == "" { + binding.Namespace = req.Namespace + } - // == common annotations == // - common.AddCommonAnnotations(pod) + // == common annotations == // + common.AddCommonAnnotations(&binding.ObjectMeta) - // == Apparmor annotations == // - a.Cluster.ClusterLock.RLock() - homogenousApparmor := a.Cluster.HomogenousApparmor - a.Cluster.ClusterLock.RUnlock() + pod, err := a.ClientSet.CoreV1().Pods(binding.Namespace).Get(context.TODO(), binding.Name, metav1.GetOptions{}) + if err != nil { + a.Logger.Error(err, "failed to get pod info") + } + nodename := binding.Target.Name + annotate := false + // == Apparmor annotations == // + a.Cluster.ClusterLock.RLock() + // homogenousApparmor := a.Cluster.HomogenousApparmor + if _, exist := a.Cluster.Nodes[nodename]; exist { + if a.Cluster.Nodes[nodename].KubeArmorActive { + annotate = true + } + } + a.Cluster.ClusterLock.RUnlock() + if annotate { + common.AppArmorAnnotator(pod, binding, true) + } + // == // + // send the mutation response + marshaledPod, err := json.Marshal(binding) + if err != nil { + return 
admission.Errored(http.StatusInternalServerError, err) + } + return admission.PatchResponseFromRaw(req.Object.Raw, marshaledPod) - if homogenousApparmor { - common.AppArmorAnnotator(pod) } + // If it is pod CreateEvent + pod := &corev1.Pod{} + if err := a.Decoder.Decode(req, pod); err != nil { + return admission.Errored(http.StatusBadRequest, err) + } + if pod.Spec.NodeName != "" { + // Decode will omit sometimes the namespace value for some reason copying it manually + if pod.Namespace == "" { + pod.Namespace = req.Namespace + } + // == common annotations == // + common.AddCommonAnnotations(&pod.ObjectMeta) + nodename := pod.Spec.NodeName + annotate := false + // == Apparmor annotations == // + a.Cluster.ClusterLock.RLock() + // homogenousApparmor := a.Cluster.HomogenousApparmor + if _, exist := a.Cluster.Nodes[nodename]; exist { + if a.Cluster.Nodes[nodename].KubeArmorActive { + annotate = true + } + } + a.Cluster.ClusterLock.RUnlock() + if annotate { + common.AppArmorAnnotator(pod, nil, false) + } + + } // == // // send the mutation response marshaledPod, err := json.Marshal(pod) @@ -58,4 +111,5 @@ func (a *PodAnnotator) Handle(ctx context.Context, req admission.Request) admiss return admission.Errored(http.StatusInternalServerError, err) } return admission.PatchResponseFromRaw(req.Object.Raw, marshaledPod) + } diff --git a/pkg/KubeArmorController/informer/multienforcer_controller.go b/pkg/KubeArmorController/informer/multienforcer_controller.go deleted file mode 100644 index b9171b51e3..0000000000 --- a/pkg/KubeArmorController/informer/multienforcer_controller.go +++ /dev/null @@ -1,187 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright 2022 Authors of KubeArmor - -package informer - -import ( - "context" - "fmt" - "strings" - - "github.com/go-logr/logr" - "github.com/kubearmor/KubeArmor/pkg/KubeArmorController/common" - "github.com/kubearmor/KubeArmor/pkg/KubeArmorController/types" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/informers" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/cache" -) - -func hasApparmorAnnotation(annotations map[string]string) bool { - for key := range annotations { - if strings.HasPrefix(key, "container.apparmor.security.beta.kubernetes.io/") { - return true - } - } - return false -} - -func restartPod(c *kubernetes.Clientset, pod *corev1.Pod, apparmor bool, log *logr.Logger) { - name := pod.Name - pod.ResourceVersion = "" - pod.UID = "" - if pod.DeletionTimestamp != nil { - // pod is being deleted - return - } - if pod.Annotations == nil { - pod.Annotations = make(map[string]string) - } - - if pod.OwnerReferences != nil && len(pod.OwnerReferences) != 0 { - - pod.Name = "" - log.Info(fmt.Sprintf("Restarting pod %s", name)) - err := c.CoreV1().Pods(pod.Namespace).Delete(context.Background(), name, metav1.DeleteOptions{}) - if err != nil { - log.Info(fmt.Sprintf("Error while deleting pod %s, error=%s", name, err.Error())) - return - } - - } else { - // Delete static pods - log.Info(fmt.Sprintf("Restarting static pod %s", name)) - err := c.CoreV1().Pods(pod.Namespace).Delete(context.Background(), name, metav1.DeleteOptions{GracePeriodSeconds: new(int64)}) - if err != nil { - log.Info(fmt.Sprintf("Error while deleting static pod %s, error=%s", name, err.Error())) - return - } - - } - /* - annotating pods with apparmor annotations beforehand this is - done due to annotating with mutating webhook can cause a endless loop - */ - if apparmor { - 
common.AppArmorAnnotator(pod) - } - _, err := c.CoreV1().Pods(pod.Namespace).Create(context.Background(), pod, metav1.CreateOptions{}) - if err != nil { - log.Info(fmt.Sprintf("Error while restarting pod %s, error=%s", name, err.Error())) - return - } - - log.Info(fmt.Sprintf("Pod %s has been restarted", name)) -} - -func HandleAppArmor(annotations map[string]string) bool { - return !hasApparmorAnnotation(annotations) -} - -func HandleBPF(annotations map[string]string) bool { - return hasApparmorAnnotation(annotations) -} - -func IsAppArmorExempt(labels map[string]string, namespace string) bool { - - // exception: kubernetes app - if namespace == "kube-system" { - if _, ok := labels["k8s-app"]; ok { - return true - } - - if value, ok := labels["component"]; ok { - if value == "etcd" || value == "kube-apiserver" || value == "kube-controller-manager" || value == "kube-scheduler" || value == "kube-proxy" { - return true - } - } - } - - // exception: cilium-operator - if _, ok := labels["io.cilium/app"]; ok { - return true - } - - // exception: kubearmor - if _, ok := labels["kubearmor-app"]; ok { - return true - } - return false -} - -func handlePod(c *kubernetes.Clientset, pod *corev1.Pod, enforcer string, log *logr.Logger) { - if pod.DeletionTimestamp != nil { - // pod is being deleted - return - } - switch enforcer { - case "apparmor": - - if HandleAppArmor(pod.Annotations) && !IsAppArmorExempt(pod.Labels, pod.Namespace) { - restartPod(c, pod, true, log) - } - return - case "bpf": - if HandleBPF(pod.Annotations) { - - common.RemoveApparmorAnnotation(pod) - if !IsAppArmorExempt(pod.Labels, pod.Namespace) { - restartPod(c, pod, false, log) - } - } - default: - log.Info(fmt.Sprintf("Leaving pod %s as it is, could not determine the enforcer", pod.Name)) - } -} - -func PodWatcher(c *kubernetes.Clientset, cluster *types.Cluster, log logr.Logger) { - log.Info("Starting pod watcher") - - fact := informers.NewSharedInformerFactory(c, 0) - inf := fact.Core().V1().Pods().Informer() - - inf.AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { - cluster.ClusterLock.RLock() - defer cluster.ClusterLock.RUnlock() - if cluster.HomogeneousStatus { - return - } - if pod, ok := obj.(*corev1.Pod); ok { - if pod.Spec.NodeName != "" { - nodeEnforcer := "" - if _, ok := cluster.Nodes[pod.Spec.NodeName]; ok { - nodeEnforcer = "apparmor" - } else { - nodeEnforcer = "bpf" - } - log.Info(fmt.Sprintf("New pod was added, name=%s enforcer=%s", pod.Name, nodeEnforcer)) - handlePod(c, pod, nodeEnforcer, &log) - } - } - }, - UpdateFunc: func(oldObj, newObj interface{}) { - cluster.ClusterLock.RLock() - defer cluster.ClusterLock.RUnlock() - if cluster.HomogeneousStatus { - return - } - if pod, ok := newObj.(*corev1.Pod); ok { - if pod.Spec.NodeName != "" { - nodeEnforcer := "" - if _, ok := cluster.Nodes[pod.Spec.NodeName]; ok { - nodeEnforcer = "apparmor" - } else { - nodeEnforcer = "bpf" - } - log.Info(fmt.Sprintf("pod was updated, name=%s enforcer=%s", pod.Name, nodeEnforcer)) - handlePod(c, pod, nodeEnforcer, &log) - } - } - }, - }) - - inf.Run(wait.NeverStop) -} diff --git a/pkg/KubeArmorController/informer/nodewatcher.go b/pkg/KubeArmorController/informer/nodewatcher.go index 8054f5ab23..9d9c5d0391 100644 --- a/pkg/KubeArmorController/informer/nodewatcher.go +++ b/pkg/KubeArmorController/informer/nodewatcher.go @@ -8,6 +8,7 @@ import ( "sync" "github.com/go-logr/logr" + "github.com/kubearmor/KubeArmor/pkg/KubeArmorController/common" 
"github.com/kubearmor/KubeArmor/pkg/KubeArmorController/types" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/wait" @@ -18,7 +19,7 @@ import ( func InitCluster() types.Cluster { return types.Cluster{ - Nodes: make(map[string]string), + Nodes: make(map[string]*types.NodeInfo), HomogeneousStatus: true, ClusterLock: &sync.RWMutex{}, HomogenousApparmor: false, @@ -42,9 +43,19 @@ func NodeWatcher(c *kubernetes.Clientset, cluster *types.Cluster, log logr.Logge cluster.ClusterLock.Lock() defer cluster.ClusterLock.Unlock() cluster.TotalNodes++ - if enforcer == "apparmor" { - cluster.Nodes[node.Name] = enforcer + + cluster.Nodes[node.Name] = &types.NodeInfo{} + cluster.Nodes[node.Name].Enforcer = enforcer + + kubearmorStatus, err := common.CheckKubearmorStatus(node.Name, c) + if err != nil { + log.Info(fmt.Sprintf("unable to get kubearmor status on node %s : %s", node.Name, err.Error())) + } + cluster.Nodes[node.Name].KubeArmorActive = kubearmorStatus + if !cluster.Nodes[node.Name].KubeArmorActive { + log.Info(fmt.Sprintf("kubearmor not found on node %s :", node.Name)) + } } // re-compute homogeneous status homogeneous := true @@ -80,12 +91,24 @@ func NodeWatcher(c *kubernetes.Clientset, cluster *types.Cluster, log logr.Logge if enforcer, ok := node.Labels["kubearmor.io/enforcer"]; ok { if _, ok := cluster.Nodes[node.Name]; ok { // in case the enforcer has been updated to bpflsm from apparmor - if enforcer != cluster.Nodes[node.Name] { + if enforcer != cluster.Nodes[node.Name].Enforcer { delete(cluster.Nodes, node.Name) } - } else { - if enforcer == "apparmor" { - cluster.Nodes[node.Name] = enforcer + } + if enforcer == "apparmor" { + if _, ok := cluster.Nodes[node.Name]; !ok { + cluster.Nodes[node.Name] = &types.NodeInfo{} + } + cluster.Nodes[node.Name].Enforcer = enforcer + var err error + kubearmorStatus, err := common.CheckKubearmorStatus(node.Name, c) + if err != nil { + log.Error(err, fmt.Sprintf("unable to get kubearmor status on node %s", node.Name)) + } + cluster.Nodes[node.Name].KubeArmorActive = kubearmorStatus + + if !cluster.Nodes[node.Name].KubeArmorActive { + log.Info(fmt.Sprintf("kubearmor not found on node %s", node.Name)) } } // re-compute homogeneous status @@ -140,7 +163,6 @@ func NodeWatcher(c *kubernetes.Clientset, cluster *types.Cluster, log logr.Logge } } cluster.HomogenousApparmor = homogeneousApparmor - } }, }) diff --git a/pkg/KubeArmorController/internal/controller/podrefresh_controller.go b/pkg/KubeArmorController/internal/controller/podrefresh_controller.go index 0009fc05a2..79b16ce0d2 100644 --- a/pkg/KubeArmorController/internal/controller/podrefresh_controller.go +++ b/pkg/KubeArmorController/internal/controller/podrefresh_controller.go @@ -9,11 +9,12 @@ import ( "time" "github.com/kubearmor/KubeArmor/pkg/KubeArmorController/common" - "github.com/kubearmor/KubeArmor/pkg/KubeArmorController/informer" "github.com/kubearmor/KubeArmor/pkg/KubeArmorController/types" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" @@ -21,21 +22,35 @@ import ( type PodRefresherReconciler struct { client.Client - Scheme *runtime.Scheme - Cluster *types.Cluster + Scheme *runtime.Scheme + Cluster *types.Cluster + ClientSet *kubernetes.Clientset + AnnotateExisting bool +} +type ResourceInfo struct { + kind string + namespaceName 
string } // +kubebuilder:rbac:groups="",resources=pods,verbs=get;watch;list;create;update;delete func (r *PodRefresherReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + log := log.FromContext(ctx) + if !r.AnnotateExisting { + log.Info("Not annotating existing resources as annotateExisting is set to false") + return ctrl.Result{}, nil + } var podList corev1.PodList + if err := r.List(ctx, &podList); err != nil { log.Error(err, "Unable to list pods") return ctrl.Result{}, client.IgnoreNotFound(err) } + log.Info("Watching for blocked pods") poddeleted := false + deploymentMap := make(map[string]ResourceInfo) for _, pod := range podList.Items { if pod.DeletionTimestamp != nil { continue @@ -44,18 +59,24 @@ func (r *PodRefresherReconciler) Reconcile(ctx context.Context, req ctrl.Request continue } r.Cluster.ClusterLock.RLock() + if _, exist := r.Cluster.Nodes[pod.Spec.NodeName]; exist { + if !r.Cluster.Nodes[pod.Spec.NodeName].KubeArmorActive { + log.Info(fmt.Sprintf("skip annotating pod as kubearmor not present on node %s", pod.Spec.NodeName)) + r.Cluster.ClusterLock.RUnlock() + continue + } + } enforcer := "" if _, ok := r.Cluster.Nodes[pod.Spec.NodeName]; ok { enforcer = "apparmor" } else { enforcer = "bpf" } - r.Cluster.ClusterLock.RUnlock() if _, ok := pod.Annotations["kubearmor-policy"]; !ok { orginalPod := pod.DeepCopy() - common.AddCommonAnnotations(&pod) + common.AddCommonAnnotations(&pod.ObjectMeta) patch := client.MergeFrom(orginalPod) err := r.Patch(ctx, &pod, patch) if err != nil { @@ -68,17 +89,44 @@ func (r *PodRefresherReconciler) Reconcile(ctx context.Context, req ctrl.Request // restart not required for special pods and already annotated pods restartPod := requireRestart(pod, enforcer) - if restartPod { // for annotating pre-existing pods on apparmor-nodes // the pod is managed by a controller (e.g: replicaset) if pod.OwnerReferences != nil && len(pod.OwnerReferences) != 0 { - log.Info("Deleting pod " + pod.Name + "in namespace " + pod.Namespace + " as it is managed") - if err := r.Delete(ctx, &pod); err != nil { - if !errors.IsNotFound(err) { - log.Error(err, "Could not delete pod "+pod.Name+" in namespace "+pod.Namespace) + // log.Info("Deleting pod " + pod.Name + "in namespace " + pod.Namespace + " as it is managed") + for _, ref := range pod.OwnerReferences { + + if *ref.Controller { + if ref.Kind == "ReplicaSet" { + replicaSet, err := r.ClientSet.AppsV1().ReplicaSets(pod.Namespace).Get(ctx, ref.Name, metav1.GetOptions{}) + if err != nil { + log.Error(err, fmt.Sprintf("Failed to get ReplicaSet %s:", ref.Name)) + continue + } + // Check if the ReplicaSet is managed by a Deployment + for _, rsOwnerRef := range replicaSet.OwnerReferences { + if rsOwnerRef.Kind == "Deployment" { + deploymentName := rsOwnerRef.Name + deploymentMap[deploymentName] = ResourceInfo{ + kind: rsOwnerRef.Kind, + namespaceName: pod.Namespace, + } + } + } + } else { + deploymentMap[ref.Name] = ResourceInfo{ + namespaceName: pod.Namespace, + kind: ref.Kind, + } + } } } + + // find out deployment--- patch it + // if err := r.Delete(ctx, &pod); err != nil { + // if !errors.IsNotFound(err) { + // log.Error(err, "Could not delete pod "+pod.Name+" in namespace "+pod.Namespace) + // } } else { // single pods // mimic kubectl replace --force @@ -97,10 +145,14 @@ func (r *PodRefresherReconciler) Reconcile(ctx context.Context, req ctrl.Request if err := r.Create(ctx, &pod); err != nil { log.Error(err, "Could not create pod "+pod.Name+" in namespace 
"+pod.Namespace) } + poddeleted = true } - poddeleted = true + } } + + restartResources(deploymentMap, r.ClientSet) + // give time for pods to be deleted if poddeleted { time.Sleep(10 * time.Second) @@ -128,9 +180,74 @@ func requireRestart(pod corev1.Pod, enforcer string) bool { } // !hasApparmorAnnotations && enforcer == "apparmor" - if informer.HandleAppArmor(pod.Annotations) && enforcer == "apparmor" { + if common.HandleAppArmor(pod.Annotations) && enforcer == "apparmor" { return true } return false } +func restartResources(resourcesMap map[string]ResourceInfo, corev1 *kubernetes.Clientset) error { + + ctx := context.Background() + log := log.FromContext(ctx) + for name, resInfo := range resourcesMap { + switch resInfo.kind { + case "Deployment": + dep, err := corev1.AppsV1().Deployments(resInfo.namespaceName).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + log.Error(err, fmt.Sprintf("error getting deployment %s in namespace %s", name, resInfo.namespaceName)) + continue + } + log.Info(fmt.Sprintf("restarting deployment %s in namespace %s", name, resInfo.namespaceName)) + // Update the Pod template's annotations to trigger a rolling restart + if dep.Spec.Template.Annotations == nil { + dep.Spec.Template.Annotations = make(map[string]string) + } + dep.Spec.Template.Annotations[common.KubeArmorRestartedAnnotation] = time.Now().Format(time.RFC3339) + // Update the Deployment + _, err = corev1.AppsV1().Deployments(resInfo.namespaceName).Update(ctx, dep, metav1.UpdateOptions{}) + if err != nil { + log.Error(err, fmt.Sprintf("error updating deployment %s in namespace %s", name, resInfo.namespaceName)) + } + case "StatefulSet": + statefulSet, err := corev1.AppsV1().StatefulSets(resInfo.namespaceName).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + log.Error(err, fmt.Sprintf("error getting statefulset %s in namespace %s", name, resInfo.namespaceName)) + continue + } + log.Info("restarting statefulset " + name + " in namespace " + resInfo.namespaceName) + // Update the Pod template's annotations to trigger a rolling restart + if statefulSet.Spec.Template.Annotations == nil { + statefulSet.Spec.Template.Annotations = make(map[string]string) + } + statefulSet.Spec.Template.Annotations[common.KubeArmorRestartedAnnotation] = time.Now().Format(time.RFC3339) + // Update the StatefulSet + _, err = corev1.AppsV1().StatefulSets(resInfo.namespaceName).Update(ctx, statefulSet, metav1.UpdateOptions{}) + if err != nil { + log.Error(err, fmt.Sprintf("error updating statefulset %s in namespace %s", name, resInfo.namespaceName)) + } + + case "DaemonSet": + daemonSet, err := corev1.AppsV1().DaemonSets(resInfo.namespaceName).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + log.Error(err, fmt.Sprintf("error getting daemonset %s in namespace %s", name, resInfo.namespaceName)) + continue + } + log.Info("restarting daemonset " + name + " in namespace " + resInfo.namespaceName) + // Update the Pod template's annotations to trigger a rolling restart + if daemonSet.Spec.Template.Annotations == nil { + daemonSet.Spec.Template.Annotations = make(map[string]string) + } + daemonSet.Spec.Template.Annotations[common.KubeArmorRestartedAnnotation] = time.Now().Format(time.RFC3339) + // Update the DaemonSet + _, err = corev1.AppsV1().DaemonSets(resInfo.namespaceName).Update(ctx, daemonSet, metav1.UpdateOptions{}) + if err != nil { log.Error(err, fmt.Sprintf("error updating daemonset %s in namespace %s", name, resInfo.namespaceName)) + } + } + // wait a few seconds after updating each resource + time.Sleep(5 
* time.Second) + } + + return nil +} diff --git a/pkg/KubeArmorController/types/types.go b/pkg/KubeArmorController/types/types.go index cf6fd5c7a4..d113794178 100644 --- a/pkg/KubeArmorController/types/types.go +++ b/pkg/KubeArmorController/types/types.go @@ -11,12 +11,16 @@ import ( ) type Cluster struct { - Nodes map[string]string + Nodes map[string]*NodeInfo HomogeneousStatus bool // the cluster runs the same enforcer HomogenousApparmor bool // the cluster runs with apparmor enforcer ClusterLock *sync.RWMutex TotalNodes int //total no of nodes present } +type NodeInfo struct { + KubeArmorActive bool + Enforcer string +} type MultiEnforcerController struct { Client kubernetes.Clientset diff --git a/pkg/KubeArmorOperator/cmd/operator/main.go b/pkg/KubeArmorOperator/cmd/operator/main.go index f030dfc299..0f343cb000 100755 --- a/pkg/KubeArmorOperator/cmd/operator/main.go +++ b/pkg/KubeArmorOperator/cmd/operator/main.go @@ -31,6 +31,7 @@ var ExtClient *apiextensionsclientset.Clientset var Opv1Client *opv1client.Clientset var Secv1Client *secv1client.Clientset var AnnotateResource bool +var AnnotateExisting bool var InitDeploy bool var LogLevel string var ProviderHostname, ProviderEndpoint string @@ -57,7 +58,7 @@ var Cmd = &cobra.Command{ return nil }, Run: func(cmd *cobra.Command, args []string) { - nodeWatcher := controllers.NewClusterWatcher(K8sClient, Logger, ExtClient, Opv1Client, Secv1Client, PathPrefix, DeploymentName, ProviderHostname, ProviderEndpoint, InitDeploy, AnnotateResource) + nodeWatcher := controllers.NewClusterWatcher(K8sClient, Logger, ExtClient, Opv1Client, Secv1Client, PathPrefix, DeploymentName, ProviderHostname, ProviderEndpoint, InitDeploy, AnnotateResource, AnnotateExisting) go nodeWatcher.WatchConfigCrd() nodeWatcher.WatchNodes() @@ -89,6 +90,7 @@ func init() { Cmd.PersistentFlags().BoolVar(&InitDeploy, "initDeploy", true, "Init container deployment") Cmd.PersistentFlags().StringVar(&LogLevel, "loglevel", "info", "log level, e.g., debug, info, warn, error") Cmd.PersistentFlags().BoolVar(&AnnotateResource, "annotateResource", false, "when true kubearmor annotate k8s resources with apparmor annotation") + Cmd.PersistentFlags().BoolVar(&AnnotateExisting, "annotateExisting", false, "when true kubearmor-controller restarts and annotates existing resources, with required annotations") } // Execute adds all child commands to the root command and sets flags appropriately. 
diff --git a/pkg/KubeArmorOperator/common/defaults.go b/pkg/KubeArmorOperator/common/defaults.go index a9e80f614e..7d0a0d6cb6 100644 --- a/pkg/KubeArmorOperator/common/defaults.go +++ b/pkg/KubeArmorOperator/common/defaults.go @@ -128,6 +128,7 @@ var ( KubeArmorControllerArgs []string = []string{ "--leader-elect", "--health-probe-bind-address=:8081", + "--annotateExisting=false", } KubeArmorControllerImage string = "kubearmor/kubearmor-controller:latest" KubeArmorControllerImagePullPolicy string = "Always" diff --git a/pkg/KubeArmorOperator/internal/controller/cluster.go b/pkg/KubeArmorOperator/internal/controller/cluster.go index b109574a65..e83be03d48 100755 --- a/pkg/KubeArmorOperator/internal/controller/cluster.go +++ b/pkg/KubeArmorOperator/internal/controller/cluster.go @@ -46,7 +46,7 @@ var informer informers.SharedInformerFactory var deployment_uuid types.UID var deployment_name string = "kubearmor-operator" var PathPrefix string -var initDeploy, annotateResource bool +var initDeploy, annotateResource, annotateExisting bool var ProviderHostname, ProviderEndpoint string type ClusterWatcher struct { @@ -72,7 +72,7 @@ type Node struct { Seccomp string } -func NewClusterWatcher(client *kubernetes.Clientset, log *zap.SugaredLogger, extClient *apiextensionsclientset.Clientset, opv1Client *opv1client.Clientset, secv1Client *secv1client.Clientset, pathPrefix, deploy_name, providerHostname, providerEndpoint string, initdeploy, annotateresource bool) *ClusterWatcher { +func NewClusterWatcher(client *kubernetes.Clientset, log *zap.SugaredLogger, extClient *apiextensionsclientset.Clientset, opv1Client *opv1client.Clientset, secv1Client *secv1client.Clientset, pathPrefix, deploy_name, providerHostname, providerEndpoint string, initdeploy, annotateresource, annotateexisting bool) *ClusterWatcher { if informer == nil { informer = informers.NewSharedInformerFactory(client, 0) } @@ -90,6 +90,7 @@ func NewClusterWatcher(client *kubernetes.Clientset, log *zap.SugaredLogger, ext deployment_name = deploy_name initDeploy = initdeploy annotateResource = annotateresource + annotateExisting = annotateexisting ProviderHostname = providerHostname ProviderEndpoint = providerEndpoint diff --git a/pkg/KubeArmorOperator/internal/controller/resources.go b/pkg/KubeArmorOperator/internal/controller/resources.go index e4a68a5cf7..c130fd1a87 100755 --- a/pkg/KubeArmorOperator/internal/controller/resources.go +++ b/pkg/KubeArmorOperator/internal/controller/resources.go @@ -9,6 +9,7 @@ import ( "fmt" "io" "net/http" + "reflect" "regexp" "strconv" "strings" @@ -519,7 +520,7 @@ func (clusterWatcher *ClusterWatcher) deployControllerDeployment(deployment *app } } else { if (common.IfNodeWithSecurtiyFs && controller.Spec.Template.Spec.NodeSelector == nil) || - (!common.IfNodeWithSecurtiyFs && controller.Spec.Template.Spec.NodeSelector != nil) { + (!common.IfNodeWithSecurtiyFs && controller.Spec.Template.Spec.NodeSelector != nil) || !reflect.DeepEqual(controller.Spec.Template.Spec.Containers[0].Args, deployment.Spec.Template.Spec.Containers[0].Args) { clusterWatcher.Log.Infof("Updating deployment %s", controller.Name) controller.Spec.Template.Spec.NodeSelector = deployment.Spec.Template.Spec.NodeSelector controller.Spec.Template.Spec.Containers = deployment.Spec.Template.Spec.Containers @@ -700,8 +701,18 @@ func (clusterWatcher *ClusterWatcher) WatchRequiredResources() { clusterRoles := []*rbacv1.ClusterRole{ addOwnership(genSnitchRole()).(*rbacv1.ClusterRole), 
addOwnership(deployments.GetRelayClusterRole()).(*rbacv1.ClusterRole), - addOwnership(deployments.GetKubeArmorControllerClusterRole()).(*rbacv1.ClusterRole), } + controllerClusterRole := addOwnership(deployments.GetKubeArmorControllerClusterRole()).(*rbacv1.ClusterRole) + if annotateExisting { + controllerClusterRole.Rules = append(controllerClusterRole.Rules, []rbacv1.PolicyRule{ + { + APIGroups: []string{"apps"}, + Resources: []string{"deployments", "statefulsets", "daemonsets", "replicasets"}, + Verbs: []string{"get", "update"}, + }, + }...) + } + clusterRoles = append(clusterRoles, controllerClusterRole) kaClusterRole := addOwnership(deployments.GetClusterRole()).(*rbacv1.ClusterRole) if annotateResource { @@ -767,6 +778,9 @@ func (clusterWatcher *ClusterWatcher) WatchRequiredResources() { relayServer := deployments.GetRelayDeployment(common.Namespace) // update args, imagePullSecrets and tolerations UpdateArgsIfDefinedAndUpdated(&controller.Spec.Template.Spec.Containers[0].Args, common.KubeArmorControllerArgs) + if annotateExisting { + UpdateArgsIfDefinedAndUpdated(&controller.Spec.Template.Spec.Containers[0].Args, []string{"--annotateExisting=true"}) + } UpdateImagePullSecretsIfDefinedAndUpdated(&controller.Spec.Template.Spec.ImagePullSecrets, common.KubeArmorControllerImagePullSecrets) UpdateTolerationsIfDefinedAndUpdated(&controller.Spec.Template.Spec.Tolerations, common.KubeArmorControllerTolerations) if len(controller.Spec.Template.Spec.ImagePullSecrets) < 1 { @@ -965,7 +979,7 @@ func (clusterWatcher *ClusterWatcher) WatchRequiredResources() { } for _, clusterRole := range clusterRoles { - _, err = clusterWatcher.Client.RbacV1().ClusterRoles().Get(context.Background(), clusterRole.Name, metav1.GetOptions{}) + role, err := clusterWatcher.Client.RbacV1().ClusterRoles().Get(context.Background(), clusterRole.Name, metav1.GetOptions{}) if isNotfound(err) { clusterWatcher.Log.Infof("Creating cluster role %s", clusterRole.Name) _, err := clusterWatcher.Client.RbacV1().ClusterRoles().Create(context.Background(), clusterRole, metav1.CreateOptions{}) if err != nil { installErr = err clusterWatcher.Log.Warnf("Cannot create cluster role %s, error=%s", clusterRole.Name, err.Error()) } + } else if err == nil && !reflect.DeepEqual(role.Rules, clusterRole.Rules) { + // update clusterroles if there's a change in rules + clusterWatcher.Log.Infof("Updating cluster role %s", clusterRole.Name) + _, err := clusterWatcher.Client.RbacV1().ClusterRoles().Update(context.Background(), clusterRole, metav1.UpdateOptions{}) + if err != nil { + installErr = err + clusterWatcher.Log.Warnf("Cannot update cluster role %s, error=%s", clusterRole.Name, err.Error()) + } } } diff --git a/tests/k8s_env/ksp/pre-run-pod.yaml b/tests/k8s_env/ksp/pre-run-pod.yaml index 4177c817c0..d143f95bc3 100644 --- a/tests/k8s_env/ksp/pre-run-pod.yaml +++ b/tests/k8s_env/ksp/pre-run-pod.yaml @@ -4,9 +4,9 @@ metadata: name: nginx --- apiVersion: apps/v1 -kind: ReplicaSet +kind: Deployment metadata: - name: nginx-replicaset + name: nginx-deployment namespace: nginx spec: replicas: 3 @@ -22,6 +22,7 @@ spec: containers: - name: my-container image: nginx + --- apiVersion: apps/v1 kind: StatefulSet
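Reviewer note, not part of the patch: a quick way to sanity-check the annotateExisting flow after installing with --set kubearmorOperator.annotateExisting=true. The pod, container, and namespace names below are placeholders; the first command is the same node query used in NOTES.txt.
  ➤ kubectl get nodes -o jsonpath='{range .items[*]}{.metadata.name} {.metadata.labels.kubearmor\.io/enforcer}{"\n"}{end}'
  ➤ kubectl get pod <pod-name> -n <namespace> -o jsonpath='{.metadata.annotations}'
On an AppArmor node, a pre-existing pod should show the kubearmor-policy and kubearmor-visibility annotations plus a container.apparmor.security.beta.kubernetes.io/<container> entry once the controller has rolled its owning workload.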