From e0e395c0a1d6dd82990c49623b2e38206da3519e Mon Sep 17 00:00:00 2001
From: Jeffrey Limnardy <jeffrey.limnardy@sap.com>
Date: Wed, 11 Dec 2024 13:29:01 +0100
Subject: [PATCH 1/5] refactor self-monitor tests to use a golden file

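Replace the per-resource assertion helpers in
TestApplySelfMonitorResources with a golden-file comparison: an
interceptor on the fake client records every object that
ApplyResources would create, the recorded objects are marshalled with
testutils.MarshalYAML, and the result is compared byte-for-byte
against testdata/self-monitor.yaml. To update the golden file after
an intentional change, uncomment the testutils.SaveAsYAML call and
re-run the test.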
---
 .../resources/selfmonitor/resources_test.go   |  52 ++---
 .../selfmonitor/testdata/self-monitor.yaml    | 202 ++++++++++++++++++
 2 files changed, 228 insertions(+), 26 deletions(-)
 create mode 100644 internal/resources/selfmonitor/testdata/self-monitor.yaml

diff --git a/internal/resources/selfmonitor/resources_test.go b/internal/resources/selfmonitor/resources_test.go
index 240435af2..c3f4c9beb 100644
--- a/internal/resources/selfmonitor/resources_test.go
+++ b/internal/resources/selfmonitor/resources_test.go
@@ -2,6 +2,7 @@ package selfmonitor
 
 import (
 	"context"
+	"os"
 	"testing"
 
 	"github.com/stretchr/testify/require"
@@ -9,9 +10,15 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	networkingv1 "k8s.io/api/networking/v1"
 	rbacv1 "k8s.io/api/rbac/v1"
+	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/intstr"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"
+	"sigs.k8s.io/controller-runtime/pkg/client/interceptor"
+
+	testutils "github.com/kyma-project/telemetry-manager/internal/utils/test"
 )
 
 const (
@@ -98,9 +105,17 @@ func TestDeleteSelfMonitorResources(t *testing.T) {
 }
 
 func TestApplySelfMonitorResources(t *testing.T) {
-	ctx := context.Background()
-	client := fake.NewClientBuilder().Build()
+	var objects []client.Object
 
+	ctx := context.Background()
+	scheme := runtime.NewScheme()
+	client := fake.NewClientBuilder().WithScheme(scheme).WithInterceptorFuncs(interceptor.Funcs{
+		Create: func(_ context.Context, _ client.WithWatch, obj client.Object, _ ...client.CreateOption) error {
+			objects = append(objects, obj)
+			// Nothing has to be created; just record the object in the list
+			return nil
+		},
+	}).Build()
 	sut := ApplierDeleter{
 		Config: Config{
 			BaseName:  name,
@@ -108,6 +123,8 @@ func TestApplySelfMonitorResources(t *testing.T) {
 		},
 	}
 
+	utilruntime.Must(clientgoscheme.AddToScheme(scheme))
+
 	opts := ApplyOptions{
 		AlertRulesFileName:       alertRulesFileName,
 		AlertRulesYAML:           alertRulesYAML,
@@ -118,33 +135,16 @@ func TestApplySelfMonitorResources(t *testing.T) {
 	err := sut.ApplyResources(ctx, client, opts)
 	require.NoError(t, err)
 
-	t.Run("should create collector Config configmap", func(t *testing.T) {
-		verifyConfigMapIsPresent(ctx, t, client)
-	})
-
-	t.Run("should create a deployment", func(t *testing.T) {
-		verifyDeploymentIsPreset(ctx, t, client)
-	})
+	// Uncomment to regenerate the golden file
+	// testutils.SaveAsYAML(t, scheme, objects, "testdata/self-monitor.yaml")
 
-	t.Run("should create role", func(t *testing.T) {
-		verifyRoleIsPresent(ctx, t, client)
-	})
-
-	t.Run("should create role binding", func(t *testing.T) {
-		verifyRoleBindingIsPresent(ctx, t, client)
-	})
-
-	t.Run("should create service account", func(t *testing.T) {
-		verifyServiceAccountIsPresent(ctx, t, client)
-	})
+	bytes, err := testutils.MarshalYAML(scheme, objects)
+	require.NoError(t, err)
 
-	t.Run("should create network policy", func(t *testing.T) {
-		verifyNetworkPolicy(ctx, t, client)
-	})
+	goldenFileBytes, err := os.ReadFile("testdata/self-monitor.yaml")
+	require.NoError(t, err)
 
-	t.Run("should create service", func(t *testing.T) {
-		verifyService(ctx, t, client)
-	})
+	require.Equal(t, string(goldenFileBytes), string(bytes))
 }
 
 func verifyDeploymentIsPreset(ctx context.Context, t *testing.T, client client.Client) {
diff --git a/internal/resources/selfmonitor/testdata/self-monitor.yaml b/internal/resources/selfmonitor/testdata/self-monitor.yaml
new file mode 100644
index 000000000..8eabc0a9b
--- /dev/null
+++ b/internal/resources/selfmonitor/testdata/self-monitor.yaml
@@ -0,0 +1,202 @@
+apiVersion: v1
+data:
+  dummy-alerts.yaml: dummy alert rules
+  dummy-config.yaml: dummy prometheus Config
+kind: ConfigMap
+metadata:
+  creationTimestamp: null
+  labels:
+    app.kubernetes.io/name: my-self-monitor
+  name: my-self-monitor
+  namespace: my-namespace
+---
+apiVersion: v1
+kind: Service
+metadata:
+  creationTimestamp: null
+  labels:
+    app.kubernetes.io/name: my-self-monitor
+  name: my-self-monitor
+  namespace: my-namespace
+spec:
+  ports:
+  - name: http
+    port: 9090
+    protocol: TCP
+    targetPort: 9090
+  selector:
+    app.kubernetes.io/name: my-self-monitor
+  type: ClusterIP
+status:
+  loadBalancer: {}
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  creationTimestamp: null
+  labels:
+    app.kubernetes.io/name: my-self-monitor
+  name: my-self-monitor
+  namespace: my-namespace
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  creationTimestamp: null
+  labels:
+    app.kubernetes.io/name: my-self-monitor
+  name: my-self-monitor
+  namespace: my-namespace
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app.kubernetes.io/name: my-self-monitor
+  strategy: {}
+  template:
+    metadata:
+      annotations:
+        checksum/Config: 8cdf17444d9a798679152bba8d099a9609ab7bd266ddfbdcc70a091204ff1dd7
+      creationTimestamp: null
+      labels:
+        app.kubernetes.io/name: my-self-monitor
+        sidecar.istio.io/inject: "false"
+    spec:
+      containers:
+      - args:
+        - --storage.tsdb.retention.time=2h
+        - --storage.tsdb.retention.size=50MB
+        - --config.file=/dummy/dummy-config.yaml
+        - --storage.tsdb.path=/prometheus/
+        - --log.format=json
+        env:
+        - name: GOMEMLIMIT
+          value: "150994880"
+        livenessProbe:
+          failureThreshold: 5
+          httpGet:
+            path: /-/healthy
+            port: 9090
+          periodSeconds: 5
+          successThreshold: 1
+          timeoutSeconds: 3
+        name: self-monitor
+        ports:
+        - containerPort: 9090
+          name: http-web
+        readinessProbe:
+          failureThreshold: 3
+          httpGet:
+            path: /-/ready
+            port: 9090
+          periodSeconds: 5
+          successThreshold: 1
+          timeoutSeconds: 3
+        resources:
+          limits:
+            cpu: 200m
+            memory: 180Mi
+          requests:
+            cpu: 10m
+            memory: 50Mi
+        securityContext:
+          allowPrivilegeEscalation: false
+          capabilities:
+            drop:
+            - ALL
+          privileged: false
+          readOnlyRootFilesystem: true
+          runAsNonRoot: true
+          runAsUser: 10001
+          seccompProfile:
+            type: RuntimeDefault
+        volumeMounts:
+        - mountPath: /dummy/
+          name: prometheus-config-volume
+        - mountPath: /prometheus/
+          name: prometheus-storage-volume
+      securityContext:
+        runAsNonRoot: true
+        runAsUser: 10001
+        seccompProfile:
+          type: RuntimeDefault
+      serviceAccountName: my-self-monitor
+      terminationGracePeriodSeconds: 300
+      volumes:
+      - configMap:
+          defaultMode: 420
+          name: my-self-monitor
+        name: prometheus-config-volume
+      - emptyDir:
+          sizeLimit: 1000Mi
+        name: prometheus-storage-volume
+status: {}
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+  creationTimestamp: null
+  labels:
+    app.kubernetes.io/name: my-self-monitor
+  name: my-self-monitor
+  namespace: my-namespace
+spec:
+  egress:
+  - to:
+    - ipBlock:
+        cidr: 0.0.0.0/0
+    - ipBlock:
+        cidr: ::/0
+  ingress:
+  - from:
+    - ipBlock:
+        cidr: 0.0.0.0/0
+    - ipBlock:
+        cidr: ::/0
+    ports:
+    - port: 9090
+      protocol: TCP
+  podSelector:
+    matchLabels:
+      app.kubernetes.io/name: my-self-monitor
+  policyTypes:
+  - Ingress
+  - Egress
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  creationTimestamp: null
+  labels:
+    app.kubernetes.io/name: my-self-monitor
+  name: my-self-monitor
+  namespace: my-namespace
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - services
+  - endpoints
+  - pods
+  verbs:
+  - get
+  - list
+  - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  creationTimestamp: null
+  labels:
+    app.kubernetes.io/name: my-self-monitor
+  name: my-self-monitor
+  namespace: my-namespace
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: my-self-monitor
+subjects:
+- kind: ServiceAccount
+  name: my-self-monitor
+  namespace: my-namespace
+---

From 044a7cbb8048433288dcc1cf763ed4bd81fc5c39 Mon Sep 17 00:00:00 2001
From: Jeffrey Limnardy <jeffrey.limnardy@sap.com>
Date: Wed, 11 Dec 2024 13:29:41 +0100
Subject: [PATCH 2/5] allow easier addition of new agent test cases

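Convert TestAgent_ApplyResources and TestAgent_DeleteResources into
table-driven tests, so covering another collector agent only requires
appending a case to the tests slice. The scheme, fake client, and
recorded-object slice are set up per case, keeping cases independent,
and a case can set saveGoldenFile to regenerate its golden file on
the next run. As an illustration, a case for a hypothetical trace
agent (the constructor and golden file below are made up, not part of
this change) would look like:

    {
        name:           "trace agent",
        sut:            NewTraceAgentApplierDeleter(image, namespace, priorityClassName),
        goldenFilePath: "testdata/trace-agent.yaml",
        // Set once to (re)generate the golden file, then remove
        // again before committing.
        saveGoldenFile: true,
    },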
---
 .../resources/otelcollector/agent_test.go     | 117 +++++++++++-------
 1 file changed, 75 insertions(+), 42 deletions(-)

diff --git a/internal/resources/otelcollector/agent_test.go b/internal/resources/otelcollector/agent_test.go
index fb7243ea2..cd6b6bab9 100644
--- a/internal/resources/otelcollector/agent_test.go
+++ b/internal/resources/otelcollector/agent_test.go
@@ -19,41 +19,65 @@ import (
 )
 
 func TestAgent_ApplyResources(t *testing.T) {
-	var objects []client.Object
-
-	scheme := runtime.NewScheme()
-	utilruntime.Must(clientgoscheme.AddToScheme(scheme))
-	utilruntime.Must(istiosecurityclientv1.AddToScheme(scheme))
-
-	client := fake.NewClientBuilder().WithInterceptorFuncs(interceptor.Funcs{
-		Create: func(_ context.Context, c client.WithWatch, obj client.Object, _ ...client.CreateOption) error {
-			objects = append(objects, obj)
-			// Nothing has to be created, just add created object to the list
-			return nil
-		},
-	}).Build()
-
 	image := "opentelemetry/collector:dummy"
 	namespace := "kyma-system"
 	priorityClassName := "normal"
-	sut := NewMetricAgentApplierDeleter(image, namespace, priorityClassName)
 
-	err := sut.ApplyResources(context.Background(), client, AgentApplyOptions{
-		AllowedPorts:        []int32{5555, 6666},
-		CollectorConfigYAML: "dummy",
-	})
-	require.NoError(t, err)
+	tests := []struct {
+		name           string
+		sut            *AgentApplierDeleter
+		goldenFilePath string
+		saveGoldenFile bool
+	}{
+		{
+			name:           "metric agent",
+			sut:            NewMetricAgentApplierDeleter(image, namespace, priorityClassName),
+			goldenFilePath: "testdata/metric-agent.yaml",
+		},
+	}
+
+	for _, tt := range tests {
+		var objects []client.Object
+
+		scheme := runtime.NewScheme()
+		utilruntime.Must(clientgoscheme.AddToScheme(scheme))
+		utilruntime.Must(istiosecurityclientv1.AddToScheme(scheme))
+
+		fakeClient := fake.NewClientBuilder().WithInterceptorFuncs(interceptor.Funcs{
+			Create: func(_ context.Context, _ client.WithWatch, obj client.Object, _ ...client.CreateOption) error {
+				objects = append(objects, obj)
+				// Nothing has to be created; just record the object in the list
+				return nil
+			},
+		}).Build()
+
+		t.Run(tt.name, func(t *testing.T) {
+			err := tt.sut.ApplyResources(context.Background(), fakeClient, AgentApplyOptions{
+				AllowedPorts:        []int32{5555, 6666},
+				CollectorConfigYAML: "dummy",
+			})
+			require.NoError(t, err)
+
+			if tt.saveGoldenFile {
+				testutils.SaveAsYAML(t, scheme, objects, tt.goldenFilePath)
+			}
 
-	bytes, err := testutils.MarshalYAML(scheme, objects)
-	require.NoError(t, err)
+			bytes, err := testutils.MarshalYAML(scheme, objects)
+			require.NoError(t, err)
 
-	goldenFileBytes, err := os.ReadFile("testdata/metric-agent.yaml")
-	require.NoError(t, err)
+			goldenFileBytes, err := os.ReadFile(tt.goldenFilePath)
+			require.NoError(t, err)
 
-	require.Equal(t, string(goldenFileBytes), string(bytes))
+			require.Equal(t, string(goldenFileBytes), string(bytes))
+		})
+	}
 }
 
 func TestAgent_DeleteResources(t *testing.T) {
+	image := "opentelemetry/collector:dummy"
+	namespace := "kyma-system"
+	priorityClassName := "normal"
+
 	var created []client.Object
 
 	fakeClient := fake.NewClientBuilder().WithInterceptorFuncs(interceptor.Funcs{
@@ -63,23 +87,32 @@ func TestAgent_DeleteResources(t *testing.T) {
 		},
 	}).Build()
 
-	image := "opentelemetry/collector:dummy"
-	namespace := "kyma-system"
-	priorityClassName := "normal"
-	sut := NewMetricAgentApplierDeleter(image, namespace, priorityClassName)
-
-	err := sut.ApplyResources(context.Background(), fakeClient, AgentApplyOptions{
-		AllowedPorts:        []int32{5555, 6666},
-		CollectorConfigYAML: "dummy",
-	})
-	require.NoError(t, err)
-
-	err = sut.DeleteResources(context.Background(), fakeClient)
-	require.NoError(t, err)
+	tests := []struct {
+		name string
+		sut  *AgentApplierDeleter
+	}{
+		{
+			name: "metric agent",
+			sut:  NewMetricAgentApplierDeleter(image, namespace, priorityClassName),
+		},
+	}
 
-	for i := range created {
-		// an update operation on a non-existent object should return a NotFound error
-		err = fakeClient.Get(context.Background(), client.ObjectKeyFromObject(created[i]), created[i])
-		require.True(t, apierrors.IsNotFound(err), "want not found, got %v: %#v", err, created[i])
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			err := tt.sut.ApplyResources(context.Background(), fakeClient, AgentApplyOptions{
+				AllowedPorts:        []int32{5555, 6666},
+				CollectorConfigYAML: "dummy",
+			})
+			require.NoError(t, err)
+
+			err = tt.sut.DeleteResources(context.Background(), fakeClient)
+			require.NoError(t, err)
+
+			for i := range created {
+				// a Get on a deleted object must return a NotFound error
+				err = fakeClient.Get(context.Background(), client.ObjectKeyFromObject(created[i]), created[i])
+				require.True(t, apierrors.IsNotFound(err), "want not found, got %v: %#v", err, created[i])
+			}
+		})
 	}
 }

From c7164533828f8884eaabd5933b88c2d1a8fcf040 Mon Sep 17 00:00:00 2001
From: Jeffrey Limnardy <jeffrey.limnardy@sap.com>
Date: Wed, 11 Dec 2024 13:30:28 +0100
Subject: [PATCH 3/5] replace deprecated YAML serializer constructor

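json.NewYAMLSerializer is deprecated in k8s.io/apimachinery in favor
of json.NewSerializerWithOptions, which takes an explicit options
struct. A minimal sketch of the replacement, with the option meanings
paraphrased from the apimachinery API docs:

    // SerializerOptions from k8s.io/apimachinery/pkg/runtime/serializer/json:
    //   Yaml   - serialize to/from YAML instead of JSON
    //   Pretty - pretty-print JSON output (no effect when Yaml is true)
    //   Strict - return errors for duplicate or unknown fields when decoding
    e := json.NewSerializerWithOptions(json.DefaultMetaFactory, scheme, scheme,
        json.SerializerOptions{Yaml: true})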
---
 internal/utils/test/marshal.go | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/internal/utils/test/marshal.go b/internal/utils/test/marshal.go
index 47dab4d22..4cfc0c4bc 100644
--- a/internal/utils/test/marshal.go
+++ b/internal/utils/test/marshal.go
@@ -31,7 +31,8 @@ func MarshalYAML(scheme *runtime.Scheme, objects []client.Object) ([]byte, error
 	// Always sort to have a deterministic output
 	slices.SortFunc(objects, compareObjects)
 
-	e := json.NewYAMLSerializer(json.DefaultMetaFactory, scheme, scheme)
+	serializerOpts := json.SerializerOptions{Yaml: true}
+	e := json.NewSerializerWithOptions(json.DefaultMetaFactory, scheme, scheme, serializerOpts)
 
 	var buffer bytes.Buffer
 

From 1ef80a176d251c2e63eeda1f1f04e9d315e18b7d Mon Sep 17 00:00:00 2001
From: Jeffrey Limnardy <jeffrey.limnardy@sap.com>
Date: Wed, 11 Dec 2024 14:17:40 +0100
Subject: [PATCH 4/5] use a similar approach for the delete-resources test

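Apply the golden-file test setup to TestDeleteSelfMonitorResources as
well and drop the now-unused verify* helpers. Unlike the apply test,
the Create interceptor here delegates to the real fake-client Create
so the objects actually exist in the tracker; after DeleteResources,
a Get on each recorded object must fail with IsNotFound.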
---
 .../resources/selfmonitor/resources_test.go   | 290 ++----------------
 1 file changed, 29 insertions(+), 261 deletions(-)

diff --git a/internal/resources/selfmonitor/resources_test.go b/internal/resources/selfmonitor/resources_test.go
index c3f4c9beb..adf400459 100644
--- a/internal/resources/selfmonitor/resources_test.go
+++ b/internal/resources/selfmonitor/resources_test.go
@@ -6,12 +6,8 @@ import (
 	"testing"
 
 	"github.com/stretchr/testify/require"
-	appsv1 "k8s.io/api/apps/v1"
-	corev1 "k8s.io/api/core/v1"
-	networkingv1 "k8s.io/api/networking/v1"
-	rbacv1 "k8s.io/api/rbac/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/util/intstr"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -31,79 +27,6 @@ const (
 	alertRulesFileName   = "dummy-alerts.yaml"
 )
 
-func TestDeleteSelfMonitorResources(t *testing.T) {
-	ctx := context.Background()
-	client := fake.NewClientBuilder().Build()
-
-	sut := ApplierDeleter{
-		Config: Config{
-			BaseName:  name,
-			Namespace: namespace,
-		},
-	}
-
-	opts := ApplyOptions{
-		AlertRulesFileName:       alertRulesFileName,
-		AlertRulesYAML:           alertRulesYAML,
-		PrometheusConfigFileName: configFileName,
-		PrometheusConfigPath:     configPath,
-		PrometheusConfigYAML:     prometheusConfigYAML,
-	}
-	err := sut.ApplyResources(ctx, client, opts)
-	require.NoError(t, err)
-
-	t.Run("It should create all resources", func(t *testing.T) {
-		verifyConfigMapIsPresent(ctx, t, client)
-		verifyDeploymentIsPreset(ctx, t, client)
-		verifyRoleIsPresent(ctx, t, client)
-		verifyRoleBindingIsPresent(ctx, t, client)
-		verifyServiceAccountIsPresent(ctx, t, client)
-		verifyNetworkPolicy(ctx, t, client)
-		verifyService(ctx, t, client)
-	})
-
-	err = sut.DeleteResources(ctx, client)
-	require.NoError(t, err)
-
-	t.Run("Deployment should not be present", func(t *testing.T) {
-		var deps appsv1.DeploymentList
-
-		require.NoError(t, client.List(ctx, &deps))
-		require.Len(t, deps.Items, 0)
-	})
-
-	t.Run("Configmap should not be present", func(t *testing.T) {
-		var cms corev1.ConfigMapList
-
-		require.NoError(t, client.List(ctx, &cms))
-		require.Len(t, cms.Items, 0)
-	})
-	t.Run("role should not be present", func(t *testing.T) {
-		var roles rbacv1.RoleList
-
-		require.NoError(t, client.List(ctx, &roles))
-		require.Len(t, roles.Items, 0)
-	})
-	t.Run("role binding should not be present", func(t *testing.T) {
-		var roleBindings rbacv1.RoleBindingList
-
-		require.NoError(t, client.List(ctx, &roleBindings))
-		require.Len(t, roleBindings.Items, 0)
-	})
-	t.Run("network policy should not be present", func(t *testing.T) {
-		var nwPs networkingv1.NetworkPolicyList
-
-		require.NoError(t, client.List(ctx, &nwPs))
-		require.Len(t, nwPs.Items, 0)
-	})
-	t.Run("service should not be present", func(t *testing.T) {
-		var svcList corev1.ServiceList
-
-		require.NoError(t, client.List(ctx, &svcList))
-		require.Len(t, svcList.Items, 0)
-	})
-}
-
 func TestApplySelfMonitorResources(t *testing.T) {
 	var objects []client.Object
 
@@ -147,194 +70,39 @@ func TestApplySelfMonitorResources(t *testing.T) {
 	require.Equal(t, string(goldenFileBytes), string(bytes))
 }
 
-func verifyDeploymentIsPreset(ctx context.Context, t *testing.T, client client.Client) {
-	var deps appsv1.DeploymentList
-
-	require.NoError(t, client.List(ctx, &deps))
-	require.Len(t, deps.Items, 1)
-
-	dep := deps.Items[0]
-	require.Equal(t, name, dep.Name)
-	require.Equal(t, namespace, dep.Namespace)
-
-	// labels
-	require.Equal(t, map[string]string{
-		"app.kubernetes.io/name": name,
-	}, dep.Labels, "must have expected deployment labels")
-	require.Equal(t, map[string]string{
-		"app.kubernetes.io/name": name,
-	}, dep.Spec.Selector.MatchLabels, "must have expected deployment selector labels")
-	require.Equal(t, map[string]string{
-		"app.kubernetes.io/name":  name,
-		"sidecar.istio.io/inject": "false",
-	}, dep.Spec.Template.ObjectMeta.Labels, "must have expected pod labels")
-
-	// annotations
-	podAnnotations := dep.Spec.Template.ObjectMeta.Annotations
-	require.NotEmpty(t, podAnnotations["checksum/Config"])
-
-	// self-monitor container
-	require.Len(t, dep.Spec.Template.Spec.Containers, 1)
-	container := dep.Spec.Template.Spec.Containers[0]
-
-	require.NotNil(t, container.LivenessProbe, "liveness probe must be defined")
-	require.NotNil(t, container.ReadinessProbe, "readiness probe must be defined")
-	resources := container.Resources
-	require.True(t, cpuRequest.Equal(*resources.Requests.Cpu()), "cpu requests should be defined")
-	require.True(t, memoryRequest.Equal(*resources.Requests.Memory()), "memory requests should be defined")
-	require.True(t, memoryLimit.Equal(*resources.Limits.Memory()), "memory limit should be defined")
-
-	// security contexts
-	podSecurityContext := dep.Spec.Template.Spec.SecurityContext
-	require.NotNil(t, podSecurityContext, "pod security context must be defined")
-	require.NotZero(t, podSecurityContext.RunAsUser, "must run as non-root")
-	require.True(t, *podSecurityContext.RunAsNonRoot, "must run as non-root")
-
-	containerSecurityContext := container.SecurityContext
-	require.NotNil(t, containerSecurityContext, "container security context must be defined")
-	require.NotZero(t, containerSecurityContext.RunAsUser, "must run as non-root")
-	require.True(t, *containerSecurityContext.RunAsNonRoot, "must run as non-root")
-	require.False(t, *containerSecurityContext.Privileged, "must not be privileged")
-	require.False(t, *containerSecurityContext.AllowPrivilegeEscalation, "must not escalate to privileged")
-	require.True(t, *containerSecurityContext.ReadOnlyRootFilesystem, "must use readonly fs")
-
-	// command args
-
-	expectedArgs := []string{
-		"--storage.tsdb.retention.time=" + retentionTime,
-		"--storage.tsdb.retention.size=" + retentionSize,
-		"--config.file=" + configPath + configFileName,
-		"--storage.tsdb.path=" + storagePath,
-		"--log.format=" + logFormat,
-	}
-	require.Equal(t, container.Args, expectedArgs)
-}
-
-func verifyConfigMapIsPresent(ctx context.Context, t *testing.T, client client.Client) {
-	var cms corev1.ConfigMapList
-
-	require.NoError(t, client.List(ctx, &cms))
-	require.Len(t, cms.Items, 1)
-
-	cm := cms.Items[0]
-	require.Equal(t, name, cm.Name)
-	require.Equal(t, namespace, cm.Namespace)
-	require.Equal(t, map[string]string{
-		"app.kubernetes.io/name": name,
-	}, cm.Labels)
-	require.Equal(t, prometheusConfigYAML, cm.Data[configFileName])
-	require.Equal(t, alertRulesYAML, cm.Data[alertRulesFileName])
-}
-
-func verifyRoleIsPresent(ctx context.Context, t *testing.T, client client.Client) {
-	var rs rbacv1.RoleList
-
-	require.NoError(t, client.List(ctx, &rs))
-	require.Len(t, rs.Items, 1)
+func TestDeleteSelfMonitorResources(t *testing.T) {
+	var created []client.Object
 
-	r := rs.Items[0]
-	expectedRules := []rbacv1.PolicyRule{
-		{
-			APIGroups: []string{""},
-			Resources: []string{"services", "endpoints", "pods"},
-			Verbs:     []string{"get", "list", "watch"},
+	fakeClient := fake.NewClientBuilder().WithInterceptorFuncs(interceptor.Funcs{
+		Create: func(ctx context.Context, c client.WithWatch, obj client.Object, _ ...client.CreateOption) error {
+			created = append(created, obj)
+			return c.Create(ctx, obj)
 		},
-	}
-
-	require.NotNil(t, r)
-	require.Equal(t, r.Name, name)
-	require.Equal(t, map[string]string{
-		"app.kubernetes.io/name": name,
-	}, r.Labels)
-	require.Equal(t, r.Rules, expectedRules)
-}
-
-func verifyRoleBindingIsPresent(ctx context.Context, t *testing.T, client client.Client) {
-	var rbs rbacv1.RoleBindingList
-
-	require.NoError(t, client.List(ctx, &rbs))
-	require.Len(t, rbs.Items, 1)
-
-	rb := rbs.Items[0]
-	require.NotNil(t, rb)
-	require.Equal(t, name, rb.Name)
-	require.Equal(t, namespace, rb.Namespace)
-	require.Equal(t, map[string]string{
-		"app.kubernetes.io/name": name,
-	}, rb.Labels)
-	require.Equal(t, name, rb.RoleRef.Name)
-}
-
-func verifyServiceAccountIsPresent(ctx context.Context, t *testing.T, client client.Client) {
-	var sas corev1.ServiceAccountList
-
-	require.NoError(t, client.List(ctx, &sas))
-	require.Len(t, sas.Items, 1)
-
-	sa := sas.Items[0]
-	require.NotNil(t, sa)
-	require.Equal(t, name, sa.Name)
-	require.Equal(t, namespace, sa.Namespace)
-	require.Equal(t, map[string]string{
-		"app.kubernetes.io/name": name,
-	}, sa.Labels)
-}
-
-func verifyNetworkPolicy(ctx context.Context, t *testing.T, client client.Client) {
-	var nps networkingv1.NetworkPolicyList
-
-	require.NoError(t, client.List(ctx, &nps))
-	require.Len(t, nps.Items, 1)
-
-	np := nps.Items[0]
-	require.NotNil(t, np)
-	require.Equal(t, name, np.Name)
-	require.Equal(t, namespace, np.Namespace)
-	require.Equal(t, map[string]string{
-		"app.kubernetes.io/name": name,
-	}, np.Labels)
-	require.Equal(t, map[string]string{
-		"app.kubernetes.io/name": name,
-	}, np.Spec.PodSelector.MatchLabels)
-	require.Equal(t, []networkingv1.PolicyType{networkingv1.PolicyTypeIngress, networkingv1.PolicyTypeEgress}, np.Spec.PolicyTypes)
-	require.Len(t, np.Spec.Ingress, 1)
-	require.Len(t, np.Spec.Ingress[0].From, 2)
-	require.Equal(t, "0.0.0.0/0", np.Spec.Ingress[0].From[0].IPBlock.CIDR)
-	require.Equal(t, "::/0", np.Spec.Ingress[0].From[1].IPBlock.CIDR)
-	require.Len(t, np.Spec.Ingress[0].Ports, 1)
+	}).Build()
 
-	tcpProtocol := corev1.ProtocolTCP
-	port9090 := intstr.FromInt32(9090)
-	require.Equal(t, []networkingv1.NetworkPolicyPort{
-		{
-			Protocol: &tcpProtocol,
-			Port:     &port9090,
+	sut := ApplierDeleter{
+		Config: Config{
+			BaseName:  name,
+			Namespace: namespace,
 		},
-	}, np.Spec.Ingress[0].Ports)
-	require.Len(t, np.Spec.Egress, 1)
-	require.Len(t, np.Spec.Egress[0].To, 2)
-	require.Equal(t, "0.0.0.0/0", np.Spec.Egress[0].To[0].IPBlock.CIDR)
-	require.Equal(t, "::/0", np.Spec.Egress[0].To[1].IPBlock.CIDR)
-}
-
-func verifyService(ctx context.Context, t *testing.T, client client.Client) {
-	var svcList corev1.ServiceList
-
-	require.NoError(t, client.List(ctx, &svcList))
-	require.Len(t, svcList.Items, 1)
+	}
 
-	svc := svcList.Items[0]
-	require.NotNil(t, svc)
-	require.Equal(t, name, svc.Name)
-	require.Equal(t, namespace, svc.Namespace)
+	opts := ApplyOptions{
+		AlertRulesFileName:       alertRulesFileName,
+		AlertRulesYAML:           alertRulesYAML,
+		PrometheusConfigFileName: configFileName,
+		PrometheusConfigPath:     configPath,
+		PrometheusConfigYAML:     prometheusConfigYAML,
+	}
+	err := sut.ApplyResources(context.Background(), fakeClient, opts)
+	require.NoError(t, err)
 
-	require.Equal(t, corev1.ServiceTypeClusterIP, svc.Spec.Type)
-	require.Len(t, svc.Spec.Ports, 1)
+	err = sut.DeleteResources(context.Background(), fakeClient)
+	require.NoError(t, err)
 
-	require.Equal(t, corev1.ServicePort{
-		Name:       "http",
-		Protocol:   corev1.ProtocolTCP,
-		Port:       9090,
-		TargetPort: intstr.FromInt32(9090),
-	}, svc.Spec.Ports[0])
+	for i := range created {
+		// a Get on a deleted object must return a NotFound error
+		err = fakeClient.Get(context.Background(), client.ObjectKeyFromObject(created[i]), created[i])
+		require.True(t, apierrors.IsNotFound(err), "want not found, got %v: %#v", err, created[i])
+	}
 }

From 6018d89c1112437ec160e9e8bd70e9407e3701d7 Mon Sep 17 00:00:00 2001
From: Jeffrey Limnardy <jeffrey.limnardy@sap.com>
Date: Mon, 16 Dec 2024 12:07:21 +0100
Subject: [PATCH 5/5] regenerate self-monitor golden file

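Regenerate testdata/self-monitor.yaml; the only change is that the
self-monitor container's resources.limits no longer contains a cpu
entry.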
---
 internal/resources/selfmonitor/testdata/self-monitor.yaml | 1 -
 1 file changed, 1 deletion(-)

diff --git a/internal/resources/selfmonitor/testdata/self-monitor.yaml b/internal/resources/selfmonitor/testdata/self-monitor.yaml
index 8eabc0a9b..e2ce8cd54 100644
--- a/internal/resources/selfmonitor/testdata/self-monitor.yaml
+++ b/internal/resources/selfmonitor/testdata/self-monitor.yaml
@@ -94,7 +94,6 @@ spec:
           timeoutSeconds: 3
         resources:
           limits:
-            cpu: 200m
             memory: 180Mi
           requests:
             cpu: 10m