Defined notes and rules for BSI APP.4.4.A19 #12155

Open · wants to merge 5 commits into base: master

@@ -0,0 +1,58 @@
documentation_complete: true

title: 'Ensure Deployments have either Anti-Affinity Rules or Topology Spread Constraints'

description: |-
    Distributing Kubernetes pods across nodes and availability zones using pod topology spread
    constraints and anti-affinity rules enhances high availability, fault tolerance, performance,
    and security by balancing workloads, reducing single points of failure, and ensuring compliance
    and data protection.

    There might be deployments that do not require high availability or spreading across nodes.
    To limit the number of false positives, this rule only checks deployments with a replica count
    of more than one. For deployments with a single replica, neither anti-affinity rules nor
    topology spread constraints provide any value.

    To exclude further deployments from this rule, set the variable
    <tt>var_deployments_without_high_availability</tt> to a regular expression matching their
    names. Deployments whose names match are ignored in all namespaces.

rationale: |-
    Distributing Kubernetes pods across nodes and availability zones using pod topology spread
    constraints and anti-affinity rules is essential for enhancing high availability, fault
    tolerance, and security.
    This approach ensures that a single node or AZ failure does not lead to total application
    downtime, as workloads are balanced and resources are efficiently utilized.

identifiers:
    cce@ocp4: CCE-89351-1

severity: medium

{{% set jqfilter = '[ .items[] | select(.metadata.name | test("{{.var_deployments_without_high_availability}}"; "") | not) | select (.spec.replicas == 0 or .spec.replicas == 1 | not) | select(.spec.template.spec.affinity.podAntiAffinity == null and .spec.template.spec.topologySpreadConstraints == null) | .metadata.namespace + "/" + .metadata.name ]' %}}

ocil_clause: 'Deployments with neither anti-affinity rules nor topology spread constraints exist'

ocil: |-
    Run the following command to determine the anti-affinity rules and topology spread constraints
    of all deployments:
    <pre>$ oc get deployments -A -o json | jq '{{{ jqfilter }}}'</pre>
    Make sure that either suitable anti-affinity rules or topology spread constraints are
    configured for all workloads that require high availability.

warnings:
- general: |-
    {{{ openshift_filtered_cluster_setting({'/apis/apps/v1/deployments?limit=500': jqfilter}) | indent(4) }}}

template:
    name: yamlfile_value
    vars:
        ocp_data: "true"
        filepath: |-
            {{{ openshift_filtered_path('/apis/apps/v1/deployments?limit=500', jqfilter) }}}
        yamlpath: "[:]"
        check_existence: "none_exist"
        entity_check: "all"
        values:
        - value: "(.*?)"
          operation: "pattern match"
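
For reference, a minimal sketch of a Deployment that would satisfy this check. The names, labels,
and image below are illustrative only; either the topologySpreadConstraints block or the
podAntiAffinity block alone is enough for the rule to pass:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: example-app            # illustrative name
spec:
  replicas: 3
  selector:
    matchLabels:
      app: example-app
  template:
    metadata:
      labels:
        app: example-app
    spec:
      # Spread replicas across availability zones.
      topologySpreadConstraints:
      - maxSkew: 1
        topologyKey: topology.kubernetes.io/zone
        whenUnsatisfiable: ScheduleAnyway
        labelSelector:
          matchLabels:
            app: example-app
      # Prefer not to co-locate replicas on the same node.
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              topologyKey: kubernetes.io/hostname
              labelSelector:
                matchLabels:
                  app: example-app
      containers:
      - name: app
        image: registry.example.com/app:latest   # placeholder image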

Large diffs are not rendered by default.

@@ -0,0 +1,219 @@
#!/bin/bash

# remediation = none
yum install -y jq

kube_apipath="/kubernetes-api-resources"

mkdir -p "$kube_apipath/apis/apps/v1/deployments"

deployment_apipath="/apis/apps/v1/deployments?limit=500"
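
# Mock API response: a single Deployment (argocd/argocd-server, 3 replicas) that defines
# neither podAntiAffinity nor topologySpreadConstraints.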

cat <<EOF > "$kube_apipath$deployment_apipath"
{
"apiVersion": "v1",
"items": [
{
"apiVersion": "apps/v1",
"kind": "Deployment",
"metadata": {
"annotations": {
"deployment.kubernetes.io/revision": "143"
},
"creationTimestamp": "2022-04-19T12:58:24Z",
"generation": 143,
"labels": {
"app.kubernetes.io/component": "server",
"app.kubernetes.io/managed-by": "argocd",
"app.kubernetes.io/name": "argocd-server",
"app.kubernetes.io/part-of": "argocd"
},
"name": "argocd-server",
"namespace": "argocd",
"ownerReferences": [
{
"apiVersion": "argoproj.io/v1alpha1",
"blockOwnerDeletion": true,
"controller": true,
"kind": "ArgoCD",
"name": "argocd",
"uid": "366e4fb4-f3b1-4f1e-b319-a886aaae928a"
}
],
"resourceVersion": "1357676941",
"uid": "4572963f-3e9d-4260-a8d7-bda9e557e093"
},
"spec": {
"progressDeadlineSeconds": 600,
"replicas": 3,
"revisionHistoryLimit": 10,
"selector": {
"matchLabels": {
"app.kubernetes.io/name": "argocd-server"
}
},
"strategy": {
"rollingUpdate": {
"maxSurge": "25%",
"maxUnavailable": "25%"
},
"type": "RollingUpdate"
},
"template": {
"metadata": {
"creationTimestamp": null,
"labels": {
"app.kubernetes.io/name": "argocd-server",
"image.upgraded": "11082023-014723-UTC"
}
},
"spec": {
"containers": [
{
"command": [
"argocd-server",
"--insecure",
"--staticassets",
"/shared/app",
"--dex-server",
"https://argocd-dex-server.argocd.svc.cluster.local:5556",
"--repo-server",
"argocd-repo-server.argocd.svc.cluster.local:8081",
"--redis",
"argocd-redis.argocd.svc.cluster.local:6379",
"--loglevel",
"info",
"--logformat",
"text"
],
"image": "registry.redhat.io/openshift-gitops-1/argocd-rhel8@sha256:f77594bc053be144b33ff9603ee9675c7e82946ec0dbfb04d8f942c8d73155da",
"imagePullPolicy": "Always",
"livenessProbe": {
"failureThreshold": 3,
"httpGet": {
"path": "/healthz",
"port": 8080,
"scheme": "HTTP"
},
"initialDelaySeconds": 3,
"periodSeconds": 30,
"successThreshold": 1,
"timeoutSeconds": 1
},
"name": "argocd-server",
"ports": [
{
"containerPort": 8080,
"protocol": "TCP"
},
{
"containerPort": 8083,
"protocol": "TCP"
}
],
"readinessProbe": {
"failureThreshold": 3,
"httpGet": {
"path": "/healthz",
"port": 8080,
"scheme": "HTTP"
},
"initialDelaySeconds": 3,
"periodSeconds": 30,
"successThreshold": 1,
"timeoutSeconds": 1
},
"resources": {},
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "File",
"volumeMounts": [
{
"mountPath": "/app/config/ssh",
"name": "ssh-known-hosts"
},
{
"mountPath": "/app/config/tls",
"name": "tls-certs"
},
{
"mountPath": "/app/config/server/tls",
"name": "argocd-repo-server-tls"
},
{
"mountPath": "/app/config/server/tls/redis",
"name": "argocd-operator-redis-tls"
}
]
}
],
"dnsPolicy": "ClusterFirst",
"nodeSelector": {
"kubernetes.io/os": "linux",
"node-role.kubernetes.io/infra": ""
},
"restartPolicy": "Always",
"schedulerName": "default-scheduler",
"securityContext": {},
"serviceAccount": "argocd-argocd-server",
"serviceAccountName": "argocd-argocd-server",
"terminationGracePeriodSeconds": 30,
"tolerations": [
{
"effect": "NoSchedule",
"key": "node-role.kubernetes.io/infra",
"operator": "Exists"
}
],
"volumes": [
{
"configMap": {
"defaultMode": 420,
"name": "argocd-ssh-known-hosts-cm"
},
"name": "ssh-known-hosts"
},
{
"configMap": {
"defaultMode": 420,
"name": "argocd-tls-certs-cm"
},
"name": "tls-certs"
},
{
"name": "argocd-repo-server-tls",
"secret": {
"defaultMode": 420,
"optional": true,
"secretName": "argocd-repo-server-tls"
}
},
{
"name": "argocd-operator-redis-tls",
"secret": {
"defaultMode": 420,
"optional": true,
"secretName": "argocd-operator-redis-tls"
}
}
]
}
}
}
}
],
"kind": "List",
"metadata": {
"resourceVersion": ""
}
}
EOF


jq_filter='[ .items[] | select(.metadata.name | test("{{.var_deployments_without_high_availability}}"; "") | not) | select (.spec.replicas == 0 or .spec.replicas == 1 | not) | select(.spec.template.spec.affinity.podAntiAffinity == null and .spec.template.spec.topologySpreadConstraints == null) | .metadata.namespace + "/" + .metadata.name ]'
jq_filter_with_var='[ .items[] | select(.metadata.name | test("^argocd-server$"; "") | not) | select (.spec.replicas == 0 or .spec.replicas == 1 | not) | select(.spec.template.spec.affinity.podAntiAffinity == null and .spec.template.spec.topologySpreadConstraints == null) | .metadata.namespace + "/" + .metadata.name ]'
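# Note: in jq, "|" binds more loosely than "or", so the replica test above parses as
# (.spec.replicas == 0 or .spec.replicas == 1) | not, i.e. only deployments whose replica
# count is neither 0 nor 1 (more than one replica) are kept.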

# Compute the filtered file path; this is the file the scan will actually read
filteredpath="$kube_apipath$deployment_apipath#$(echo -n "$deployment_apipath$jq_filter" | sha256sum | awk '{print $1}')"

# populate filtered path with jq-filtered result
jq "$jq_filter_with_var" "$kube_apipath$deployment_apipath" > "$filteredpath"