From 291074eecb084d72b3b925b66be9f1876acb3b44 Mon Sep 17 00:00:00 2001
From: Deepanshu Bhatia
Date: Sun, 1 Sep 2024 00:39:02 +0530
Subject: [PATCH 01/13] Add support for CIS Openshift 1.6

---
 cfg/config.yaml              |    7 +
 cfg/rh-1.6/config.yaml       |    2 +
 cfg/rh-1.6/controlplane.yaml |   67 ++
 cfg/rh-1.6/etcd.yaml         |  183 +++++
 cfg/rh-1.6/master.yaml       | 1445 ++++++++++++++++++++++++++++++++++
 cfg/rh-1.6/node.yaml         |  429 ++++++++++
 cfg/rh-1.6/policies.yaml     |  287 +++++++
 cmd/util.go                  |    4 +-
 cmd/util_test.go             |    8 +
 docs/architecture.md         |   44 +-
 docs/platforms.md            |    5 +-
 docs/running.md              |    5 +-
 12 files changed, 2459 insertions(+), 27 deletions(-)
 create mode 100644 cfg/rh-1.6/config.yaml
 create mode 100644 cfg/rh-1.6/controlplane.yaml
 create mode 100644 cfg/rh-1.6/etcd.yaml
 create mode 100644 cfg/rh-1.6/master.yaml
 create mode 100644 cfg/rh-1.6/node.yaml
 create mode 100644 cfg/rh-1.6/policies.yaml

diff --git a/cfg/config.yaml b/cfg/config.yaml
index d5d170bbd..c0f22d021 100644
--- a/cfg/config.yaml
+++ b/cfg/config.yaml
@@ -292,6 +292,7 @@ version_mapping:
   "ocp-3.10": "rh-0.7"
   "ocp-3.11": "rh-0.7"
   "ocp-4.0": "rh-1.0"
+  "ocp-4.15": "rh-1.6"
   "aks-1.0": "aks-1.0"
   "ack-1.0": "ack-1.0"
   "cis-1.6-k3s": "cis-1.6-k3s"
@@ -427,6 +428,12 @@ target_mapping:
     - "controlplane"
     - "policies"
     - "etcd"
+  "rh-1.6":
+    - "master"
+    - "node"
+    - "controlplane"
+    - "policies"
+    - "etcd"
   "eks-stig-kubernetes-v1r6":
     - "node"
     - "controlplane"
diff --git a/cfg/rh-1.6/config.yaml b/cfg/rh-1.6/config.yaml
new file mode 100644
index 000000000..b7839455a
--- /dev/null
+++ b/cfg/rh-1.6/config.yaml
@@ -0,0 +1,2 @@
+---
+## Version-specific settings that override the values in cfg/config.yaml
diff --git a/cfg/rh-1.6/controlplane.yaml b/cfg/rh-1.6/controlplane.yaml
new file mode 100644
index 000000000..2e34bc6aa
--- /dev/null
+++ b/cfg/rh-1.6/controlplane.yaml
@@ -0,0 +1,67 @@
+---
+controls:
+version: rh-1.6
+id: 3
+text: "Control Plane Configuration"
+type: "controlplane"
+groups:
+  - id: 3.1
+    text: "Authentication and Authorization"
+    checks:
+      - id: 3.1.1
+        text: "Client certificate authentication should not be used for users (Manual)"
+        audit: |
+          # To verify user authentication is enabled
+          oc describe authentication
+          # To verify that an identity provider is configured
+          oc get oauth -o json | jq '.items[].spec.identityProviders'
+          # To verify that a custom cluster-admin user exists
+          oc get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].kind | grep cluster-admin | grep User
+          # To verify that kubeadmin has been removed, no results should be returned
+          oc get secrets kubeadmin -n kube-system
+        type: manual
+        remediation: |
+          Configure an identity provider for the OpenShift cluster; see "Understanding
+          identity provider configuration" in the OpenShift Container Platform 4.15
+          authentication documentation. Once an identity provider has been defined,
+          you can use RBAC to define and apply permissions.
+          After you define an identity provider and create a new cluster-admin user,
+          remove the kubeadmin user to improve cluster security.
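+          For example, once an alternative cluster-admin user has been created and
+          verified, the default kubeadmin user can be removed by deleting its secret
+          (this assumes the kubeadmin secret still exists, as checked in the audit above):
+            oc delete secrets kubeadmin -n kube-system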
+        scored: false
+
+  - id: 3.2
+    text: "Logging"
+    checks:
+      - id: 3.2.1
+        text: "Ensure that a minimal audit policy is created (Manual)"
+        audit: |
+          #View the audit log profile
+          oc get apiserver cluster -o json | jq .spec.audit.profile
+          #To verify kube apiserver audit config
+          oc get cm -n openshift-kube-apiserver config -o json | jq -r '.data."config.yaml"' | jq .apiServerArguments
+          #To verify openshift apiserver audit config
+          oc get cm -n openshift-apiserver config -o json | jq -r '.data."config.yaml"' | jq .apiServerArguments
+          #Review the audit policies of openshift apiserver
+          oc get cm -n openshift-apiserver audit -o json | jq -r '.data."policy.yaml"'
+          #Review the audit policies of kube apiserver
+          oc get cm -n openshift-kube-apiserver kube-apiserver-audit-policies -o json | jq -r '.data."policy.yaml"'
+          #To view kube apiserver log files
+          oc adm node-logs --role=master --path=kube-apiserver/
+          #To view openshift apiserver log files
+          oc adm node-logs --role=master --path=openshift-apiserver/
+        type: manual
+        remediation: |
+          No remediation required.
+        scored: false
+
+      - id: 3.2.2
+        text: "Ensure that the audit policy covers key security concerns (Manual)"
+        audit: |
+          #To verify kube apiserver audit config
+          oc get configmap -n openshift-kube-apiserver kube-apiserver-audit-policies -ojson | jq -r '.data."policy.yaml"'
+          #To verify openshift apiserver audit config
+          oc get configmap -n openshift-apiserver audit -o json | jq -r '.data."policy.yaml"'
+        type: manual
+        remediation: |
+          Update the audit log policy profile to use WriteRequestBodies.
+        scored: false
diff --git a/cfg/rh-1.6/etcd.yaml b/cfg/rh-1.6/etcd.yaml
new file mode 100644
index 000000000..22ad71b29
--- /dev/null
+++ b/cfg/rh-1.6/etcd.yaml
@@ -0,0 +1,183 @@
+---
+controls:
+version: rh-1.6
+id: 2
+text: "Etcd Node Configuration"
+type: "etcd"
+groups:
+  - id: 2
+    text: "Etcd Node Configuration Files"
+    checks:
+      - id: 2.1
+        text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Manual)"
+        audit: |
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+          # Get the pod name in the openshift-etcd namespace
+          POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Extract the flag values from the etcd process command line
+            oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--cert-file=[^ ]*\).*/\1/'
+            oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--key-file=[^ ]*\).*/\1/'
+          fi
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "file"
+              compare:
+                op: regex
+                value: '\/etc\/kubernetes\/static-pod-certs\/secrets\/etcd-all-serving\/etcd-serving-.*\.(?:crt|key)'
+        remediation: |
+          OpenShift does not use the etcd-certfile or etcd-keyfile flags.
+          Certificates for etcd are managed by the etcd cluster operator.
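+          If you wish to review the serving certificates the operator has issued, the
+          corresponding secrets can be listed with, for example (secret names may vary
+          by release):
+            oc get secrets -n openshift-etcd | grep etcd-serving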
+        scored: false
+
+      - id: 2.2
+        text: "Ensure that the --client-cert-auth argument is set to true (Manual)"
+        audit: |
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+          # Get the pod name in the openshift-etcd namespace
+          POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Extract the flag value from the etcd process command line
+            oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--client-cert-auth=[^ ]*\).*/\1/'
+          fi
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "--client-cert-auth"
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          This setting is managed by the cluster etcd operator. No remediation required.
+        scored: false
+
+      - id: 2.3
+        text: "Ensure that the --auto-tls argument is not set to true (Manual)"
+        audit: |
+          # Returns 0 if found, 1 if not found
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+          # Get the pod name in the openshift-etcd namespace
+          POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Check the etcd process command line for the flag
+            oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | grep -- --auto-tls=true 2>/dev/null ; echo exit_code=$?
+          fi
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "exit_code"
+              compare:
+                op: eq
+                value: "1"
+        remediation: |
+          This setting is managed by the cluster etcd operator. No remediation required.
+        scored: false
+
+      - id: 2.4
+        text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Manual)"
+        audit: |
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+          # Get the pod name in the openshift-etcd namespace
+          POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Extract the flag values from the etcd process command line
+            oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-cert-file=[^ ]*\).*/\1/'
+            oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-key-file=[^ ]*\).*/\1/'
+          fi
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "file"
+              compare:
+                op: regex
+                value: '\/etc\/kubernetes\/static-pod-certs\/secrets\/etcd-all-peer\/etcd-peer-.*\.(?:crt|key)'
+        remediation: |
+          None. This configuration is managed by the etcd operator.
+        scored: false
+
+      - id: 2.5
+        text: "Ensure that the --peer-client-cert-auth argument is set to true (Manual)"
+        audit: |
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+          # Get the pod name in the openshift-etcd namespace
+          POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Extract the flag value from the etcd process command line
+            oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-client-cert-auth=[^ ]*\).*/\1/'
+          fi
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "--peer-client-cert-auth"
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          This setting is managed by the cluster etcd operator. No remediation required.
+        scored: false
+
+      - id: 2.6
+        text: "Ensure that the --peer-auto-tls argument is not set to true (Manual)"
+        audit: |
+          # Returns 0 if found, 1 if not found
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+          # Get the pod name in the openshift-etcd namespace
+          POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Check the etcd process command line for the flag
+            oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | grep -- --peer-auto-tls=true 2>/dev/null ; echo exit_code=$?
+          fi
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "exit_code"
+              compare:
+                op: eq
+                value: "1"
+        remediation: |
+          This setting is managed by the cluster etcd operator. No remediation required.
+        scored: false
+
+      - id: 2.7
+        text: "Ensure that a unique Certificate Authority is used for etcd (Manual)"
+        audit: |
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+          # Get the pod name in the openshift-etcd namespace
+          POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Extract the flag values from the etcd process command line
+            oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--trusted-ca-file=[^ ]*\).*/\1/'
+            oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-trusted-ca-file=[^ ]*\).*/\1/'
+          fi
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "file"
+              compare:
+                op: regex
+                value: '\/etc\/kubernetes\/static-pod-certs\/configmaps\/etcd-(?:serving|peer-client)-ca\/ca-bundle\.(?:crt|key)'
+        remediation: |
+          None required. Certificates for etcd are managed by the OpenShift cluster etcd operator.
+        scored: false
diff --git a/cfg/rh-1.6/master.yaml b/cfg/rh-1.6/master.yaml
new file mode 100644
index 000000000..f3f7d01e0
--- /dev/null
+++ b/cfg/rh-1.6/master.yaml
@@ -0,0 +1,1445 @@
+---
+controls:
+version: rh-1.6
+id: 1
+text: "Master Node Security Configuration"
+type: "master"
+groups:
+  - id: 1.1
+    text: "Master Node Configuration Files"
+    checks:
+      - id: 1.1.1
+        text: "Ensure that the API server pod specification file permissions are set to 600 or more restrictive (Manual)"
+        audit: |
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+
+          # Get the pod name in the openshift-kube-apiserver namespace
+          POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Execute the stat command
+            oc exec -n openshift-kube-apiserver "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-apiserver-pod.yaml
+          fi
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          No remediation required; file permissions are managed by the operator.
+        scored: false
+
+      - id: 1.1.2
+        text: "Ensure that the API server pod specification file ownership is set to root:root (Manual)"
+        audit: |
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+
+          # Get the pod name in the openshift-kube-apiserver namespace
+          POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Execute the stat command
+            oc exec -n openshift-kube-apiserver "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/kube-apiserver-pod.yaml
+          fi
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "root:root"
+        remediation: |
+          No remediation required; file permissions are managed by the operator.
+        scored: false
+
+      - id: 1.1.3
+        text: "Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive (Manual)"
+        audit: |
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+
+          # Get the pod name in the openshift-kube-controller-manager namespace
+          POD_NAME=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Execute the stat command
+            oc exec -n openshift-kube-controller-manager "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-controller-manager-pod.yaml
+          fi
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          No remediation required; file permissions are managed by the operator.
+        scored: false
+
+      - id: 1.1.4
+        text: "Ensure that the controller manager pod specification file ownership is set to root:root (Manual)"
+        audit: |
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+
+          # Get the pod name in the openshift-kube-controller-manager namespace
+          POD_NAME=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Execute the stat command
+            oc exec -n openshift-kube-controller-manager "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/kube-controller-manager-pod.yaml
+          fi
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "root:root"
+        remediation: |
+          No remediation required; file permissions are managed by the operator.
+        scored: false
+
+      - id: 1.1.5
+        text: "Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive (Manual)"
+        audit: |
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+
+          # Get the pod name in the openshift-kube-scheduler namespace
+          POD_NAME=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Execute the stat command
+            oc exec -n openshift-kube-scheduler "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-scheduler-pod.yaml
+          fi
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          No remediation required; file permissions are managed by the operator.
+        scored: false
+
+      - id: 1.1.6
+        text: "Ensure that the scheduler pod specification file ownership is set to root:root (Manual)"
+        audit: |
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+
+          # Get the pod name in the openshift-kube-scheduler namespace
+          POD_NAME=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Execute the stat command
+            oc exec -n openshift-kube-scheduler "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/kube-scheduler-pod.yaml
+          fi
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "root:root"
+        remediation: |
+          No remediation required; file permissions are managed by the operator.
+        scored: false
+
+      - id: 1.1.7
+        text: "Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Manual)"
+        audit: |
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+
+          # Get the pod name in the openshift-etcd namespace
+          POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Execute the stat command
+            oc rsh -n openshift-etcd "$POD_NAME" stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/manifests/etcd-pod.yaml
+          fi
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          No remediation required; file permissions are managed by the operator.
+        scored: false
+
+      - id: 1.1.8
+        text: "Ensure that the etcd pod specification file ownership is set to root:root (Manual)"
+        audit: |
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+
+          # Get the pod name in the openshift-etcd namespace
+          POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Execute the stat command
+            oc rsh -n openshift-etcd "$POD_NAME" stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/manifests/etcd-pod.yaml
+          fi
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "root:root"
+        remediation: |
+          No remediation required; file permissions are managed by the operator.
+        scored: false
+
+      - id: 1.1.9
+        text: "Ensure that the Container Network Interface file permissions are set to 644 or more restrictive (Manual)"
+        audit: |
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+          # For CNI multus
+          # Get the pod name in the openshift-multus namespace
+          POD_NAME=$(oc get pods -n openshift-multus -l app=multus --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Execute the stat command
+            oc exec -n openshift-multus "$POD_NAME" -- /bin/bash -c "stat -c \"$POD_NAME %n permissions=%a\" /host/etc/cni/net.d/*.conf" 2>/dev/null
+            oc exec -n openshift-multus "$POD_NAME" -- /bin/bash -c "stat -c \"$POD_NAME %n permissions=%a\" /host/var/run/multus/cni/net.d/*.conf" 2>/dev/null
+          fi
+          # For SDN pods
+          POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Execute the stat command
+            oc exec -n openshift-sdn "$POD_NAME" -- find /var/lib/cni/networks/openshift-sdn -type f -exec stat -c "$POD_NAME %n permissions=%a" {} \; 2>/dev/null
+            oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openshift-sdn -type f -exec stat -c "$POD_NAME %n permissions=%a" {} \; 2>/dev/null
+          fi
+
+          # For OVS pods
+          POD_NAME=$(oc get pods -n openshift-sdn -l app=ovs --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Execute the stat command
+            oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openvswitch -type f -exec stat -c "$POD_NAME %n permissions=%a" {} \; 2>/dev/null
+            oc exec -n openshift-sdn "$POD_NAME" -- find /etc/openvswitch -type f -exec stat -c "$POD_NAME %n permissions=%a" {} \; 2>/dev/null
+            oc exec -n openshift-sdn "$POD_NAME" -- find /run/openvswitch -type f -exec stat -c "$POD_NAME %n permissions=%a" {} \; 2>/dev/null
+          fi
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          No remediation required; file permissions are managed by the operator.
+        scored: false
+
+      - id: 1.1.10
+        text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)"
+        audit: |
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+          # For CNI multus
+          # Get the pod name in the openshift-multus namespace
+          POD_NAME=$(oc get pods -n openshift-multus -l app=multus --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Execute the stat command
+            oc exec -n openshift-multus "$POD_NAME" -- /bin/bash -c "stat -c '$POD_NAME %n %U:%G' /host/etc/cni/net.d/*.conf" 2>/dev/null
+            oc exec -n openshift-multus "$POD_NAME" -- /bin/bash -c "stat -c '$POD_NAME %n %U:%G' /host/var/run/multus/cni/net.d/*.conf" 2>/dev/null
+          fi
+          # For SDN pods
+          POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Execute the stat command
+            oc exec -n openshift-sdn "$POD_NAME" -- find /var/lib/cni/networks/openshift-sdn -type f -exec stat -c "$POD_NAME %n %U:%G" {} \; 2>/dev/null
+            oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openshift-sdn -type f -exec stat -c "$POD_NAME %n %U:%G" {} \; 2>/dev/null
+          fi
+          # For OVS pods in 4.5
+          POD_NAME=$(oc get pods -n openshift-sdn -l app=ovs --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Execute the stat command
+            oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openvswitch -type f -exec stat -c "$POD_NAME %n %U:%G" {} \; 2>/dev/null
+            oc exec -n openshift-sdn "$POD_NAME" -- find /etc/openvswitch -type f -exec stat -c "$POD_NAME %n %U:%G" {} \; 2>/dev/null
+            oc exec -n openshift-sdn "$POD_NAME" -- find /run/openvswitch -type f -exec stat -c "$POD_NAME %n %U:%G" {} \; 2>/dev/null
+          fi
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "root:root"
+        remediation: |
+          No remediation required; file permissions are managed by the operator.
+        scored: false
+
+      - id: 1.1.11
+        text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Manual)"
+        audit: |
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+
+          # Get the pod name in the openshift-etcd namespace
+          POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Execute the stat command
+            oc exec -n openshift-etcd "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /var/lib/etcd/member
+          fi
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "700"
+        remediation: |
+          No remediation required; file permissions are managed by the operator.
+        scored: false
+
+      - id: 1.1.12
+        text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Manual)"
+        audit: |
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+
+          # Get the pod name in the openshift-etcd namespace
+          POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Execute the stat command
+            oc exec -n openshift-etcd "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /var/lib/etcd/member
+          fi
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "root:root"
+        remediation: |
+          No remediation required; file permissions are managed by the operator.
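+          Note that on OpenShift 4 etcd runs as a static pod under root, so the data
+          directory is expected to be owned by root:root rather than etcd:etcd, which
+          is why the test above checks for root:root. This can be confirmed from a
+          control plane node with, for example:
+            oc debug node/<node_name> -- chroot /host stat -c "%U:%G" /var/lib/etcd/member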
+        scored: false
+
+      - id: 1.1.13
+        text: "Ensure that the admin.conf file permissions are set to 644 or more restrictive (Manual)"
+        audit: |
+          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
+          oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/kubernetes/kubeconfig 2> /dev/null
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          No remediation required; file permissions are managed by the operator.
+        scored: false
+
+      - id: 1.1.14
+        text: "Ensure that the admin.conf file ownership is set to root:root (Manual)"
+        audit: |
+          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
+          oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/kubernetes/kubeconfig 2> /dev/null
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "root:root"
+        remediation: |
+          No remediation required; file permissions are managed by the operator.
+        scored: false
+
+      - id: 1.1.15
+        text: "Ensure that the scheduler.conf file permissions are set to 644 or more restrictive (Manual)"
+        audit: |
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+
+          # Get the pod name in the openshift-kube-scheduler namespace
+          POD_NAME=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Execute the stat command
+            oc exec -n openshift-kube-scheduler "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/configmaps/scheduler-kubeconfig/kubeconfig
+          fi
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          No remediation required; file permissions are managed by the operator.
+        scored: false
+
+      - id: 1.1.16
+        text: "Ensure that the scheduler.conf file ownership is set to root:root (Manual)"
+        audit: |
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+
+          # Get the pod name in the openshift-kube-scheduler namespace
+          POD_NAME=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Execute the stat command
+            oc exec -n openshift-kube-scheduler "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/configmaps/scheduler-kubeconfig/kubeconfig
+          fi
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "root:root"
+        remediation: |
+          No remediation required; file permissions are managed by the operator.
+        scored: false
+
+      - id: 1.1.17
+        text: "Ensure that the controller-manager.conf file permissions are set to 644 or more restrictive (Manual)"
+        audit: |
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+
+          # Get the pod name in the openshift-kube-controller-manager namespace
+          POD_NAME=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Execute the stat command
+            oc exec -n openshift-kube-controller-manager "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig
+          fi
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          No remediation required; file permissions are managed by the operator.
+        scored: false
+
+      - id: 1.1.18
+        text: "Ensure that the controller-manager.conf file ownership is set to root:root (Manual)"
+        audit: |
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+
+          # Get the pod name in the openshift-kube-controller-manager namespace
+          POD_NAME=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Execute the stat command
+            oc exec -n openshift-kube-controller-manager "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig
+          fi
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "root:root"
+        remediation: |
+          No remediation required; file permissions are managed by the operator.
+        scored: false
+
+      - id: 1.1.19
+        text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Manual)"
+        audit: |
+          # Should return root:root for all files and directories
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+
+          # Get the pod name in the openshift-kube-apiserver namespace
+          POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Check ownership under static-pod-certs
+            oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type d -wholename '*/secrets*' -exec stat -c "$POD_NAME %n %U:%G" {} \;
+            oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets*' -exec stat -c "$POD_NAME %n %U:%G" {} \;
+            # Check ownership under static-pod-resources
+            oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-resources -type d -wholename '*/secrets*' -exec stat -c "$POD_NAME %n %U:%G" {} \;
+            oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-resources -type f -wholename '*/secrets*' -exec stat -c "$POD_NAME %n %U:%G" {} \;
+          fi
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "root:root"
+        remediation: |
+          No remediation required; file permissions are managed by the operator.
+        scored: false
+
+      - id: 1.1.20
+        text: "Ensure that the OpenShift PKI certificate file permissions are set to 644 or more restrictive (Manual)"
+        audit: |
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+
+          # Get the pod name in the openshift-kube-apiserver namespace
+          POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Execute the stat command
+            oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets/*.crt' -exec stat -c "$POD_NAME %n permissions=%a" {} \;
+          fi
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          No remediation required; file permissions are managed by the operator.
+        scored: false
+
+      - id: 1.1.21
+        text: "Ensure that the OpenShift PKI key file permissions are set to 600 (Manual)"
+        audit: |
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+
+          # Get the pod name in the openshift-kube-apiserver namespace
+          POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Execute the stat command
+            oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets/*.key' -exec stat -c "$POD_NAME %n permissions=%a" {} \;
+          fi
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          No remediation required; file permissions are managed by the operator.
+        scored: false
+
+  - id: 1.2
+    text: "API Server"
+    checks:
+      - id: 1.2.1
+        text: "Ensure that anonymous requests are authorized (Manual)"
+        audit: |
+          # To verify that userGroups include system:unauthenticated
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]?'
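+          # Note (assumption based on the 4.5/4.6 split used elsewhere in this file):
+          # the auditConfig stanza queried above is only populated on older (4.5 and
+          # earlier) releases, so on newer clusters this query may return no output.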
+          # To verify that userGroups include system:unauthenticated
+          oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]?.userGroups'
+          # To verify RBAC is enabled
+          oc get clusterrolebinding
+          oc get clusterrole
+          oc get rolebinding
+          oc get role
+        tests:
+          test_items:
+            - flag: "system:unauthenticated"
+        remediation: |
+          None required. The default configuration should not be modified.
+        scored: false
+
+      - id: 1.2.2
+        text: "Ensure that the --basic-auth-file argument is not set (Manual)"
+        audit: |
+          oc -n openshift-kube-apiserver get cm config -o yaml | grep --color "basic-auth"
+          oc -n openshift-apiserver get cm config -o yaml | grep --color "basic-auth"
+          # The awk filter below reduces the AVAILABLE column to available=true/false
+          oc get clusteroperator authentication | awk '$3 != "AVAILABLE" { if ($3){print "available=true"}else{print "available=false"} }'
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "basic-auth-file"
+              set: false
+            - flag: "available"
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          None required. --basic-auth-file cannot be configured on OpenShift.
+        scored: false
+
+      - id: 1.2.3
+        text: "Ensure that the --token-auth-file parameter is not set (Manual)"
+        audit: |
+          # Verify that the token-auth-file flag is not present
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments'
+          oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments'
+          oc get kubeapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig.apiServerArguments'
+          #Verify that the authentication operator is running
+          oc get clusteroperator authentication | awk '$3 != "AVAILABLE" { if ($3){print "available=true"}else{print "available=false"} }'
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "token-auth-file"
+              set: false
+            - flag: "available"
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          None required.
+        scored: false
+
+      - id: 1.2.4
+        text: "Use https for kubelet connections (Manual)"
+        audit: |
+          #for 4.5
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.kubeletClientInfo'
+          #for 4.6
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments'
+          #for both 4.5 and 4.6
+          oc -n openshift-apiserver describe secret serving-cert
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "/etc/kubernetes/static-pod-resources/secrets/kubelet-client/tls.crt"
+            - flag: "/etc/kubernetes/static-pod-resources/secrets/kubelet-client/tls.key"
+        remediation: |
+          No remediation is required.
+          OpenShift platform components use X.509 certificates for authentication.
+          OpenShift manages the CAs and certificates for platform components. This is not configurable.
+        scored: false
+
+      - id: 1.2.5
+        text: "Ensure that the kubelet uses certificates to authenticate (Manual)"
+        audit: |
+          #for 4.5
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.kubeletClientInfo'
+          #for 4.6
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments'
+          #for both 4.5 and 4.6
+          oc -n openshift-apiserver describe secret serving-cert
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "/etc/kubernetes/static-pod-resources/secrets/kubelet-client/tls.crt"
+            - flag: "/etc/kubernetes/static-pod-resources/secrets/kubelet-client/tls.key"
+        remediation: |
+          No remediation is required.
+          OpenShift platform components use X.509 certificates for authentication.
+          OpenShift manages the CAs and certificates for platform components.
+          This is not configurable.
+        scored: false
+
+      - id: 1.2.6
+        text: "Verify that the kubelet certificate authority is set as appropriate (Manual)"
+        audit: |
+          # for 4.5
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.kubeletClientInfo'
+          # for 4.6
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments'
+        tests:
+          test_items:
+            - flag: "/etc/kubernetes/static-pod-resources/configmaps/kubelet-serving-ca/ca-bundle.crt"
+        remediation: |
+          No remediation is required.
+          OpenShift platform components use X.509 certificates for authentication.
+          OpenShift manages the CAs and certificates for platform components.
+          This is not configurable.
+        scored: false
+
+      - id: 1.2.7
+        text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Manual)"
+        audit: |
+          # To verify that the authorization-mode argument is not used
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments'
+          oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments'
+          #Check that no overrides are configured
+          oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides'
+          # To verify RBAC is configured:
+          oc get clusterrolebinding
+          oc get clusterrole
+          oc get rolebinding
+          oc get role
+        audit_config: |
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments'
+        tests:
+          bin_op: or
+          test_items:
+            - path: "{.authorization-mode}"
+              compare:
+                op: nothave
+                value: "AlwaysAllow"
+            - path: "{.authorization-mode}"
+              flag: "authorization-mode"
+              set: false
+        remediation: |
+          None. RBAC is always on and the OpenShift API server does not use the values assigned to the flag authorization-mode.
+        scored: false
+
+      - id: 1.2.8
+        text: "Verify that the Node authorizer is enabled (Manual)"
+        audit: |
+          # For OCP 4.5 and earlier verify that authorization-mode is not used
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments'
+          oc get kubeapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig.apiServerArguments'
+          # Verify that the kubelet is not using the authorization-mode argument
+          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
+          oc debug node/$NODE_NAME -- chroot /host cat /etc/kubernetes/kubelet.conf | grep authorization-mode 2> /dev/null
+          oc debug node/$NODE_NAME -- chroot /host ps -aux | grep kubelet | grep authorization-mode 2> /dev/null
+          #Check that no overrides are configured
+          oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides'
+        audit_config: |
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments'
+        tests:
+          bin_op: or
+          test_items:
+            - path: "{.authorization-mode}"
+              compare:
+                op: has
+                value: "Node"
+            - path: "{.authorization-mode}"
+              flag: "authorization-mode"
+              set: false
+        remediation: |
+          No remediation is required.
+        scored: false
+
+      - id: 1.2.9
+        text: "Verify that RBAC is enabled (Manual)"
+        audit: |
+          # For 4.5, verify that the authorization-mode argument is not used
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments'
+          oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments'
+          #Check that no overrides are configured
+          oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides'
+          # To verify RBAC is used
+          oc get clusterrolebinding
+          oc get clusterrole
+          oc get rolebinding
+          oc get role
+          # For 4.6, verify that the authorization-mode argument includes RBAC
+        audit_config: |
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments'
+        tests:
+          bin_op: or
+          test_items:
+            - path: "{.authorization-mode}"
+              compare:
+                op: has
+                value: "RBAC"
+            - path: "{.authorization-mode}"
+              flag: "authorization-mode"
+              set: false
+        remediation: |
+          None. It is not possible to disable RBAC.
+        scored: false
+
+      - id: 1.2.10
+        text: "Ensure that the APIPriorityAndFairness feature gate is enabled (Manual)"
+        audit: |
+          #Verify the APIPriorityAndFairness feature-gate
+          oc get kubeapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig.apiServerArguments'
+          #Verify the set of admission-plugins for OCP 4.6 and higher
+          oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"'
+          #Check that no overrides are configured
+          oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides'
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "APIPriorityAndFairness=true"
+            - flag: "EventRateLimit"
+              set: false
+        remediation: |
+          No remediation is required.
+        scored: false
+
+      - id: 1.2.11
+        text: "Ensure that the admission control plugin AlwaysAdmit is not set (Manual)"
+        audit: |
+          #Verify the set of admission-plugins for OCP 4.6 and higher
+          oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"'
+          #Check that no overrides are configured
+          oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides'
+        tests:
+          test_items:
+            - flag: "AlwaysAdmit"
+              set: false
+        remediation: |
+          No remediation is required. The AlwaysAdmit admission controller cannot be enabled in OpenShift.
+        scored: false
+
+      - id: 1.2.12
+        text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)"
+        audit: |
+          #Verify the set of admission-plugins for OCP 4.6 and higher
+          oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"'
+          #Check that no overrides are configured
+          oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides'
+        tests:
+          test_items:
+            - flag: "AlwaysPullImages"
+              set: false
+        remediation: |
+          None required.
+        scored: false
+
+      - id: 1.2.13
+        text: "Ensure that the admission control plugin SecurityContextDeny is not set (Manual)"
+        audit: |
+          #Verify the set of admission-plugins for OCP 4.6 and higher
+          oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"'
+          output=$(oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"')
+          [ "$output" == "null" ] && echo "ocp 4.5 has SecurityContextDeny and SecurityContextConstraint compiled" || echo $output
+          #Check that no overrides are configured
+          oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides'
+          #Verify that SecurityContextConstraints are deployed
+          oc get scc
+          oc describe scc restricted
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "SecurityContextConstraint"
+              set: true
+            - flag: "anyuid"
+            - flag: "hostaccess"
+            - flag: "hostmount-anyuid"
+            - flag: "hostnetwork"
+            - flag: "node-exporter"
+            - flag: "nonroot"
+            - flag: "privileged"
+            - flag: "restricted"
+        remediation: |
+          None required. The Security Context Constraint admission controller cannot be disabled in OpenShift 4.
+        scored: false
+
+      - id: 1.2.14
+        text: "Ensure that the admission control plugin ServiceAccount is set (Manual)"
+        audit: |
+          #Verify the list of admission controllers for 4.6 and higher
+          oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"'
+          output=$(oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"')
+          [ "$output" == "null" ] && echo "ocp 4.5 has ServiceAccount compiled" || echo $output
+          #Check that no overrides are configured
+          oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides'
+          #Verify that Service Accounts are present
+          oc get sa -A
+        tests:
+          test_items:
+            - flag: "ServiceAccount"
+              set: true
+        remediation: |
+          None required. OpenShift is configured to use service accounts by default.
+        scored: false
+
+      - id: 1.2.15
+        text: "Ensure that the admission control plugin NamespaceLifecycle is set (Manual)"
+        audit: |
+          #Verify the list of admission controllers for 4.6 and higher
+          oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"'
+          output=$(oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"')
+          [ "$output" == "null" ] && echo "ocp 4.5 has NamespaceLifecycle compiled" || echo $output
+          #Check that no overrides are configured
+          oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides'
+        tests:
+          test_items:
+            - flag: "NamespaceLifecycle"
+        remediation: |
+          Ensure that the --disable-admission-plugins parameter does not include NamespaceLifecycle.
+        scored: false
+
+      - id: 1.2.16
+        text: "Ensure that the admission control plugin SecurityContextConstraint is set (Manual)"
+        audit: |
+          #Verify the set of admission-plugins for OCP 4.6 and higher
+          oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"'
+          output=$(oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"')
+          [ "$output" == "null" ] && echo "ocp 4.5 has SecurityContextConstraint compiled" || echo $output
+          #Check that no overrides are configured
+          oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides'
+          #Verify that SecurityContextConstraints are deployed
+          oc get scc
+          oc describe scc restricted
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "SecurityContextConstraint"
+            - flag: "anyuid"
+            - flag: "hostaccess"
+            - flag: "hostmount-anyuid"
+            - flag: "hostnetwork"
+            - flag: "node-exporter"
+            - flag: "nonroot"
+            - flag: "privileged"
+            - flag: "restricted"
+        remediation: |
+          None required. Security Context Constraints are enabled by default in OpenShift and cannot be disabled.
+        scored: false
+
+      - id: 1.2.17
+        text: "Ensure that the admission control plugin NodeRestriction is set (Manual)"
+        audit: |
+          # For 4.5, review the control plane manifest https://github.com/openshift/origin/blob/release-4.15/vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/controlplane/manifests.go#L132
+          #Verify the set of admission-plugins for OCP 4.6 and higher
+          oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"'
+          output=$(oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"')
+          [ "$output" == "null" ] && echo "ocp 4.5 has NodeRestriction compiled" || echo $output
+          #Check that no overrides are configured
+          oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides'
+        tests:
+          test_items:
+            - flag: "NodeRestriction"
+        remediation: |
+          The NodeRestriction plugin cannot be disabled.
+        scored: false
+
+      - id: 1.2.18
+        text: "Ensure that the --insecure-bind-address argument is not set (Manual)"
+        audit: |
+          # InsecureBindAddress=true should not be in the results
+          oc get kubeapiservers.operator.openshift.io cluster -o jsonpath='{range .spec.observedConfig.apiServerArguments.feature-gates[*]}{@}{"\n"}{end}'
+          # Result should be only 6443
+          oc -n openshift-kube-apiserver get endpoints -o jsonpath='{.items[*].subsets[*].ports[*].port}'
+          # Result should be only 8443
+          oc -n openshift-apiserver get endpoints -o jsonpath='{.items[*].subsets[*].ports[*].port}'
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "insecure-bind-address"
+              set: false
+            - flag: 6443
+            - flag: 8443
+        remediation: |
+          None required.
+        scored: false
+
+      - id: 1.2.19
+        text: "Ensure that the --insecure-port argument is set to 0 (Manual)"
+        audit: |
+          # Should return 6443
+          oc -n openshift-kube-apiserver get endpoints -o jsonpath='{.items[*].subsets[*].ports[*].port}'
+          # For OCP 4.6 and above
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments["insecure-port"]'
+          output=$(oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments["insecure-port"]')
+          [ "$output" == "null" ] && echo "ocp 4.5 has insecure-port set to \"0\" compiled" || echo $output
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "\"0\""
+            - flag: "6443"
+        remediation: |
+          None required. The configuration is managed by the API server operator.
+        scored: false
+
+      - id: 1.2.20
+        text: "Ensure that the --secure-port argument is not set to 0 (Manual)"
+        audit: |
+          oc get kubeapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig'
+          # Should return only 6443
+          echo ports=`oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver -o jsonpath='{.items[*].spec.containers[?(@.name=="kube-apiserver")].ports[*].containerPort}'`
+        tests:
+          bin_op: and
+          test_items:
+            - flag: '"bindAddress": "0.0.0.0:6443"'
+            - flag: "ports"
+              compare:
+                op: regex
+                value: '\s*(?:6443\s*){1,}$'
+        remediation: |
+          None required.
+        scored: false
+
+      - id: 1.2.21
+        text: "Ensure that the healthz endpoint is protected by RBAC (Manual)"
+        type: manual
+        audit: |
+          # Verify endpoints
+          oc -n openshift-kube-apiserver describe endpoints
+          # Check config for ports, livenessProbe, readinessProbe, healthz
+          oc -n openshift-kube-apiserver get cm kube-apiserver-pod -o json | jq -r '.data."pod.yaml"' | jq '.spec.containers'
+          # Test to validate RBAC enabled on the apiserver endpoint; check with non-admin role
+          oc project openshift-kube-apiserver
+          POD=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver -o jsonpath='{.items[0].metadata.name}')
+          PORT=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver -o jsonpath='{.items[0].spec.containers[0].ports[0].hostPort}')
+          # Following should return 403 Forbidden
+          oc rsh -n openshift-kube-apiserver ${POD} curl https://localhost:${PORT}/metrics -k
+          # Create a service account to test RBAC
+          oc create -n openshift-kube-apiserver sa permission-test-sa
+          # Should return 403 Forbidden
+          SA_TOKEN=$(oc sa -n openshift-kube-apiserver get-token permission-test-sa)
+          oc rsh -n openshift-kube-apiserver ${POD} curl https://localhost:${PORT}/metrics -H "Authorization: Bearer $SA_TOKEN" -k
+          # Cleanup
+          oc delete -n openshift-kube-apiserver sa permission-test-sa
+          # As cluster admin, should succeed
+          CLUSTER_ADMIN_TOKEN=$(oc whoami -t)
+          oc rsh -n openshift-kube-apiserver ${POD} curl https://localhost:${PORT}/metrics -H "Authorization: Bearer $CLUSTER_ADMIN_TOKEN" -k
+        remediation: |
+          None required as profiling data is protected by RBAC.
+        scored: false
+
+      - id: 1.2.22
+        text: "Ensure that the --audit-log-path argument is set (Manual)"
+        audit: |
+          # Should return "/var/log/kube-apiserver/audit.log"
+          output=$(oc get configmap config -n openshift-kube-apiserver -o jsonpath="{['.data.config\.yaml']}" | jq '.auditConfig.auditFilePath')
+          [ "$output" != "" ] && [ "$output" != "null" ] && echo "$output" || true
+          output=$(oc get configmap config -n openshift-kube-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-path"][]?')
+          [ "$output" != "" ] && [ "$output" != "null" ] && echo "$output" || true
+          POD=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver -o jsonpath='{.items[0].metadata.name}')
+          oc rsh -n openshift-kube-apiserver -c kube-apiserver $POD ls /var/log/kube-apiserver/audit.log 2>/dev/null
+          # Should return 0
+          echo exit_code=$?
+          # Should return "/var/log/openshift-apiserver/audit.log"
+          output=$(oc get configmap config -n openshift-apiserver -o jsonpath="{['.data.config\.yaml']}" | jq '.auditConfig.auditFilePath')
+          [ "$output" != "" ] && [ "$output" != "null" ] && echo "$output" || true
+          output=$(oc get configmap config -n openshift-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-path"][]?')
+          [ "$output" != "" ] && [ "$output" != "null" ] && echo "$output" || true
+          POD=$(oc get pods -n openshift-apiserver -l apiserver=true -o jsonpath='{.items[0].metadata.name}')
+          oc rsh -n openshift-apiserver $POD ls /var/log/openshift-apiserver/audit.log 2>/dev/null
+          # Should return 0
+          echo exit_code=$?
+        use_multiple_values: true
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "/var/log/kube-apiserver/audit.log"
+            - flag: "/var/log/openshift-apiserver/audit.log"
+            - flag: "exit_code=0"
+            - flag: "null"
+        remediation: |
+          None required. This is managed by the cluster apiserver operator.
+        scored: false
+
+      - id: 1.2.23
+        text: "Ensure that the audit logs are forwarded off the cluster for retention (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the documentation for log forwarding ("Forwarding logs to third-party systems"):
+          https://docs.openshift.com/container-platform/4.15/logging/cluster-logging-external.html
+        scored: false
+
+      - id: 1.2.24
+        text: "Ensure that the maximumRetainedFiles argument is set to 10 or as appropriate (Manual)"
+        audit: |
+          #NOTICE
+          output=$(oc get configmap config -n openshift-kube-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r .auditConfig.maximumRetainedFiles)
+          [ "$output" != "" ] && [ "$output" != "null" ] && echo "maximumRetainedFiles=$output" || true
+          output=$(oc get configmap config -n openshift-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r .auditConfig.maximumRetainedFiles)
+          [ "$output" != "" ] && [ "$output" != "null" ] && echo "maximumRetainedFiles=$output" || true
+          output=$(oc get configmap config -n openshift-kube-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-maxbackup"][]?')
+          [ "$output" != "" ] && [ "$output" != "null" ] && echo "audit-log-maxbackup=$output" || true
+          output=$(oc get configmap config -n openshift-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-maxbackup"][]?')
+          [ "$output" != "" ] && [ "$output" != "null" ] && echo "audit-log-maxbackup=$output" || true
+        use_multiple_values: true
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "maximumRetainedFiles"
+              compare:
+                op: gte
+                value: 10
+            - flag: "audit-log-maxbackup"
+              compare:
+                op: gte
+                value: 10
+        remediation: |
+          Set the maximumRetainedFiles parameter to 10 or an appropriate number of files, for example:
+          maximumRetainedFiles: 10
+        scored: false
+
+      - id: 1.2.25
+        text: "Ensure that the maximumFileSizeMegabytes argument is set to 100 or as appropriate (Manual)"
+        audit: |
+          #NOTICE
+          output=$(oc get configmap config -n openshift-kube-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r .auditConfig.maximumFileSizeMegabytes)
+          [ "$output" != "" ] && [ "$output" != "null" ] && echo "maximumFileSizeMegabytes=$output" || true
+          output=$(oc get configmap config -n openshift-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r .auditConfig.maximumFileSizeMegabytes)
+          [ "$output" != "" ] && [ "$output" != "null" ] && echo "maximumFileSizeMegabytes=$output" || true
+          output=$(oc get configmap config -n openshift-kube-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-maxsize"][]?')
+          [ "$output" != "" ] && [ "$output" != "null" ] && echo "audit-log-maxsize=$output" || true
+          output=$(oc get configmap config -n openshift-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-maxsize"][]?')
+          [ "$output" != "" ] && [ "$output" != "null" ] && echo "audit-log-maxsize=$output" || true
+        use_multiple_values: true
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "maximumFileSizeMegabytes"
+              compare:
+                op: gte
+                value: 100
+            - flag: "audit-log-maxsize"
+              compare:
+                op: gte
+                value: 100
+        remediation: |
+          Set the audit-log-maxsize parameter to 100 or an appropriate number, for example:
+          maximumFileSizeMegabytes: 100
+        scored: false
+
+      - id: 1.2.26
+        text: "Ensure that the --request-timeout argument is set as appropriate (Manual)"
+        audit: |
+          echo requestTimeoutSeconds=`oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .servingInfo.requestTimeoutSeconds`
+        tests:
+          test_items:
+            - flag: "requestTimeoutSeconds"
+        remediation: |
+          TBD
+        scored: false
+
+      - id: 1.2.27
+        text: "Ensure that the --service-account-lookup argument is set to true (Manual)"
+        audit: |
+          # For OCP 4.5
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' | grep service-account-lookup
+          # For OCP 4.6 and above
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["service-account-lookup"]'
+          output=$(oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["service-account-lookup"][0]')
+          [ "$output" == "null" ] && echo "ocp 4.5 has service-account-lookup=true compiled" || echo service-account-lookup=$output
+        tests:
+          test_items:
+            - flag: "service-account-lookup=true"
+        remediation: |
+          TBD
+        scored: false
+
+      - id: 1.2.28
+        text: "Ensure that the --service-account-key-file argument is set as appropriate (Manual)"
+        audit: |
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .serviceAccountPublicKeyFiles[]
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "/etc/kubernetes/static-pod-resources/configmaps/sa-token-signing-certs"
+            - flag: "/etc/kubernetes/static-pod-resources/configmaps/bound-sa-token-signing-certs"
+        remediation: |
+          The OpenShift API server does not use the service-account-key-file argument.
+          The ServiceAccount token authenticator is configured with serviceAccountConfig.publicKeyFiles.
+          OpenShift does not reuse the apiserver TLS key. This is not configurable.
+        scored: false
+
+      - id: 1.2.29
+        text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Manual)"
+        audit: |
+          # etcd Certificate File
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .storageConfig.certFile
+          # etcd Key File
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .storageConfig.keyFile
+          # NOTICE 4.6 extension
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["etcd-certfile"]'
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["etcd-keyfile"]'
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "/etc/kubernetes/static-pod-resources/secrets/etcd-client/tls.crt"
+            - flag: "/etc/kubernetes/static-pod-resources/secrets/etcd-client/tls.key"
+        remediation: |
+          OpenShift automatically manages TLS and client certificate authentication for etcd.
+          This is not configurable.
+        scored: false
+
+      - id: 1.2.30
+        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)"
+        audit: |
+          # TLS Cert File - openshift-kube-apiserver
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .servingInfo.certFile
+          # TLS Key File
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.servingInfo.keyFile'
+          # NOTICE 4.6 extension
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["tls-cert-file"]'
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["tls-private-key-file"]'
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "/etc/kubernetes/static-pod-certs/secrets/service-network-serving-certkey/tls.crt"
+            - flag: "/etc/kubernetes/static-pod-certs/secrets/service-network-serving-certkey/tls.key"
+        remediation: |
+          OpenShift automatically manages TLS authentication for the API server communication with the node/kubelet.
+          This is not configurable. You may optionally set a custom default certificate to be used by the API server
+          when serving content in order to enable clients to access the API server at a different host name or without
+          the need to distribute the cluster-managed certificate authority (CA) certificates to the clients.
+          Follow the directions in the OpenShift documentation, "User-provided certificates for the API server".
+        scored: false
+
+      - id: 1.2.31
+        text: "Ensure that the --client-ca-file argument is set as appropriate (Manual)"
+        audit: |
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .servingInfo.clientCA
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["client-ca-file"]'
+        tests:
+          test_items:
+            - flag: "/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt"
+        remediation: |
+          OpenShift automatically manages TLS authentication for the API server communication with the node/kubelet.
+          This is not configurable. You may optionally set a custom default certificate to be used by the API
+          server when serving content in order to enable clients to access the API server at a different host name
+          or without the need to distribute the cluster-managed certificate authority (CA) certificates to the clients.
+
+          User-provided certificates must be provided in a kubernetes.io/tls type Secret in the openshift-config namespace.
+          Update the API server cluster configuration, the apiserver/cluster resource, to enable the use of the
+          user-provided certificate (an illustrative command sequence follows check 1.2.32 below).
+        scored: false
+
+      - id: 1.2.32
+        text: "Ensure that the --etcd-cafile argument is set as appropriate (Manual)"
+        audit: |
+          # etcd CA File
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .storageConfig.ca
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["etcd-cafile"]'
+        tests:
+          test_items:
+            - flag: "/etc/kubernetes/static-pod-resources/configmaps/etcd-serving-ca/ca-bundle.crt"
+        remediation: |
+          None required. OpenShift generates the etcd-cafile and sets the arguments appropriately in the API server.
+          Communication with etcd is secured by the etcd serving CA.
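+          As an illustration for check 1.2.31 above (the secret name and host
+          name here are placeholders, not defaults), a user-provided API server
+          certificate would be wired in along these lines:
+          oc create secret tls custom-api-cert --cert=tls.crt --key=tls.key -n openshift-config
+          oc patch apiserver cluster --type=merge -p \
+            '{"spec":{"servingCerts":{"namedCertificates":[{"names":["api.example.com"],"servingCertificate":{"name":"custom-api-cert"}}]}}}'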
+        scored: false
+
+      - id: 1.2.33
+        text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)"
+        audit: |
+          # encrypt the etcd datastore
+          oc get openshiftapiserver -o=jsonpath='{range.items[0].status.conditions[?(@.type=="Encrypted")]}{.reason}{"\n"}{.message}{"\n"}'
+        tests:
+          test_items:
+            - flag: "EncryptionCompleted"
+        remediation: |
+          Follow the OpenShift documentation for Encrypting etcd data | Authentication | OpenShift Container Platform 4.15
+          https://docs.openshift.com/container-platform/4.15/security/encrypting-etcd.html
+        scored: false
+
+      - id: 1.2.34
+        text: "Ensure that encryption providers are appropriately configured (Manual)"
+        audit: |
+          # encrypt the etcd datastore
+          oc get openshiftapiserver -o=jsonpath='{range.items[0].status.conditions[?(@.type=="Encrypted")]}{.reason}{"\n"}{.message}{"\n"}'
+        tests:
+          test_items:
+            - flag: "EncryptionCompleted"
+        remediation: |
+          Follow the Kubernetes documentation and configure an EncryptionConfig file.
+          In this file, choose aescbc, kms or secretbox as the encryption provider.
+        scored: false
+
+      - id: 1.2.35
+        text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)"
+        type: manual
+        audit: |
+          # verify cipher suites
+          oc get cm -n openshift-authentication v4-0-config-system-cliconfig -o jsonpath='{.data.v4\-0\-config\-system\-cliconfig}' | jq .servingInfo
+          oc get kubeapiservers.operator.openshift.io cluster -o json | jq .spec.observedConfig.servingInfo
+          oc get openshiftapiservers.operator.openshift.io cluster -o json | jq .spec.observedConfig.servingInfo
+          oc describe --namespace=openshift-ingress-operator ingresscontroller/default
+        remediation: |
+          Verify that the tlsSecurityProfile is set to the value you chose.
+          Note: The HAProxy Ingress controller image does not support TLS 1.3
+          and because the Modern profile requires TLS 1.3, it is not supported.
+          The Ingress Operator converts the Modern profile to Intermediate.
+          The Ingress Operator also converts the TLS 1.0 of an Old or Custom profile to 1.1,
+          and TLS 1.3 of a Custom profile to 1.2.
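+          An illustrative tlsSecurityProfile stanza for the default
+          IngressController (the profile chosen here is an example, not a
+          recommendation) would be:
+          apiVersion: operator.openshift.io/v1
+          kind: IngressController
+          metadata:
+            name: default
+            namespace: openshift-ingress-operator
+          spec:
+            tlsSecurityProfile:
+              type: Intermediate
+              intermediate: {}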
+        scored: false
+
+  - id: 1.3
+    text: "Controller Manager"
+    checks:
+      - id: 1.3.1
+        text: "Ensure that garbage collection is configured as appropriate (Manual)"
+        type: manual
+        remediation: |
+          To configure, follow the directions in Configuring garbage collection for containers and images
+          https://docs.openshift.com/container-platform/4.15/nodes/nodes/nodes-nodes-garbage-collection.html#nodes-nodes-garbage-collection-configuring_nodes-nodes-configuring
+        scored: false
+
+      - id: 1.3.2
+        text: "Ensure that controller manager healthz endpoints are protected by RBAC (Manual)"
+        type: manual
+        audit: |
+          # Verify configuration for ports, livenessProbe, readinessProbe, healthz
+          oc -n openshift-kube-controller-manager get cm kube-controller-manager-pod -o json | jq -r '.data."pod.yaml"' | jq '.spec.containers'
+          # Verify endpoints
+          oc -n openshift-kube-controller-manager describe endpoints
+          # Test to validate RBAC enabled on the controller endpoint; check with non-admin role
+          oc project openshift-kube-controller-manager
+          POD=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager -o jsonpath='{.items[0].metadata.name}')
+          PORT=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager -o jsonpath='{.items[0].spec.containers[0].ports[0].hostPort}')
+          # Following should return 403 Forbidden
+          oc rsh -n openshift-kube-controller-manager ${POD} curl https://localhost:${PORT}/metrics -k
+          # Create a service account to test RBAC
+          oc create -n openshift-kube-controller-manager sa permission-test-sa
+          # Should return 403 Forbidden
+          SA_TOKEN=$(oc sa -n openshift-kube-controller-manager get-token permission-test-sa)
+          oc rsh -n openshift-kube-controller-manager ${POD} curl https://localhost:${PORT}/metrics -H "Authorization: Bearer $SA_TOKEN" -k
+          # Cleanup
+          oc delete -n openshift-kube-controller-manager sa permission-test-sa
+          # As cluster admin, should succeed
+          CLUSTER_ADMIN_TOKEN=$(oc whoami -t)
+          oc rsh -n openshift-kube-controller-manager ${POD} curl https://localhost:${PORT}/metrics -H "Authorization: Bearer $CLUSTER_ADMIN_TOKEN" -k
+        remediation: |
+          None required; profiling is protected by RBAC.
+        scored: false
+
+      - id: 1.3.3
+        text: "Ensure that the --use-service-account-credentials argument is set to true (Manual)"
+        audit: |
+          echo use-service-account-credentials=`oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq -r '.extendedArguments["use-service-account-credentials"][]'`
+        tests:
+          test_items:
+            - flag: "use-service-account-credentials"
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          The OpenShift Controller Manager operator manages and updates the OpenShift Controller Manager.
+          The Kubernetes Controller Manager operator manages and updates the Kubernetes Controller Manager deployed on top of OpenShift.
+          This operator is configured via the KubeControllerManager custom resource.
+        scored: false
+
+      - id: 1.3.4
+        text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Manual)"
+        audit: |
+          oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq -r '.extendedArguments["service-account-private-key-file"][]'
+        tests:
+          test_items:
+            - flag: "/etc/kubernetes/static-pod-resources/secrets/service-account-private-key/service-account.key"
+        remediation: |
+          None required.
+          OpenShift manages the service account credentials for the controller manager automatically.
+ scored: false + + - id: 1.3.5 + text: "Ensure that the --root-ca-file argument is set as appropriate (Manual)" + audit: | + oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq -r '.extendedArguments["root-ca-file"][]' + tests: + test_items: + - flag: "/etc/kubernetes/static-pod-resources/configmaps/serviceaccount-ca/ca-bundle.crt" + remediation: | + None required. + Certificates for OpenShift platform components are automatically created and rotated by the OpenShift Container Platform. + scored: false + + - id: 1.3.6 + text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Manual)" + audit: | + oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq -r '.extendedArguments["feature-gates"][]' + tests: + test_items: + - flag: "RotateKubeletServerCertificate" + compare: + op: eq + value: "true" + remediation: | + None required. + Certificates for OpenShift platform components are automatically created and rotated by the OpenShift Container Platform. + scored: false + + - id: 1.3.7 + text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Manual)" + audit: | + echo port=`oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq '.extendedArguments["port"][]'` + echo secure-port=`oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq '.extendedArguments["secure-port"][]'` + #Following should fail with a http code 403 + POD=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager -o jsonpath='{.items[0].metadata.name}') + oc rsh -n openshift-kube-controller-manager -c kube-controller-manager $POD curl https://localhost:10257/metrics -k + tests: + bin_op: and + test_items: + - flag: "secure-port" + compare: + op: eq + value: "\"10257\"" + - flag: "port" + compare: + op: eq + value: "\"0\"" + - flag: "\"code\": 403" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the master node and ensure the correct value for the --bind-address parameter + scored: false + + - id: 1.4 + text: "Scheduler" + checks: + - id: 1.4.1 + text: "Ensure that the healthz endpoints for the scheduler are protected by RBAC (Manual)" + type: manual + audit: | + # check configuration for ports, livenessProbe, readinessProbe, healthz + oc -n openshift-kube-scheduler get cm kube-scheduler-pod -o json | jq -r '.data."pod.yaml"' | jq '.spec.containers' + # Test to verify endpoints + oc -n openshift-kube-scheduler describe endpoints + # Test to validate RBAC enabled on the scheduler endpoint; check with non-admin role + oc project openshift-kube-scheduler + POD=$(oc get pods -l app=openshift-kube-scheduler -o jsonpath='{.items[0].metadata.name}') + PORT=$(oc get pod $POD -o jsonpath='{.spec.containers[0].livenessProbe.httpGet.port}') + # Should return 403 Forbidden + oc rsh ${POD} curl http://localhost:${PORT}/metrics -k + # Create a service account to test RBAC + oc create sa permission-test-sa + # Should return 403 Forbidden + SA_TOKEN=$(oc sa get-token permission-test-sa) + oc rsh ${POD} curl http://localhost:${PORT}/metrics -H "Authorization: Bearer $SA_TOKEN" -k + # Cleanup + oc delete sa permission-test-sa + # As cluster admin, should succeed + CLUSTER_ADMIN_TOKEN=$(oc whoami -t) + oc rsh ${POD} curl http://localhost:${PORT}/metrics -H "Authorization: Bearer $CLUSTER_ADMIN_TOKEN" -k + remediation: | + A fix to this issue: 
https://bugzilla.redhat.com/show_bug.cgi?id=1889488
+          None required; profiling is protected by RBAC and cannot be disabled.
+        scored: false
+
+      - id: 1.4.2
+        text: "Verify that the scheduler API service is protected by authentication and authorization (Manual)"
+        type: manual
+        audit: |
+          # To verify endpoints
+          oc -n openshift-kube-scheduler describe endpoints
+          # To verify that bind-address is not used in the configuration and that port is set to 0
+          oc -n openshift-kube-scheduler get cm kube-scheduler-pod -o json | jq -r '.data."pod.yaml"' | jq '.spec.containers'
+          # To test for RBAC:
+          oc project openshift-kube-scheduler
+          POD=$(oc get pods -l app=openshift-kube-scheduler -o jsonpath='{.items[0].metadata.name}')
+          POD_IP=$(oc get pods -l app=openshift-kube-scheduler -o jsonpath='{.items[0].status.podIP}')
+          PORT=$(oc get pod $POD -o jsonpath='{.spec.containers[0].livenessProbe.httpGet.port}')
+          # Should return a 403
+          oc rsh ${POD} curl http://${POD_IP}:${PORT}/metrics
+          # Create a service account to test RBAC
+          oc create sa permission-test-sa
+          # Should return 403 Forbidden
+          SA_TOKEN=$(oc sa get-token permission-test-sa)
+          oc rsh ${POD} curl http://localhost:${PORT}/metrics -H "Authorization: Bearer $SA_TOKEN" -k
+          # Cleanup
+          oc delete sa permission-test-sa
+          # As cluster admin, should succeed
+          CLUSTER_ADMIN_TOKEN=$(oc whoami -t)
+          oc rsh ${POD} curl http://localhost:${PORT}/metrics -H "Authorization: Bearer $CLUSTER_ADMIN_TOKEN" -k
+        remediation: |
+          By default, the --bind-address argument is not present,
+          the readinessProbe and livenessProbe arguments are set to 10251 and the port argument is set to 0.
+          Check the status of this issue: https://bugzilla.redhat.com/show_bug.cgi?id=1889488
+        scored: false
diff --git a/cfg/rh-1.6/node.yaml b/cfg/rh-1.6/node.yaml
new file mode 100644
index 000000000..0f29a7066
--- /dev/null
+++ b/cfg/rh-1.6/node.yaml
@@ -0,0 +1,429 @@
+---
+controls:
+version: rh-1.6
+id: 4
+text: "Worker Node Security Configuration"
+type: "node"
+groups:
+  - id: 4.1
+    text: "Worker Node Configuration Files"
+    checks:
+      - id: 4.1.1
+        text: "Ensure that the kubelet service file permissions are set to 644 or more restrictive (Automated)"
+        audit: |
+          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
+          oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/systemd/system/kubelet.service 2> /dev/null
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          By default, the kubelet service file has permissions of 644.
+        scored: true
+
+      - id: 4.1.2
+        text: "Ensure that the kubelet service file ownership is set to root:root (Automated)"
+        audit: |
+          # Should return root:root for each node
+          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
+          oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/systemd/system/kubelet.service 2> /dev/null
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          By default, the kubelet service file has ownership of root:root.
+        scored: true
+
+      - id: 4.1.3
+        text: "If proxy kubeconfig file exists ensure permissions are set to 644 or more restrictive (Manual)"
+        audit: |
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+          # Get the pod name in the openshift-sdn namespace
+          POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Execute the stat command
+            oc exec -n openshift-sdn "$POD_NAME" -- stat -Lc "$NODE_NAME %n permissions=%a" /config/kube-proxy-config.yaml 2>/dev/null
+          fi
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "permissions"
+              set: true
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          None needed.
+        scored: false
+
+      - id: 4.1.4
+        text: "Ensure that the proxy kubeconfig file ownership is set to root:root (Manual)"
+        audit: |
+          # Get the node name where the pod is running
+          NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+          # Get the pod name in the openshift-sdn namespace
+          POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
+
+          if [ -z "$POD_NAME" ]; then
+            echo "No matching pods found on the current node."
+          else
+            # Execute the stat command
+            oc exec -n openshift-sdn "$POD_NAME" -- stat -Lc "$NODE_NAME %n %U:%G" /config/kube-proxy-config.yaml 2>/dev/null
+          fi
+        use_multiple_values: true
+        tests:
+          bin_op: or
+          test_items:
+            - flag: root:root
+        remediation: |
+          None required. The configuration is managed by OpenShift operators.
+        scored: false
+
+      - id: 4.1.5
+        text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 or more restrictive (Manual)"
+        audit: |
+          # Check permissions
+          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
+          oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/kubernetes/kubelet.conf 2> /dev/null
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          None required.
+        scored: false
+
+      - id: 4.1.6
+        text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Manual)"
+        audit: |
+          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
+          oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/kubernetes/kubelet.conf 2> /dev/null
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          None required.
+        scored: false
+
+      - id: 4.1.7
+        text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Automated)"
+        audit: |
+          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
+          oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/kubernetes/kubelet-ca.crt 2> /dev/null
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          None required.
+        scored: true
+
+      - id: 4.1.8
+        text: "Ensure that the client certificate authorities file ownership is set to root:root (Automated)"
+        audit: |
+          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
+          oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/kubernetes/kubelet-ca.crt 2> /dev/null
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          None required.
+        scored: true
+
+      - id: 4.1.9
+        text: "Ensure that the kubelet --config configuration file has permissions set to 644 or more restrictive (Automated)"
+        audit: |
+          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
+          oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /var/lib/kubelet/kubeconfig 2> /dev/null
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          None required.
+        scored: true
+
+      - id: 4.1.10
+        text: "Ensure that the kubelet configuration file ownership is set to root:root (Automated)"
+        audit: |
+          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
+          oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /var/lib/kubelet/kubeconfig 2> /dev/null
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          None required.
+        scored: true
+
+  - id: 4.2
+    text: "Kubelet"
+    checks:
+      - id: 4.2.1
+        text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
+        audit: |
+          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
+          oc debug node/$NODE_NAME -- chroot /host grep -B4 -A1 anonymous /etc/kubernetes/kubelet.conf 2> /dev/null
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "enabled: true"
+              set: false
+        remediation: |
+          Follow the instructions in the documentation to create a Kubelet config CRD
+          and ensure that anonymous-auth is set to false.
+        scored: true
+
+      - id: 4.2.2
+        text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Manual)"
+        type: manual
+        # Takes a lot of time for the connection to fail
+        audit: |
+          POD=$(oc -n openshift-kube-apiserver get pod -l app=openshift-kube-apiserver -o jsonpath='{.items[0].metadata.name}')
+          TOKEN=$(oc whoami -t)
+          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
+          oc exec -n openshift-kube-apiserver $POD -- curl -sS https://172.25.0.1/api/v1/nodes/$NODE_NAME/proxy/configz -k -H "Authorization:Bearer $TOKEN" | jq -r '.kubeletconfig.authorization.mode' 2> /dev/null
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "Connection timed out"
+        remediation: |
+          None required. Unauthenticated/Unauthorized users have no access to OpenShift nodes.
+        scored: false
+
+      - id: 4.2.3
+        text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
+        audit: |
+          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
+          oc debug node/$NODE_NAME -- chroot /host grep clientCAFile /etc/kubernetes/kubelet.conf 2> /dev/null
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: '"clientCAFile": "/etc/kubernetes/kubelet-ca.crt"'
+        remediation: |
+          None required. Changing the clientCAFile value is unsupported.
+ scored: true + + - id: 4.2.4 + text: "Verify that the read only port is not used or is set to 0 (Automated)" + audit: | + echo `oc -n openshift-kube-apiserver get cm kube-apiserver-pod -o yaml | grep --color read-only-port` 2> /dev/null + echo `oc -n openshift-kube-apiserver get cm config -o yaml | grep --color "read-only-port"` 2> /dev/null + tests: + bin_op: or + test_items: + - flag: "read-only-port" + compare: + op: has + value: "[\"0\"]" + - flag: "read-only-port" + set: false + remediation: | + In earlier versions of OpenShift 4, the read-only-port argument is not used. + Follow the instructions in the documentation to create a Kubelet config CRD + and set the --read-only-port is set to 0. + scored: true + + - id: 4.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated)" + audit: | + # Should return 1 for node + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/${NODE_NAME} -- chroot /host ps -ef | grep kubelet | grep streaming-connection-idle-timeout 2> /dev/null + echo exit_code=$? + # Should return 1 for node + oc debug node/${NODE_NAME} -- chroot /host grep streamingConnectionIdleTimeout /etc/kubernetes/kubelet.conf 2> /dev/null + echo exit_code=$? + use_multiple_values: true + tests: + bin_op: or + test_items: + - flag: --streaming-connection-idle-timeout + compare: + op: noteq + value: 0 + - flag: streamingConnectionIdleTimeout + compare: + op: noteq + value: 0s + - flag: "exit_code" + compare: + op: eq + value: 1 + remediation: | + Follow the instructions in the documentation to create a Kubelet config CRD and set + the --streaming-connection-idle-timeout to the desired value. Do not set the value to 0. + scored: true + + - id: 4.2.6 + text: "Ensure that the --protect-kernel-defaults argument is not set (Manual)" + audit: | + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host more /etc/kubernetes/kubelet.conf 2> /dev/null + tests: + test_items: + - flag: protectKernelDefaults + set: false + remediation: | + None required. The OpenShift 4 kubelet modifies the system tunable; + using the protect-kernel-defaults flag will cause the kubelet to fail on start if the tunables + don't match the kubelet configuration and the OpenShift node will fail to start. + scored: false + + - id: 4.2.7 + text: "Ensure that the --make-iptables-util-chains argument is set to true (Manual)" + audit: | + /bin/bash + flag=make-iptables-util-chains + opt=makeIPTablesUtilChains + # look at each machineconfigpool + while read -r pool nodeconfig; do + # true by default + value='true' + # first look for the flag + oc get machineconfig $nodeconfig -o json | jq -r '.spec.config.systemd[][] | select(.name=="kubelet.service") | .contents' | sed -n "/^ExecStart=/,/^\$/ { /^\\s*--$flag=false/ q 100 }" + # if the above command exited with 100, the flag was false + [ $? == 100 ] && value='false' + # now look in the yaml KubeletConfig + yamlconfig=$(oc get machineconfig $nodeconfig -o json | jq -r '.spec.config.storage.files[] | select(.path=="/etc/kubernetes/kubelet.conf") | .contents.source ' | sed 's/^data:,//' | while read; do echo -e ${REPLY//%/\\x}; done) + echo "$yamlconfig" | sed -n "/^$opt:\\s*false\\s*$/ q 100" + [ $? 
== 100 ] && value='false'
+          echo "Pool $pool has $flag ($opt) set to $value"
+          done < <(oc get machineconfigpools -o json | jq -r '.items[] | select(.status.machineCount>0) | .metadata.name + " " + .spec.configuration.name')
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "set to true"
+        remediation: |
+          None required. The --make-iptables-util-chains argument is set to true by default.
+        scored: false
+
+      - id: 4.2.8
+        text: "Ensure that the --hostname-override argument is not set (Manual)"
+        audit: |
+          echo `oc get machineconfig 01-worker-kubelet -o yaml | grep hostname-override`
+          echo `oc get machineconfig 01-master-kubelet -o yaml | grep hostname-override`
+        tests:
+          test_items:
+            - flag: hostname-override
+              set: false
+        remediation: |
+          By default, the --hostname-override argument is not set.
+        scored: false
+
+      - id: 4.2.9
+        text: "Ensure that the kubeAPIQPS [--event-qps] argument is set to 0 or a level which ensures appropriate event capture (Manual)"
+        audit: |
+          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
+          oc debug node/${NODE_NAME} -- chroot /host cat /etc/kubernetes/kubelet.conf;
+          oc get machineconfig 01-worker-kubelet -o yaml | grep --color kubeAPIQPS%3A%2050
+          oc get machineconfig 01-master-kubelet -o yaml | grep --color kubeAPIQPS%3A%2050
+        type: "manual"
+        remediation: |
+          Follow the documentation to edit kubelet parameters
+          https://docs.openshift.com/container-platform/4.15/scalability_and_performance/recommended-host-practices.html#create-a-kubeletconfig-crd-to-edit-kubelet-parameters
+          KubeAPIQPS: <QPS>
+        scored: false
+
+      - id: 4.2.10
+        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)"
+        audit: |
+          oc get configmap config -n openshift-kube-apiserver -o json \
+            | jq -r '.data["config.yaml"]' \
+            | jq -r '.apiServerArguments |
+                .["kubelet-client-certificate"][0],
+                .["kubelet-client-key"][0]
+              '
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "/etc/kubernetes/static-pod-certs/secrets/kubelet-client/tls.crt"
+            - flag: "/etc/kubernetes/static-pod-certs/secrets/kubelet-client/tls.key"
+        remediation: |
+          OpenShift automatically manages TLS authentication for the API server communication with the node/kubelet.
+          This is not configurable.
+        scored: true
+
+      - id: 4.2.11
+        text: "Ensure that the --rotate-certificates argument is not set to false (Manual)"
+        audit: |
+          # Verify the rotateKubeletClientCertificate feature gate is not set to false
+          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
+          oc debug node/${NODE_NAME} -- chroot /host cat /etc/kubernetes/kubelet.conf | grep RotateKubeletClientCertificate 2> /dev/null
+          # Verify the rotateCertificates argument is set to true
+          oc debug node/${NODE_NAME} -- chroot /host grep rotate /etc/kubernetes/kubelet.conf 2> /dev/null
+        use_multiple_values: true
+        tests:
+          bin_op: or
+          test_items:
+            - flag: rotateCertificates
+              compare:
+                op: eq
+                value: true
+            - flag: rotateKubeletClientCertificates
+              compare:
+                op: noteq
+                value: false
+            - flag: rotateKubeletClientCertificates
+              set: false
+        remediation: |
+          None required.
+        scored: false
+
+      - id: 4.2.12
+        text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)"
+        audit: |
+          # Verify the rotateKubeletServerCertificate feature gate is on
+          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
+          oc debug node/${NODE_NAME} -- chroot /host grep RotateKubeletServerCertificate /etc/kubernetes/kubelet.conf 2> /dev/null
+          # Verify the rotateCertificates argument is set to true
+          oc debug node/${NODE_NAME} -- chroot /host grep rotate /etc/kubernetes/kubelet.conf 2> /dev/null
+        use_multiple_values: true
+        tests:
+          bin_op: or
+          test_items:
+            - flag: rotateCertificates
+              compare:
+                op: eq
+                value: true
+            - flag: RotateKubeletServerCertificate
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          By default, kubelet server certificate rotation is disabled.
+        scored: false
+
+      - id: 4.2.13
+        text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)"
+        audit: |
+          # needs verification
+          # verify cipher suites
+          oc describe --namespace=openshift-ingress-operator ingresscontroller/default
+          oc get kubeapiservers.operator.openshift.io cluster -o json | jq .spec.observedConfig.servingInfo
+          oc get openshiftapiservers.operator.openshift.io cluster -o json | jq .spec.observedConfig.servingInfo
+          oc get cm -n openshift-authentication v4-0-config-system-cliconfig -o jsonpath='{.data.v4\-0\-config\-system\-cliconfig}' | jq .servingInfo
+          # check value for tlsSecurityProfile; null is returned if default is used
+          oc get kubeapiservers.operator.openshift.io cluster -o json | jq .spec.tlsSecurityProfile
+        type: manual
+        remediation: |
+          Follow the directions above and in the OpenShift documentation to configure the tlsSecurityProfile
+          (see "Configuring Ingress").
+        scored: false
diff --git a/cfg/rh-1.6/policies.yaml b/cfg/rh-1.6/policies.yaml
new file mode 100644
index 000000000..fb39ca0d4
--- /dev/null
+++ b/cfg/rh-1.6/policies.yaml
@@ -0,0 +1,287 @@
+---
+controls:
+version: rh-1.6
+id: 5
+text: "Kubernetes Policies"
+type: "policies"
+groups:
+  - id: 5.1
+    text: "RBAC and Service Accounts"
+    checks:
+      - id: 5.1.1
+        text: "Ensure that the cluster-admin role is only used where required (Manual)"
+        type: "manual"
+        audit: |
+          # To get a list of users and service accounts with the cluster-admin role
+          oc get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].kind | grep cluster-admin
+          # To verify that kubeadmin is removed, no results should be returned
+          oc get secrets kubeadmin -n kube-system
+        remediation: |
+          Identify all clusterrolebindings to the cluster-admin role. Check if they are used and
+          if they need this role or if they could use a role with fewer privileges.
+          Where possible, first bind users to a lower privileged role and then remove the
+          clusterrolebinding to the cluster-admin role:
+          kubectl delete clusterrolebinding [name]
+        scored: false
+
+      - id: 5.1.2
+        text: "Minimize access to secrets (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible, remove get, list and watch access to secret objects in the cluster.
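+          A least-privilege Role along these lines (the name, namespace and
+          resources are illustrative placeholders) avoids granting any access
+          to secrets:
+          apiVersion: rbac.authorization.k8s.io/v1
+          kind: Role
+          metadata:
+            name: app-config-reader
+            namespace: my-app
+          rules:
+            - apiGroups: [""]
+              resources: ["configmaps"]
+              verbs: ["get", "list"]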
+ scored: false + + - id: 5.1.3 + text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" + type: "manual" + audit: | + #needs verification + oc get roles --all-namespaces -o yaml + for i in $(oc get roles -A -o jsonpath='{.items[*].metadata.name}'); do oc + describe clusterrole ${i}; done + #Retrieve the cluster roles defined in the cluster and review for wildcards + oc get clusterroles -o yaml + for i in $(oc get clusterroles -o jsonpath='{.items[*].metadata.name}'); do + oc describe clusterrole ${i}; done + remediation: | + Where possible replace any use of wildcards in clusterroles and roles with specific + objects or actions. + scored: false + + - id: 5.1.4 + text: "Minimize access to create pods (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to pod objects in the cluster. + scored: false + + - id: 5.1.5 + text: "Ensure that default service accounts are not actively used. (Manual)" + type: "manual" + remediation: | + None required. + scored: false + + - id: 5.1.6 + text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" + type: "manual" + remediation: | + Modify the definition of pods and service accounts which do not need to mount service + account tokens to disable it. + scored: false + + - id: 5.2 + text: "Pod Security Policies" + checks: + - id: 5.2.1 + text: "Minimize the admission of privileged containers (Manual)" + audit: | + # needs verification + oc get scc -o=custom-columns=NAME:.metadata.name,allowPrivilegedContainer:.allowPrivilegedContainer + tests: + test_items: + - flag: "false" + remediation: | + Create a SCC as described in the OpenShift documentation, ensuring that the Allow + Privileged field is set to false. + scored: false + + - id: 5.2.2 + text: "Minimize the admission of containers wishing to share the host process ID namespace (Manual)" + audit: | + oc get scc -o=custom-columns=NAME:.metadata.name,allowHostPID:.allowHostPID + tests: + test_items: + - flag: "false" + remediation: | + Create a SCC as described in the OpenShift documentation, ensuring that the Allow Host + PID field is set to false. + scored: false + + - id: 5.2.3 + text: "Minimize the admission of containers wishing to share the host IPC namespace (Manual)" + audit: | + oc get scc -o=custom-columns=NAME:.metadata.name,allowHostIPC:.allowHostIPC + tests: + test_items: + - flag: "false" + remediation: | + Create a SCC as described in the OpenShift documentation, ensuring that the Allow Host + IPC field is set to false. + scored: false + + - id: 5.2.4 + text: "Minimize the admission of containers wishing to share the host network namespace (Manual)" + audit: | + oc get scc -o=custom-columns=NAME:.metadata.name,allowHostNetwork:.allowHostNetwork + tests: + test_items: + - flag: "false" + remediation: | + Create a SCC as described in the OpenShift documentation, ensuring that the Allow Host + Network field is omitted or set to false. + scored: false + + - id: 5.2.5 + text: "Minimize the admission of containers with allowPrivilegeEscalation (Manual)" + audit: | + oc get scc -o=custom-columns=NAME:.metadata.name,allowPrivilegeEscalation:.allowPrivilegeEscalation + tests: + test_items: + - flag: "false" + remediation: | + Create a SCC as described in the OpenShift documentation, ensuring that the Allow + Privilege Escalation field is omitted or set to false. 
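+          In SCC terms (a partial sketch; a real SCC also needs the remaining
+          required fields such as runAsUser and seLinuxContext), the relevant
+          setting is:
+          allowPrivilegeEscalation: false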
+        scored: false
+
+      - id: 5.2.6
+        text: "Minimize the admission of root containers (Manual)"
+        audit: |
+          # needs verification # | awk 'NR>1 {gsub("map\\[type:", "", $2); gsub("\\]$", "", $2); print $1 ":" $2}'
+          oc get scc -o=custom-columns=NAME:.metadata.name,runAsUser:.runAsUser.type
+          # For SCCs with MustRunAs verify that the range of UIDs does not include 0
+          oc get scc -o=custom-columns=NAME:.metadata.name,uidRangeMin:.runAsUser.uidRangeMin,uidRangeMax:.runAsUser.uidRangeMax
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "MustRunAsNonRoot"
+            - flag: "MustRunAs"
+              compare:
+                op: nothave
+                value: 0
+        remediation: |
+          None required. By default, OpenShift includes the nonroot SCC, with its Run As User
+          Strategy set to MustRunAsNonRoot. If additional SCCs are appropriate, follow the
+          OpenShift documentation to create custom SCCs.
+        scored: false
+
+      - id: 5.2.7
+        text: "Minimize the admission of containers with the NET_RAW capability (Manual)"
+        audit: |
+          # needs verification
+          oc get scc -o=custom-columns=NAME:.metadata.name,requiredDropCapabilities:.requiredDropCapabilities
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "ALL"
+            - flag: "NET_RAW"
+        remediation: |
+          Create a SCC as described in the OpenShift documentation, ensuring that the Required
+          Drop Capabilities is set to include either NET_RAW or ALL.
+        scored: false
+
+      - id: 5.2.8
+        text: "Minimize the admission of containers with added capabilities (Manual)"
+        type: "manual"
+        remediation: |
+          Ensure that Allowed Capabilities is set to an empty array for every SCC in the cluster
+          except for the privileged SCC.
+        scored: false
+
+      - id: 5.2.9
+        text: "Minimize the admission of containers with capabilities assigned (Manual)"
+        type: "manual"
+        remediation: |
+          Review the use of capabilities in applications running on your cluster. Where a namespace
+          contains applications which do not require any Linux capabilities to operate, consider
+          adding a SCC which forbids the admission of containers which do not drop all capabilities.
+        scored: false
+
+  - id: 5.3
+    text: "Network Policies and CNI"
+    checks:
+      - id: 5.3.1
+        text: "Ensure that the CNI in use supports Network Policies (Manual)"
+        type: "manual"
+        remediation: |
+          None required.
+        scored: false
+
+      - id: 5.3.2
+        text: "Ensure that all Namespaces have Network Policies defined (Manual)"
+        type: "manual"
+        audit: |
+          # Run the following command and review the NetworkPolicy objects created in the cluster.
+          oc get networkpolicy --all-namespaces
+        remediation: |
+          Follow the documentation and create NetworkPolicy objects as you need them.
+        scored: false
+
+  - id: 5.4
+    text: "Secrets Management"
+    checks:
+      - id: 5.4.1
+        text: "Prefer using secrets as files over secrets as environment variables (Manual)"
+        type: "manual"
+        audit: |
+          # Run the following command to find references to objects which use environment variables defined from secrets.
+          oc get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} {.metadata.name} {"\n"}{end}' -A
+        remediation: |
+          If possible, rewrite application code to read secrets from mounted secret files, rather than
+          from environment variables (an illustrative pod fragment follows check 5.4.2 below).
+        scored: false
+
+      - id: 5.4.2
+        text: "Consider external secret storage (Manual)"
+        type: "manual"
+        remediation: |
+          Refer to the secrets management options offered by your cloud provider or a third-party
+          secrets management solution.
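+          As an illustration for check 5.4.1 above (names and paths are
+          placeholders), mounting a secret as a file rather than exposing it
+          as an environment variable looks like:
+          volumes:
+            - name: creds
+              secret:
+                secretName: app-credentials
+          containers:
+            - name: app
+              image: registry.example.com/app:latest
+              volumeMounts:
+                - name: creds
+                  mountPath: /etc/app/creds
+                  readOnly: true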
+        scored: false
+
+  - id: 5.5
+    text: "Extensible Admission Control"
+    checks:
+      - id: 5.5.1
+        text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the OpenShift documentation: [Image configuration resources](https://docs.openshift.com/container-platform/4.15/openshift_images/image-configuration.html)
+        scored: false
+
+  - id: 5.7
+    text: "General Policies"
+    checks:
+      - id: 5.7.1
+        text: "Create administrative boundaries between resources using namespaces (Manual)"
+        type: "manual"
+        audit: |
+          # Run the following command and review the namespaces created in the cluster.
+          oc get namespaces
+          # Ensure that these namespaces are the ones you need and are adequately administered as per your requirements.
+        remediation: |
+          Follow the documentation and create namespaces for objects in your deployment as you need
+          them.
+        scored: false
+
+      - id: 5.7.2
+        text: "Ensure that the seccomp profile is set to docker/default in your pod definitions (Manual)"
+        type: "manual"
+        remediation: |
+          To enable the default seccomp profile, use the reserved value runtime/default that will
+          make sure that the pod uses the default policy available on the host.
+        scored: false
+
+      - id: 5.7.3
+        text: "Apply Security Context to Your Pods and Containers (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the Kubernetes documentation and apply security contexts to your pods. For a
+          suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker
+          Containers (an illustrative securityContext follows check 5.7.4 below).
+        scored: false
+
+      - id: 5.7.4
+        text: "The default namespace should not be used (Manual)"
+        type: "manual"
+        audit: |
+          # Run this command to list objects in default namespace
+          oc project default
+          oc get all
+          # The only entries there should be system managed resources such as the kubernetes and openshift service
+        remediation: |
+          Ensure that namespaces are created to allow for appropriate segregation of Kubernetes
+          resources and that all new resources are created in a specific namespace.
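+          As an illustration for check 5.7.3 above (the values are examples,
+          not mandated settings), a restrictive container securityContext
+          might include:
+          securityContext:
+            runAsNonRoot: true
+            allowPrivilegeEscalation: false
+            readOnlyRootFilesystem: true
+            capabilities:
+              drop: ["ALL"]
+            seccompProfile:
+              type: RuntimeDefault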
+        scored: false
diff --git a/cmd/util.go b/cmd/util.go
index 275de2326..e73b8d84c 100644
--- a/cmd/util.go
+++ b/cmd/util.go
@@ -507,6 +507,8 @@ func getPlatformBenchmarkVersion(platform Platform) string {
 			return "rh-0.7"
 		case "4.1":
 			return "rh-1.0"
+		case "4.15":
+			return "rh-1.6"
 		}
 	case "vmware":
 		return "tkgi-1.2.53"
@@ -581,7 +583,7 @@ func getOcpValidVersion(ocpVer string) (string, error) {
 
 	for !isEmpty(ocpVer) {
 		glog.V(3).Info(fmt.Sprintf("getOcpBenchmarkVersion check for ocp: %q \n", ocpVer))
-		if ocpVer == "3.10" || ocpVer == "4.1" {
+		if ocpVer == "4.15" || ocpVer == "4.1" || ocpVer == "3.10" {
 			glog.V(1).Info(fmt.Sprintf("getOcpBenchmarkVersion found valid version for ocp: %q \n", ocpVer))
 			return ocpVer, nil
 		}
diff --git a/cmd/util_test.go b/cmd/util_test.go
index 2c24a7a95..92ddeb879 100644
--- a/cmd/util_test.go
+++ b/cmd/util_test.go
@@ -708,6 +708,13 @@ func Test_getPlatformBenchmarkVersion(t *testing.T) {
 			},
 			want: "rh-1.0",
 		},
+		{
+			name: "openshift4_15",
+			args: args{
+				platform: Platform{Name: "ocp", Version: "4.15"},
+			},
+			want: "rh-1.6",
+		},
 		{
 			name: "k3s",
 			args: args{
@@ -751,6 +758,7 @@ func Test_getOcpValidVersion(t *testing.T) {
 		{openShiftVersion: "4.1", succeed: true, exp: "4.1"},
 		{openShiftVersion: "4.5", succeed: true, exp: "4.1"},
 		{openShiftVersion: "4.6", succeed: true, exp: "4.1"},
+		{openShiftVersion: "4.16", succeed: true, exp: "4.15"},
 		{openShiftVersion: "invalid", succeed: false, exp: ""},
 	}
 	for _, c := range cases {
diff --git a/docs/architecture.md b/docs/architecture.md
index c65978f0a..3ed01527e 100644
--- a/docs/architecture.md
+++ b/docs/architecture.md
@@ -13,28 +13,29 @@ Check the contents of the benchmark directory under `cfg` to see which targets a
 
 The following table shows the valid targets based on the CIS Benchmark version.
-| CIS Benchmark        | Targets |
-|----------------------|---------|
-| cis-1.5              | master, controlplane, node, etcd, policies |
-| cis-1.6              | master, controlplane, node, etcd, policies |
-| cis-1.20             | master, controlplane, node, etcd, policies |
-| cis-1.23             | master, controlplane, node, etcd, policies |
-| cis-1.24             | master, controlplane, node, etcd, policies |
-| cis-1.7              | master, controlplane, node, etcd, policies |
-| cis-1.8              | master, controlplane, node, etcd, policies |
-| cis-1.9              | master, controlplane, node, etcd, policies |
-| gke-1.0              | master, controlplane, node, etcd, policies, managedservices |
-| gke-1.2.0            | controlplane, node, policies, managedservices |
-| gke-1.6.0            | controlplane, node, policies, managedservices |
-| eks-1.0.1            | controlplane, node, policies, managedservices |
-| eks-1.1.0            | controlplane, node, policies, managedservices |
-| eks-1.2.0            | controlplane, node, policies, managedservices |
-| ack-1.0              | master, controlplane, node, etcd, policies, managedservices |
-| aks-1.0              | controlplane, node, policies, managedservices |
-| rh-0.7               | master,node|
-| rh-1.0               | master, controlplane, node, etcd, policies |
-| cis-1.6-k3s          | master, controlplane, node, etcd, policies |
-| cis-1.24-microk8s    | master, controlplane, node, etcd, policies |
+| CIS Benchmark     | Targets |
+|-------------------|---------|
+| cis-1.5           | master, controlplane, node, etcd, policies |
+| cis-1.6           | master, controlplane, node, etcd, policies |
+| cis-1.20          | master, controlplane, node, etcd, policies |
+| cis-1.23          | master, controlplane, node, etcd, policies |
+| cis-1.24          | master, controlplane, node, etcd, policies |
+| cis-1.7           | master, controlplane, node, etcd, policies |
+| cis-1.8           | master, controlplane, node, etcd, policies |
+| cis-1.9           | master, controlplane, node, etcd, policies |
+| gke-1.0           | master, controlplane, node, etcd, policies, managedservices |
+| gke-1.2.0         | controlplane, node, policies, managedservices |
+| gke-1.6.0         | controlplane, node, policies, managedservices |
+| eks-1.0.1         | controlplane, node, policies, managedservices |
+| eks-1.1.0         | controlplane, node, policies, managedservices |
+| eks-1.2.0         | controlplane, node, policies, managedservices |
+| ack-1.0           | master, controlplane, node, etcd, policies, managedservices |
+| aks-1.0           | controlplane, node, policies, managedservices |
+| rh-0.7            | master, node |
+| rh-1.0            | master, controlplane, node, etcd, policies |
+| rh-1.6            | master, controlplane, node, etcd, policies |
+| cis-1.6-k3s       | master, controlplane, node, etcd, policies |
+| cis-1.24-microk8s | master, controlplane, node, etcd, policies |
 
 The following table shows the valid DISA STIG versions
diff --git a/docs/platforms.md b/docs/platforms.md
index d6fbcf712..be85f3978 100644
--- a/docs/platforms.md
+++ b/docs/platforms.md
@@ -17,7 +17,7 @@ Some defined by other hardenening guides.
 | CIS | [1.24](https://workbench.cisecurity.org/benchmarks/10873) | cis-1.24 | 1.24 |
 | CIS | [1.7](https://workbench.cisecurity.org/benchmarks/11107) | cis-1.7 | 1.25 |
 | CIS | [1.8](https://workbench.cisecurity.org/benchmarks/12958) | cis-1.8 | 1.26 |
-| CIS | [1.9](https://workbench.cisecurity.org/benchmarks/16828) | cis-1.9 | 1.27-1.29 |
+| CIS | [1.9](https://workbench.cisecurity.org/benchmarks/16828) | cis-1.9 | 1.27-1.29 |
 | CIS | [GKE 1.0.0](https://workbench.cisecurity.org/benchmarks/4536) | gke-1.0 | GKE |
 | CIS | [GKE 1.2.0](https://workbench.cisecurity.org/benchmarks/7534) | gke-1.2.0 | GKE |
 | CIS | [GKE 1.6.0](https://workbench.cisecurity.org/benchmarks/16093) | gke-1.6.0 | GKE |
@@ -27,7 +27,8 @@
| CIS | [ACK 1.0.0](https://workbench.cisecurity.org/benchmarks/6467) | ack-1.0 | ACK |
 | CIS | [AKS 1.0.0](https://workbench.cisecurity.org/benchmarks/6347) | aks-1.0 | AKS |
 | RHEL | RedHat OpenShift hardening guide | rh-0.7 | OCP 3.10-3.11 |
-| CIS | [OCP4 1.1.0](https://workbench.cisecurity.org/benchmarks/6778) | rh-1.0 | OCP 4.1- |
+| CIS | [OCP4 1.1.0](https://workbench.cisecurity.org/benchmarks/6778) | rh-1.0 | OCP 4.1-4.14 |
+| CIS | [OCP4 1.6.0](https://workbench.cisecurity.org/benchmarks/16094) | rh-1.6 | OCP 4.15- |
 | CIS | [1.6.0-k3s](https://docs.rancher.cn/docs/k3s/security/self-assessment/_index) | cis-1.6-k3s | k3s v1.16-v1.24 |
 | DISA | [Kubernetes Ver 1, Rel 6](https://dl.dod.cyber.mil/wp-content/uploads/stigs/zip/U_Kubernetes_V1R6_STIG.zip) | eks-stig-kubernetes-v1r6 | EKS |
 | CIS | [TKGI 1.2.53](https://network.pivotal.io/products/p-compliance-scanner#/releases/1248397) | tkgi-1.2.53 | vmware |
diff --git a/docs/running.md b/docs/running.md
index c482a78b6..e792fadbe 100644
--- a/docs/running.md
+++ b/docs/running.md
@@ -132,9 +132,10 @@ docker push <AWS_ACCT_NUMBER>.dkr.ecr.<AWS_REGION>.amazonaws.com/k8s/kube-bench:<IMAGE_TAG>
 ### Running on OpenShift
 
 | OpenShift Hardening Guide | kube-bench config |
-| ------------------------- | ----------------- |
+|---------------------------|-------------------|
 | ocp-3.10 +                | rh-0.7            |
-| ocp-4.1 +                 | rh-1.0            |
+| ocp-4.1 - 4.14            | rh-1.0            |
+| ocp-4.15 +                | rh-1.6            |
 
 kube-bench includes a set of test files for Red Hat's OpenShift hardening guide for OCP 3.10 and 4.1. To run this you will need to specify `--benchmark rh-07`, or `--version ocp-3.10` or,`--version ocp-4.5` or `--benchmark rh-1.0`

From b7b566d634e57405dd971eb00048b88c60e2f1ab Mon Sep 17 00:00:00 2001
From: Deepanshu Bhatia
Date: Mon, 2 Sep 2024 00:57:32 +0530
Subject: [PATCH 02/13] Update all TCs in node.yaml

---
 cfg/rh-1.6/node.yaml | 219 +++++++++++++++++--------------------------
 1 file changed, 87 insertions(+), 132 deletions(-)

diff --git a/cfg/rh-1.6/node.yaml b/cfg/rh-1.6/node.yaml
index 0f29a7066..62a3a396a 100644
--- a/cfg/rh-1.6/node.yaml
+++ b/cfg/rh-1.6/node.yaml
@@ -37,7 +37,7 @@ groups:
         scored: true
 
       - id: 4.1.3
-        text: "If proxy kubeconfig file exists ensure permissions are set to 644 or more restrictive (Manual)"
+        text: "If proxy kube proxy configuration file exists ensure permissions are set to 644 or more restrictive (Manual)"
         audit: |
           # Get the node name where the pod is running
           NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
           # Get the pod name in the openshift-sdn namespace
           POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
 
       - id: 4.1.4
-        text: "Ensure that the proxy kubeconfig file ownership is set to root:root (Manual)"
+        text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)"
         audit: |
           # Get the node name where the pod is running
           NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
           # Get the pod name in the openshift-sdn namespace
           POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
 
       - id: 4.1.5
-        text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 or more restrictive (Manual)"
+        text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 or more restrictive (Automated)"
         audit: |
           # Check permissions
           NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
           oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/kubernetes/kubelet.conf 2> /dev/null
         use_multiple_values: true
         tests:
           test_items:
             - flag: "permissions"
               compare:
                 op: bitmask
                 value: "644"
         remediation: |
           None required.
- scored: false + scored: true - id: 4.1.6 - text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Manual)" + text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)" audit: | NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/kubernetes/kubelet.conf 2> /dev/null @@ -113,13 +113,13 @@ groups: - flag: root:root remediation: | None required. - scored: false + scored: true - id: 4.1.7 text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Automated)" audit: | NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/kubernetes/kubelet-ca.crt 2> /dev/null + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/kubernetes/cert/ca.pem 2> /dev/null use_multiple_values: true tests: test_items: @@ -135,7 +135,7 @@ groups: text: "Ensure that the client certificate authorities file ownership is set to root:root (Automated)" audit: | NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/kubernetes/kubelet-ca.crt 2> /dev/null + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/kubernetes/cert/ca.pem 2> /dev/null use_multiple_values: true tests: test_items: @@ -145,17 +145,17 @@ groups: scored: true - id: 4.1.9 - text: "Ensure that the kubelet --config configuration file has permissions set to 644 or more restrictive (Automated)" + text: "Ensure that the kubelet --config configuration file has permissions set to 600 or more restrictive (Automated)" audit: | NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /var/lib/kubelet/kubeconfig 2> /dev/null + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /var/data/kubelet/config.json 2> /dev/null use_multiple_values: true tests: test_items: - flag: "permissions" compare: op: bitmask - value: "644" + value: "600" remediation: | None required. scored: true @@ -164,7 +164,7 @@ groups: text: "Ensure that the kubelet configuration file ownership is set to root:root (Automated)" audit: | NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /var/lib/kubelet/kubeconfig 2> /dev/null + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /var/data/kubelet/config.json 2> /dev/null use_multiple_values: true tests: test_items: @@ -177,7 +177,7 @@ groups: text: "Kubelet" checks: - id: 4.2.1 - text: "Ensure that the --anonymous-auth argument is set to false (Automated)" + text: "Activate Garbage collection in OpenShift Container Platform 4, as appropriate (Manual)" audit: | NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') oc debug node/$NODE_NAME -- chroot /host grep -B4 -A1 anonymous /etc/kubernetes/kubelet.conf 2> /dev/null @@ -187,170 +187,133 @@ groups: - flag: "enabled: true" set: false remediation: | - Follow the instructions in the documentation to create a Kubelet config CRD - and set the anonymous-auth is set to false. 
+          To configure, follow the directions in the garbage collection documentation:
+          https://docs.openshift.com/container-platform/latest/nodes/nodes/nodes-nodes-garbage-collection.html
-        scored: true
+        scored: false
 
       - id: 4.2.2
-        text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Manual)"
+        text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
+        audit: |
+          NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
+          oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq '.kubeletconfig.authentication.anonymous.enabled' 2> /dev/null
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "true"
+              set: false
+        remediation: |
+          Create a kubeletconfig to explicitly disable anonymous authentication. Examples of how
+          to do this can be found in the OpenShift documentation.
+        scored: true
+
+      - id: 4.2.3
+        text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
-        type: manual
-        # Takes a lot of time for connection to fail and
         audit: |
-          POD=$(oc -n openshift-kube-apiserver get pod -l app=openshift-kube-apiserver -o jsonpath='{.items[0].metadata.name}')
-          TOKEN=$(oc whoami -t)
           NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
-          oc exec -n openshift-kube-apiserver $POD -- curl -sS https://172.25.0.1/api/v1/nodes/$NODE_NAME/proxy/configz -k -H "Authorization:Bearer $TOKEN" | jq -r '.kubeletconfig.authorization.mode' 2> /dev/null
+          oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq '.kubeletconfig.authorization' 2> /dev/null
         use_multiple_values: true
         tests:
           test_items:
-            - flag: "Connection timed out"
+            - flag: mode
+              compare:
+                op: noteq
+                value: AlwaysAllow
         remediation: |
           None required. Unauthenticated/Unauthorized users have no access to OpenShift nodes.
-        scored: false
+        scored: true
 
       - id: 4.2.4
         text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
         audit: |
           NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
-          oc debug node/$NODE_NAME -- chroot /host grep clientCAFile /etc/kubernetes/kubelet.conf 2> /dev/null
+          oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq '.kubeletconfig.authentication.x509' 2> /dev/null
         use_multiple_values: true
         tests:
           test_items:
-            - flag: '"clientCAFile": "/etc/kubernetes/kubelet-ca.crt"'
+            - flag: clientCAFile
+              compare:
+                op: eq
+                value: /etc/kubernetes/kubelet-ca.crt
         remediation: |
           None required. Changing the clientCAFile value is unsupported.
         scored: true
 
       - id: 4.2.5
         text: "Verify that the read only port is not used or is set to 0 (Automated)"
         audit: |
+          oc -n openshift-kube-apiserver get cm config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments' 2> /dev/null
           echo `oc -n openshift-kube-apiserver get cm kube-apiserver-pod -o yaml | grep --color read-only-port` 2> /dev/null
           echo `oc -n openshift-kube-apiserver get cm config -o yaml | grep --color "read-only-port"` 2> /dev/null
         tests:
-          bin_op: or
           test_items:
-            - flag: "read-only-port"
+            - flag: kubelet-read-only-port
               compare:
                 op: has
                 value: "[\"0\"]"
-            - flag: "read-only-port"
-              set: false
         remediation: |
           In earlier versions of OpenShift 4, the read-only-port argument is not used.
+          Follow the instructions in the documentation
+          https://docs.openshift.com/container-platform/latest/post_installation_configuration/machine-configuration-tasks.html#create-a-kubeletconfig-crd-to-edit-kubelet-parameters_post-install-machine-configuration-tasks
+          to create a kubeletconfig CRD and ensure that kubelet-read-only-port is set to 0.
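+          An illustrative KubeletConfig (the pool selector label and the
+          readOnlyPort field are assumptions; verify both against the
+          documentation for your cluster version) would be:
+          apiVersion: machineconfiguration.openshift.io/v1
+          kind: KubeletConfig
+          metadata:
+            name: disable-read-only-port
+          spec:
+            machineConfigPoolSelector:
+              matchLabels:
+                pools.operator.machineconfiguration.openshift.io/worker: ""
+            kubeletConfig:
+              readOnlyPort: 0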
+        Follow the instructions in the documentation https://docs.openshift.com/container-platform/latest/post_installation_configuration/machine-configuration-tasks.html#create-a-kubeletconfig-crd-to-edit-kubelet-parameters_post-install-machine-configuration-tasks
+        to create a kubeletconfig CRD and set kubelet-read-only-port to 0.
       scored: true

-    - id: 4.2.5
+    - id: 4.2.6
       text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated)"
       audit: |
         # Should return 1 for node
         NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
-        oc debug node/${NODE_NAME} -- chroot /host ps -ef | grep kubelet | grep streaming-connection-idle-timeout 2> /dev/null
-        echo exit_code=$?
-        # Should return 1 for node
-        oc debug node/${NODE_NAME} -- chroot /host grep streamingConnectionIdleTimeout /etc/kubernetes/kubelet.conf 2> /dev/null
-        echo exit_code=$?
+        oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq '.kubeletconfig' 2> /dev/null
       use_multiple_values: true
       tests:
-        bin_op: or
         test_items:
-          - flag: --streaming-connection-idle-timeout
-            compare:
-              op: noteq
-              value: 0
           - flag: streamingConnectionIdleTimeout
             compare:
               op: noteq
               value: 0s
-          - flag: "exit_code"
-            compare:
-              op: eq
-              value: 1
       remediation: |
-        Follow the instructions in the documentation to create a Kubelet config CRD and set
-        the --streaming-connection-idle-timeout to the desired value. Do not set the value to 0.
+        Follow the instructions in the documentation https://docs.openshift.com/container-platform/latest/post_installation_configuration/machine-configuration-tasks.html#create-a-kubeletconfig-crd-to-edit-kubelet-parameters_post-install-machine-configuration-tasks to create a kubeletconfig CRD and set
+        streamingConnectionIdleTimeout to the desired value. Do not set the value to 0.
       scored: true

-    - id: 4.2.6
-      text: "Ensure that the --protect-kernel-defaults argument is not set (Manual)"
-      audit: |
-        NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
-        oc debug node/$NODE_NAME -- chroot /host more /etc/kubernetes/kubelet.conf 2> /dev/null
-      tests:
-        test_items:
-          - flag: protectKernelDefaults
-            set: false
-      remediation: |
-        None required. The OpenShift 4 kubelet modifies the system tunable;
-        using the protect-kernel-defaults flag will cause the kubelet to fail on start if the tunables
-        don't match the kubelet configuration and the OpenShift node will fail to start.
-      scored: false
-
     - id: 4.2.7
       text: "Ensure that the --make-iptables-util-chains argument is set to true (Manual)"
       audit: |
-        /bin/bash
-        flag=make-iptables-util-chains
-        opt=makeIPTablesUtilChains
-        # look at each machineconfigpool
-        while read -r pool nodeconfig; do
-          # true by default
-          value='true'
-          # first look for the flag
-          oc get machineconfig $nodeconfig -o json | jq -r '.spec.config.systemd[][] | select(.name=="kubelet.service") | .contents' | sed -n "/^ExecStart=/,/^\$/ { /^\\s*--$flag=false/ q 100 }"
-          # if the above command exited with 100, the flag was false
-          [ $? == 100 ] && value='false'
-          # now look in the yaml KubeletConfig
-          yamlconfig=$(oc get machineconfig $nodeconfig -o json | jq -r '.spec.config.storage.files[] | select(.path=="/etc/kubernetes/kubelet.conf") | .contents.source ' | sed 's/^data:,//' | while read; do echo -e ${REPLY//%/\\x}; done)
-          echo "$yamlconfig" | sed -n "/^$opt:\\s*false\\s*$/ q 100"
-          [ $?
== 100 ] && value='false' - echo "Pool $pool has $flag ($opt) set to $value" - done < <(oc get machineconfigpools -o json | jq -r '.items[] | select(.status.machineCount>0) | .metadata.name + " " + .spec.configuration.name') + # Should return 1 for node + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq '.kubeletconfig' 2> /dev/null use_multiple_values: true tests: test_items: - - flag: "set to true" + - flag: makeIPTablesUtilChains + compare: + op: eq + value: true remediation: | - None required. The --make-iptables-util-chains argument is set to true by default. + None required. The makeIPTablesUtilChains argument is set to true by default. scored: false - id: 4.2.8 - text: "Ensure that the --hostname-override argument is not set (Manual)" + text: "Ensure that the kubeAPIQPS [--event-qps] argument is set to 0 or a level which ensures appropriate event capture (Manual)" audit: | - echo `oc get machineconfig 01-worker-kubelet -o yaml | grep hostname-override` - echo `oc get machineconfig 01-master-kubelet -o yaml | grep hostname-override` + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq '.kubeletconfig' 2> /dev/null tests: test_items: - - flag: hostname-override - set: false + - flag: kubeAPIQPS + compare: + op: gte + value: 0 remediation: | - By default, --hostname-override argument is not set. + None required by default. Follow the documentation to edit kubeletconfig parameters + https://docs.openshift.com/container-platform/4.15/post_installation_configuration/machine-configuration-tasks.html#create-a-kubeletconfig-crd-to-edit-kubelet-parameters_post-install-machine-configuration-tasks scored: false - id: 4.2.9 - text: "Ensure that the kubeAPIQPS [--event-qps] argument is set to 0 or a level which ensures appropriate event capture (Manual)" + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)" audit: | - NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc debug node/${NODE_NAME} -- chroot /host cat /etc/kubernetes/kubelet.conf; - oc get machineconfig 01-worker-kubelet -o yaml | grep --color kubeAPIQPS%3A%2050 - oc get machineconfig 01-master-kubelet -o yaml | grep --color kubeAPIQPS%3A%2050 - type: "manual" - remediation: | - Follow the documentation to edit kubelet parameters - https://docs.openshift.com/container-platform/4.15/scalability_and_performance/recommended-host-practices.html#create-a-kubeletconfig-crd-to-edit-kubelet-parameters - KubeAPIQPS: - scored: false - - - id: 4.2.10 - text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" - audit: | - oc get configmap config -n openshift-kube-apiserver -o json \ - | jq -r '.data["config.yaml"]' \ - | jq -r '.apiServerArguments | - .["kubelet-client-certificate"][0], - .["kubelet-client-key"][0] - ' + oc get configmap config -n openshift-kube-apiserver -ojson | \ + jq -r '.data["config.yaml"]' | \ + jq -r '.apiServerArguments | ."kubelet-client-certificate"[0], ."kubelet-client-key"[0]' 2> /dev/null tests: bin_op: and test_items: @@ -361,40 +324,30 @@ groups: This is not configurable. 
scored: true - - id: 4.2.11 + - id: 4.2.10 text: "Ensure that the --rotate-certificates argument is not set to false (Manual)" audit: | - #Verify the rotateKubeletClientCertificate feature gate is not set to false NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc debug node/${NODE_NAME} -- chroot /host cat /etc/kubernetes/kubelet.conf | grep RotateKubeletClientCertificate 2> /dev/null - # Verify the rotateCertificates argument is set to true - oc debug node/${NODE_NAME} -- chroot host grep rotate /etc/kubernetes/kubelet.conf 2> /dev/null + oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq -r '.kubeletconfig' 2> /dev/null use_multiple_values: true tests: - bin_op: or test_items: - flag: rotateCertificates compare: op: eq value: true - - flag: rotateKubeletClientCertificates - compare: - op: noteq - value: false - - flag: rotateKubeletClientCertificates - set: false remediation: | None required. scored: false - - id: 4.2.12 + - id: 4.2.11 text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)" audit: | #Verify the rotateKubeletServerCertificate feature gate is on NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc debug node/${NODE_NAME} -- chroot /host grep RotateKubeletServerCertificate /etc/kubernetes/kubelet.conf 2> /dev/null + oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq '.kubeletconfig.featureGates' 2> /dev/null # Verify the rotateCertificates argument is set to true - oc debug node/${NODE_NAME} -- chroot host grep rotate /etc/kubernetes/kubelet.conf 2> /dev/null + oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq -r '.kubeletconfig' 2> /dev/null use_multiple_values: true tests: bin_op: or @@ -408,22 +361,24 @@ groups: op: eq value: true remediation: | - By default, kubelet server certificate rotation is disabled. + None required. By default, kubelet server certificate rotation is enabled. scored: false - - id: 4.2.13 + - id: 4.2.12 text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)" audit: | # needs verification # verify cipher suites - oc describe --namespace=openshift-ingress-operator ingresscontroller/default - oc get kubeapiservers.operator.openshift.io cluster -o json |jq .spec.observedConfig.servingInfo - oc get openshiftapiservers.operator.openshift.io cluster -o json |jq .spec.observedConfig.servingInfo - oc get cm -n openshift-authentication v4-0-config-system-cliconfig -o jsonpath='{.data.v4\-0\-config\-system\-cliconfig}' | jq .servingInfo + oc get --namespace=openshift-ingress-operator ingresscontroller/default -o json | jq '.status.tlsProfile.ciphers' 2> /dev/null + oc get kubeapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig.servingInfo.cipherSuites' 2> /dev/null + oc get openshiftapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig.servingInfo.cipherSuites' 2> /dev/null + oc get cm -n openshift-authentication v4-0-config-system-cliconfig -o jsonpath='{.data.v4\-0\-config\-system\-cliconfig}' | jq '.servingInfo.cipherSuites' 2> /dev/null #check value for tlsSecurityProfile; null is returned if default is used - oc get kubeapiservers.operator.openshift.io cluster -o json |jq .spec.tlsSecurityProfile + oc get kubeapiservers.operator.openshift.io cluster -o json |jq .spec.tlsSecurityProfile 2> /dev/null type: manual remediation: | Follow the directions above and in the OpenShift documentation to configure the tlsSecurityProfile. - Configuring Ingress + Configuring Ingress. 
https://docs.openshift.com/container-platform/4.15/networking/ingress-operator.html#nw-ingress-controller-configuration-parameters_configuring-ingress + Please reference the OpenShift TLS security profile documentation for more detail on each profile. + https://docs.openshift.com/container-platform/4.15/security/tls-security-profiles.html scored: false From f8d3aae8087c018fe2b1c0923e5b24857e51c4c0 Mon Sep 17 00:00:00 2001 From: Deepanshu Bhatia Date: Mon, 2 Sep 2024 01:07:27 +0530 Subject: [PATCH 03/13] Update all TCs in etcd.yaml --- cfg/rh-1.6/etcd.yaml | 48 ++++++++++++++++++++++---------------------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/cfg/rh-1.6/etcd.yaml b/cfg/rh-1.6/etcd.yaml index 22ad71b29..674e21fad 100644 --- a/cfg/rh-1.6/etcd.yaml +++ b/cfg/rh-1.6/etcd.yaml @@ -16,11 +16,11 @@ groups: # Get the pod name in the openshift-etcd namespace POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) if [ -z "$POD_NAME" ]; then - echo "No matching file found on the current node." + echo "No matching file found on the current node." else - # Execute the stat command - oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--cert-file=[^ ]*\).*/\1/' - oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--key-file=[^ ]*\).*/\1/' + # Execute the stat command + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--cert-file=[^ ]*\).*/\1/' + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--key-file=[^ ]*\).*/\1/' fi use_multiple_values: true tests: @@ -42,10 +42,10 @@ groups: # Get the pod name in the openshift-etcd namespace POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) if [ -z "$POD_NAME" ]; then - echo "No matching file found on the current node." + echo "No matching file found on the current node." else - # Execute the stat command - oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--client-cert-auth=[^ ]*\).*/\1/' + # Execute the stat command + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--client-cert-auth=[^ ]*\).*/\1/' fi use_multiple_values: true tests: @@ -67,10 +67,10 @@ groups: # Get the pod name in the openshift-etcd namespace POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) if [ -z "$POD_NAME" ]; then - echo "No matching file found on the current node." + echo "No matching file found on the current node." else - # Execute the stat command - oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | grep -- --auto-tls=true 2>/dev/null ; echo exit_code=$? + # Execute the stat command + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | grep -- --auto-tls=true 2>/dev/null ; echo exit_code=$? fi use_multiple_values: true tests: @@ -91,11 +91,11 @@ groups: # Get the pod name in the openshift-etcd namespace POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) if [ -z "$POD_NAME" ]; then - echo "No matching file found on the current node." + echo "No matching file found on the current node." 
else - # Execute the stat command - oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-cert-file=[^ ]*\).*/\1/' - oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-key-file=[^ ]*\).*/\1/' + # Execute the stat command + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-cert-file=[^ ]*\).*/\1/' + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-key-file=[^ ]*\).*/\1/' fi use_multiple_values: true tests: @@ -116,10 +116,10 @@ groups: # Get the pod name in the openshift-etcd namespace POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) if [ -z "$POD_NAME" ]; then - echo "No matching file found on the current node." + echo "No matching file found on the current node." else - # Execute the stat command - oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-client-cert-auth=[^ ]*\).*/\1/' + # Execute the stat command + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-client-cert-auth=[^ ]*\).*/\1/' fi use_multiple_values: true tests: @@ -141,10 +141,10 @@ groups: # Get the pod name in the openshift-etcd namespace POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) if [ -z "$POD_NAME" ]; then - echo "No matching file found on the current node." + echo "No matching file found on the current node." else - # Execute the stat command - oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | grep -- --peer-auto-tls=true 2>/dev/null ; echo exit_code=$? + # Execute the stat command + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | grep -- --peer-auto-tls=true 2>/dev/null ; echo exit_code=$? fi use_multiple_values: true tests: @@ -165,11 +165,11 @@ groups: # Get the pod name in the openshift-etcd namespace POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) if [ -z "$POD_NAME" ]; then - echo "No matching file found on the current node." + echo "No matching file found on the current node." 
else - # Execute the stat command - oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--trusted-ca-file=[^ ]*\).*/\1/' - oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-trusted-ca-file=[^ ]*\).*/\1/' + # Execute the stat command + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--trusted-ca-file=[^ ]*\).*/\1/' + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-trusted-ca-file=[^ ]*\).*/\1/' fi use_multiple_values: true tests: From 2f50de2d0c046668130fed5287a6663b915b350b Mon Sep 17 00:00:00 2001 From: Deepanshu Bhatia Date: Mon, 2 Sep 2024 16:33:11 +0530 Subject: [PATCH 04/13] Update all TCs in policies.yaml; fix command in rh-1.0 as well --- cfg/rh-1.0/policies.yaml | 2 +- cfg/rh-1.6/policies.yaml | 142 ++++++++++++++++++++++++++++----------- 2 files changed, 102 insertions(+), 42 deletions(-) diff --git a/cfg/rh-1.0/policies.yaml b/cfg/rh-1.0/policies.yaml index e90cd877f..29653c809 100644 --- a/cfg/rh-1.0/policies.yaml +++ b/cfg/rh-1.0/policies.yaml @@ -13,7 +13,7 @@ groups: type: "manual" audit: | #To get a list of users and service accounts with the cluster-admin role - oc get clusterrolebindings -o=customcolumns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].kind | + oc get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].kind | grep cluster-admin #To verity that kbueadmin is removed, no results should be returned oc get secrets kubeadmin -n kube-system diff --git a/cfg/rh-1.6/policies.yaml b/cfg/rh-1.6/policies.yaml index fb39ca0d4..51e2b5d7f 100644 --- a/cfg/rh-1.6/policies.yaml +++ b/cfg/rh-1.6/policies.yaml @@ -13,8 +13,7 @@ groups: type: "manual" audit: | #To get a list of users and service accounts with the cluster-admin role - oc get clusterrolebindings -o=customcolumns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].kind | - grep cluster-admin + oc get clusterrolebindings -o=custom-columns="NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].kind" | grep cluster-admin #To verity that kbueadmin is removed, no results should be returned oc get secrets kubeadmin -n kube-system remediation: | @@ -22,7 +21,7 @@ groups: if they need this role or if they could use a role with fewer privileges. Where possible, first bind users to a lower privileged role and then remove the clusterrolebinding to the cluster-admin role : - kubectl delete clusterrolebinding [name] + oc delete clusterrolebinding [name] scored: false - id: 5.1.2 @@ -36,14 +35,11 @@ groups: text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" type: "manual" audit: | - #needs verification - oc get roles --all-namespaces -o yaml - for i in $(oc get roles -A -o jsonpath='{.items[*].metadata.name}'); do oc - describe clusterrole ${i}; done - #Retrieve the cluster roles defined in the cluster and review for wildcards - oc get clusterroles -o yaml - for i in $(oc get clusterroles -o jsonpath='{.items[*].metadata.name}'); do - oc describe clusterrole ${i}; done + # needs verification + # Run the command below to describe each cluster role and inspect it for wildcard usage + oc describe clusterrole + # Run the command below to describe each role and inspect it for wildcard usage + oc describe role -A remediation: | Where possible replace any use of wildcards in clusterroles and roles with specific objects or actions. 
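As an illustrative aside, not part of the patch series: the 5.1.3 audit above leaves wildcard
inspection to a manual reading of oc describe output. A jq filter along the following lines could
surface offending roles directly; this is an untested sketch, and the filter shape is an
assumption rather than kube-bench audit code.

    # List ClusterRoles whose rules use "*" in apiGroups, resources, or verbs
    oc get clusterroles -o json | jq -r '
      .items[]
      | select([.rules[]? | (.apiGroups + .resources + .verbs)[]?] | index("*"))
      | .metadata.name'

    # Same idea for namespaced Roles, prefixed with their namespace
    oc get roles -A -o json | jq -r '
      .items[]
      | select([.rules[]? | (.apiGroups + .resources + .verbs)[]?] | index("*"))
      | .metadata.namespace + "/" + .metadata.name'

Either list can then be fed back into oc describe for the manual review the check calls for.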
@@ -52,6 +48,10 @@ groups: - id: 5.1.4 text: "Minimize access to create pods (Manual)" type: "manual" + audit: | + # needs verification + # Review the users who have create access to pod objects in the Kubernetes API + oc adm policy who-can create pod remediation: | Where possible, remove create access to pod objects in the cluster. scored: false @@ -66,6 +66,12 @@ groups: - id: 5.1.6 text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" type: "manual" + audit: | + # needs verification + # Find all pods that automatically mount service account tokens + oc get pods -A -o json | jq '.items[] | select(.spec.automountServiceAccountToken) | .metadata.name' + # Find all service accounts that automatically mount service tokens + oc get serviceaccounts -A -o json | jq '.items[] | select(.automountServiceAccountToken) | .metadata.name' remediation: | Modify the definition of pods and service accounts which do not need to mount service account tokens to disable it. @@ -83,8 +89,8 @@ groups: test_items: - flag: "false" remediation: | - Create a SCC as described in the OpenShift documentation, ensuring that the Allow - Privileged field is set to false. + Create an SCC that sets allowPrivilegedContainer to false and take it into use by + assigning it to applicable users and groups. scored: false - id: 5.2.2 @@ -95,8 +101,8 @@ groups: test_items: - flag: "false" remediation: | - Create a SCC as described in the OpenShift documentation, ensuring that the Allow Host - PID field is set to false. + Create an SCC that sets allowHostPID to false and take it into use by assigning it to + applicable users and groups. scored: false - id: 5.2.3 @@ -107,8 +113,8 @@ groups: test_items: - flag: "false" remediation: | - Create a SCC as described in the OpenShift documentation, ensuring that the Allow Host - IPC field is set to false. + Create an SCC that sets allowHostIPC to false and take it into use by assigning it to + applicable users and groups. scored: false - id: 5.2.4 @@ -119,8 +125,8 @@ groups: test_items: - flag: "false" remediation: | - Create a SCC as described in the OpenShift documentation, ensuring that the Allow Host - Network field is omitted or set to false. + Create an SCC that sets allowHostNetwork to false and take it into use by assigning + it to applicable users and groups. scored: false - id: 5.2.5 @@ -131,14 +137,14 @@ groups: test_items: - flag: "false" remediation: | - Create a SCC as described in the OpenShift documentation, ensuring that the Allow - Privilege Escalation field is omitted or set to false. + Create an SCC that sets allowPrivilegeEscalation to false and take it into use by + assigning it to applicable users and groups. scored: false - id: 5.2.6 text: "Minimize the admission of root containers (Manual)" audit: | - # needs verification # | awk 'NR>1 {gsub("map\\[type:", "", $2); gsub("\\]$", "", $2); print $1 ":" $2}' + # needs verification oc get scc -o=custom-columns=NAME:.metadata.name,runAsUser:.runAsUser.type #For SCCs with MustRunAs verify that the range of UIDs does not include 0 oc get scc -o=custom-columns=NAME:.metadata.name,uidRangeMin:.runAsUser.uidRangeMin,uidRangeMax:.runAsUser.uidRangeMax @@ -151,8 +157,8 @@ groups: op: nothave value: 0 remediation: | - None required. By default, OpenShift includes the non-root SCC with the the Run As User - Strategy is set to either MustRunAsNonRoot. If additional SCCs are appropriate, follow the + None required. 
By default, OpenShift includes the nonroot and nonroot-v2 SCCs that + restrict the ability to run as nonroot. If additional SCCs are appropriate, follow the OpenShift documentation to create custom SCCs. scored: false @@ -167,27 +173,65 @@ groups: - flag: "ALL" - flag: "NET_RAW" remediation: | - Create a SCC as described in the OpenShift documentation, ensuring that the Required - Drop Capabilities is set to include either NET_RAW or ALL. + Create an SCC that sets requiredDropCapabilities to include ALL or at least + NET_RAW and take it into use by assigning it to applicable users and groups. scored: false - id: 5.2.8 text: "Minimize the admission of containers with added capabilities (Manual)" type: "manual" + audit: | + # needs verification + # List all SCCs that prohibit users from defining container capabilities + oc get scc -A -o json | jq '.items[] | select(.allowedCapabilities==null) | .metadata.name' + # List all SCCs that do not set default container capabilities + oc get scc -A -o json | jq '.items[] | select(.defaultAddCapabilities==null) | .metadata.name' + tests: + test_items: + - flag: "false" remediation: | - Ensure that Allowed Capabilities is set to an empty array for every SCC in the cluster - except for the privileged SCC. + Utilize the restricted-v2 SCC or create an SCC that sets allowedCapabilities and + defaultAddCapabilities to an empty list and take it into use by assigning it to + applicable users and groups. scored: false - id: 5.2.9 text: "Minimize the admission of containers with capabilities assigned (Manual)" type: "manual" + audit: | + # needs verification + # List all SCCs that drop all capabilities from containers + oc get scc -A -o json | jq '.items[] | select(.requiredDropCapabilities[]?|any(. == "ALL"; .)) | .metadata.name' + tests: + test_items: + - flag: "false" remediation: | - Review the use of capabilites in applications running on your cluster. Where a namespace - contains applicaions which do not require any Linux capabities to operate consider + Review the use of capabilities in applications running on your cluster. Where a namespace + contains applications which do not require any Linux capabilities to operate, consider adding a SCC which forbids the admission of containers which do not drop all capabilities. scored: false + - id: 5.2.10 + text: "Minimize access to privileged Security Context Constraints (Manual)" + type: "manual" + audit: | + # needs verification + # All users and groups with access to SCCs that include privileged or elevated capabilities. + oc get scc -ojson | jq '.items[]|select(.allowHostIPC or .allowHostPID or .allowHostPorts + or .allowHostNetwork or .allowHostDirVolumePlugin + or .allowPrivilegedContainer or .runAsUser.type != "MustRunAsRange") | + .metadata.name,{"Group:":.groups},{"User":.users}' + tests: + test_items: + - flag: "false" + remediation: | + Remove any users and groups who do not need access to an SCC, following the principle of least privilege. + You can remove users and groups from an SCC using the oc edit scc $NAME command. + Additionally, you can create your own SCCs that contain the container functionality you + need for a particular use case and assign that SCC to users and groups if the default + SCCs are not appropriate for your use case. + scored: false + - id: 5.3 text: "Network Policies and CNI" checks: @@ -195,7 +239,7 @@ groups: text: "Ensure that the CNI in use supports Network Policies (Manual)" type: "manual" remediation: | - None required. + None required. 
This will depend on the CNI plugin in use. scored: false - id: 5.3.2 @@ -216,8 +260,7 @@ groups: type: "manual" audit: | #Run the following command to find references to objects which use environment variables defined from secrets. - oc get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} - {.metadata.name} {"\n"}{end}' -A + oc get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} {.metadata.name} {"\n"}{end}' -A remediation: | If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables. @@ -237,8 +280,11 @@ groups: - id: 5.5.1 text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)" type: "manual" + audit: | + # needs verification + oc get image.config.openshift.io/cluster -o json | jq .spec.registrySources remediation: | - Follow the OpenShift documentation: [Image configuration resources](https://docs.openshift.com/container-platform/4.15/openshift_images/image-configuration.html + Follow the OpenShift documentation for Image Configuration resources: https://docs.openshift.com/container-platform/4.15/openshift_images/image-configuration.html scored: false - id: 5.7 @@ -251,22 +297,34 @@ groups: #Run the following command and review the namespaces created in the cluster. oc get namespaces #Ensure that these namespaces are the ones you need and are adequately administered as per your requirements. + oc get namespaces -o json | jq '.items[] | select(.metadata.name|test("(?!default|kube-.|openshift|openshift-.)^.*")) | .metadata.name' remediation: | - Follow the documentation and create namespaces for objects in your deployment as you need - them. + Follow the documentation and create namespaces for objects in your deployment as you need them. scored: false - id: 5.7.2 text: "Ensure that the seccomp profile is set to docker/default in your pod definitions (Manual)" type: "manual" + audit: | + oc get pods -A -o json | jq '.items[] | select( (.metadata.namespace | test("^kube*|^openshift*") | not) + and .spec.securityContext.seccompProfile.type==null) | + (.metadata.namespace + "/" + .metadata.name)' remediation: | - To enable the default seccomp profile, use the reserved value /runtime/default that will - make sure that the pod uses the default policy available on the host. + For any non-privileged pods or containers that do not have seccomp profiles, consider + using the RuntimeDefault or creating a custom seccomp profile specifically for the workload. + Please refer to the OpenShift documentation for working with custom seccomp profiles. + https://docs.openshift.com/container-platform/4.15/security/seccomp-profiles.html scored: false - id: 5.7.3 text: "Apply Security Context to Your Pods and Containers (Manual)" type: "manual" + audit: | + # needs verification + # obtain a list of pods that are using privileged security context constraints + oc get pods -A -o json | jq '.items[] | select(.metadata.annotations."openshift.io/scc"|test("privileged"?)) | .metadata.name' + # obtain a list of pods that are not using security context constraints at all + oc get pods -A -o json | jq '.items[] | select(.metadata.annotations."openshift.io/scc" == null) | .metadata.name' remediation: | Follow the Kubernetes documentation and apply security contexts to your pods. 
For a suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker @@ -277,9 +335,11 @@ groups: text: "The default namespace should not be used (Manual)" type: "manual" audit: | - #Run this command to list objects in default namespace - oc project default - oc get all + # Run the following command to list all resources in the default namespace, besides the kubernetes and + # openshift services, which are expected to be in the default namespace + oc get all -n default -o json | jq '.items[] | select((.kind|test("Service")) + and (.metadata.name|test("openshift|kubernetes"))? | not) | + (.kind + "/" + .metadata.name)' #The only entries there should be system managed resources such as the kubernetes and openshift service remediation: | Ensure that namespaces are created to allow for appropriate segregation of Kubernetes From e510a337ff38cf826f782cab0c3d4c5112b5c9e0 Mon Sep 17 00:00:00 2001 From: Deepanshu Bhatia Date: Tue, 17 Sep 2024 11:07:00 +0530 Subject: [PATCH 05/13] Update all TCs in master.yaml --- cfg/rh-1.6/master.yaml | 670 +++++++++++++++-------------------------- 1 file changed, 235 insertions(+), 435 deletions(-) diff --git a/cfg/rh-1.6/master.yaml b/cfg/rh-1.6/master.yaml index f3f7d01e0..528c07926 100644 --- a/cfg/rh-1.6/master.yaml +++ b/cfg/rh-1.6/master.yaml @@ -31,7 +31,9 @@ groups: op: bitmask value: "600" remediation: | - No remediation required; file permissions are managed by the operator. + There is no remediation for updating the permissions of kube-apiserver-pod.yaml. + The file is owned by an OpenShift operator and any changes to the file will result in a degraded cluster state. + Please do not attempt to remediate the permissions of this file. scored: false - id: 1.1.2 @@ -58,7 +60,7 @@ groups: scored: false - id: 1.1.3 - text: "Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive (Manual)" + text: "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive (Manual)" audit: | # Get the node name where the pod is running NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') @@ -78,9 +80,11 @@ groups: - flag: "permissions" compare: op: bitmask - value: "644" + value: "600" remediation: | - No remediation required; file permissions are managed by the operator. + There is no remediation for updating the permissions of kube-controller-manager-pod.yaml. + The file is owned by an OpenShift operator and any changes to the file will result in a degraded cluster state. + Please do not attempt to remediate the permissions of this file. scored: false - id: 1.1.4 @@ -107,7 +111,7 @@ groups: scored: false - id: 1.1.5 - text: "Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive (Manual)" + text: "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive (Manual)" audit: | # Get the node name where the pod is running NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') @@ -127,9 +131,11 @@ groups: - flag: "permissions" compare: op: bitmask - value: "644" + value: "600" remediation: | - No remediation required; file permissions are managed by the operator. + There is no remediation for updating the permissions of kube-scheduler-pod.yaml. + The file is owned by an OpenShift operator and any changes to the file will result in a degraded cluster state. + Please do not attempt to remediate the permissions of this file. 
scored: false - id: 1.1.6 @@ -156,7 +162,7 @@ groups: scored: false - id: 1.1.7 - text: "Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Manual))" + text: "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive (Manual))" audit: | # Get the node name where the pod is running NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') @@ -176,9 +182,11 @@ groups: - flag: "permissions" compare: op: bitmask - value: "644" + value: "600" remediation: | - No remediation required; file permissions are managed by the operator. + There is no remediation for updating the permissions of etcd-pod.yaml. + The file is owned by an OpenShift operator and any changes to the file will result in a degraded cluster state. + Please do not attempt to remediate the permissions of this file. scored: false - id: 1.1.8 @@ -205,7 +213,7 @@ groups: scored: false - id: 1.1.9 - text: "Ensure that the Container Network Interface file permissions are set to 644 or more restrictive (Manual)" + text: "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Manual)" audit: | # Get the node name where the pod is running NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') @@ -216,9 +224,9 @@ groups: if [ -z "$POD_NAME" ]; then echo "No matching pods found on the current node." else - # Execute the stat command - oc exec -n openshift-multus "$POD_NAME" -- /bin/bash -c "stat -c \"$i %n permissions=%a\" /host/etc/cni/net.d/*.conf"; 2>/dev/null - oc exec -n openshift-multus "$POD_NAME" -- /bin/bash -c "stat -c \"$i %n permissions=%a\" /host/var/run/multus/cni/net.d/*.conf"; 2>/dev/null + # Execute the stat command + oc exec -n openshift-multus "$POD_NAME" -- /bin/bash -c "stat -c \"$i %n permissions=%a\" /host/etc/cni/net.d/*.conf"; 2>/dev/null + oc exec -n openshift-multus "$POD_NAME" -- /bin/bash -c "stat -c \"$i %n permissions=%a\" /host/var/run/multus/cni/net.d/*.conf"; 2>/dev/null fi # For SDN pods POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) @@ -226,9 +234,9 @@ groups: if [ -z "$POD_NAME" ]; then echo "No matching pods found on the current node." else - # Execute the stat command - oc exec -n openshift-sdn "$POD_NAME" -- find /var/lib/cni/networks/openshift-sdn -type f -exec stat -c "$i %n permissions=%a" {} \; 2>/dev/null - oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openshift-sdn -type f -exec stat -c "$i %n permissions=%a" {} \; 2>/dev/null + # Execute the stat command + oc exec -n openshift-sdn "$POD_NAME" -- find /var/lib/cni/networks/openshift-sdn -type f -exec stat -c "$i %n permissions=%a" {} \; 2>/dev/null + oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openshift-sdn -type f -exec stat -c "$i %n permissions=%a" {} \; 2>/dev/null fi # For OVS pods @@ -237,10 +245,10 @@ groups: if [ -z "$POD_NAME" ]; then echo "No matching pods found on the current node." 
else - # Execute the stat command - oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openvswitch -type f -exec stat -c "$i %n permissions=%a" {} \; 2>/dev/null - oc exec -n openshift-sdn "$POD_NAME" -- find /etc/openvswitch -type f -exec stat -c "$i %n permissions=%a" {} \; 2>/dev/null - oc exec -n openshift-sdn "$POD_NAME" -- find /run/openvswitch -type f -exec stat -c "$i %n permissions=%a" {} \; 2>/dev/null + # Execute the stat command + oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openvswitch -type f -exec stat -c "$i %n permissions=%a" {} \; 2>/dev/null + oc exec -n openshift-sdn "$POD_NAME" -- find /etc/openvswitch -type f -exec stat -c "$i %n permissions=%a" {} \; 2>/dev/null + oc exec -n openshift-sdn "$POD_NAME" -- find /run/openvswitch -type f -exec stat -c "$i %n permissions=%a" {} \; 2>/dev/null fi use_multiple_values: true tests: @@ -248,7 +256,7 @@ groups: - flag: "permissions" compare: op: bitmask - value: "644" + value: "600" remediation: | No remediation required; file permissions are managed by the operator. scored: false @@ -265,9 +273,9 @@ groups: if [ -z "$POD_NAME" ]; then echo "No matching pods found on the current node." else - # Execute the stat command - oc exec -n openshift-multus "$POD_NAME" -- /bin/bash -c "stat -c '$i %n %U:%G' /host/etc/cni/net.d/*.conf" 2>/dev/null - oc exec -n openshift-multus $i -- /bin/bash -c "stat -c '$i %n %U:%G' /host/var/run/multus/cni/net.d/*.conf" 2>/dev/null + # Execute the stat command + oc exec -n openshift-multus "$POD_NAME" -- /bin/bash -c "stat -c '$i %n %U:%G' /host/etc/cni/net.d/*.conf" 2>/dev/null + oc exec -n openshift-multus $i -- /bin/bash -c "stat -c '$i %n %U:%G' /host/var/run/multus/cni/net.d/*.conf" 2>/dev/null fi # For SDN pods POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) @@ -275,9 +283,9 @@ groups: if [ -z "$POD_NAME" ]; then echo "No matching pods found on the current node." else - # Execute the stat command - oc exec -n openshift-sdn "$POD_NAME" -- find /var/lib/cni/networks/openshift-sdn -type f -exec stat -c "$i %n %U:%G" {} \; 2>/dev/null - oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openshift-sdn -type f -exec stat -c "$i %n %U:%G" {} \; 2>/dev/null + # Execute the stat command + oc exec -n openshift-sdn "$POD_NAME" -- find /var/lib/cni/networks/openshift-sdn -type f -exec stat -c "$i %n %U:%G" {} \; 2>/dev/null + oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openshift-sdn -type f -exec stat -c "$i %n %U:%G" {} \; 2>/dev/null fi # For OVS pods in 4.5 POD_NAME=$(oc get pods -n openshift-sdn -l app=ovs --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) @@ -285,15 +293,17 @@ groups: if [ -z "$POD_NAME" ]; then echo "No matching pods found on the current node." 
         else
-          # Execute the stat command
-          oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openvswitch -type f -exec stat -c "$i %n %U:%G" {} \; 2>/dev/null
-          oc exec -n openshift-sdn "$POD_NAME" -- find /etc/openvswitch -type f -exec stat -c "$i %n %U:%G" {} \; 2>/dev/null
-          oc exec -n openshift-sdn "$POD_NAME" -- find /run/openvswitch -type f -exec stat -c "$i %n %U:%G" {} \; 2>/dev/null
+            # Execute the stat command
+            oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openvswitch -type f -exec stat -c "$i %n %U:%G" {} \; 2>/dev/null
+            oc exec -n openshift-sdn "$POD_NAME" -- find /etc/openvswitch -type f -exec stat -c "$i %n %U:%G" {} \; 2>/dev/null
+            oc exec -n openshift-sdn "$POD_NAME" -- find /run/openvswitch -type f -exec stat -c "$i %n %U:%G" {} \; 2>/dev/null
         fi
       use_multiple_values: true
       tests:
+        bin_op: or
         test_items:
           - flag: "root:root"
+          - flag: "openvswitch:openvswitch"
       remediation: |
         No remediation required; file permissions are managed by the operator.
       scored: false
@@ -321,7 +331,7 @@ groups:
           op: bitmask
           value: "700"
       remediation: |
-        No remediation required; file permissions are managed by the operator.
+        No remediation required; file permissions are managed by the etcd operator.
       scored: false

     - id: 1.1.12
@@ -348,7 +358,7 @@ groups:
       scored: false

     - id: 1.1.13
-      text: "Ensure that the admin.conf file permissions are set to 644 or more restrictive (Manual))"
+      text: "Ensure that the kubeconfig file permissions are set to 600 or more restrictive (Manual)"
       audit: |
         NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
         oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/kubernetes/kubeconfig 2> /dev/null
       use_multiple_values: true
       tests:
         test_items:
           - flag: "permissions"
             compare:
               op: bitmask
-              value: "644"
+              value: "600"
       remediation: |
-        No remediation required; file permissions are managed by the operator.
+        There is no remediation for updating the permissions of kubeconfig.
+        The file is owned by an OpenShift operator and any changes to the file will result in a degraded cluster state.
+        Please do not attempt to remediate the permissions of this file.
       scored: false

     - id: 1.1.14
-      text: "Ensure that the admin.conf file ownership is set to root:root (Manual)"
+      text: "Ensure that the kubeconfig file ownership is set to root:root (Manual)"
       audit: |
         NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}')
         oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/kubernetes/kubeconfig 2> /dev/null
       use_multiple_values: true
       tests:
         test_items:
           - flag: "root:root"
       remediation: |
         None required.
       scored: false

     - id: 1.1.15
-      text: "Ensure that the scheduler.conf file permissions are set to 644 or more restrictive (Manual)"
+      text: "Ensure that the Scheduler kubeconfig file permissions are set to 600 or more restrictive (Manual)"
       audit: |
         # Get the node name where the pod is running
         NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
         # Get the pod name in the openshift-kube-scheduler namespace
         POD_NAME=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)

         if [ -z "$POD_NAME" ]; then
-          echo "No matching pods found on the current node."
+            echo "No matching pods found on the current node."
else - # Execute the stat command - oc exec -n openshift-kube-scheduler "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/configmaps/scheduler-kubeconfig/kubeconfig + # Execute the stat command + oc exec -n openshift-kube-scheduler "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/configmaps/scheduler-kubeconfig/kubeconfig fi use_multiple_values: true tests: @@ -397,9 +409,11 @@ groups: - flag: "permissions" compare: op: bitmask - value: "644" + value: "600" remediation: | - No remediation required; file permissions are managed by the operator. + There is no remediation for updating the permissions of the kubeconfig file. + The file is owned by an OpenShift operator and any changes to the file will result in a degraded cluster state. + Please do not attempt to remediate the permissions of this file. scored: false - id: 1.1.16 @@ -412,10 +426,10 @@ groups: POD_NAME=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." + echo "No matching pods found on the current node." else - # Execute the stat command - oc exec -n openshift-kube-scheduler "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/configmaps/scheduler-kubeconfig/kubeconfig + # Execute the stat command + oc exec -n openshift-kube-scheduler "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/configmaps/scheduler-kubeconfig/kubeconfig fi use_multiple_values: true tests: @@ -426,7 +440,7 @@ groups: scored: false - id: 1.1.17 - text: "Ensure that the controller-manager.conf file permissions are set to 644 or more restrictive (Manual)" + text: "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive (Manual)" audit: | # Get the node name where the pod is running NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') @@ -435,10 +449,10 @@ groups: POD_NAME=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." + echo "No matching pods found on the current node." else - # Execute the stat command - oc exec -n openshift-kube-controller-manager "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig + # Execute the stat command + oc exec -n openshift-kube-controller-manager "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig fi use_multiple_values: true tests: @@ -446,7 +460,7 @@ groups: - flag: "permissions" compare: op: bitmask - value: "644" + value: "600" remediation: | No remediation required; file permissions are managed by the operator. scored: false @@ -461,10 +475,10 @@ groups: POD_NAME=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." + echo "No matching pods found on the current node." 
else - # Execute the stat command - oc exec -n openshift-kube-controller-manager "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig + # Execute the stat command + oc exec -n openshift-kube-controller-manager "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig fi use_multiple_values: true tests: @@ -475,7 +489,7 @@ groups: scored: false - id: 1.1.19 - text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Manual)" + text: "Ensure that the OpenShift PKI directory and file ownership is set to root:root (Manual)" audit: | # Should return root:root for all files and directories # Get the node name where the pod is running @@ -485,14 +499,14 @@ groups: POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) if [ -z "$POD_NAME" ]; then - echo "No matching pods found on the current node." + echo "No matching pods found on the current node." else - # echo $i static-pod-certs - oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type d -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \; - oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \; - # echo $i static-pod-resources - oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-resources -type d -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \; - oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-resources -type f -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \; + # echo $i static-pod-certs + oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type d -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \; + oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \; + # echo $i static-pod-resources + oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-resources -type d -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \; + oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-resources -type f -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \; fi use_multiple_values: true tests: @@ -503,7 +517,7 @@ groups: scored: false - id: 1.1.20 - text: "Ensure that the OpenShift PKI certificate file permissions are set to 644 or more restrictive (Manual)" + text: "Ensure that the OpenShift PKI certificate file permissions are set to 600 or more restrictive (Manual)" audit: | # Get the node name where the pod is running NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') @@ -523,7 +537,7 @@ groups: - flag: "permissions" compare: op: bitmask - value: "644" + value: "600" remediation: | No remediation required; file permissions are managed by the operator. 
scored: false @@ -560,18 +574,17 @@ groups: - id: 1.2.1 text: "Ensure that anonymous requests are authorized (Manual)" audit: | - # To verify that userGroups include system:unauthenticated - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]?' - # To verify that userGroups include system:unauthenticated - oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]?.userGroups' - # To verify RBAC is enabled - oc get clusterrolebinding - oc get clusterrole - oc get rolebinding - oc get role + # To see what unauthenticated users are allowed to do. + oc get clusterrolebindings -o json | jq '.items[] | select(.subjects[]?.kind == "Group" and .subjects[]?.name == "system:unauthenticated") | .metadata.name' | uniq tests: + bin_op: or test_items: - - flag: "system:unauthenticated" + - flag: "self-access-reviewers" + - flag: "system:oauth-token-deleters" + - flag: "system:openshift:public-info-viewer" + - flag: "system:public-info-viewer" + - flag: "system:scope-impersonation" + - flag: "system:webhooks" remediation: | None required. The default configuration should not be modified. scored: false @@ -621,11 +634,7 @@ groups: - id: 1.2.4 text: "Use https for kubelet connections (Manual)" audit: | - #for 4.5 - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.kubeletClientInfo' - #for 4.6 oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' - #for both 4.5 and 4.6 oc -n openshift-apiserver describe secret serving-cert tests: bin_op: and @@ -641,11 +650,7 @@ groups: - id: 1.2.5 text: "Ensure that the kubelet uses certificates to authenticate (Manual)" audit: | - #for 4.5 - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.kubeletClientInfo' - #for 4.6 oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' - #for both 4.5 and 4.6 oc -n openshift-apiserver describe secret serving-cert tests: bin_op: and @@ -662,9 +667,6 @@ groups: - id: 1.2.6 text: "Verify that the kubelet certificate authority is set as appropriate (Manual)" audit: | - # for 4.5 - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.kubeletClientInfo' - # for 4.6 oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' tests: test_items: @@ -679,116 +681,48 @@ groups: - id: 1.2.7 text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Manual)" audit: | - # To verify that the authorization-mode argument is not used - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' - oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' - #Check that no overrides are configured - oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' - # To verify RBAC is configured: - oc get clusterrolebinding - oc get clusterrole - oc get rolebinding - oc get role - audit_config: | oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' tests: - bin_op: or test_items: - path: "{.authorization-mode}" compare: op: nothave value: "AlwaysAllow" 
- - path: "{.authorization-mode}" - flag: "authorization-mode" - set: false remediation: | None. RBAC is always on and the OpenShift API server does not use the values assigned to the flag authorization-mode. scored: false - id: 1.2.8 - text: "Verify that the Node authorizer is enabled (Manual)" - audit: | - # For OCP 4.5 and earlier verify that authorization-mode is not used - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' - oc get kubeapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig.apiServerArguments' - # For OCP 4.5 and earlier verify that authorization-mode is not used - NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc debug node/$NODE_NAME -- chroot /host cat /etc/kubernetes/kubelet.conf | grep authorization-mode 2> /dev/null - oc debug node/$NODE_NAME -- chroot /host ps -aux | grep kubelet | grep authorization-mode 2> /dev/null - #Check that no overrides are configured - oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' - audit_config: | - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' - tests: - bin_op: or - test_items: - - path: "{.authorization-mode}" - compare: - op: has - value: "Node" - - path: "{.authorization-mode}" - flag: "authorization-mode" - set: false - remediation: | - No remediation is required. - scored: false - - - id: 1.2.9 text: "Verify that RBAC is enabled (Manual)" audit: | - # For 4.5 To verify that the authorization-mode argument is not used - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' - oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' - #Check that no overrides are configured - oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' - # To verify RBAC is used - oc get clusterrolebinding - oc get clusterrole - oc get rolebinding - oc get role - # For 4.6, verify that the authorization-mode argument includes RBAC - audit_config: | oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' tests: - bin_op: or test_items: - path: "{.authorization-mode}" compare: op: has value: "RBAC" - - path: "{.authorization-mode}" - flag: "authorization-mode" - set: false remediation: | None. It is not possible to disable RBAC. scored: false - - id: 1.2.10 + - id: 1.2.9 text: "Ensure that the APIPriorityAndFairness feature gate is enabled (Manual)" audit: | #Verify the APIPriorityAndFairness feature-gate oc get kubeapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig.apiServerArguments' - #Verify the set of admission-plugins for OCP 4.6 and higher - oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"' - #Check that no overrides are configured - oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' tests: - bin_op: and test_items: - flag: "APIPriorityAndFairness=true" - - flag: "EventRateLimit" - set: false remediation: | - No remediation is required + No remediation is required. By default, the OpenShift kubelet has been fixed to send fewer requests. 
scored: false - - id: 1.2.11 + - id: 1.2.10 text: "Ensure that the admission control plugin AlwaysAdmit is not set (Manual)" audit: | #Verify the set of admission-plugins for OCP 4.6 and higher oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"' - #Check that no overrides are configured - oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' tests: test_items: - flag: "AlwaysAdmit" @@ -797,13 +731,11 @@ groups: No remediation is required. The AlwaysAdmit admission controller cannot be enabled in OpenShift. scored: false - - id: 1.2.12 + - id: 1.2.11 text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)" audit: | #Verify the set of admissi on-plugins for OCP 4.6 and higher oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"' - #Check that no overrides are configured - oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' tests: test_items: - flag: "AlwaysPullImages" @@ -812,120 +744,56 @@ groups: None required. scored: false - - id: 1.2.13 - text: "Ensure that the admission control plugin SecurityContextDeny is not set (Manual)" - audit: | - #Verify the set of admission-plugins for OCP 4.6 and higher - oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"' - output=$(oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"') - [ "$output" == "null" ] && echo "ocp 4.5 has SecurityContextDeny and SecurityContextConstraint compiled" || echo $output - #Check that no overrides are configured - oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' - #Verify that SecurityContextConstraints are deployed - oc get scc - oc describe scc restricted - tests: - bin_op: and - test_items: - - flag: "SecurityContextConstraint" - set: true - - flag: "anyuid" - - flag: "hostaccess" - - flag: "hostmount-anyuid" - - flag: "hostnetwork" - - flag: "node-exporter" - - flag: "nonroot" - - flag: "privileged" - - flag: "restricted" - remediation: | - None required. The Security Context Constraint admission controller cannot be disabled in OpenShift 4. - scored: false - - - id: 1.2.14 + - id: 1.2.12 text: "Ensure that the admission control plugin ServiceAccount is set (Manual)" audit: | - #Verify the list of admission controllers for 4.6 and higher oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"' - output=$(oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"') - [ "$output" == "null" ] && echo "ocp 4.5 has ServiceAccount compiled" || echo $output - #Check that no overrides are configured - oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' - #Verify that Service Accounts are present - oc get sa -A tests: test_items: - flag: "ServiceAccount" set: true remediation: | - None required. OpenShift is configured to use service accounts by default. + None required. By default, OpenShift configures the ServiceAccount admission controller. 
scored: false - - id: 1.2.15 + - id: 1.2.13 text: "Ensure that the admission control plugin NamespaceLifecycle is set (Manual)" audit: | - #Verify the list of admission controllers for 4.6 and higher oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"' - output=$(oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"') - [ "$output" == "null" ] && echo "ocp 4.5 has NamespaceLifecycle compiled" || echo $output - #Check that no overrides are configured - oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' tests: test_items: - flag: "NamespaceLifecycle" remediation: | - Ensure that the --disable-admission-plugins parameter does not include NamespaceLifecycle. + None required. OpenShift configures the NamespaceLifecycle admission controller by default. scored: false - - id: 1.2.16 + - id: 1.2.14 text: "Ensure that the admission control plugin SecurityContextConstraint is set (Manual)" audit: | - #Verify the set of admission-plugins for OCP 4.6 and higher oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"' - output=$(oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"') - [ "$output" == "null" ] && echo "ocp 4.5 has SecurityContextConstraint compiled" || echo $output - #Check that no overrides are configured - oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' - #Verify that SecurityContextConstraints are deployed - oc get scc - oc describe scc restricted tests: - bin_op: and test_items: - - flag: "SecurityContextConstraint" - - flag: "anyuid" - - flag: "hostaccess" - - flag: "hostmount-anyuid" - - flag: "hostnetwork" - - flag: "node-exporter" - - flag: "nonroot" - - flag: "privileged" - - flag: "restricted" + - flag: "security.openshift.io/SecurityContextConstraint" remediation: | - None required. Security Context Constraints are enabled by default in OpenShift and cannot be disabled. + None required. By default, the SecurityContextConstraints admission controller is configured and cannot be disabled. scored: false - - id: 1.2.17 + - id: 1.2.15 text: "Ensure that the admission control plugin NodeRestriction is set (Manual)" audit: | - # For 4.5, review the control plane manifest https://github.com/openshift/origin/blob/release-4.15/vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/controlplane/manifests.go#L132 - #Verify the set of admission-plugins for OCP 4.6 and higher oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"' - output=$(oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"') - [ "$output" == "null" ] && echo "ocp 4.5 has NodeRestriction compiled" || echo $output - #Check that no overrides are configured - oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' tests: test_items: - flag: "NodeRestriction" remediation: | - The NodeRestriction plugin cannot be disabled. + None required. In OpenShift, the NodeRestriction admission plugin is enabled by default and cannot be disabled.
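+        An illustrative negative test of the plugin's effect (hypothetical node
+        name; run from a node shell, where the kubelet kubeconfig lives at
+        /var/lib/kubelet/kubeconfig): a kubelet credential may only modify its
+        own Node object, so labeling a different node should be denied:
+          oc --kubeconfig=/var/lib/kubelet/kubeconfig label node <other-node> cis-test=denied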
scored: false - - id: 1.2.18 + - id: 1.2.16 text: "Ensure that the --insecure-bind-address argument is not set (Manual)" audit: | # InsecureBindAddress=true should not be in the results - oc get kubeapiservers.operator.openshift.io cluster -o jsonpath='{range .spec.observedConfig.apiServerArguments.feature-gates[*]}{@}{"\n"}{end}' + oc get kubeapiservers.operator.openshift.io cluster -ojson | jq '.spec.observedConfig.apiServerArguments."feature-gates"' # Result should be only 6443 oc -n openshift-kube-apiserver get endpoints -o jsonpath='{.items[*].subsets[*].ports[*].port}' # Result should be only 8443 @@ -933,36 +801,30 @@ groups: tests: bin_op: and test_items: - - flag: "insecure-bind-address" + - flag: "InsecureBindAddress=true" set: false - flag: 6443 - flag: 8443 remediation: | - None required. + None required. By default, the openshift-kube-apiserver is served over HTTPS with authentication and authorization. scored: false - - id: 1.2.19 + - id: 1.2.17 text: "Ensure that the --insecure-port argument is set to 0 (Manual)" audit: | # Should return 6443 oc -n openshift-kube-apiserver get endpoints -o jsonpath='{.items[*].subsets[*].ports[*].port}' - # For OCP 4.6 and above - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments["insecure-port"]' - output=$(oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments["insecure-port"]') - [ "$output" == "null" ] && echo "ocp 4.5 has insecure-port set to \"0\" compiled" || echo $output tests: - bin_op: and test_items: - - flag: "\"0\"" - flag: "6443" remediation: | - None required. The configuration is managed by the API server operator. + None required. By default, the openshift-kube-apiserver is served over HTTPS with authentication and authorization. scored: false - - id: 1.2.20 + - id: 1.2.18 text: "Ensure that the --secure-port argument is not set to 0 (Manual)" audit: | - oc get kubeapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig' + oc get kubeapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig.servingInfo.bindAddress' # Should return only 6443 echo ports=`oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver -o jsonpath='{.items[*].spec.containers[?(@.name=="kube-apiserver")].ports[*].containerPort}'` tests: @@ -974,10 +836,11 @@ groups: op: regex value: '\s*(?:6443\s*){1,}$' remediation: | - None required. + None required. By default, the openshift-kube-apiserver is served over HTTPS with authentication and authorization; + the secure API endpoint is bound to 0.0.0.0:6443.
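+        One way to double-check the listeners from a control-plane node
+        (illustrative sketch; ss ships with RHCOS):
+          NODE=$(oc get nodes -l node-role.kubernetes.io/master -o jsonpath='{.items[0].metadata.name}')
+          oc debug node/$NODE -- chroot /host ss -tlnp | grep 6443
+        Expect the kube-apiserver listening on 6443 and no insecure HTTP listener.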
scored: false - - id: 1.2.21 + - id: 1.2.19 text: "Ensure that the healthz endpoint is protected by RBAC (Manual)" type: manual audit: | @@ -992,143 +855,114 @@ groups: # Create a service account to test RBAC oc create -n openshift-kube-apiserver sa permission-test-sa # Should return 403 Forbidden - SA_TOKEN=$(oc sa -n openshift-kube-apiserver get-token permission-test-sa) + export SA_TOKEN=$(oc create token -n openshift-kube-apiserver permission-test-sa) oc rsh -n openshift-kube-apiserver ${POD} curl https://localhost:${PORT}/metrics -H "Authorization: Bearer $SA_TOKEN" -k - # Cleanup - oc delete -n openshift-kube-apiserver sa permission-test-sa # As cluster admin, should succeed - CLUSTER_ADMIN_TOKEN=$(oc whoami -t) + export CLUSTER_ADMIN_TOKEN=$(oc whoami -t) oc rsh -n openshift-kube-apiserver ${POD} curl https://localhost:${PORT}/metrics -H "Authorization: Bearer $CLUSTER_ADMIN_TOKEN" -k + # Cleanup + unset CLUSTER_ADMIN_TOKEN SA_TOKEN + oc delete -n openshift-kube-apiserver sa permission-test-sa remediation: | None required as profiling data is protected by RBAC. scored: false - - id: 1.2.22 + - id: 1.2.20 text: "Ensure that the --audit-log-path argument is set (Manual)" audit: | # Should return “/var/log/kube-apiserver/audit.log" - output=$(oc get configmap config -n openshift-kube-apiserver -o jsonpath="{['.data.config\.yaml']}" | jq '.auditConfig.auditFilePath') - [ "$output" != "" ] && [ "$output" != "null" ] && echo "$output" || true output=$(oc get configmap config -n openshift-kube-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-path"][]?') [ "$output" != "" ] && [ "$output" != "null" ] && echo "$output" || true POD=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver -o jsonpath='{.items[0].metadata.name}') oc rsh -n openshift-kube-apiserver -c kube-apiserver $POD ls /var/log/kube-apiserver/audit.log 2>/dev/null # Should return 0 - echo exit_code=$? + echo kube_apiserver_exit_code=$? # Should return "/var/log/openshift-apiserver/audit.log" - output=$(oc get configmap config -n openshift-apiserver -o jsonpath="{['.data.config\.yaml']}" | jq '.auditConfig.auditFilePath') - [ "$output" != "" ] && [ "$output" != "null" ] && echo "$output" || true output=$(oc get configmap config -n openshift-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-path"][]?') [ "$output" != "" ] && [ "$output" != "null" ] && echo "$output" || true POD=$(oc get pods -n openshift-apiserver -l apiserver=true -o jsonpath='{.items[0].metadata.name}') oc rsh -n openshift-apiserver $POD ls /var/log/openshift-apiserver/audit.log 2>/dev/null # Should return 0 - echo exit_code=$? + echo apiserver_exit_code=$? use_multiple_values: true tests: bin_op: or test_items: - flag: "/var/log/kube-apiserver/audit.log" - flag: "/var/log/openshift-apiserver/audit.log" - - flag: "exit_code=0" + - flag: "kube_apiserver_exit_code=0" + - flag: "apiserver_exit_code=0" - flag: "null" remediation: | - None required. This is managed by the cluster apiserver operator. + None required. This is managed by the cluster apiserver operator. By default, auditing is enabled. scored: false - - id: 1.2.23 + - id: 1.2.21 text: "Ensure that the audit logs are forwarded off the cluster for retention (Manual)" type: "manual" remediation: | Follow the documentation for log forwarding. 
Forwarding logs to third party systems - https://docs.openshift.com/container-platform/4.15/logging/cluster-logging-external.html + https://docs.openshift.com/container-platform/4.15/observability/logging/log_collection_forwarding/configuring-log-forwarding.html scored: false - - id: 1.2.24 + - id: 1.2.22 text: "Ensure that the maximumRetainedFiles argument is set to 10 or as appropriate (Manual)" audit: | - #NOTICE - output=$(oc get configmap config -n openshift-kube-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r .auditConfig.maximumRetainedFiles) - [ "$output" != "" ] && [ "$output" != "null" ] && echo "maximumRetainedFiles=$output" || true - output=$(oc get configmap config -n openshift-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r .auditConfig.maximumRetainedFiles) - [ "$output" != "" ] && [ "$output" != "null" ] && echo "maximumRetainedFiles=$output" || true output=$(oc get configmap config -n openshift-kube-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-maxbackup"][]?') [ "$output" != "" ] && [ "$output" != "null" ] && echo "audit-log-maxbackup=$output" || true - output=$(oc get configmap config -n openshift-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-maxbackup"][]?') - [ "$output" != "" ] && [ "$output" != "null" ] && echo "audit-log-maxbackup=$output" || true - use_multiple_values: true tests: - bin_op: or test_items: - - flag: "maximumRetainedFiles" - compare: - op: gte - value: 10 - flag: "audit-log-maxbackup" compare: op: gte value: 10 remediation: | - Set the maximumRetainedFiles parameter to 10 or as an appropriate number of files. maximumRetainedFiles: 10 + None required. By default, auditing is enabled and the maximum audit log backup is set to 10. scored: false - - id: 1.2.25 - text: "Ensure that the maximumFileSizeMegabytes argument is set to 100 or as appropriate (Manual)" + - id: 1.2.23 + text: "Ensure that the maximumFileSizeMegabytes argument is set to 100 (Manual)" audit: | - #NOTICE - output=$(oc get configmap config -n openshift-kube-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r .auditConfig.maximumFileSizeMegabytes) - [ "$output" != "" ] && [ "$output" != "null" ] && echo "maximumFileSizeMegabytes=$output" || true - output=$(oc get configmap config -n openshift-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r .auditConfig.maximumFileSizeMegabytes) - [ "$output" != "" ] && [ "$output" != "null" ] && echo "maximumFileSizeMegabytes=$output" || true output=$(oc get configmap config -n openshift-kube-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-maxsize"][]?') [ "$output" != "" ] && [ "$output" != "null" ] && echo "audit-log-maxsize=$output" || true - output=$(oc get configmap config -n openshift-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-maxsize"][]?') - [ "$output" != "" ] && [ "$output" != "null" ] && echo "audit-log-maxsize=$output" || true - use_multiple_values: true tests: - bin_op: or test_items: - - flag: "maximumFileSizeMegabytes" - compare: - op: gte - value: 100 - flag: "audit-log-maxsize" compare: op: gte value: 100 remediation: | - Set the audit-log-maxsize parameter to 100 or as an appropriate number. + None required. The audit-log-maxsize parameter is set to 100 by default and changing it is not supported.
maximumFileSizeMegabytes: 100 scored: false - - id: 1.2.26 - text: "Ensure that the --request-timeout argument is set as appropriate (Manual)" + - id: 1.2.24 + text: "Ensure that the --request-timeout argument is set (Manual)" audit: | - echo requestTimeoutSeconds=`oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .servingInfo.requestTimeoutSeconds` + echo requestTimeoutSeconds=`oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["min-request-timeout"][]?'` tests: test_items: - flag: "requestTimeoutSeconds" + compare: + op: gte + value: 100 remediation: | - TBD + None required. By default, min-request-timeout is set to 3600 seconds in OpenShift. scored: false - - id: 1.2.27 + - id: 1.2.25 text: "Ensure that the --service-account-lookup argument is set to true (Manual)" audit: | - # For OCP 4.5 - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' | grep service-account-lookup - # For OCP 4.6 and above - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["service-account-lookup"]' output=$(oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["service-account-lookup"][0]') [ "$output" == "null" ] && echo "ocp 4.5 has service-account-lookup=true compiled" || echo service-account-lookup=$output tests: test_items: - flag: "service-account-lookup=true" remediation: | - TBD + None required. Service account lookup is enabled by default. scored: false - - id: 1.2.28 + - id: 1.2.26 text: "Ensure that the --service-account-key-file argument is set as appropriate (Manual)" audit: | oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .serviceAccountPublicKeyFiles[] @@ -1143,14 +977,9 @@ groups: OpenShift does not reuse the apiserver TLS key. This is not configurable. scored: false - - id: 1.2.29 + - id: 1.2.27 text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Manual)" audit: | - # etcd Certificate File - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .storageConfig.certFile - # etcd Key File - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .storageConfig.keyFile - # NOTICE 4.6 extention oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["etcd-certfile"]' oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["etcd-keyfile"]' tests: @@ -1163,75 +992,57 @@ groups: This is not configurable. 
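+        To print both operator-managed values in one pass (illustrative; assumes jq):
+          oc get configmap config -n openshift-kube-apiserver -ojson \
+            | jq -r '.data["config.yaml"]' \
+            | jq -r '.apiServerArguments["etcd-certfile"][]?, .apiServerArguments["etcd-keyfile"][]?'
+        Both paths should point at certificates rendered by the operator into the
+        static pod resources.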
scored: false - - id: 1.2.30 + - id: 1.2.28 text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)" audit: | - # TLS Cert File - openshift-kube-apiserver - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .servingInfo.certFile - # TLS Key File - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.servingInfo.keyFile' - # NOTECI 4.6 extention - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["tls-cert-file"]' - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["tls-private-key-file"]' + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["tls-cert-file"][]' + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["tls-private-key-file"][]' tests: bin_op: and test_items: - flag: "/etc/kubernetes/static-pod-certs/secrets/service-network-serving-certkey/tls.crt" - flag: "/etc/kubernetes/static-pod-certs/secrets/service-network-serving-certkey/tls.key" remediation: | - OpenShift automatically manages TLS authentication for the API server communication with the node/kublet. - This is not configurable. You may optionally set a custom default certificate to be used by the API server - when serving content in order to enable clients to access the API server at a different host name or without - the need to distribute the cluster-managed certificate authority (CA) certificates to the clients. - Follow the directions in the OpenShift documentation User-provided certificates for the API server + None. By default, OpenShift uses X.509 certificates to provide secure connections between the API server and + node/kubelet. OpenShift does not use values assigned to the tls-cert-file or tls-private-key-file flags. + You may optionally set a custom default certificate to be used by the API server when serving content in + order to enable clients to access the API server at a different host name or without the need to distribute + the cluster-managed certificate authority (CA) certificates to the clients. + Follow the directions in the OpenShift documentation + https://docs.openshift.com/container-platform/4.15/security/certificates/api-server.html scored: false - - id: 1.2.31 + - id: 1.2.29 text: "Ensure that the --client-ca-file argument is set as appropriate (Manual)" audit: | - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .servingInfo.clientCA - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["client-ca-file"]' + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["client-ca-file"][]' tests: test_items: - flag: "/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt" remediation: | - OpenShift automatically manages TLS authentication for the API server communication with the node/kublet. - This is not configurable. 
You may optionally set a custom default certificate to be used by the API - server when serving content in order to enable clients to access the API server at a different host name - or without the need to distribute the cluster-managed certificate authority (CA) certificates to the clients. - - User-provided certificates must be provided in a kubernetes.io/tls type Secret in the openshift-config namespace. - Update the API server cluster configuration, - the apiserver/cluster resource, to enable the use of the user-provided certificate. + None required. By default, OpenShift configures the client-ca-file and automatically manages the certificate. + It does not use the value assigned to the client-ca-file flag. + You may optionally set a custom default certificate to be used by the API server when serving content in + order to enable clients to access the API server at a different host name or without the need to distribute + the cluster-managed certificate authority (CA) certificates to the clients. + Please follow the OpenShift documentation for providing certificates for OpenShift to use. + https://docs.openshift.com/container-platform/4.15/security/certificate_types_descriptions/user-provided-certificates-for-api-server.html#location scored: false - - id: 1.2.32 + - id: 1.2.30 text: "Ensure that the --etcd-cafile argument is set as appropriate (Manual)" audit: | - #etcd CA File - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .storageConfig.ca - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["etcd-cafile"]' + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["etcd-cafile"][]' tests: test_items: - flag: "/etc/kubernetes/static-pod-resources/configmaps/etcd-serving-ca/ca-bundle.crt" remediation: | - None required. OpenShift generates the etcd-cafile and sets the arguments appropriately in the API server. Communication with etcd is secured by the etcd serving CA. - scored: false - - - id: 1.2.33 - text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)" - audit: | - # encrypt the etcd datastore - oc get openshiftapiserver -o=jsonpath='{range.items[0].status.conditions[?(@.type=="Encrypted")]}{.reason}{"\n"}{.message}{"\n"}' - tests: - test_items: - - flag: "EncryptionCompleted" - remediation: | - Follow the OpenShift documentation for Encrypting etcd data | Authentication | OpenShift Container Platform 4.15 - https://docs.openshift.com/container-platform/4.15/security/encrypting-etcd.html + None required. By default, OpenShift uses X.509 certificates to provide secure communication to etcd. + OpenShift does not use values assigned to etcd-cafile. OpenShift generates the etcd-cafile and sets the + arguments appropriately in the API server. Communication with etcd is secured by the etcd serving CA. scored: false - - id: 1.2.34 + - id: 1.2.31 text: "Ensure that encryption providers are appropriately configured (Manual)" audit: | # encrypt the etcd datastore @@ -1240,49 +1051,54 @@ groups: test_items: - flag: "EncryptionCompleted" remediation: | - Follow the Kubernetes documentation and configure a EncryptionConfig file. - In this file, choose aescbc, kms or secretbox as the encryption provider. + Follow the OpenShift documentation for encrypting etcd data. 
+ https://docs.openshift.com/container-platform/4.15/security/encrypting-etcd.html scored: false - - id: 1.2.35 + - id: 1.2.32 text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)" type: manual audit: | # verify cipher suites oc get cm -n openshift-authentication v4-0-config-system-cliconfig -o jsonpath='{.data.v4\-0\-config\-system\-cliconfig}' | jq .servingInfo - oc get kubeapiservers.operator.openshift.io cluster -o json |jq.spec.observedConfig.servingInfo - oc get openshiftapiservers.operator.openshift.io cluster -o json |jq.spec.observedConfig.servingInfo - oc describe --namespace=openshift-ingress-operator ingresscontroller/default + oc get kubeapiservers.operator.openshift.io cluster -o json | jq .spec.observedConfig.servingInfo + oc get openshiftapiservers.operator.openshift.io cluster -o json | jq .spec.observedConfig.servingInfo + oc get -n openshift-ingress-operator ingresscontroller/default -o json | jq .status.tlsProfile + remediation: | + None required. By default, OpenShift uses the Intermediate TLS profile, which requires a minimum of TLS 1.2. + You can configure TLS security profiles by following the OpenShift TLS documentation. + https://docs.openshift.com/container-platform/4.15/security/tls-security-profiles.html + Note: The HAProxy Ingress controller image does not support TLS 1.3 and because the Modern profile requires + TLS 1.3, it is not supported. The Ingress Operator converts the Modern profile to Intermediate. + The Ingress Operator also converts the TLS 1.0 of an Old or Custom profile to 1.1, and TLS 1.3 of a Custom + profile to 1.2. + scored: false + + - id: 1.2.33 + text: "Ensure unsupported configuration overrides are not used (Manual)" + audit: | + oc get kubeapiserver/cluster -o jsonpath='{.spec.unsupportedConfigOverrides}' + tests: + test_items: + - flag: "null" remediation: | - Verify that the tlsSecurityProfile is set to the value you chose. - Note: The HAProxy Ingress controller image does not support TLS 1.3 - and because the Modern profile requires TLS 1.3, it is not supported. - The Ingress Operator converts the Modern profile to Intermediate. - The Ingress Operator also converts the TLS 1.0 of an Old or Custom profile to 1.1, - and TLS 1.3 of a Custom profile to 1.2. + None required. By default, OpenShift sets this value to null and doesn't support overriding configuration + with unsupported features. 
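+        The same field exists on the other control-plane operators; a sketch to
+        check them all at once (operator resource names assumed current):
+          for r in kubeapiserver openshiftapiserver kubecontrollermanager kubescheduler; do
+            echo "$r: $(oc get $r cluster -o jsonpath='{.spec.unsupportedConfigOverrides}')"
+          done
+        Empty output for each resource indicates no unsupported overrides are set.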
scored: false - id: 1.3 text: "Controller Manager" checks: - id: 1.3.1 - text: "Ensure that garbage collection is configured as appropriate (Manual)" - type: manual - remediation: | - To configure, follow the directions in Configuring garbage collection for containers and images - https://docs.openshift.com/container-platform/4.15/nodes/nodes/nodes-nodes-garbage-collection.html#nodes-nodes-garbage-collection-configuring_nodes-nodes-configuring - scored: false - - - id: 1.3.2 text: "Ensure that controller manager healthz endpoints are protected by RBAC (Manual)" type: manual audit: | # Verify configuration for ports, livenessProbe, readinessProbe, healthz - oc -n openshift-kube-controller-manager get cm kube-controller-manager-pod -o json | jq -r '.data."pod.yaml"' | jq '.spec.containers' + oc -n openshift-kube-controller-manager get cm kube-controller-manager-pod -o json | jq -r '.data."pod.yaml"' | jq '.spec.containers[].livenessProbe' + oc -n openshift-kube-controller-manager get cm kube-controller-manager-pod -o json | jq -r '.data."pod.yaml"' | jq '.spec.containers[].readinessProbe' # Verify endpoints oc -n openshift-kube-controller-manager describe endpoints # Test to validate RBAC enabled on the controller endpoint; check with non-admin role - oc project openshift-kube-controller-manage POD=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager -o jsonpath='{.items[0].metadata.name}') PORT=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager -o jsonpath='{.items[0].spec.containers[0].ports[0].hostPort}') # Following should return 403 Forbidden @@ -1290,18 +1106,20 @@ groups: # Create a service account to test RBAC oc create -n openshift-kube-controller-manager sa permission-test-sa # Should return 403 Forbidden - SA_TOKEN=$(oc sa -n openshift-kube-controller-manager get-token permission-test-sa) + export SA_TOKEN=$(oc create token -n openshift-kube-controller-manager permission-test-sa) oc rsh -n openshift-kube-controller-manager ${POD} curl https://localhost:${PORT}/metrics -H "Authorization: Bearer $SA_TOKEN" -k - # Cleanup - oc delete -n openshift-kube-controller-manager sa permission-test-sa # As cluster admin, should succeed CLUSTER_ADMIN_TOKEN=$(oc whoami -t) oc rsh -n openshift-kube-controller-manager ${POD} curl https://localhost:${PORT}/metrics -H "Authorization: Bearer $CLUSTER_ADMIN_TOKEN" -k + # Cleanup + unset CLUSTER_ADMIN_TOKEN POD PORT SA_TOKEN + oc delete -n openshift-kube-controller-manager sa permission-test-sa remediation: | - None required; profiling is protected by RBAC. + None required. By default, the operator exposes metrics via metrics service. The metrics are collected + from the OpenShift Controller Manager and the Kubernetes Controller Manager and protected by RBAC. scored: false - - id: 1.3.3 + - id: 1.3.2 text: "Ensure that the --use-service-account-credentials argument is set to true (Manual)" audit: | echo use-service-account-credentials=`oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq -r '.extendedArguments["use-service-account-credentials"][]'` @@ -1317,7 +1135,7 @@ groups: This operator is configured via KubeControllerManager custom resource. 
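+        The operator-rendered value can also be read directly (illustrative;
+        the observedConfig layout may vary between releases):
+          oc get kubecontrollermanager cluster -o json \
+            | jq '.spec.observedConfig.extendedArguments["use-service-account-credentials"]'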
scored: false - - id: 1.3.4 + - id: 1.3.3 text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Manual)" audit: | oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq -r '.extendedArguments["service-account-private-key-file"][]' @@ -1329,7 +1147,7 @@ groups: OpenShift manages the service account credentials for the scheduler automatically. scored: false - - id: 1.3.5 + - id: 1.3.4 text: "Ensure that the --root-ca-file argument is set as appropriate (Manual)" audit: | oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq -r '.extendedArguments["root-ca-file"][]' @@ -1341,29 +1159,15 @@ groups: Certificates for OpenShift platform components are automatically created and rotated by the OpenShift Container Platform. scored: false - - id: 1.3.6 - text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Manual)" - audit: | - oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq -r '.extendedArguments["feature-gates"][]' - tests: - test_items: - - flag: "RotateKubeletServerCertificate" - compare: - op: eq - value: "true" - remediation: | - None required. - Certificates for OpenShift platform components are automatically created and rotated by the OpenShift Container Platform. - scored: false - - - id: 1.3.7 + - id: 1.3.5 text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Manual)" audit: | - echo port=`oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq '.extendedArguments["port"][]'` echo secure-port=`oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq '.extendedArguments["secure-port"][]'` #Following should fail with a http code 403 POD=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager -o jsonpath='{.items[0].metadata.name}') oc rsh -n openshift-kube-controller-manager -c kube-controller-manager $POD curl https://localhost:10257/metrics -k + # Cleanup + unset POD tests: bin_op: and test_items: @@ -1371,14 +1175,10 @@ groups: compare: op: eq value: "\"10257\"" - - flag: "port" - compare: - op: eq - value: "\"0\"" - flag: "\"code\": 403" remediation: | Edit the Controller Manager pod specification file $controllermanagerconf - on the master node and ensure the correct value for the --bind-address parameter + on the master node and ensure the correct value for the --bind-address parameter. 
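+        A quick probe of the secure port (illustrative; an unauthenticated
+        request should come back as HTTP 403):
+          POD=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager -o jsonpath='{.items[0].metadata.name}')
+          oc rsh -n openshift-kube-controller-manager -c kube-controller-manager $POD \
+            curl -sk -o /dev/null -w '%{http_code}\n' https://localhost:10257/metrics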
scored: false - id: 1.4 @@ -1389,57 +1189,57 @@ groups: type: manual audit: | # check configuration for ports, livenessProbe, readinessProbe, healthz - oc -n openshift-kube-scheduler get cm kube-scheduler-pod -o json | jq -r '.data."pod.yaml"' | jq '.spec.containers' + oc -n openshift-kube-scheduler get cm kube-scheduler-pod -o json | jq -r '.data."pod.yaml"' | jq '.spec.containers[].livenessProbe' + oc -n openshift-kube-scheduler get cm kube-scheduler-pod -o json | jq -r '.data."pod.yaml"' | jq '.spec.containers[].readinessProbe' # Test to verify endpoints oc -n openshift-kube-scheduler describe endpoints # Test to validate RBAC enabled on the scheduler endpoint; check with non-admin role - oc project openshift-kube-scheduler - POD=$(oc get pods -l app=openshift-kube-scheduler -o jsonpath='{.items[0].metadata.name}') - PORT=$(oc get pod $POD -o jsonpath='{.spec.containers[0].livenessProbe.httpGet.port}') + # oc project openshift-kube-scheduler + export POD=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler -o jsonpath='{.items[0].metadata.name}') + export PORT=$(oc get pod -n openshift-kube-scheduler $POD -o jsonpath='{.spec.containers[0].livenessProbe.httpGet.port}') # Should return 403 Forbidden - oc rsh ${POD} curl http://localhost:${PORT}/metrics -k + oc rsh -n openshift-kube-scheduler ${POD} curl https://localhost:${PORT}/metrics -k # Create a service account to test RBAC - oc create sa permission-test-sa + oc create sa -n openshift-kube-scheduler permission-test-sa # Should return 403 Forbidden - SA_TOKEN=$(oc sa get-token permission-test-sa) - oc rsh ${POD} curl http://localhost:${PORT}/metrics -H "Authorization: Bearer $SA_TOKEN" -k - # Cleanup - oc delete sa permission-test-sa + export SA_TOKEN=$(oc create token -n openshift-kube-scheduler permission-test-sa) + oc rsh -n openshift-kube-scheduler ${POD} curl https://localhost:${PORT}/metrics -H "Authorization: Bearer $SA_TOKEN" -k # As cluster admin, should succeed - CLUSTER_ADMIN_TOKEN=$(oc whoami -t) - oc rsh ${POD} curl http://localhost:${PORT}/metrics -H "Authorization: Bearer $CLUSTER_ADMIN_TOKEN" -k + export CLUSTER_ADMIN_TOKEN=$(oc whoami -t) + oc rsh -n openshift-kube-scheduler ${POD} curl https://localhost:${PORT}/metrics -H "Authorization: Bearer $CLUSTER_ADMIN_TOKEN" -k + # Cleanup + unset CLUSTER_ADMIN_TOKEN POD PORT SA_TOKEN + oc delete sa -n openshift-kube-scheduler permission-test-sa remediation: | - A fix to this issue: https://bugzilla.redhat.com/show_bug.cgi?id=1889488 None required. - Profiling is protected by RBAC and cannot be disabled. + None required. By default, profiling is enabled and protected by RBAC. 
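+        For contrast, /healthz is intentionally reachable without credentials
+        (it is on the scheduler's default authorization-always-allow-paths list),
+        while /metrics is not; an illustrative check, assuming the usual secure
+        port 10259:
+          POD=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler -o jsonpath='{.items[0].metadata.name}')
+          oc rsh -n openshift-kube-scheduler $POD curl -sk https://localhost:10259/healthz
+          oc rsh -n openshift-kube-scheduler $POD curl -sk -o /dev/null -w '%{http_code}\n' https://localhost:10259/metrics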
scored: false - id: 1.4.2 - text: "Verify that the scheduler API service is protected by authentication and authorization (Manual)" + text: "Verify that the scheduler API service is protected by RBAC (Manual)" type: manual audit: | # To verify endpoints oc -n openshift-kube-scheduler describe endpoints # To verify that bind-address is not used in the configuration and that port is set to 0 - oc -n openshift-kube-scheduler get cm kube-scheduler-pod -o json | jq -r '.data."pod.yaml"' | jq '.spec.containers' + oc -n openshift-kube-scheduler get cm kube-scheduler-pod -o json | jq -r '.data."pod.yaml"' | jq '.spec.containers[]|select(.name=="kube-scheduler")|.args' # To test for RBAC: - oc project openshift-kube-scheduler - POD=$(oc get pods -l app=openshift-kube-scheduler -o jsonpath='{.items[0].metadata.name}') - POD_IP=$(oc get pods -l app=openshift-kube-scheduler -o jsonpath='{.items[0].status.podIP}') - PORT=$(oc get pod $POD -o jsonpath='{.spec.containers[0].livenessProbe.httpGet.port}') + # oc project openshift-kube-scheduler + export POD=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler -o jsonpath='{.items[0].metadata.name}') + export POD_IP=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler -o jsonpath='{.items[0].status.podIP}') + export PORT=$(oc get pod -n openshift-kube-scheduler $POD -o jsonpath='{.spec.containers[0].livenessProbe.httpGet.port}') # Should return a 403 - oc rsh ${POD} curl http://${POD_IP}:${PORT}/metrics + oc rsh -n openshift-kube-scheduler ${POD} curl https://${POD_IP}:${PORT}/metrics # Create a service account to test RBAC - oc create sa permission-test-sa + oc create sa -n openshift-kube-scheduler permission-test-sa # Should return 403 Forbidden - SA_TOKEN=$(oc sa get-token permission-test-sa) - oc rsh ${POD} curl http://localhost:${PORT}/metrics -H "Authorization: Bearer $SA_TOKEN" -k - # Cleanup - oc delete sa permission-test-sa + export SA_TOKEN=$(oc create token -n openshift-kube-scheduler permission-test-sa) + oc rsh -n openshift-kube-scheduler ${POD} curl https://localhost:${PORT}/metrics -H "Authorization: Bearer $SA_TOKEN" -k # As cluster admin, should succeed - CLUSTER_ADMIN_TOKEN=$(oc whoami -t) + export CLUSTER_ADMIN_TOKEN=$(oc whoami -t) oc rsh ${POD} curl http://localhost:${PORT}/metrics -H "Authorization: Bearer $CLUSTER_ADMIN_TOKEN" -k + # Cleanup + unset CLUSTER_ADMIN_TOKEN POD PORT SA_TOKEN + oc delete sa -n openshift-kube-scheduler permission-test-sa remediation: | - By default, the --bind-address argument is not present, - the readinessProbe and livenessProbe arguments are set to 10251 and the port argument is set to 0. - Check the status of this issue: https://bugzilla.redhat.com/show_bug.cgi?id=1889488 + By default, the --bind-address argument is not used and the metrics endpoint is protected by RBAC when using the pod IP address.
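+        To confirm no explicit bind address is configured on the scheduler
+        container (illustrative; assumes jq):
+          oc -n openshift-kube-scheduler get cm kube-scheduler-pod -o json \
+            | jq -r '.data."pod.yaml"' \
+            | jq -r '.spec.containers[]|select(.name=="kube-scheduler")|.args[]' \
+            | grep -- --bind-address || echo "no explicit --bind-address"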
scored: false From 1a2de3063b1ed6ad4c091086db1888e44e41e6f7 Mon Sep 17 00:00:00 2001 From: Deepanshu Bhatia Date: Tue, 17 Sep 2024 16:11:55 +0530 Subject: [PATCH 06/13] Fixes for node TCs --- cfg/rh-1.6/node.yaml | 35 +++++++++++++---------------------- cmd/common_test.go | 1 + 2 files changed, 14 insertions(+), 22 deletions(-) diff --git a/cfg/rh-1.6/node.yaml b/cfg/rh-1.6/node.yaml index 62a3a396a..d9d065f32 100644 --- a/cfg/rh-1.6/node.yaml +++ b/cfg/rh-1.6/node.yaml @@ -119,7 +119,7 @@ groups: text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Automated)" audit: | NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/kubernetes/cert/ca.pem 2> /dev/null + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/kubernetes/kubelet-ca.crt 2> /dev/null use_multiple_values: true tests: test_items: @@ -135,7 +135,7 @@ groups: text: "Ensure that the client certificate authorities file ownership is set to root:root (Automated)" audit: | NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/kubernetes/cert/ca.pem 2> /dev/null + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/kubernetes/kubelet-ca.crt 2> /dev/null use_multiple_values: true tests: test_items: @@ -148,7 +148,8 @@ groups: text: "Ensure that the kubelet --config configuration file has permissions set to 600 or more restrictive (Automated)" audit: | NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /var/data/kubelet/config.json 2> /dev/null + # default setups have the file present at /var/lib/kubelet only. + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /var/lib/kubelet/config.json /var/data/kubelet/config.json 2> /dev/null use_multiple_values: true tests: test_items: @@ -164,7 +165,8 @@ groups: text: "Ensure that the kubelet configuration file ownership is set to root:root (Automated)" audit: | NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /var/data/kubelet/config.json 2> /dev/null + # default setups have the file present at /var/lib/kubelet only. + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /var/lib/kubelet/config.json /var/data/kubelet/config.json 2> /dev/null use_multiple_values: true tests: test_items: @@ -231,10 +233,7 @@ groups: use_multiple_values: true tests: test_items: - - flag: clientCAFile - compare: - op: eq - value: /etc/kubernetes/kubelet-ca.crt + - flag: '"clientCAFile": "/etc/kubernetes/kubelet-ca.crt"' remediation: | None required. Changing the clientCAFile value is unsupported. 
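+        Outside of the kube-bench pod, the same value can be read for any node
+        (illustrative; assumes cluster-admin access and jq):
+          NODE_NAME=$(oc get nodes -o jsonpath='{.items[0].metadata.name}')
+          oc get --raw "/api/v1/nodes/$NODE_NAME/proxy/configz" \
+            | jq -r '.kubeletconfig.authentication.x509.clientCAFile'
+        The expected value is /etc/kubernetes/kubelet-ca.crt.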
scored: true @@ -242,15 +241,10 @@ groups: - id: 4.2.5 text: "Verify that the read only port is not used or is set to 0 (Automated)" audit: | - oc -n openshift-kube-apiserver get cm config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments' 2> /dev/null - echo `oc -n openshift-kube-apiserver get cm kube-apiserver-pod -o yaml | grep --color read-only-port` 2> /dev/null - echo `oc -n openshift-kube-apiserver get cm config -o yaml | grep --color "read-only-port"` 2> /dev/null + oc -n openshift-kube-apiserver get cm config -o json | jq -r '.data."config.yaml"' | jq -r '.apiServerArguments."kubelet-read-only-port"[]' 2> /dev/null tests: test_items: - - flag: kubelet-read-only-port - compare: - op: has - value: "[\"0\"]" + - flag: '"0"' remediation: | In earlier versions of OpenShift 4, the read-only-port argument is not used. Follow the instructions in the documentation https://docs.openshift.com/container-platform/latest/post_installation_configuration/machine-configuration-tasks.html#create-a-kubeletconfig-crd-to-edit-kubelet-parameters_post-install-machine-configuration-tasks @@ -262,7 +256,7 @@ groups: audit: | # Should return 1 for node NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq '.kubeletconfig' 2> /dev/null + echo streamingConnectionIdleTimeout=$(oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq '.kubeletconfig.streamingConnectionIdleTimeout' 2> /dev/null) use_multiple_values: true tests: test_items: @@ -280,14 +274,11 @@ groups: audit: | # Should return 1 for node NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq '.kubeletconfig' 2> /dev/null + oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq '.kubeletconfig.makeIPTablesUtilChains' 2> /dev/null use_multiple_values: true tests: test_items: - - flag: makeIPTablesUtilChains - compare: - op: eq - value: true + - flag: "true" remediation: | None required. The makeIPTablesUtilChains argument is set to true by default. 
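+        The rendered on-disk kubelet configuration can be checked as well
+        (illustrative; /etc/kubernetes/kubelet.conf is the path used on recent
+        OCP releases and may differ elsewhere):
+          NODE_NAME=$(oc get nodes -o jsonpath='{.items[0].metadata.name}')
+          oc debug node/$NODE_NAME -- chroot /host grep -i makeIPTablesUtilChains /etc/kubernetes/kubelet.conf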
scored: false @@ -296,7 +287,7 @@ groups: text: "Ensure that the kubeAPIQPS [--event-qps] argument is set to 0 or a level which ensures appropriate event capture (Manual)" audit: | NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq '.kubeletconfig' 2> /dev/null + echo kubeAPIQPS=$(oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq '.kubeletconfig.kubeAPIQPS' 2> /dev/null) tests: test_items: - flag: kubeAPIQPS diff --git a/cmd/common_test.go b/cmd/common_test.go index 53793a000..e02e73224 100644 --- a/cmd/common_test.go +++ b/cmd/common_test.go @@ -248,6 +248,7 @@ func TestMapToCISVersion(t *testing.T) { {kubeVersion: "gke-1.2.0", succeed: true, exp: "gke-1.2.0"}, {kubeVersion: "ocp-3.10", succeed: true, exp: "rh-0.7"}, {kubeVersion: "ocp-3.11", succeed: true, exp: "rh-0.7"}, + {kubeVersion: "ocp-4.15", succeed: true, exp: "rh-1.6"}, {kubeVersion: "unknown", succeed: false, exp: "", expErr: "unable to find a matching Benchmark Version match for kubernetes version: unknown"}, } for _, c := range cases { From 77a1f3a7a0b60c5060d9afdc9b65ff8c9079bfa7 Mon Sep 17 00:00:00 2001 From: Deepanshu Bhatia Date: Wed, 18 Sep 2024 00:43:10 +0530 Subject: [PATCH 07/13] Fixes for node and etcd TCs --- cfg/rh-1.0/etcd.yaml | 52 ++++++++++++++++++++++---------------------- cfg/rh-1.6/node.yaml | 30 ++++++++++++++++--------- 2 files changed, 46 insertions(+), 36 deletions(-) diff --git a/cfg/rh-1.0/etcd.yaml b/cfg/rh-1.0/etcd.yaml index 4398d9cc1..bdce05ba0 100644 --- a/cfg/rh-1.0/etcd.yaml +++ b/cfg/rh-1.0/etcd.yaml @@ -16,11 +16,11 @@ groups: # Get the pod name in the openshift-etcd namespace POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) if [ -z "$POD_NAME" ]; then - echo "No matching file found on the current node." + echo "No matching file found on the current node." else - # Execute the stat command - oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--cert-file=[^ ]*\).*/\1/' - oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--key-file=[^ ]*\).*/\1/' + # Execute the stat command + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--cert-file=[^ ]*\).*/\1/' + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--key-file=[^ ]*\).*/\1/' fi use_multiple_values: true tests: @@ -28,7 +28,7 @@ groups: - flag: "file" compare: op: regex - value: '\/etc\/kubernetes\/static-pod-certs\/secrets\/etcd-all-serving\/etcd-serving-.*\.(?:crt|key)' + value: '\/etc\/kubernetes\/static-pod-certs\/secrets\/etcd-all-certs\/etcd-serving-.*\.(?:crt|key)' remediation: | OpenShift does not use the etcd-certfile or etcd-keyfile flags. Certificates for etcd are managed by the etcd cluster operator. @@ -42,10 +42,10 @@ groups: # Get the pod name in the openshift-etcd namespace POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) if [ -z "$POD_NAME" ]; then - echo "No matching file found on the current node." + echo "No matching file found on the current node." 
else - # Execute the stat command - oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--client-cert-auth=[^ ]*\).*/\1/' + # Execute the stat command + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--client-cert-auth=[^ ]*\).*/\1/' fi use_multiple_values: true tests: @@ -67,10 +67,10 @@ groups: # Get the pod name in the openshift-etcd namespace POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) if [ -z "$POD_NAME" ]; then - echo "No matching file found on the current node." + echo "No matching file found on the current node." else - # Execute the stat command - oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | grep -- --auto-tls=true 2>/dev/null ; echo exit_code=$? + # Execute the stat command + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | grep -- --auto-tls=true 2>/dev/null ; echo exit_code=$? fi use_multiple_values: true tests: @@ -91,11 +91,11 @@ groups: # Get the pod name in the openshift-etcd namespace POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) if [ -z "$POD_NAME" ]; then - echo "No matching file found on the current node." + echo "No matching file found on the current node." else - # Execute the stat command - oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-cert-file=[^ ]*\).*/\1/' - oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-key-file=[^ ]*\).*/\1/' + # Execute the stat command + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-cert-file=[^ ]*\).*/\1/' + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-key-file=[^ ]*\).*/\1/' fi use_multiple_values: true tests: @@ -103,7 +103,7 @@ groups: - flag: "file" compare: op: regex - value: '\/etc\/kubernetes\/static-pod-certs\/secrets\/etcd-all-peer\/etcd-peer-.*\.(?:crt|key)' + value: '\/etc\/kubernetes\/static-pod-certs\/secrets\/etcd-all-certs\/etcd-peer-.*\.(?:crt|key)' remediation: | None. This configuration is managed by the etcd operator. scored: false @@ -116,10 +116,10 @@ groups: # Get the pod name in the openshift-etcd namespace POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) if [ -z "$POD_NAME" ]; then - echo "No matching file found on the current node." + echo "No matching file found on the current node." else - # Execute the stat command - oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-client-cert-auth=[^ ]*\).*/\1/' + # Execute the stat command + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-client-cert-auth=[^ ]*\).*/\1/' fi use_multiple_values: true tests: @@ -141,10 +141,10 @@ groups: # Get the pod name in the openshift-etcd namespace POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) if [ -z "$POD_NAME" ]; then - echo "No matching file found on the current node." + echo "No matching file found on the current node." 
else - # Execute the stat command - oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | grep -- --peer-auto-tls=true 2>/dev/null ; echo exit_code=$? + # Execute the stat command + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | grep -- --peer-auto-tls=true 2>/dev/null ; echo exit_code=$? fi use_multiple_values: true tests: @@ -165,11 +165,11 @@ groups: # Get the pod name in the openshift-etcd namespace POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) if [ -z "$POD_NAME" ]; then - echo "No matching file found on the current node." + echo "No matching file found on the current node." else - # Execute the stat command - oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--trusted-ca-file=[^ ]*\).*/\1/' - oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-trusted-ca-file=[^ ]*\).*/\1/' + # Execute the stat command + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--trusted-ca-file=[^ ]*\).*/\1/' + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-trusted-ca-file=[^ ]*\).*/\1/' fi use_multiple_values: true tests: diff --git a/cfg/rh-1.6/node.yaml b/cfg/rh-1.6/node.yaml index d9d065f32..622905395 100644 --- a/cfg/rh-1.6/node.yaml +++ b/cfg/rh-1.6/node.yaml @@ -148,8 +148,13 @@ groups: text: "Ensure that the kubelet --config configuration file has permissions set to 600 or more restrictive (Automated)" audit: | NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - # default setups have the file present at /var/lib/kubelet only. - oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /var/lib/kubelet/config.json /var/data/kubelet/config.json 2> /dev/null + # default setups have the file present at /var/lib/kubelet only. Custom setup is present at /var/data/kubelet/config.json. + oc debug node/$NODE_NAME -- /bin/sh -c ' + if [ -f /var/data/kubelet/config.json ]; then + chroot /host stat -c "$NODE_NAME %n permissions=%a" /var/data/kubelet/config.json; + else + chroot /host stat -c "$NODE_NAME %n permissions=%a" /var/lib/kubelet/config.json; + fi' 2> /dev/null use_multiple_values: true tests: test_items: @@ -165,8 +170,13 @@ groups: text: "Ensure that the kubelet configuration file ownership is set to root:root (Automated)" audit: | NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - # default setups have the file present at /var/lib/kubelet only. - oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /var/lib/kubelet/config.json /var/data/kubelet/config.json 2> /dev/null + # default setups have the file present at /var/lib/kubelet only. Custom setup is present at /var/data/kubelet/config.json. 
+ oc debug node/$NODE_NAME -- /bin/sh -c ' + if [ -f /var/data/kubelet/config.json ]; then + chroot /host stat -c "$NODE_NAME %n %U:%G" /var/data/kubelet/config.json; + else + chroot /host stat -c "$NODE_NAME %n %U:%G" /var/lib/kubelet/config.json; + fi' 2> /dev/null use_multiple_values: true tests: test_items: @@ -229,11 +239,11 @@ groups: text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" audit: | NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq '.kubeletconfig.authentication.x509' 2> /dev/null + oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq -r '.kubeletconfig.authentication.x509.clientCAFile' 2> /dev/null use_multiple_values: true tests: test_items: - - flag: '"clientCAFile": "/etc/kubernetes/kubelet-ca.crt"' + - flag: '/etc/kubernetes/kubelet-ca.crt' remediation: | None required. Changing the clientCAFile value is unsupported. scored: true @@ -244,7 +254,7 @@ groups: oc -n openshift-kube-apiserver get cm config -o json | jq -r '.data."config.yaml"' | jq -r '.apiServerArguments."kubelet-read-only-port"[]' 2> /dev/null tests: test_items: - - flag: '"0"' + - flag: '0' remediation: | In earlier versions of OpenShift 4, the read-only-port argument is not used. Follow the instructions in the documentation https://docs.openshift.com/container-platform/latest/post_installation_configuration/machine-configuration-tasks.html#create-a-kubeletconfig-crd-to-edit-kubelet-parameters_post-install-machine-configuration-tasks @@ -319,7 +329,7 @@ groups: text: "Ensure that the --rotate-certificates argument is not set to false (Manual)" audit: | NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq -r '.kubeletconfig' 2> /dev/null + echo rotateCertificates=$(oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq -r '.kubeletconfig.rotateCertificates' 2> /dev/null) use_multiple_values: true tests: test_items: @@ -336,9 +346,9 @@ groups: audit: | #Verify the rotateKubeletServerCertificate feature gate is on NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') - oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq '.kubeletconfig.featureGates' 2> /dev/null + echo RotateKubeletServerCertificate=$(oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq '.kubeletconfig.featureGates.RotateKubeletServerCertificate' 2> /dev/null) # Verify the rotateCertificates argument is set to true - oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq -r '.kubeletconfig' 2> /dev/null + echo rotateCertificates=$(oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq -r '.kubeletconfig.rotateCertificates' 2> /dev/null) use_multiple_values: true tests: bin_op: or From 3bce11707153761e9bac2d9e2e04dc2e64c38c27 Mon Sep 17 00:00:00 2001 From: Deepanshu Bhatia Date: Wed, 18 Sep 2024 00:50:34 +0530 Subject: [PATCH 08/13] Revert incorrect changes done in rh-1.0 etcd TCs --- cfg/rh-1.0/etcd.yaml | 52 ++++++++++++++++++++++---------------------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/cfg/rh-1.0/etcd.yaml b/cfg/rh-1.0/etcd.yaml index bdce05ba0..4398d9cc1 100644 --- a/cfg/rh-1.0/etcd.yaml +++ b/cfg/rh-1.0/etcd.yaml @@ -16,11 +16,11 @@ groups: # Get the pod name in the openshift-etcd namespace POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) if [ -z "$POD_NAME" ]; then - echo "No matching file found on the 
current node." + echo "No matching file found on the current node." else - # Execute the stat command - oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--cert-file=[^ ]*\).*/\1/' - oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--key-file=[^ ]*\).*/\1/' + # Execute the stat command + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--cert-file=[^ ]*\).*/\1/' + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--key-file=[^ ]*\).*/\1/' fi use_multiple_values: true tests: @@ -28,7 +28,7 @@ groups: - flag: "file" compare: op: regex - value: '\/etc\/kubernetes\/static-pod-certs\/secrets\/etcd-all-certs\/etcd-serving-.*\.(?:crt|key)' + value: '\/etc\/kubernetes\/static-pod-certs\/secrets\/etcd-all-serving\/etcd-serving-.*\.(?:crt|key)' remediation: | OpenShift does not use the etcd-certfile or etcd-keyfile flags. Certificates for etcd are managed by the etcd cluster operator. @@ -42,10 +42,10 @@ groups: # Get the pod name in the openshift-etcd namespace POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) if [ -z "$POD_NAME" ]; then - echo "No matching file found on the current node." + echo "No matching file found on the current node." else - # Execute the stat command - oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--client-cert-auth=[^ ]*\).*/\1/' + # Execute the stat command + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--client-cert-auth=[^ ]*\).*/\1/' fi use_multiple_values: true tests: @@ -67,10 +67,10 @@ groups: # Get the pod name in the openshift-etcd namespace POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) if [ -z "$POD_NAME" ]; then - echo "No matching file found on the current node." + echo "No matching file found on the current node." else - # Execute the stat command - oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | grep -- --auto-tls=true 2>/dev/null ; echo exit_code=$? + # Execute the stat command + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | grep -- --auto-tls=true 2>/dev/null ; echo exit_code=$? fi use_multiple_values: true tests: @@ -91,11 +91,11 @@ groups: # Get the pod name in the openshift-etcd namespace POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) if [ -z "$POD_NAME" ]; then - echo "No matching file found on the current node." + echo "No matching file found on the current node." 
@@ -91,11 +91,11 @@ groups:
           # Get the pod name in the openshift-etcd namespace
           POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
           if [ -z "$POD_NAME" ]; then
-          echo "No matching file found on the current node."
+            echo "No matching file found on the current node."
           else
-          # Execute the stat command
-          oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-cert-file=[^ ]*\).*/\1/'
-          oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-key-file=[^ ]*\).*/\1/'
+            # Execute the stat command
+            oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-cert-file=[^ ]*\).*/\1/'
+            oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-key-file=[^ ]*\).*/\1/'
           fi
         use_multiple_values: true
         tests:
@@ -103,7 +103,7 @@ groups:
           - flag: "file"
             compare:
               op: regex
-              value: '\/etc\/kubernetes\/static-pod-certs\/secrets\/etcd-all-certs\/etcd-peer-.*\.(?:crt|key)'
+              value: '\/etc\/kubernetes\/static-pod-certs\/secrets\/etcd-all-peer\/etcd-peer-.*\.(?:crt|key)'
       remediation: |
         None. This configuration is managed by the etcd operator.
       scored: false
@@ -116,10 +116,10 @@ groups:
           # Get the pod name in the openshift-etcd namespace
           POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
           if [ -z "$POD_NAME" ]; then
-          echo "No matching file found on the current node."
+            echo "No matching file found on the current node."
           else
-          # Execute the stat command
-          oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-client-cert-auth=[^ ]*\).*/\1/'
+            # Execute the stat command
+            oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-client-cert-auth=[^ ]*\).*/\1/'
           fi
         use_multiple_values: true
         tests:
@@ -141,10 +141,10 @@ groups:
           # Get the pod name in the openshift-etcd namespace
           POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
           if [ -z "$POD_NAME" ]; then
-          echo "No matching file found on the current node."
+            echo "No matching file found on the current node."
           else
-          # Execute the stat command
-          oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | grep -- --peer-auto-tls=true 2>/dev/null ; echo exit_code=$?
+            # Execute the stat command
+            oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | grep -- --peer-auto-tls=true 2>/dev/null ; echo exit_code=$?
           fi
         use_multiple_values: true
         tests:
@@ -165,11 +165,11 @@ groups:
           # Get the pod name in the openshift-etcd namespace
           POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
           if [ -z "$POD_NAME" ]; then
-          echo "No matching file found on the current node."
+            echo "No matching file found on the current node."
           else
-          # Execute the stat command
-          oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--trusted-ca-file=[^ ]*\).*/\1/'
-          oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-trusted-ca-file=[^ ]*\).*/\1/'
+            # Execute the stat command
+            oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--trusted-ca-file=[^ ]*\).*/\1/'
+            oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-trusted-ca-file=[^ ]*\).*/\1/'
           fi
         use_multiple_values: true
         tests:
From 1ae58e3e97bcc6d2a3a440ff3fed793a5b2aee04 Mon Sep 17 00:00:00 2001
From: Deepanshu Bhatia
Date: Wed, 18 Sep 2024 03:03:29 +0530
Subject: [PATCH 09/13] Add fixes in master TCs and docs

---
 cfg/rh-1.6/master.yaml | 88 +++++++++++++++++------------------------
 docs/platforms.md      |  4 +-
 docs/running.md        |  4 +-
 3 files changed, 40 insertions(+), 56 deletions(-)

diff --git a/cfg/rh-1.6/master.yaml b/cfg/rh-1.6/master.yaml
index 528c07926..b025a5dea 100644
--- a/cfg/rh-1.6/master.yaml
+++ b/cfg/rh-1.6/master.yaml
@@ -220,20 +215,15 @@ groups:
           # For CNI multus
           # Get the pod name in the openshift-multus namespace
           POD_NAME=$(oc get pods -n openshift-multus -l app=multus --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
-
-          if [ -z "$POD_NAME" ]; then
-            echo "No matching pods found on the current node."
-          else
+          if [ -n "$POD_NAME" ]; then
             # Execute the stat command
-            oc exec -n openshift-multus "$POD_NAME" -- /bin/bash -c "stat -c \"$i %n permissions=%a\" /host/etc/cni/net.d/*.conf"; 2>/dev/null
-            oc exec -n openshift-multus "$POD_NAME" -- /bin/bash -c "stat -c \"$i %n permissions=%a\" /host/var/run/multus/cni/net.d/*.conf"; 2>/dev/null
+            oc exec -n openshift-multus "$POD_NAME" -- /bin/bash -c "stat -c \"$i %n permissions=%a\" /host/etc/cni/net.d/*.conf" 2>/dev/null
+            oc exec -n openshift-multus "$POD_NAME" -- /bin/bash -c "stat -c \"$i %n permissions=%a\" /host/var/run/multus/cni/net.d/*.conf" 2>/dev/null
           fi
+
           # For SDN pods
           POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
-
-          if [ -z "$POD_NAME" ]; then
-            echo "No matching pods found on the current node."
-          else
+          if [ -n "$POD_NAME" ]; then
             # Execute the stat command
             oc exec -n openshift-sdn "$POD_NAME" -- find /var/lib/cni/networks/openshift-sdn -type f -exec stat -c "$i %n permissions=%a" {} \; 2>/dev/null
             oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openshift-sdn -type f -exec stat -c "$i %n permissions=%a" {} \; 2>/dev/null
           fi
 
           # For OVS pods
           POD_NAME=$(oc get pods -n openshift-sdn -l app=ovs --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
-
-          if [ -z "$POD_NAME" ]; then
-            echo "No matching pods found on the current node."
-          else
+          if [ -n "$POD_NAME" ]; then
             # Execute the stat command
             oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openvswitch -type f -exec stat -c "$i %n permissions=%a" {} \; 2>/dev/null
             oc exec -n openshift-sdn "$POD_NAME" -- find /etc/openvswitch -type f -exec stat -c "$i %n permissions=%a" {} \; 2>/dev/null
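For reviewers unfamiliar with the stat format strings used throughout these audits, here is a local illustration; any file works, and both the path and the reported owner below are stand-ins, not values from the patch:

    $ touch /tmp/example.conf && chmod 600 /tmp/example.conf
    $ stat -c "%n permissions=%a" /tmp/example.conf   # octal mode, as checked by 1.1.9
    /tmp/example.conf permissions=600
    $ stat -c "%n %U:%G" /tmp/example.conf            # owner:group, as checked by 1.1.10
    /tmp/example.conf root:root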
@@ -266,33 +258,27 @@ groups:
         audit: |
           # Get the node name where the pod is running
           NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}')
+
           # For CNI multus
           # Get the pod name in the openshift-multus namespace
           POD_NAME=$(oc get pods -n openshift-multus -l app=multus --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
-
-          if [ -z "$POD_NAME" ]; then
-            echo "No matching pods found on the current node."
-          else
+          if [ -n "$POD_NAME" ]; then
             # Execute the stat command
             oc exec -n openshift-multus "$POD_NAME" -- /bin/bash -c "stat -c '$i %n %U:%G' /host/etc/cni/net.d/*.conf" 2>/dev/null
             oc exec -n openshift-multus "$POD_NAME" -- /bin/bash -c "stat -c '$i %n %U:%G' /host/var/run/multus/cni/net.d/*.conf" 2>/dev/null
           fi
+
           # For SDN pods
           POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
-
-          if [ -z "$POD_NAME" ]; then
-            echo "No matching pods found on the current node."
-          else
+          if [ -n "$POD_NAME" ]; then
             # Execute the stat command
             oc exec -n openshift-sdn "$POD_NAME" -- find /var/lib/cni/networks/openshift-sdn -type f -exec stat -c "$i %n %U:%G" {} \; 2>/dev/null
             oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openshift-sdn -type f -exec stat -c "$i %n %U:%G" {} \; 2>/dev/null
           fi
+
           # For OVS pods in 4.5
           POD_NAME=$(oc get pods -n openshift-sdn -l app=ovs --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
-
-          if [ -z "$POD_NAME" ]; then
-            echo "No matching pods found on the current node."
-          else
+          if [ -n "$POD_NAME" ]; then
             # Execute the stat command
             oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openvswitch -type f -exec stat -c "$i %n %U:%G" {} \; 2>/dev/null
             oc exec -n openshift-sdn "$POD_NAME" -- find /etc/openvswitch -type f -exec stat -c "$i %n %U:%G" {} \; 2>/dev/null
@@ -613,9 +599,9 @@ groups:
         text: "Ensure that the --token-auth-file parameter is not set (Manual)"
         audit: |
           # Verify that the token-auth-file flag is not present
-          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments'
-          oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments'
-          oc get kubeapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig.apiServerArguments'
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' | grep --color "token-auth-file"
+          oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' | grep --color "token-auth-file"
+          oc get kubeapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig.apiServerArguments' | grep --color "token-auth-file"
           #Verify that the authentication operator is running
           oc get clusteroperator authentication | awk '$3 != "AVAILABLE" { if ($3){print "available=true"}else{print "available=false"} }'
         tests:
@@ -639,8 +625,8 @@ groups:
         tests:
           bin_op: and
           test_items:
-            - flag: "/etc/kubernetes/static-pod-resources/secrets/kubelet-client/tls.crt"
-            - flag: "/etc/kubernetes/static-pod-resources/secrets/kubelet-client/tls.key"
+            - flag: "/etc/kubernetes/static-pod-resources/kube-apiserver-certs/secrets/kubelet-client/tls.crt"
+            - flag: "/etc/kubernetes/static-pod-resources/kube-apiserver-certs/secrets/kubelet-client/tls.key"
         remediation: |
           No remediation is required.
           OpenShift platform components use X.509 certificates for authentication.
@@ -655,8 +641,8 @@ groups:
         tests:
           bin_op: and
           test_items:
-            - flag: "/etc/kubernetes/static-pod-resources/secrets/kubelet-client/tls.crt"
-            - flag: "/etc/kubernetes/static-pod-resources/secrets/kubelet-client/tls.key"
+            - flag: "/etc/kubernetes/static-pod-certs/secrets/kubelet-client/tls.crt"
+            - flag: "/etc/kubernetes/static-pod-certs/secrets/kubelet-client/tls.key"
         remediation: |
           No remediation is required.
           OpenShift platform components use X.509 certificates for authentication.
@@ -681,13 +667,11 @@ groups:
       - id: 1.2.7
         text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Manual)"
         audit: |
-          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments'
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments."authorization-mode"[]'
         tests:
           test_items:
-            - path: "{.authorization-mode}"
-              compare:
-                op: nothave
-                value: "AlwaysAllow"
+            - flag: "AlwaysAllow"
+              set: false
         remediation: |
           None. RBAC is always on and the OpenShift API server does not use the values assigned to the flag authorization-mode.
         scored: false
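For reference, the sharpened jq filter now emits one authorization mode per line, which is what the flag tests match against. On a typical OCP 4 cluster the output looks roughly like the following — illustrative only; the exact mode list can vary by version and configuration:

    $ oc get configmap config -n openshift-kube-apiserver -ojson \
        | jq -r '.data["config.yaml"]' \
        | jq -r '.apiServerArguments."authorization-mode"[]'
    Scope
    SystemMasters
    RBAC
    Node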
@@ -695,13 +679,10 @@ groups:
       - id: 1.2.8
         text: "Verify that RBAC is enabled (Manual)"
         audit: |
-          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments'
+          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments."authorization-mode"[]'
         tests:
           test_items:
-            - path: "{.authorization-mode}"
-              compare:
-                op: has
-                value: "RBAC"
+            - flag: "RBAC"
         remediation: |
           None. It is not possible to disable RBAC.
         scored: false
@@ -824,17 +805,20 @@ groups:
       - id: 1.2.18
         text: "Ensure that the --secure-port argument is not set to 0 (Manual)"
         audit: |
-          oc get kubeapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig.servingInfo.bindAddress'
+          echo bindAddress=$(oc get kubeapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig.servingInfo.bindAddress')
           # Should return only 6443
-          echo ports=`oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver -o jsonpath='{.items[*].spec.containers[?(@.name=="kube-apiserver")].ports[*].containerPort}'`
+          echo ports=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver -o jsonpath='{.items[*].spec.containers[?(@.name=="kube-apiserver")].ports[*].containerPort}')
         tests:
           bin_op: and
           test_items:
-            - flag: '"bindAddress": "0.0.0.0:6443"'
+            - flag: 'bindAddress'
+              compare:
+                op: eq
+                value: '"0.0.0.0:6443"'
             - flag: "ports"
               compare:
-                op: regex
-                value: '\s*(?:6443\s*){1,}$'
+                op: eq
+                value: '6443'
         remediation: |
           None required. By default, the openshift-kube-apiserver is served over HTTPS with authentication and authorization;
           the secure API endpoint is bound to 0.0.0.0:6443.
@@ -884,15 +868,15 @@ groups:
           oc rsh -n openshift-apiserver $POD ls /var/log/openshift-apiserver/audit.log 2>/dev/null
           # Should return 0
           echo apiserver_exit_code=$?
-        use_multiple_values: true
         tests:
-          bin_op: or
+          bin_op: and
           test_items:
             - flag: "/var/log/kube-apiserver/audit.log"
-            - flag: "/var/log/openshift-apiserver/audit.log"
+            - flag: "/var/log/kube-apiserver/audit.log" # Needed because the path is printed a second time in the ls output.
             - flag: "kube_apiserver_exit_code=0"
+            - flag: "/var/log/openshift-apiserver/audit.log"
+            - flag: "/var/log/openshift-apiserver/audit.log" # Needed because the path is printed a second time in the ls output.
             - flag: "apiserver_exit_code=0"
-            - flag: "null"
         remediation: |
           None required. This is managed by the cluster apiserver operator. By default, auditing is enabled.
         scored: false

diff --git a/docs/platforms.md b/docs/platforms.md
index be85f3978..543ed3f7d 100644
--- a/docs/platforms.md
+++ b/docs/platforms.md
@@ -27,8 +27,8 @@ Some defined by other hardening guides.
 | CIS | [ACK 1.0.0](https://workbench.cisecurity.org/benchmarks/6467) | ack-1.0 | ACK |
 | CIS | [AKS 1.0.0](https://workbench.cisecurity.org/benchmarks/6347) | aks-1.0 | AKS |
 | RHEL | RedHat OpenShift hardening guide | rh-0.7 | OCP 3.10-3.11 |
-| CIS | [OCP4 1.1.0](https://workbench.cisecurity.org/benchmarks/6778) | rh-1.0 | OCP 4.1-15 |
-| CIS | [OCP4 1.6.0](https://workbench.cisecurity.org/benchmarks/16094) | rh-1.6 | OCP 4.16- |
+| CIS | [OCP4 1.1.0](https://workbench.cisecurity.org/benchmarks/6778) | rh-1.0 | OCP 4.1-4.14 |
+| CIS | [OCP4 1.6.0](https://workbench.cisecurity.org/benchmarks/16094) | rh-1.6 | OCP 4.15- |
 | CIS | [1.6.0-k3s](https://docs.rancher.cn/docs/k3s/security/self-assessment/_index) | cis-1.6-k3s | k3s v1.16-v1.24 |
 | DISA | [Kubernetes Ver 1, Rel 6](https://dl.dod.cyber.mil/wp-content/uploads/stigs/zip/U_Kubernetes_V1R6_STIG.zip) | eks-stig-kubernetes-v1r6 | EKS |
 | CIS | [TKGI 1.2.53](https://network.pivotal.io/products/p-compliance-scanner#/releases/1248397) | tkgi-1.2.53 | vmware |

diff --git a/docs/running.md b/docs/running.md
index e792fadbe..102ea1fd4 100644
--- a/docs/running.md
+++ b/docs/running.md
@@ -134,8 +134,8 @@ docker push <AWS_ACCT_NUMBER>.dkr.ecr.<AWS_REGION>.amazonaws.com/k8s/kube-bench:
 | OpenShift Hardening Guide | kube-bench config |
 |---------------------------|-------------------|
 | ocp-3.10 +                | rh-0.7            |
-| ocp-4.1-4.15              | rh-1.0            |
-| ocp-4.16 +                | rh-1.6            |
+| ocp-4.1-4.14              | rh-1.0            |
+| ocp-4.15 +                | rh-1.6            |
 
 kube-bench includes a set of test files for Red Hat's OpenShift hardening guide for OCP 3.10 and 4.1.
 To run this you will need to specify `--benchmark rh-0.7`, or `--version ocp-3.10`, or `--version ocp-4.5`, or `--benchmark rh-1.0`
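With the tables above updated, either invocation below should resolve to the new rh-1.6 config on an OCP 4.15+ cluster. This is a usage sketch consistent with the flags the docs already describe, not an output transcript:

    kube-bench --benchmark rh-1.6
    # or, equivalently, via the version mapping:
    kube-bench --version ocp-4.15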
From ff650d04fd3aa29581b48f5457c5b4d9240c6859 Mon Sep 17 00:00:00 2001
From: Deepanshu Bhatia
Date: Wed, 18 Sep 2024 23:00:53 +0530
Subject: [PATCH 10/13] Add fixes in node and etcd TCs

---
 cfg/rh-1.6/etcd.yaml | 6 ++++--
 cfg/rh-1.6/node.yaml | 7 +++----
 2 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/cfg/rh-1.6/etcd.yaml b/cfg/rh-1.6/etcd.yaml
index 674e21fad..4b589ff54 100644
--- a/cfg/rh-1.6/etcd.yaml
+++ b/cfg/rh-1.6/etcd.yaml
@@ -28,7 +28,8 @@ groups:
           - flag: "file"
             compare:
               op: regex
-              value: '\/etc\/kubernetes\/static-pod-certs\/secrets\/etcd-all-serving\/etcd-serving-.*\.(?:crt|key)'
+              # Some systems have certs in the directory '/etc/kubernetes/static-pod-certs/secrets/etcd-all-certs'
+              value: '\/etc\/kubernetes\/static-pod-certs\/secrets\/etcd-all-(?:serving|certs)\/etcd-serving-.*\.(?:crt|key)'
       remediation: |
         OpenShift does not use the etcd-certfile or etcd-keyfile flags.
         Certificates for etcd are managed by the etcd cluster operator.
@@ -103,7 +104,8 @@ groups:
           - flag: "file"
             compare:
               op: regex
-              value: '\/etc\/kubernetes\/static-pod-certs\/secrets\/etcd-all-peer\/etcd-peer-.*\.(?:crt|key)'
+              # Some systems have certs in the directory '/etc/kubernetes/static-pod-certs/secrets/etcd-all-certs'
+              value: '\/etc\/kubernetes\/static-pod-certs\/secrets\/etcd-all-(?:peer|certs)\/etcd-peer-.*\.(?:crt|key)'
       remediation: |
         None. This configuration is managed by the etcd operator.
       scored: false
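Since kube-bench evaluates `op: regex` values with Go's regexp package, the widened patterns above can be sanity-checked in isolation. The snippet below is a review aid written for this note — the sample paths are illustrative, and none of it is part of the patch:

    package main

    import (
        "fmt"
        "regexp"
    )

    func main() {
        // The widened serving-certificate pattern from the hunk above.
        re := regexp.MustCompile(`\/etc\/kubernetes\/static-pod-certs\/secrets\/etcd-all-(?:serving|certs)\/etcd-serving-.*\.(?:crt|key)`)
        // Both directory layouts should now match.
        for _, path := range []string{
            "--cert-file=/etc/kubernetes/static-pod-certs/secrets/etcd-all-serving/etcd-serving-master-0.crt",
            "--cert-file=/etc/kubernetes/static-pod-certs/secrets/etcd-all-certs/etcd-serving-master-0.crt",
        } {
            fmt.Println(path, "->", re.MatchString(path))
        }
    }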
diff --git a/cfg/rh-1.6/node.yaml b/cfg/rh-1.6/node.yaml
index 622905395..f7ac9ca6c 100644
--- a/cfg/rh-1.6/node.yaml
+++ b/cfg/rh-1.6/node.yaml
@@ -349,15 +349,14 @@ groups:
           echo RotateKubeletServerCertificate=$(oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq '.kubeletconfig.featureGates.RotateKubeletServerCertificate' 2> /dev/null)
           # Verify the rotateCertificates argument is set to true
           echo rotateCertificates=$(oc get --raw /api/v1/nodes/$NODE_NAME/proxy/configz | jq -r '.kubeletconfig.rotateCertificates' 2> /dev/null)
-        use_multiple_values: true
         tests:
-          bin_op: or
+          bin_op: and
           test_items:
-            - flag: rotateCertificates
+            - flag: RotateKubeletServerCertificate
               compare:
                 op: eq
                 value: true
-            - flag: RotateKubeletServerCertificate
+            - flag: rotateCertificates
               compare:
                 op: eq
                 value: true

From 50a8736c9c1897f4c7cfbb5a6bc351bda4c94e11 Mon Sep 17 00:00:00 2001
From: Deepanshu Bhatia
Date: Wed, 18 Sep 2024 23:26:07 +0530
Subject: [PATCH 11/13] Fix test

---
 cmd/util_test.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cmd/util_test.go b/cmd/util_test.go
index 92ddeb879..0d0477b44 100644
--- a/cmd/util_test.go
+++ b/cmd/util_test.go
@@ -758,7 +758,7 @@ func Test_getOcpValidVersion(t *testing.T) {
 		{openShiftVersion: "4.1", succeed: true, exp: "4.1"},
 		{openShiftVersion: "4.5", succeed: true, exp: "4.1"},
 		{openShiftVersion: "4.6", succeed: true, exp: "4.1"},
-		{openShiftVersion: "4.16", succeed: true, exp: "4.16"},
+		{openShiftVersion: "4.16", succeed: true, exp: "4.15"},
 		{openShiftVersion: "invalid", succeed: false, exp: ""},
 	}
 	for _, c := range cases {
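The expectation change above (4.16 now resolves to 4.15) is easier to review with the fallback behaviour in mind: unknown minor versions walk downward to the nearest version that has a benchmark config. The self-contained Go sketch below mirrors what the test cases assert; the function name and the `supported` set are invented for this note and are not the actual kube-bench implementation:

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    // Versions assumed to ship a dedicated OCP benchmark config for this sketch;
    // the real mapping lives in cfg/config.yaml's version_mapping.
    var supported = map[string]bool{"3.10": true, "3.11": true, "4.1": true, "4.15": true}

    // fallbackOcpVersion walks minor versions downward until it finds a supported
    // one, matching the test expectations (4.16 -> 4.15, 4.5 -> 4.1, 4.6 -> 4.1).
    func fallbackOcpVersion(ver string) (string, error) {
        parts := strings.Split(ver, ".")
        if len(parts) != 2 {
            return "", fmt.Errorf("invalid OCP version %q", ver)
        }
        minor, err := strconv.Atoi(parts[1])
        if err != nil {
            return "", fmt.Errorf("invalid OCP version %q", ver)
        }
        for ; minor >= 0; minor-- {
            candidate := fmt.Sprintf("%s.%d", parts[0], minor)
            if supported[candidate] {
                return candidate, nil
            }
        }
        return "", fmt.Errorf("no supported benchmark at or below %q", ver)
    }

    func main() {
        for _, v := range []string{"4.16", "4.15", "4.5", "invalid"} {
            got, err := fallbackOcpVersion(v)
            fmt.Printf("%s -> %q (err: %v)\n", v, got, err)
        }
    }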
From 4984b11428a0eabbda77dae7e684bafcfec4ac99 Mon Sep 17 00:00:00 2001
From: Deepanshu Bhatia
Date: Thu, 19 Sep 2024 13:23:08 +0530
Subject: [PATCH 12/13] Add job template for OCP

---
 job-ocp.yaml | 106 +++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 106 insertions(+)
 create mode 100644 job-ocp.yaml

diff --git a/job-ocp.yaml b/job-ocp.yaml
new file mode 100644
index 000000000..cd05ecccc
--- /dev/null
+++ b/job-ocp.yaml
@@ -0,0 +1,106 @@
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: kube-bench
+spec:
+  template:
+    metadata:
+      labels:
+        app: kube-bench
+    spec:
+      serviceAccountName: kube-bench
+      automountServiceAccountToken: true
+      containers:
+        - command: ["kube-bench"]
+          image: docker.io/aquasec/kube-bench:latest
+          name: kube-bench
+          volumeMounts:
+            - name: var-lib-cni
+              mountPath: /var/lib/cni
+              readOnly: true
+            - mountPath: /var/lib/etcd
+              name: var-lib-etcd
+              readOnly: true
+            - mountPath: /var/lib/kubelet
+              name: var-lib-kubelet
+              readOnly: true
+            - mountPath: /var/lib/kube-scheduler
+              name: var-lib-kube-scheduler
+              readOnly: true
+            - mountPath: /var/lib/kube-controller-manager
+              name: var-lib-kube-controller-manager
+              readOnly: true
+            - mountPath: /etc/systemd
+              name: etc-systemd
+              readOnly: true
+            - mountPath: /lib/systemd/
+              name: lib-systemd
+              readOnly: true
+            - mountPath: /srv/kubernetes/
+              name: srv-kubernetes
+              readOnly: true
+            - mountPath: /etc/kubernetes
+              name: etc-kubernetes
+              readOnly: true
+            - mountPath: /usr/local/mount-from-host/bin
+              name: usr-bin
+              readOnly: true
+            - mountPath: /etc/cni/net.d/
+              name: etc-cni-netd
+              readOnly: true
+            - mountPath: /opt/cni/bin/
+              name: opt-cni-bin
+              readOnly: true
+            - name: etc-passwd
+              mountPath: /etc/passwd
+              readOnly: true
+            - name: etc-group
+              mountPath: /etc/group
+              readOnly: true
+
+      hostPID: true
+      restartPolicy: Never
+      volumes:
+        - name: var-lib-cni
+          hostPath:
+            path: /var/lib/cni
+        - hostPath:
+            path: /var/lib/etcd
+          name: var-lib-etcd
+        - hostPath:
+            path: /var/lib/kubelet
+          name: var-lib-kubelet
+        - hostPath:
+            path: /var/lib/kube-scheduler
+          name: var-lib-kube-scheduler
+        - hostPath:
+            path: /var/lib/kube-controller-manager
+          name: var-lib-kube-controller-manager
+        - hostPath:
+            path: /etc/systemd
+          name: etc-systemd
+        - hostPath:
+            path: /lib/systemd
+          name: lib-systemd
+        - hostPath:
+            path: /srv/kubernetes
+          name: srv-kubernetes
+        - hostPath:
+            path: /etc/kubernetes
+          name: etc-kubernetes
+        - hostPath:
+            path: /usr/bin
+          name: usr-bin
+        - hostPath:
+            path: /etc/cni/net.d/
+          name: etc-cni-netd
+        - hostPath:
+            path: /opt/cni/bin/
+          name: opt-cni-bin
+        - hostPath:
+            path: "/etc/passwd"
+          name: etc-passwd
+        - hostPath:
+            path: "/etc/group"
+          name: etc-group

From c8d2de508ae4401bc1881b3ecd09e249f1926822 Mon Sep 17 00:00:00 2001
From: Deepanshu Bhatia
Date: Mon, 14 Oct 2024 23:18:32 +0530
Subject: [PATCH 13/13] Add accidentally removed gke entry

---
 docs/architecture.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/docs/architecture.md b/docs/architecture.md
index 3ed01527e..9d4684a5b 100644
--- a/docs/architecture.md
+++ b/docs/architecture.md
@@ -25,6 +25,7 @@ The following table shows the valid targets based on the CIS Benchmark version.
 | cis-1.9 | master, controlplane, node, etcd, policies |
 | gke-1.0 | master, controlplane, node, etcd, policies, managedservices |
 | gke-1.2.0 | controlplane, node, policies, managedservices |
+| gke-1.6.0 | controlplane, node, policies, managedservices |
 | eks-1.0.1 | controlplane, node, policies, managedservices |
 | eks-1.1.0 | controlplane, node, policies, managedservices |
 | eks-1.2.0 | controlplane, node, policies, managedservices |
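Finally, a plausible way to exercise the job template added in PATCH 12/13 on an OpenShift cluster. This assumes the target namespace and a `kube-bench` ServiceAccount with the required RBAC already exist, since neither is created by the template itself:

    oc apply -f job-ocp.yaml
    oc wait --for=condition=complete job/kube-bench --timeout=300s
    oc logs job/kube-bench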