diff --git a/cfg/cis-1.10/policies.yaml b/cfg/cis-1.10/policies.yaml
index 10692f703..615e60d15 100644
--- a/cfg/cis-1.10/policies.yaml
+++ b/cfg/cis-1.10/policies.yaml
@@ -192,7 +192,7 @@ groups:
         text: "Minimize access to the approval sub-resource of certificatesigningrequests objects (Manual)"
         type: "manual"
         remediation: |
-          Where possible, remove access to the approval sub-resource of certificatesigningrequest objects.
+          Where possible, remove access to the approval sub-resource of certificatesigningrequests objects.
         scored: false

       - id: 5.1.12
@@ -230,7 +230,24 @@ groups:

       - id: 5.2.3
         text: "Minimize the admission of containers wishing to share the host process ID namespace (Manual)"
-        type: "manual"
+        audit: |
+          kubectl get pods --all-namespaces -o custom-columns=POD_NAME:.metadata.name,POD_NAMESPACE:.metadata.namespace --no-headers | while read -r pod_name pod_namespace
+          do
+            pod_hostpid=$(kubectl get pod "${pod_name}" --namespace "${pod_namespace}" -o jsonpath='{.spec.hostPID}' 2>/dev/null)
+            if [ -z "${pod_hostpid}" ]; then
+              pod_hostpid="false"
+              echo "***pod_name: ${pod_name} pod_namespace: ${pod_namespace} is_pod_hostpid: ${pod_hostpid} is_compliant: true"
+            else
+              echo "***pod_name: ${pod_name} pod_namespace: ${pod_namespace} is_pod_hostpid: ${pod_hostpid} is_compliant: false"
+            fi
+          done
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "is_compliant"
+              compare:
+                op: eq
+                value: true
         remediation: |
           Add policies to each namespace in the cluster which has user workloads to restrict the
           admission of `hostPID` containers.
@@ -238,7 +255,24 @@ groups:

       - id: 5.2.4
         text: "Minimize the admission of containers wishing to share the host IPC namespace (Manual)"
-        type: "manual"
+        audit: |
+          kubectl get pods --all-namespaces -o custom-columns=POD_NAME:.metadata.name,POD_NAMESPACE:.metadata.namespace --no-headers | while read -r pod_name pod_namespace
+          do
+            pod_hostipc=$(kubectl get pod "${pod_name}" --namespace "${pod_namespace}" -o jsonpath='{.spec.hostIPC}' 2>/dev/null)
+            if [ -z "${pod_hostipc}" ]; then
+              pod_hostipc="false"
+              echo "***pod_name: ${pod_name} pod_namespace: ${pod_namespace} is_pod_hostipc: ${pod_hostipc} is_compliant: true"
+            else
+              echo "***pod_name: ${pod_name} pod_namespace: ${pod_namespace} is_pod_hostipc: ${pod_hostipc} is_compliant: false"
+            fi
+          done
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "is_compliant"
+              compare:
+                op: eq
+                value: true
         remediation: |
           Add policies to each namespace in the cluster which has user workloads to restrict the
           admission of `hostIPC` containers.
@@ -246,18 +280,61 @@ groups:

       - id: 5.2.5
         text: "Minimize the admission of containers wishing to share the host network namespace (Manual)"
-        type: "manual"
+        audit: |
+          kubectl get pods --all-namespaces -o custom-columns=POD_NAME:.metadata.name,POD_NAMESPACE:.metadata.namespace --no-headers | while read -r pod_name pod_namespace
+          do
+            pod_hostnetwork=$(kubectl get pod "${pod_name}" --namespace "${pod_namespace}" -o jsonpath='{.spec.hostNetwork}' 2>/dev/null)
+            if [ -z "${pod_hostnetwork}" ]; then
+              pod_hostnetwork="false"
+              echo "***pod_name: ${pod_name} pod_namespace: ${pod_namespace} is_pod_hostnetwork: ${pod_hostnetwork} is_compliant: true"
+            else
+              echo "***pod_name: ${pod_name} pod_namespace: ${pod_namespace} is_pod_hostnetwork: ${pod_hostnetwork} is_compliant: false"
+            fi
+          done
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "is_compliant"
+              compare:
+                op: eq
+                value: true
         remediation: |
           Add policies to each namespace in the cluster which has user workloads to restrict the
           admission of `hostNetwork` containers.
+          Condition: is_compliant is false if the pod's `.spec.hostNetwork` is set to `true`.
         scored: false

       - id: 5.2.6
         text: "Minimize the admission of containers with allowPrivilegeEscalation (Manual)"
-        type: "manual"
+        audit: |
+          kubectl get pods --all-namespaces -o custom-columns=POD_NAME:.metadata.name,POD_NAMESPACE:.metadata.namespace --no-headers | while read -r pod_name pod_namespace
+          do
+            kubectl get pod "${pod_name}" --namespace "${pod_namespace}" -o json | jq -c '.spec.containers[]' | while read -r container
+            do
+              # Retrieve container's name
+              container_name=$(echo ${container} | jq -r '.name')
+              # Retrieve container's .securityContext.allowPrivilegeEscalation
+              container_allowprivesc=$(echo ${container} | jq -r '.securityContext.allowPrivilegeEscalation' | sed -e 's/null/notset/g')
+              if [ "${container_allowprivesc}" = "false" ] || [ "${container_allowprivesc}" = "notset" ]; then
+                echo "***pod_name: ${pod_name} container_name: ${container_name} pod_namespace: ${pod_namespace} is_container_allowprivesc: ${container_allowprivesc} is_compliant: true"
+              else
+                echo "***pod_name: ${pod_name} container_name: ${container_name} pod_namespace: ${pod_namespace} is_container_allowprivesc: ${container_allowprivesc} is_compliant: false"
+              fi
+            done
+          done
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "is_compliant"
+              compare:
+                op: eq
+                value: true
         remediation: |
           Add policies to each namespace in the cluster which has user workloads to restrict the
           admission of containers with `.spec.allowPrivilegeEscalation` set to `true`.
+          Condition: is_compliant is false if container's `.spec.allowPrivilegeEscalation` is set to `true`.
+          If notset, privilege escalation is allowed (defaults to true). However, if PSP/PSA is used with a `restricted` profile,
+          privilege escalation is explicitly disallowed unless configured otherwise.
         scored: false

       - id: 5.2.7
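
Note for reviewers (not part of the diff above): the remediation text for checks 5.2.3 through 5.2.6 asks for per-namespace policies restricting hostPID, hostIPC, hostNetwork and privilege escalation, but does not show one. A minimal sketch, assuming Pod Security Admission is available in the target cluster, is to label each user-workload namespace with a Pod Security Standards level; the namespace name user-workloads below is illustrative, not taken from the policies file.

# Illustrative sketch only, not part of this change. The "baseline" level
# rejects pods that set hostPID, hostIPC or hostNetwork, and "restricted"
# additionally requires allowPrivilegeEscalation to be false, which is the
# state the audit scripts above report as is_compliant: true.
apiVersion: v1
kind: Namespace
metadata:
  name: user-workloads            # hypothetical namespace name
  labels:
    pod-security.kubernetes.io/enforce: restricted
    pod-security.kubernetes.io/enforce-version: latest
    pod-security.kubernetes.io/warn: restricted

Pod Security Admission only gates newly admitted pods, so pods created before the label was applied can still appear as is_compliant: false in the audit output until they are recreated.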